// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet AVB device driver
 *
 * Copyright (C) 2014-2019 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
 *
 * Based on the SuperH Ethernet driver
 */

#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/reset.h>
#include <linux/math64.h>
#include <net/ip.h>

#include "ravb.h"

#define RAVB_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	  | \
		 NETIF_MSG_TIMER  | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)

void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
		 u32 set)
{
	ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
}

int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
	int i;

	for (i = 0; i < 10000; i++) {
		if ((ravb_read(ndev, reg) & mask) == value)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}

static int ravb_set_opmode(struct net_device *ndev, u32 opmode)
{
	u32 csr_ops = 1U << (opmode & CCC_OPC);
	u32 ccc_mask = CCC_OPC;
	int error;

	/* If gPTP active in config mode is supported it needs to be configured
	 * along with CSEL and operating mode in the same access. This is a
	 * hardware limitation.
	 */
	if (opmode & CCC_GAC)
		ccc_mask |= CCC_GAC | CCC_CSEL;

	/* Set operating mode */
	ravb_modify(ndev, CCC, ccc_mask, opmode);
	/* Check if the operating mode is changed to the requested one */
	error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops);
	if (error) {
		netdev_err(ndev, "failed to switch device to requested mode (%u)\n",
			   opmode & CCC_OPC);
	}

	return error;
}

static void ravb_set_rate_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 10:		/* 10BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
		break;
	case 100:		/* 100BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
		break;
	case 1000:		/* 1000BASE */
		ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
		break;
	}
}

static void ravb_set_rate_rcar(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 100:		/* 100BASE */
		ravb_write(ndev, GECMR_SPEED_100, GECMR);
		break;
	case 1000:		/* 1000BASE */
		ravb_write(ndev, GECMR_SPEED_1000, GECMR);
		break;
	}
}

static void ravb_set_buffer_align(struct sk_buff *skb)
{
	u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, RAVB_ALIGN - reserve);
}

/* Get MAC address from the MAC address registers
 *
 * Ethernet AVB device doesn't have ROM for MAC address.
 * This function gets the MAC address that was used by a bootloader.
 */
static void ravb_read_mac_address(struct device_node *np,
				  struct net_device *ndev)
{
	int ret;

	ret = of_get_ethdev_address(np, ndev);
	if (ret) {
		u32 mahr = ravb_read(ndev, MAHR);
		u32 malr = ravb_read(ndev, MALR);
		u8 addr[ETH_ALEN];

		addr[0] = (mahr >> 24) & 0xFF;
		addr[1] = (mahr >> 16) & 0xFF;
		addr[2] = (mahr >>  8) & 0xFF;
		addr[3] = (mahr >>  0) & 0xFF;
		addr[4] = (malr >>  8) & 0xFF;
		addr[5] = (malr >>  0) & 0xFF;
		eth_hw_addr_set(ndev, addr);
	}
}

static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
}

/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}

/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}

/* Set data bit */
static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}

/* Get data bit */
static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}

/* MDIO bus control struct */
static const struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = ravb_set_mdc,
	.set_mdio_dir = ravb_set_mdio_dir,
	.set_mdio_data = ravb_set_mdio_data,
	.get_mdio_data = ravb_get_mdio_data,
};

/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &priv->stats[q];
	unsigned int num_tx_desc = priv->num_tx_desc;
	struct ravb_tx_desc *desc;
	unsigned int entry;
	int free_num = 0;
	u32 size;

	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
		bool txed;

		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
					     num_tx_desc);
		desc = &priv->tx_ring[q][entry];
		txed = desc->die_dt == DT_FEMPTY;
		if (free_txed_only && !txed)
			break;
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
		/* Free the original skb. */
		if (priv->tx_skb[q][entry / num_tx_desc]) {
			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
					 size, DMA_TO_DEVICE);
			/* Last packet descriptor? */
			if (entry % num_tx_desc == num_tx_desc - 1) {
				entry /= num_tx_desc;
				dev_kfree_skb_any(priv->tx_skb[q][entry]);
				priv->tx_skb[q][entry] = NULL;
				if (txed)
					stats->tx_packets++;
			}
			free_num++;
		}
		if (txed)
			stats->tx_bytes += size;
		desc->die_dt = DT_EEMPTY;
	}
	return free_num;
}

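/* Unmap the RX buffers and free the GbEth RX descriptor ring of queue @q */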
static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;
	unsigned int i;

	if (!priv->gbeth_rx_ring)
		return;

	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];

		if (!dma_mapping_error(ndev->dev.parent,
				       le32_to_cpu(desc->dptr)))
			dma_unmap_single(ndev->dev.parent,
					 le32_to_cpu(desc->dptr),
					 GBETH_RX_BUFF_MAX,
					 DMA_FROM_DEVICE);
	}
	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
	dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
			  priv->rx_desc_dma[q]);
	priv->gbeth_rx_ring = NULL;
}

static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;
	unsigned int i;

	if (!priv->rx_ring[q])
		return;

	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];

		if (!dma_mapping_error(ndev->dev.parent,
				       le32_to_cpu(desc->dptr)))
			dma_unmap_single(ndev->dev.parent,
					 le32_to_cpu(desc->dptr),
					 RX_BUF_SZ,
					 DMA_FROM_DEVICE);
	}
	ring_size = sizeof(struct ravb_ex_rx_desc) *
		    (priv->num_rx_ring[q] + 1);
	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
			  priv->rx_desc_dma[q]);
	priv->rx_ring[q] = NULL;
}

/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	unsigned int num_tx_desc = priv->num_tx_desc;
	unsigned int ring_size;
	unsigned int i;

	info->rx_ring_free(ndev, q);

	if (priv->tx_ring[q]) {
		ravb_tx_free(ndev, q, false);

		ring_size = sizeof(struct ravb_tx_desc) *
			    (priv->num_tx_ring[q] * num_tx_desc + 1);
		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
				  priv->tx_desc_dma[q]);
		priv->tx_ring[q] = NULL;
	}

	/* Free RX skb ringbuffer */
	if (priv->rx_skb[q]) {
		for (i = 0; i < priv->num_rx_ring[q]; i++)
			dev_kfree_skb(priv->rx_skb[q][i]);
	}
	kfree(priv->rx_skb[q]);
	priv->rx_skb[q] = NULL;

	/* Free aligned TX buffers */
	kfree(priv->tx_align[q]);
	priv->tx_align[q] = NULL;

	/* Free TX skb ringbuffer.
	 * SKBs are freed by ravb_tx_free() call above.
	 */
	kfree(priv->tx_skb[q]);
	priv->tx_skb[q] = NULL;
}

static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_rx_desc *rx_desc;
	unsigned int rx_ring_size;
	dma_addr_t dma_addr;
	unsigned int i;

	rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
	memset(priv->gbeth_rx_ring, 0, rx_ring_size);
	/* Build RX ring buffer */
	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		/* RX descriptor */
		rx_desc = &priv->gbeth_rx_ring[i];
		rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
					  GBETH_RX_BUFF_MAX,
					  DMA_FROM_DEVICE);
		/* We just set the data size to 0 for a failed mapping which
		 * should prevent DMA from happening...
		 */
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			rx_desc->ds_cc = cpu_to_le16(0);
		rx_desc->dptr = cpu_to_le32(dma_addr);
		rx_desc->die_dt = DT_FEMPTY;
	}
	rx_desc = &priv->gbeth_rx_ring[i];
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
	rx_desc->die_dt = DT_LINKFIX; /* type */
}

static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_ex_rx_desc *rx_desc;
	unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
	dma_addr_t dma_addr;
	unsigned int i;

	memset(priv->rx_ring[q], 0, rx_ring_size);
	/* Build RX ring buffer */
	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		/* RX descriptor */
		rx_desc = &priv->rx_ring[q][i];
		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
					  RX_BUF_SZ,
					  DMA_FROM_DEVICE);
		/* We just set the data size to 0 for a failed mapping which
		 * should prevent DMA from happening...
		 */
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			rx_desc->ds_cc = cpu_to_le16(0);
		rx_desc->dptr = cpu_to_le32(dma_addr);
		rx_desc->die_dt = DT_FEMPTY;
	}
	rx_desc = &priv->rx_ring[q][i];
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
	rx_desc->die_dt = DT_LINKFIX; /* type */
}

/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	unsigned int num_tx_desc = priv->num_tx_desc;
	struct ravb_tx_desc *tx_desc;
	struct ravb_desc *desc;
	unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
				    num_tx_desc;
	unsigned int i;

	priv->cur_rx[q] = 0;
	priv->cur_tx[q] = 0;
	priv->dirty_rx[q] = 0;
	priv->dirty_tx[q] = 0;

	info->rx_ring_format(ndev, q);

	memset(priv->tx_ring[q], 0, tx_ring_size);
	/* Build TX ring buffer */
	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
	     i++, tx_desc++) {
		tx_desc->die_dt = DT_EEMPTY;
		if (num_tx_desc > 1) {
			tx_desc++;
			tx_desc->die_dt = DT_EEMPTY;
		}
	}
	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
	tx_desc->die_dt = DT_LINKFIX; /* type */

	/* RX descriptor base address for best effort */
	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);

	/* TX descriptor base address for best effort */
	desc = &priv->desc_bat[q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}

static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;

	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);

	priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
						 &priv->rx_desc_dma[q],
						 GFP_KERNEL);
	return priv->gbeth_rx_ring;
}

static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;

	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);

	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
					      &priv->rx_desc_dma[q],
					      GFP_KERNEL);
	return priv->rx_ring[q];
}

/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	unsigned int num_tx_desc = priv->num_tx_desc;
	unsigned int ring_size;
	struct sk_buff *skb;
	unsigned int i;

	/* Allocate RX and TX skb rings */
	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
	if (!priv->rx_skb[q] || !priv->tx_skb[q])
		goto error;

	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
		if (!skb)
			goto error;
		ravb_set_buffer_align(skb);
		priv->rx_skb[q][i] = skb;
	}

	if (num_tx_desc > 1) {
		/* Allocate rings for the aligned buffers */
		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
					    DPTR_ALIGN - 1, GFP_KERNEL);
		if (!priv->tx_align[q])
			goto error;
	}

	/* Allocate all RX descriptors. */
	if (!info->alloc_rx_desc(ndev, q))
		goto error;

	priv->dirty_rx[q] = 0;

	/* Allocate all TX descriptors. */
	ring_size = sizeof(struct ravb_tx_desc) *
		    (priv->num_tx_ring[q] * num_tx_desc + 1);
	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
					      &priv->tx_desc_dma[q],
					      GFP_KERNEL);
	if (!priv->tx_ring[q])
		goto error;

	return 0;

error:
	ravb_ring_free(ndev, q);

	return -ENOMEM;
}

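/* Enable or disable the GbEth checksum offload engine according to the
 * NETIF_F_HW_CSUM/NETIF_F_RXCSUM features; on timeout the features are
 * cleared so the stack falls back to software checksumming.
 */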
static void ravb_csum_init_gbeth(struct net_device *ndev)
{
	bool tx_enable = ndev->features & NETIF_F_HW_CSUM;
	bool rx_enable = ndev->features & NETIF_F_RXCSUM;

	if (!(tx_enable || rx_enable))
		goto done;

	ravb_write(ndev, 0, CSR0);
	if (ravb_wait(ndev, CSR0, CSR0_TPE | CSR0_RPE, 0)) {
		netdev_err(ndev, "Timeout enabling hardware checksum\n");

		if (tx_enable)
			ndev->features &= ~NETIF_F_HW_CSUM;

		if (rx_enable)
			ndev->features &= ~NETIF_F_RXCSUM;
	} else {
		if (tx_enable)
			ravb_write(ndev, CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4, CSR1);

		if (rx_enable)
			ravb_write(ndev, CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4,
				   CSR2);
	}

done:
	ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
}

static void ravb_emac_init_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
	} else {
		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
			    CXR31_SEL_LINK0);
	}

	/* Receive frame limit set register */
	ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);

	/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
	ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
			 ECMR_TE | ECMR_RE | ECMR_RCPT |
			 ECMR_TXF | ECMR_RXF, ECMR);

	ravb_set_rate_gbeth(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);

	ravb_csum_init_gbeth(ndev);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
}

static void ravb_emac_init_rcar(struct net_device *ndev)
{
	/* Receive frame limit set register */
	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);

	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
	ravb_write(ndev, ECMR_ZPF | ECMR_DM |
		   (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
		   ECMR_TE | ECMR_RE, ECMR);

	ravb_set_rate_rcar(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev,
		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}

/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	info->emac_init(ndev);
}

static int ravb_dmac_init_gbeth(struct net_device *ndev)
{
	int error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);

	/* Set DMAC RX */
	ravb_write(ndev, 0x60000000, RCR);

	/* Set Max Frame Length (RTC) */
	ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);

	/* Set FIFO size */
	ravb_write(ndev, 0x00222200, TGC);

	ravb_write(ndev, 0, TCCR);

	/* Frame receive */
	ravb_write(ndev, RIC0_FRE0, RIC0);
	/* Disable FIFO full warning */
	ravb_write(ndev, 0x0, RIC1);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);

	ravb_write(ndev, TIC_FTE0, TIC);

	return 0;
}

static int ravb_dmac_init_rcar(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;
	error = ravb_ring_init(ndev, RAVB_NC);
	if (error) {
		ravb_ring_free(ndev, RAVB_BE);
		return error;
	}

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);
	ravb_ring_format(ndev, RAVB_NC);

	/* Set AVB RX */
	ravb_write(ndev,
		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);

	/* Set FIFO size */
	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);

	/* Timestamp enable */
	ravb_write(ndev, TCCR_TFEN, TCCR);

	/* Interrupt init: */
	if (info->multi_irqs) {
		/* Clear DIL.DPLx */
		ravb_write(ndev, 0, DIL);
		/* Set queue specific interrupt */
		ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
	}
	/* Frame receive */
	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
	/* Disable FIFO full warning */
	ravb_write(ndev, 0, RIC1);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
	/* Frame transmitted, timestamp FIFO updated */
	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);

	return 0;
}

/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	/* Set CONFIG mode */
	error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
	if (error)
		return error;

	error = info->dmac_init(ndev);
	if (error)
		return error;

	/* Setting the control will start the AVB-DMAC process. */
	return ravb_set_opmode(ndev, CCC_OPC_OPERATION);
}

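/* Read TX timestamps from the timestamp FIFO and attach each one to the
 * queued skb with the matching tag.
 */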
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb;
	struct timespec64 ts;
	u16 tag, tfa_tag;
	int count;
	u32 tfa2;

	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
	while (count--) {
		tfa2 = ravb_read(ndev, TFA2);
		tfa_tag = (tfa2 & TFA2_TST) >> 16;
		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
			    ravb_read(ndev, TFA1);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
					 list) {
			skb = ts_skb->skb;
			tag = ts_skb->tag;
			list_del(&ts_skb->list);
			kfree(ts_skb);
			if (tag == tfa_tag) {
				skb_tstamp_tx(skb, &shhwtstamps);
				dev_consume_skb_any(skb);
				break;
			} else {
				dev_kfree_skb_any(skb);
			}
		}
		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
	}
}

static void ravb_rx_csum_gbeth(struct sk_buff *skb)
{
	__wsum csum_ip_hdr, csum_proto;
	u8 *hw_csum;

	/* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4
	 * bytes appended to packet data. The first 2 bytes are the IP header
	 * checksum and the last 2 bytes are the protocol checksum.
	 */
	if (unlikely(skb->len < sizeof(__sum16) * 2))
		return;

	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));

	hw_csum -= sizeof(__sum16);
	csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb_trim(skb, skb->len - 2 * sizeof(__sum16));

	/* TODO: IPV6 Rx checksum */
	if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static void ravb_rx_csum(struct sk_buff *skb)
{
	u8 *hw_csum;

	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
	 * appended to packet data
	 */
	if (unlikely(skb->len < sizeof(__sum16)))
		return;
	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb_trim(skb, skb->len - sizeof(__sum16));
}

static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
					  struct ravb_rx_desc *desc)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct sk_buff *skb;

	skb = priv->rx_skb[RAVB_BE][entry];
	priv->rx_skb[RAVB_BE][entry] = NULL;
	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
			 ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);

	return skb;
}

/* Packet receive function for Gigabit Ethernet */
static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct net_device_stats *stats;
	struct ravb_rx_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int rx_packets = 0;
	u8 desc_status;
	u16 pkt_len;
	u8  die_dt;
	int entry;
	int limit;
	int i;

	entry = priv->cur_rx[q] % priv->num_rx_ring[q];
	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
	stats = &priv->stats[q];

	desc = &priv->gbeth_rx_ring[entry];
	for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!pkt_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			die_dt = desc->die_dt & 0xF0;
			switch (die_dt) {
			case DT_FSINGLE:
				skb = ravb_get_skb_gbeth(ndev, entry, desc);
				skb_put(skb, pkt_len);
				skb->protocol = eth_type_trans(skb, ndev);
				if (ndev->features & NETIF_F_RXCSUM)
					ravb_rx_csum_gbeth(skb);
				napi_gro_receive(&priv->napi[q], skb);
				rx_packets++;
				stats->rx_bytes += pkt_len;
				break;
			case DT_FSTART:
				priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
				skb_put(priv->rx_1st_skb, pkt_len);
				break;
			case DT_FMID:
				skb = ravb_get_skb_gbeth(ndev, entry, desc);
				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
							       priv->rx_1st_skb->len,
							       skb->data,
							       pkt_len);
				skb_put(priv->rx_1st_skb, pkt_len);
				dev_kfree_skb(skb);
				break;
			case DT_FEND:
				skb = ravb_get_skb_gbeth(ndev, entry, desc);
				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
							       priv->rx_1st_skb->len,
							       skb->data,
							       pkt_len);
				skb_put(priv->rx_1st_skb, pkt_len);
				dev_kfree_skb(skb);
				priv->rx_1st_skb->protocol =
					eth_type_trans(priv->rx_1st_skb, ndev);
				if (ndev->features & NETIF_F_RXCSUM)
					ravb_rx_csum_gbeth(skb);
				napi_gro_receive(&priv->napi[q],
						 priv->rx_1st_skb);
				rx_packets++;
				stats->rx_bytes += pkt_len;
				break;
			}
		}

		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
		desc = &priv->gbeth_rx_ring[entry];
	}

	/* Refill the RX ring buffers. */
	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
		desc = &priv->gbeth_rx_ring[entry];
		desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);

		if (!priv->rx_skb[q][entry]) {
			skb = netdev_alloc_skb(ndev, info->max_rx_len);
			if (!skb)
				break;
			ravb_set_buffer_align(skb);
			dma_addr = dma_map_single(ndev->dev.parent,
						  skb->data,
						  GBETH_RX_BUFF_MAX,
						  DMA_FROM_DEVICE);
			skb_checksum_none_assert(skb);
			/* We just set the data size to 0 for a failed mapping
			 * which should prevent DMA from happening...
			 */
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				desc->ds_cc = cpu_to_le16(0);
			desc->dptr = cpu_to_le32(dma_addr);
			priv->rx_skb[q][entry] = skb;
		}
		/* Descriptor type must be set after all the above writes */
		dma_wmb();
		desc->die_dt = DT_FEMPTY;
	}

	stats->rx_packets += rx_packets;
	*quota -= rx_packets;
	return *quota == 0;
}

/* Packet receive function for Ethernet AVB */
static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
	int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
		       priv->cur_rx[q];
	struct net_device_stats *stats = &priv->stats[q];
	struct ravb_ex_rx_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	struct timespec64 ts;
	u8  desc_status;
	u16 pkt_len;
	int limit;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	desc = &priv->rx_ring[q][entry];
	while (desc->die_dt != DT_FEMPTY) {
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		if (--boguscnt < 0)
			break;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!pkt_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
				   MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;

			skb = priv->rx_skb[q][entry];
			priv->rx_skb[q][entry] = NULL;
			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
					 RX_BUF_SZ,
					 DMA_FROM_DEVICE);
			get_ts &= (q == RAVB_NC) ?
					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
			if (get_ts) {
				struct skb_shared_hwtstamps *shhwtstamps;

				shhwtstamps = skb_hwtstamps(skb);
				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
					     32) | le32_to_cpu(desc->ts_sl);
				ts.tv_nsec = le32_to_cpu(desc->ts_n);
				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
			}

			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			if (ndev->features & NETIF_F_RXCSUM)
				ravb_rx_csum(skb);
			napi_gro_receive(&priv->napi[q], skb);
			stats->rx_packets++;
			stats->rx_bytes += pkt_len;
		}

		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q][entry];
	}

	/* Refill the RX ring buffers. */
	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q][entry];
		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);

		if (!priv->rx_skb[q][entry]) {
			skb = netdev_alloc_skb(ndev, info->max_rx_len);
			if (!skb)
				break;	/* Better luck next round. */
			ravb_set_buffer_align(skb);
			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
						  le16_to_cpu(desc->ds_cc),
						  DMA_FROM_DEVICE);
			skb_checksum_none_assert(skb);
			/* We just set the data size to 0 for a failed mapping
			 * which should prevent DMA from happening...
			 */
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				desc->ds_cc = cpu_to_le16(0);
			desc->dptr = cpu_to_le32(dma_addr);
			priv->rx_skb[q][entry] = skb;
		}
		/* Descriptor type must be set after all the above writes */
		dma_wmb();
		desc->die_dt = DT_FEMPTY;
	}

	*quota -= limit - (++boguscnt);

	return boguscnt <= 0;
}

/* Packet receive function for Ethernet AVB */
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	return info->receive(ndev, quota, q);
}

static void ravb_rcv_snd_disable(struct net_device *ndev)
{
	/* Disable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void ravb_rcv_snd_enable(struct net_device *ndev)
{
	/* Enable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}

/* Wait for the DMA process to finish */
static int ravb_stop_dma(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	/* Wait for stopping the hardware TX process */
	error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);

	if (error)
		return error;

	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
			  0);
	if (error)
		return error;

	/* Stop the E-MAC's RX/TX processes. */
	ravb_rcv_snd_disable(ndev);

	/* Wait for stopping the RX DMA process */
	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
	if (error)
		return error;

	/* Stop AVB-DMAC process */
	return ravb_set_opmode(ndev, CCC_OPC_CONFIG);
}

/* E-MAC interrupt handler */
static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecsr, psr;

	ecsr = ravb_read(ndev, ECSR);
	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */

	if (ecsr & ECSR_MPD)
		pm_wakeup_event(&priv->pdev->dev, 0);
	if (ecsr & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (ecsr & ECSR_LCHNG) {
		/* Link changed */
		if (priv->no_avb_link)
			return;
		psr = ravb_read(ndev, PSR);
		if (priv->avb_link_active_low)
			psr ^= PSR_LMON;
		if (!(psr & PSR_LMON)) {
			/* Disable RX and TX */
			ravb_rcv_snd_disable(ndev);
		} else {
			/* Enable RX and TX */
			ravb_rcv_snd_enable(ndev);
		}
	}
}

static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_HANDLED;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev))) {
		result = IRQ_NONE;
		goto out_rpm_put;
	}

	spin_lock(&priv->lock);
	ravb_emac_interrupt_unlocked(ndev);
	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}

/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 eis, ris2;

	eis = ravb_read(ndev, EIS);
	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
	if (eis & EIS_QFS) {
		ris2 = ravb_read(ndev, RIS2);
		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
			   RIS2);

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF0)
			priv->stats[RAVB_BE].rx_over_errors++;

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF1)
			priv->stats[RAVB_NC].rx_over_errors++;

		/* Receive FIFO Overflow int */
		if (ris2 & RIS2_RFFF)
			priv->rx_fifo_errors++;
	}
}

static bool ravb_queue_interrupt(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	u32 ris0 = ravb_read(ndev, RIS0);
	u32 ric0 = ravb_read(ndev, RIC0);
	u32 tis  = ravb_read(ndev, TIS);
	u32 tic  = ravb_read(ndev, TIC);

	if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
		if (napi_schedule_prep(&priv->napi[q])) {
			/* Mask RX and TX interrupts */
			if (!info->irq_en_dis) {
				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
				ravb_write(ndev, tic & ~BIT(q), TIC);
			} else {
				ravb_write(ndev, BIT(q), RID0);
				ravb_write(ndev, BIT(q), TID);
			}
			__napi_schedule(&priv->napi[q]);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
				    ris0, ric0);
			netdev_warn(ndev,
				    "                    tx status 0x%08x, tx mask 0x%08x.\n",
				    tis, tic);
		}
		return true;
	}
	return false;
}

static bool ravb_timestamp_interrupt(struct net_device *ndev)
{
	u32 tis = ravb_read(ndev, TIS);

	if (tis & TIS_TFUF) {
		ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
		ravb_get_tx_tstamp(ndev);
		return true;
	}
	return false;
}

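/* Combined interrupt handler (single-IRQ configurations): dispatches RX/TX
 * queue, E-MAC, error and gPTP events.
 */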
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev)))
		goto out_rpm_put;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Received and transmitted interrupts */
	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
		int q;

		/* Timestamp updated */
		if (ravb_timestamp_interrupt(ndev))
			result = IRQ_HANDLED;

		/* Network control and best effort queue RX/TX */
		if (info->nc_queues) {
			for (q = RAVB_NC; q >= RAVB_BE; q--) {
				if (ravb_queue_interrupt(ndev, q))
					result = IRQ_HANDLED;
			}
		} else {
			if (ravb_queue_interrupt(ndev, RAVB_BE))
				result = IRQ_HANDLED;
		}
	}

	/* E-MAC status summary */
	if (iss & ISS_MS) {
		ravb_emac_interrupt_unlocked(ndev);
		result = IRQ_HANDLED;
	}

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}

/* Timestamp/Error/gPTP interrupt handler */
static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev)))
		goto out_rpm_put;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Timestamp updated */
	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
		result = IRQ_HANDLED;

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}

static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	irqreturn_t result = IRQ_NONE;

	pm_runtime_get_noresume(dev);

	if (unlikely(!pm_runtime_active(dev)))
		goto out_rpm_put;

	spin_lock(&priv->lock);

	/* Network control/Best effort queue RX/TX */
	if (ravb_queue_interrupt(ndev, q))
		result = IRQ_HANDLED;

	spin_unlock(&priv->lock);

out_rpm_put:
	pm_runtime_put_noidle(dev);
	return result;
}

static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
}

static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
}

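/* NAPI poll handler: process the RX ring, reclaim transmitted TX descriptors
 * and re-enable the RX/TX interrupts for the queue when the work completes
 * within the budget.
 */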
static int ravb_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	unsigned long flags;
	int q = napi - priv->napi;
	int mask = BIT(q);
	int quota = budget;

	/* Processing RX Descriptor Ring */
	/* Clear RX interrupt */
	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
	if (ravb_rx(ndev, &quota, q))
		goto out;

	/* Processing TX Descriptor Ring */
	spin_lock_irqsave(&priv->lock, flags);
	/* Clear TX interrupt */
	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
	ravb_tx_free(ndev, q, true);
	netif_wake_subqueue(ndev, q);
	spin_unlock_irqrestore(&priv->lock, flags);

	napi_complete(napi);

	/* Re-enable RX/TX interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	if (!info->irq_en_dis) {
		ravb_modify(ndev, RIC0, mask, mask);
		ravb_modify(ndev, TIC,  mask, mask);
	} else {
		ravb_write(ndev, mask, RIE0);
		ravb_write(ndev, mask, TIE);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Receive error message handling */
	priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
	if (info->nc_queues)
		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
		ndev->stats.rx_over_errors = priv->rx_over_errors;
	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
out:
	return budget - quota;
}

static void ravb_set_duplex_gbeth(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
}

/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct phy_device *phydev = ndev->phydev;
	bool new_state = false;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable TX and RX right over here, if E-MAC change is ignored */
	if (priv->no_avb_link)
		ravb_rcv_snd_disable(ndev);

	if (phydev->link) {
		if (info->half_duplex && phydev->duplex != priv->duplex) {
			new_state = true;
			priv->duplex = phydev->duplex;
			ravb_set_duplex_gbeth(ndev);
		}

		if (phydev->speed != priv->speed) {
			new_state = true;
			priv->speed = phydev->speed;
			info->set_rate(ndev);
		}
		if (!priv->link) {
			ravb_modify(ndev, ECMR, ECMR_TXF, 0);
			new_state = true;
			priv->link = phydev->link;
		}
	} else if (priv->link) {
		new_state = true;
		priv->link = 0;
		priv->speed = 0;
		if (info->half_duplex)
			priv->duplex = -1;
	}

	/* Enable TX and RX right over here, if E-MAC change is ignored */
	if (priv->no_avb_link && phydev->link)
		ravb_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}

/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct phy_device *phydev;
	struct device_node *pn;
	phy_interface_t iface;
	int err;

	priv->link = 0;
	priv->speed = 0;
	priv->duplex = -1;

	/* Try connecting to PHY */
	pn = of_parse_phandle(np, "phy-handle", 0);
	if (!pn) {
		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
		if (of_phy_is_fixed_link(np)) {
			err = of_phy_register_fixed_link(np);
			if (err)
				return err;
		}
		pn = of_node_get(np);
	}

	iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
				     : priv->phy_interface;
	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
	of_node_put(pn);
	if (!phydev) {
		netdev_err(ndev, "failed to connect PHY\n");
		err = -ENOENT;
		goto err_deregister_fixed_link;
	}

	if (!info->half_duplex) {
		/* 10BASE, Pause and Asym Pause are not supported */
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

		/* Half Duplex is not supported */
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	}

	phy_attached_info(phydev);

	return 0;

err_deregister_fixed_link:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);

	return err;
}

/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
	int error;

	error = ravb_phy_init(ndev);
	if (error)
		return error;

	phy_start(ndev->phydev);

	return 0;
}

static u32 ravb_get_msglevel(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

static void ravb_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ravb_private *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_csum_offload_errors",
	"rx_queue_0_over_errors",
};

static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_missed_errors",
	"rx_queue_0_over_errors",

	"rx_queue_1_current",
	"tx_queue_1_current",
	"rx_queue_1_dirty",
	"tx_queue_1_dirty",
	"rx_queue_1_packets",
	"tx_queue_1_packets",
	"rx_queue_1_bytes",
	"tx_queue_1_bytes",
	"rx_queue_1_mcast_packets",
	"rx_queue_1_errors",
	"rx_queue_1_crc_errors",
	"rx_queue_1_frame_errors",
	"rx_queue_1_length_errors",
	"rx_queue_1_missed_errors",
	"rx_queue_1_over_errors",
};

static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
	struct ravb_private *priv = netdev_priv(netdev);
	const struct ravb_hw_info *info = priv->info;

	switch (sset) {
	case ETH_SS_STATS:
		return info->stats_len;
	default:
		return -EOPNOTSUPP;
	}
}

static void ravb_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *estats, u64 *data)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int num_rx_q;
	int i = 0;
	int q;

	num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
	/* Device-specific stats */
	for (q = RAVB_BE; q < num_rx_q; q++) {
		struct net_device_stats *stats = &priv->stats[q];

		data[i++] = priv->cur_rx[q];
		data[i++] = priv->cur_tx[q];
		data[i++] = priv->dirty_rx[q];
		data[i++] = priv->dirty_tx[q];
		data[i++] = stats->rx_packets;
		data[i++] = stats->tx_packets;
		data[i++] = stats->rx_bytes;
		data[i++] = stats->tx_bytes;
		data[i++] = stats->multicast;
		data[i++] = stats->rx_errors;
		data[i++] = stats->rx_crc_errors;
		data[i++] = stats->rx_frame_errors;
		data[i++] = stats->rx_length_errors;
		data[i++] = stats->rx_missed_errors;
		data[i++] = stats->rx_over_errors;
	}
}

static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, info->gstrings_stats, info->gstrings_size);
		break;
	}
}

static void ravb_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ring->rx_max_pending = BE_RX_RING_MAX;
	ring->tx_max_pending = BE_TX_RING_MAX;
	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
}

static int ravb_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	if (ring->tx_pending > BE_TX_RING_MAX ||
	    ring->rx_pending > BE_RX_RING_MAX ||
	    ring->tx_pending < BE_TX_RING_MIN ||
	    ring->rx_pending < BE_RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		/* Stop PTP Clock driver */
		if (info->gptp)
			ravb_ptp_stop(ndev);
		/* Wait for DMA stopping */
		error = ravb_stop_dma(ndev);
		if (error) {
			netdev_err(ndev,
				   "cannot set ringparam! Any AVB processes are still running?\n");
			return error;
		}
		synchronize_irq(ndev->irq);

		/* Free all the skb's in the RX queue and the DMA buffers. */
		ravb_ring_free(ndev, RAVB_BE);
		if (info->nc_queues)
			ravb_ring_free(ndev, RAVB_NC);
	}

	/* Set new parameters */
	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;

	if (netif_running(ndev)) {
		error = ravb_dmac_init(ndev);
		if (error) {
			netdev_err(ndev,
				   "%s: ravb_dmac_init() failed, error %d\n",
				   __func__, error);
			return error;
		}

		ravb_emac_init(ndev);

		/* Initialise PTP Clock driver */
		if (info->gptp)
			ravb_ptp_init(ndev, priv->pdev);

		netif_device_attach(ndev);
	}

	return 0;
}

static int ravb_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *hw_info = priv->info;

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_ALL);
	if (hw_info->gptp || hw_info->ccc_gac)
		info->phc_index = ptp_clock_index(priv->ptp.clock);

	return 0;
}

static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct ravb_private *priv = netdev_priv(ndev);

	wol->supported = WAKE_MAGIC;
	wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
}

static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);

	device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);

	return 0;
}

static const struct ethtool_ops ravb_ethtool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_msglevel		= ravb_get_msglevel,
	.set_msglevel		= ravb_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ravb_get_strings,
	.get_ethtool_stats	= ravb_get_ethtool_stats,
	.get_sset_count		= ravb_get_sset_count,
	.get_ringparam		= ravb_get_ringparam,
	.set_ringparam		= ravb_set_ringparam,
	.get_ts_info		= ravb_get_ts_info,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_wol		= ravb_get_wol,
	.set_wol		= ravb_set_wol,
};

static int ravb_set_config_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	if (info->gptp) {
		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
		if (error)
			return error;
		/* Set CSEL value */
		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
	} else if (info->ccc_gac) {
		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
	} else {
		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
	}

	return error;
}

static void ravb_set_gti(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	if (!(info->gptp || info->ccc_gac))
		return;

	ravb_write(ndev, priv->gti_tiv, GTI);

	/* Request GTI loading */
	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
}

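/* Compute the gPTP timer increment (GTI.TIV), expressed in 1/2^20 ns units,
 * from the reference clock rate.
 */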
static int ravb_compute_gti(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct device *dev = ndev->dev.parent;
	unsigned long rate;
	u64 inc;

	if (!(info->gptp || info->ccc_gac))
		return 0;

	if (info->gptp_ref_clk)
		rate = clk_get_rate(priv->gptp_clk);
	else
		rate = clk_get_rate(priv->clk);
	if (!rate)
		return -EINVAL;

	inc = div64_ul(1000000000ULL << 20, rate);

	if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
		dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
			inc, GTI_TIV_MIN, GTI_TIV_MAX);
		return -EINVAL;
	}
	priv->gti_tiv = inc;

	return 0;
}

/* Set tx and rx clock internal delay modes */
static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	bool explicit_delay = false;
	u32 delay;

	if (!priv->info->internal_delay)
		return;

	if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 1800, according to DT bindings */
		priv->rxcidm = !!delay;
		explicit_delay = true;
	}
	if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 2000, according to DT bindings */
		priv->txcidm = !!delay;
		explicit_delay = true;
	}

	if (explicit_delay)
		return;

	/* Fall back to legacy rgmii-*id behavior */
	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		priv->rxcidm = 1;
		priv->rgmii_override = 1;
	}

	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		priv->txcidm = 1;
		priv->rgmii_override = 1;
	}
}

static void ravb_set_delay_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 set = 0;

	if (!priv->info->internal_delay)
		return;

	if (priv->rxcidm)
		set |= APSR_RDM;
	if (priv->txcidm)
		set |= APSR_TDM;
	ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
}

/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct device *dev = &priv->pdev->dev;
	int error;

	napi_enable(&priv->napi[RAVB_BE]);
	if (info->nc_queues)
		napi_enable(&priv->napi[RAVB_NC]);

	error = pm_runtime_resume_and_get(dev);
	if (error < 0)
		goto out_napi_off;

	/* Set AVB config mode */
	error = ravb_set_config_mode(ndev);
	if (error)
		goto out_rpm_put;

	ravb_set_delay_mode(ndev);
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	/* Device init */
	error = ravb_dmac_init(ndev);
	if (error)
		goto out_set_reset;

	ravb_emac_init(ndev);

	ravb_set_gti(ndev);

	/* Initialise PTP Clock driver */
	if (info->gptp || info->ccc_gac)
		ravb_ptp_init(ndev, priv->pdev);
1969
1970	/* PHY control start */
1971	error = ravb_phy_start(ndev);
1972	if (error)
1973		goto out_ptp_stop;
1974
1975	netif_tx_start_all_queues(ndev);
1976
1977	return 0;
1978
1979 out_ptp_stop:
1980	/* Stop PTP Clock driver */
1981	if (info->gptp || info->ccc_gac)
1982		ravb_ptp_stop(ndev);
1983	ravb_stop_dma(ndev);
1984 out_set_reset:
1985	ravb_set_opmode(ndev, CCC_OPC_RESET);
1986 out_rpm_put:
1987	pm_runtime_mark_last_busy(dev);
1988	pm_runtime_put_autosuspend(dev);
1989 out_napi_off:
1990	if (info->nc_queues)
1991		napi_disable(&priv->napi[RAVB_NC]);
1992	napi_disable(&priv->napi[RAVB_BE]);
1993	return error;
1994 }
1995
1996 /* Timeout function for Ethernet AVB */
1997 static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1998 {
1999	struct ravb_private *priv = netdev_priv(ndev);
2000
2001	netif_err(priv, tx_err, ndev,
2002		  "transmit timed out, status %08x, resetting...\n",
2003		  ravb_read(ndev, ISS));
2004
2005	/* Increment the TX error counter */
2006	ndev->stats.tx_errors++;
2007
2008	schedule_work(&priv->work);
2009 }
2010
2011 static void ravb_tx_timeout_work(struct work_struct *work)
2012 {
2013	struct ravb_private *priv = container_of(work, struct ravb_private,
2014						 work);
2015	const struct ravb_hw_info *info = priv->info;
2016	struct net_device *ndev = priv->ndev;
2017	int error;
2018
2019	if (!rtnl_trylock()) {
2020		usleep_range(1000, 2000);
2021		schedule_work(&priv->work);
2022		return;
2023	}
2024
2025	netif_tx_stop_all_queues(ndev);
2026
2027	/* Stop PTP Clock driver */
2028	if (info->gptp)
2029		ravb_ptp_stop(ndev);
2030
2031	/* Wait for DMA stopping */
2032	if (ravb_stop_dma(ndev)) {
2033		/* If ravb_stop_dma() fails, the hardware is still operating
2034		 * for TX and/or RX, so the re-initialization below must not
2035		 * be attempted because ravb_dmac_init() may fail as well.
2036		 * Retrying ravb_stop_dma() here is not an option either,
2037		 * since it could wait forever. So simply re-enable TX and
2038		 * RX and skip the following re-initialization
2039		 * procedure.
2040		 */
2041		ravb_rcv_snd_enable(ndev);
2042		goto out;
2043	}
2044
2045	ravb_ring_free(ndev, RAVB_BE);
2046	if (info->nc_queues)
2047		ravb_ring_free(ndev, RAVB_NC);
2048
2049	/* Device init */
2050	error = ravb_dmac_init(ndev);
2051	if (error) {
2052		/* If ravb_dmac_init() fails, the descriptors have already
2053		 * been freed, so return here to avoid re-enabling TX and RX
2054		 * in ravb_emac_init().
2055		 */
2056		netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
2057			   __func__, error);
2058		goto out_unlock;
2059	}
2060	ravb_emac_init(ndev);
2061
2062 out:
2063	/* Initialise PTP Clock driver */
2064	if (info->gptp)
2065		ravb_ptp_init(ndev, priv->pdev);
2066
2067	netif_tx_start_all_queues(ndev);
2068
2069 out_unlock:
2070	rtnl_unlock();
2071 }
2072
2073 static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb)
2074 {
2075	struct iphdr *ip = ip_hdr(skb);
2076
2077	/* TODO: Need to add support for VLAN tag 802.1Q */
2078	if (skb_vlan_tag_present(skb))
2079		return false;
2080
2081	/* TODO: Need to add hardware checksum for IPv6 */
2082	if (skb->protocol != htons(ETH_P_IP))
2083		return false;
2084
2085	switch (ip->protocol) {
2086	case IPPROTO_TCP:
2087		break;
2088	case IPPROTO_UDP:
2089		/* If the checksum value in the UDP header field is 0, the TOE
2090		 * does not compute a checksum for the UDP part of this frame,
2091		 * since the UDP checksum is optional per the standard.
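		 * In that case ravb_start_xmit() falls back to
		 * skb_checksum_help() and the checksum is completed in
		 * software instead.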
2092 */ 2093 if (udp_hdr(skb)->check == 0) 2094 return false; 2095 break; 2096 default: 2097 return false; 2098 } 2099 2100 return true; 2101 } 2102 2103 /* Packet transmit function for Ethernet AVB */ 2104 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) 2105 { 2106 struct ravb_private *priv = netdev_priv(ndev); 2107 const struct ravb_hw_info *info = priv->info; 2108 unsigned int num_tx_desc = priv->num_tx_desc; 2109 u16 q = skb_get_queue_mapping(skb); 2110 struct ravb_tstamp_skb *ts_skb; 2111 struct ravb_tx_desc *desc; 2112 unsigned long flags; 2113 dma_addr_t dma_addr; 2114 void *buffer; 2115 u32 entry; 2116 u32 len; 2117 2118 if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb)) 2119 skb_checksum_help(skb); 2120 2121 spin_lock_irqsave(&priv->lock, flags); 2122 if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * 2123 num_tx_desc) { 2124 netif_err(priv, tx_queued, ndev, 2125 "still transmitting with the full ring!\n"); 2126 netif_stop_subqueue(ndev, q); 2127 spin_unlock_irqrestore(&priv->lock, flags); 2128 return NETDEV_TX_BUSY; 2129 } 2130 2131 if (skb_put_padto(skb, ETH_ZLEN)) 2132 goto exit; 2133 2134 entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc); 2135 priv->tx_skb[q][entry / num_tx_desc] = skb; 2136 2137 if (num_tx_desc > 1) { 2138 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + 2139 entry / num_tx_desc * DPTR_ALIGN; 2140 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; 2141 2142 /* Zero length DMA descriptors are problematic as they seem 2143 * to terminate DMA transfers. Avoid them by simply using a 2144 * length of DPTR_ALIGN (4) when skb data is aligned to 2145 * DPTR_ALIGN. 2146 * 2147 * As skb is guaranteed to have at least ETH_ZLEN (60) 2148 * bytes of data by the call to skb_put_padto() above this 2149 * is safe with respect to both the length of the first DMA 2150 * descriptor (len) overflowing the available data and the 2151 * length of the second DMA descriptor (skb->len - len) 2152 * being negative. 
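	 * As an illustration: when skb->data is already DPTR_ALIGN-aligned,
	 * len would be 0, so it is bumped to 4; the first descriptor then
	 * carries the first 4 bytes from the aligned bounce buffer and the
	 * second descriptor maps the remaining skb->len - 4 bytes directly.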
2153 */ 2154 if (len == 0) 2155 len = DPTR_ALIGN; 2156 2157 memcpy(buffer, skb->data, len); 2158 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, 2159 DMA_TO_DEVICE); 2160 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 2161 goto drop; 2162 2163 desc = &priv->tx_ring[q][entry]; 2164 desc->ds_tagl = cpu_to_le16(len); 2165 desc->dptr = cpu_to_le32(dma_addr); 2166 2167 buffer = skb->data + len; 2168 len = skb->len - len; 2169 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, 2170 DMA_TO_DEVICE); 2171 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 2172 goto unmap; 2173 2174 desc++; 2175 } else { 2176 desc = &priv->tx_ring[q][entry]; 2177 len = skb->len; 2178 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, 2179 DMA_TO_DEVICE); 2180 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 2181 goto drop; 2182 } 2183 desc->ds_tagl = cpu_to_le16(len); 2184 desc->dptr = cpu_to_le32(dma_addr); 2185 2186 /* TX timestamp required */ 2187 if (info->gptp || info->ccc_gac) { 2188 if (q == RAVB_NC) { 2189 ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); 2190 if (!ts_skb) { 2191 if (num_tx_desc > 1) { 2192 desc--; 2193 dma_unmap_single(ndev->dev.parent, dma_addr, 2194 len, DMA_TO_DEVICE); 2195 } 2196 goto unmap; 2197 } 2198 ts_skb->skb = skb_get(skb); 2199 ts_skb->tag = priv->ts_skb_tag++; 2200 priv->ts_skb_tag &= 0x3ff; 2201 list_add_tail(&ts_skb->list, &priv->ts_skb_list); 2202 2203 /* TAG and timestamp required flag */ 2204 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2205 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; 2206 desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); 2207 } 2208 2209 skb_tx_timestamp(skb); 2210 } 2211 /* Descriptor type must be set after all the above writes */ 2212 dma_wmb(); 2213 if (num_tx_desc > 1) { 2214 desc->die_dt = DT_FEND; 2215 desc--; 2216 desc->die_dt = DT_FSTART; 2217 } else { 2218 desc->die_dt = DT_FSINGLE; 2219 } 2220 ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q); 2221 2222 priv->cur_tx[q] += num_tx_desc; 2223 if (priv->cur_tx[q] - priv->dirty_tx[q] > 2224 (priv->num_tx_ring[q] - 1) * num_tx_desc && 2225 !ravb_tx_free(ndev, q, true)) 2226 netif_stop_subqueue(ndev, q); 2227 2228 exit: 2229 spin_unlock_irqrestore(&priv->lock, flags); 2230 return NETDEV_TX_OK; 2231 2232 unmap: 2233 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), 2234 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); 2235 drop: 2236 dev_kfree_skb_any(skb); 2237 priv->tx_skb[q][entry / num_tx_desc] = NULL; 2238 goto exit; 2239 } 2240 2241 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, 2242 struct net_device *sb_dev) 2243 { 2244 /* If skb needs TX timestamp, it is handled in network control queue */ 2245 return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? 
RAVB_NC : 2246 RAVB_BE; 2247 2248 } 2249 2250 static struct net_device_stats *ravb_get_stats(struct net_device *ndev) 2251 { 2252 struct ravb_private *priv = netdev_priv(ndev); 2253 const struct ravb_hw_info *info = priv->info; 2254 struct net_device_stats *nstats, *stats0, *stats1; 2255 struct device *dev = &priv->pdev->dev; 2256 2257 nstats = &ndev->stats; 2258 2259 pm_runtime_get_noresume(dev); 2260 2261 if (!pm_runtime_active(dev)) 2262 goto out_rpm_put; 2263 2264 stats0 = &priv->stats[RAVB_BE]; 2265 2266 if (info->tx_counters) { 2267 nstats->tx_dropped += ravb_read(ndev, TROCR); 2268 ravb_write(ndev, 0, TROCR); /* (write clear) */ 2269 } 2270 2271 if (info->carrier_counters) { 2272 nstats->collisions += ravb_read(ndev, CXR41); 2273 ravb_write(ndev, 0, CXR41); /* (write clear) */ 2274 nstats->tx_carrier_errors += ravb_read(ndev, CXR42); 2275 ravb_write(ndev, 0, CXR42); /* (write clear) */ 2276 } 2277 2278 nstats->rx_packets = stats0->rx_packets; 2279 nstats->tx_packets = stats0->tx_packets; 2280 nstats->rx_bytes = stats0->rx_bytes; 2281 nstats->tx_bytes = stats0->tx_bytes; 2282 nstats->multicast = stats0->multicast; 2283 nstats->rx_errors = stats0->rx_errors; 2284 nstats->rx_crc_errors = stats0->rx_crc_errors; 2285 nstats->rx_frame_errors = stats0->rx_frame_errors; 2286 nstats->rx_length_errors = stats0->rx_length_errors; 2287 nstats->rx_missed_errors = stats0->rx_missed_errors; 2288 nstats->rx_over_errors = stats0->rx_over_errors; 2289 if (info->nc_queues) { 2290 stats1 = &priv->stats[RAVB_NC]; 2291 2292 nstats->rx_packets += stats1->rx_packets; 2293 nstats->tx_packets += stats1->tx_packets; 2294 nstats->rx_bytes += stats1->rx_bytes; 2295 nstats->tx_bytes += stats1->tx_bytes; 2296 nstats->multicast += stats1->multicast; 2297 nstats->rx_errors += stats1->rx_errors; 2298 nstats->rx_crc_errors += stats1->rx_crc_errors; 2299 nstats->rx_frame_errors += stats1->rx_frame_errors; 2300 nstats->rx_length_errors += stats1->rx_length_errors; 2301 nstats->rx_missed_errors += stats1->rx_missed_errors; 2302 nstats->rx_over_errors += stats1->rx_over_errors; 2303 } 2304 2305 out_rpm_put: 2306 pm_runtime_put_noidle(dev); 2307 return nstats; 2308 } 2309 2310 /* Update promiscuous bit */ 2311 static void ravb_set_rx_mode(struct net_device *ndev) 2312 { 2313 struct ravb_private *priv = netdev_priv(ndev); 2314 unsigned long flags; 2315 2316 spin_lock_irqsave(&priv->lock, flags); 2317 ravb_modify(ndev, ECMR, ECMR_PRM, 2318 ndev->flags & IFF_PROMISC ? ECMR_PRM : 0); 2319 spin_unlock_irqrestore(&priv->lock, flags); 2320 } 2321 2322 /* Device close function for Ethernet AVB */ 2323 static int ravb_close(struct net_device *ndev) 2324 { 2325 struct device_node *np = ndev->dev.parent->of_node; 2326 struct ravb_private *priv = netdev_priv(ndev); 2327 const struct ravb_hw_info *info = priv->info; 2328 struct ravb_tstamp_skb *ts_skb, *ts_skb2; 2329 struct device *dev = &priv->pdev->dev; 2330 int error; 2331 2332 netif_tx_stop_all_queues(ndev); 2333 2334 /* Disable interrupts by clearing the interrupt masks. 
*/ 2335 ravb_write(ndev, 0, RIC0); 2336 ravb_write(ndev, 0, RIC2); 2337 ravb_write(ndev, 0, TIC); 2338 2339 /* PHY disconnect */ 2340 if (ndev->phydev) { 2341 phy_stop(ndev->phydev); 2342 phy_disconnect(ndev->phydev); 2343 if (of_phy_is_fixed_link(np)) 2344 of_phy_deregister_fixed_link(np); 2345 } 2346 2347 /* Stop PTP Clock driver */ 2348 if (info->gptp || info->ccc_gac) 2349 ravb_ptp_stop(ndev); 2350 2351 /* Set the config mode to stop the AVB-DMAC's processes */ 2352 if (ravb_stop_dma(ndev) < 0) 2353 netdev_err(ndev, 2354 "device will be stopped after h/w processes are done.\n"); 2355 2356 /* Clear the timestamp list */ 2357 if (info->gptp || info->ccc_gac) { 2358 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { 2359 list_del(&ts_skb->list); 2360 kfree_skb(ts_skb->skb); 2361 kfree(ts_skb); 2362 } 2363 } 2364 2365 cancel_work_sync(&priv->work); 2366 2367 if (info->nc_queues) 2368 napi_disable(&priv->napi[RAVB_NC]); 2369 napi_disable(&priv->napi[RAVB_BE]); 2370 2371 /* Free all the skb's in the RX queue and the DMA buffers. */ 2372 ravb_ring_free(ndev, RAVB_BE); 2373 if (info->nc_queues) 2374 ravb_ring_free(ndev, RAVB_NC); 2375 2376 /* Update statistics. */ 2377 ravb_get_stats(ndev); 2378 2379 /* Set reset mode. */ 2380 error = ravb_set_opmode(ndev, CCC_OPC_RESET); 2381 if (error) 2382 return error; 2383 2384 pm_runtime_mark_last_busy(dev); 2385 pm_runtime_put_autosuspend(dev); 2386 2387 return 0; 2388 } 2389 2390 static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req) 2391 { 2392 struct ravb_private *priv = netdev_priv(ndev); 2393 struct hwtstamp_config config; 2394 2395 config.flags = 0; 2396 config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : 2397 HWTSTAMP_TX_OFF; 2398 switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) { 2399 case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT: 2400 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 2401 break; 2402 case RAVB_RXTSTAMP_TYPE_ALL: 2403 config.rx_filter = HWTSTAMP_FILTER_ALL; 2404 break; 2405 default: 2406 config.rx_filter = HWTSTAMP_FILTER_NONE; 2407 } 2408 2409 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? 2410 -EFAULT : 0; 2411 } 2412 2413 /* Control hardware time stamping */ 2414 static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req) 2415 { 2416 struct ravb_private *priv = netdev_priv(ndev); 2417 struct hwtstamp_config config; 2418 u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED; 2419 u32 tstamp_tx_ctrl; 2420 2421 if (copy_from_user(&config, req->ifr_data, sizeof(config))) 2422 return -EFAULT; 2423 2424 switch (config.tx_type) { 2425 case HWTSTAMP_TX_OFF: 2426 tstamp_tx_ctrl = 0; 2427 break; 2428 case HWTSTAMP_TX_ON: 2429 tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED; 2430 break; 2431 default: 2432 return -ERANGE; 2433 } 2434 2435 switch (config.rx_filter) { 2436 case HWTSTAMP_FILTER_NONE: 2437 tstamp_rx_ctrl = 0; 2438 break; 2439 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2440 tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; 2441 break; 2442 default: 2443 config.rx_filter = HWTSTAMP_FILTER_ALL; 2444 tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL; 2445 } 2446 2447 priv->tstamp_tx_ctrl = tstamp_tx_ctrl; 2448 priv->tstamp_rx_ctrl = tstamp_rx_ctrl; 2449 2450 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? 
2451 -EFAULT : 0; 2452 } 2453 2454 /* ioctl to device function */ 2455 static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) 2456 { 2457 struct phy_device *phydev = ndev->phydev; 2458 2459 if (!netif_running(ndev)) 2460 return -EINVAL; 2461 2462 if (!phydev) 2463 return -ENODEV; 2464 2465 switch (cmd) { 2466 case SIOCGHWTSTAMP: 2467 return ravb_hwtstamp_get(ndev, req); 2468 case SIOCSHWTSTAMP: 2469 return ravb_hwtstamp_set(ndev, req); 2470 } 2471 2472 return phy_mii_ioctl(phydev, req, cmd); 2473 } 2474 2475 static int ravb_change_mtu(struct net_device *ndev, int new_mtu) 2476 { 2477 struct ravb_private *priv = netdev_priv(ndev); 2478 2479 ndev->mtu = new_mtu; 2480 2481 if (netif_running(ndev)) { 2482 synchronize_irq(priv->emac_irq); 2483 ravb_emac_init(ndev); 2484 } 2485 2486 netdev_update_features(ndev); 2487 2488 return 0; 2489 } 2490 2491 static void ravb_set_rx_csum(struct net_device *ndev, bool enable) 2492 { 2493 struct ravb_private *priv = netdev_priv(ndev); 2494 unsigned long flags; 2495 2496 spin_lock_irqsave(&priv->lock, flags); 2497 2498 /* Disable TX and RX */ 2499 ravb_rcv_snd_disable(ndev); 2500 2501 /* Modify RX Checksum setting */ 2502 ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0); 2503 2504 /* Enable TX and RX */ 2505 ravb_rcv_snd_enable(ndev); 2506 2507 spin_unlock_irqrestore(&priv->lock, flags); 2508 } 2509 2510 static int ravb_endisable_csum_gbeth(struct net_device *ndev, enum ravb_reg reg, 2511 u32 val, u32 mask) 2512 { 2513 u32 csr0 = CSR0_TPE | CSR0_RPE; 2514 int ret; 2515 2516 ravb_write(ndev, csr0 & ~mask, CSR0); 2517 ret = ravb_wait(ndev, CSR0, mask, 0); 2518 if (!ret) 2519 ravb_write(ndev, val, reg); 2520 2521 ravb_write(ndev, csr0, CSR0); 2522 2523 return ret; 2524 } 2525 2526 static int ravb_set_features_gbeth(struct net_device *ndev, 2527 netdev_features_t features) 2528 { 2529 netdev_features_t changed = ndev->features ^ features; 2530 struct ravb_private *priv = netdev_priv(ndev); 2531 unsigned long flags; 2532 int ret = 0; 2533 u32 val; 2534 2535 spin_lock_irqsave(&priv->lock, flags); 2536 if (changed & NETIF_F_RXCSUM) { 2537 if (features & NETIF_F_RXCSUM) 2538 val = CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4; 2539 else 2540 val = 0; 2541 2542 ret = ravb_endisable_csum_gbeth(ndev, CSR2, val, CSR0_RPE); 2543 if (ret) 2544 goto done; 2545 } 2546 2547 if (changed & NETIF_F_HW_CSUM) { 2548 if (features & NETIF_F_HW_CSUM) 2549 val = CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4; 2550 else 2551 val = 0; 2552 2553 ret = ravb_endisable_csum_gbeth(ndev, CSR1, val, CSR0_TPE); 2554 if (ret) 2555 goto done; 2556 } 2557 2558 done: 2559 spin_unlock_irqrestore(&priv->lock, flags); 2560 2561 return ret; 2562 } 2563 2564 static int ravb_set_features_rcar(struct net_device *ndev, 2565 netdev_features_t features) 2566 { 2567 netdev_features_t changed = ndev->features ^ features; 2568 2569 if (changed & NETIF_F_RXCSUM) 2570 ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM); 2571 2572 return 0; 2573 } 2574 2575 static int ravb_set_features(struct net_device *ndev, 2576 netdev_features_t features) 2577 { 2578 struct ravb_private *priv = netdev_priv(ndev); 2579 const struct ravb_hw_info *info = priv->info; 2580 struct device *dev = &priv->pdev->dev; 2581 int ret; 2582 2583 pm_runtime_get_noresume(dev); 2584 2585 if (pm_runtime_active(dev)) 2586 ret = info->set_feature(ndev, features); 2587 else 2588 ret = 0; 2589 2590 pm_runtime_put_noidle(dev); 2591 2592 if (ret) 2593 return ret; 2594 2595 ndev->features = features; 2596 2597 return 0; 2598 } 
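/* Illustrative sketch (not part of the driver): the hardware timestamping
 * configuration accepted by ravb_hwtstamp_set() above uses the standard
 * SIOCSHWTSTAMP interface, so a userspace PTP application could request
 * hardware timestamps roughly as follows ("eth0" and the helper name are
 * only examples; fd is assumed to be an open AF_INET datagram socket):
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	static int request_hw_tstamp(int fd)
 *	{
 *		struct hwtstamp_config cfg = {
 *			.tx_type   = HWTSTAMP_TX_ON,
 *			.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
 *		};
 *		struct ifreq ifr;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&cfg;
 *
 *		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *	}
 *
 * With this rx_filter the driver selects RAVB_RXTSTAMP_TYPE_V2_L2_EVENT,
 * matching the filters advertised by ravb_get_ts_info().
 */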
2599 2600 static const struct net_device_ops ravb_netdev_ops = { 2601 .ndo_open = ravb_open, 2602 .ndo_stop = ravb_close, 2603 .ndo_start_xmit = ravb_start_xmit, 2604 .ndo_select_queue = ravb_select_queue, 2605 .ndo_get_stats = ravb_get_stats, 2606 .ndo_set_rx_mode = ravb_set_rx_mode, 2607 .ndo_tx_timeout = ravb_tx_timeout, 2608 .ndo_eth_ioctl = ravb_do_ioctl, 2609 .ndo_change_mtu = ravb_change_mtu, 2610 .ndo_validate_addr = eth_validate_addr, 2611 .ndo_set_mac_address = eth_mac_addr, 2612 .ndo_set_features = ravb_set_features, 2613 }; 2614 2615 /* MDIO bus init function */ 2616 static int ravb_mdio_init(struct ravb_private *priv) 2617 { 2618 struct platform_device *pdev = priv->pdev; 2619 struct device *dev = &pdev->dev; 2620 struct phy_device *phydev; 2621 struct device_node *pn; 2622 int error; 2623 2624 /* Bitbang init */ 2625 priv->mdiobb.ops = &bb_ops; 2626 2627 /* MII controller setting */ 2628 priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); 2629 if (!priv->mii_bus) 2630 return -ENOMEM; 2631 2632 /* Hook up MII support for ethtool */ 2633 priv->mii_bus->name = "ravb_mii"; 2634 priv->mii_bus->parent = dev; 2635 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2636 pdev->name, pdev->id); 2637 2638 /* Register MDIO bus */ 2639 error = of_mdiobus_register(priv->mii_bus, dev->of_node); 2640 if (error) 2641 goto out_free_bus; 2642 2643 pn = of_parse_phandle(dev->of_node, "phy-handle", 0); 2644 phydev = of_phy_find_device(pn); 2645 if (phydev) { 2646 phydev->mac_managed_pm = true; 2647 put_device(&phydev->mdio.dev); 2648 } 2649 of_node_put(pn); 2650 2651 return 0; 2652 2653 out_free_bus: 2654 free_mdio_bitbang(priv->mii_bus); 2655 return error; 2656 } 2657 2658 /* MDIO bus release function */ 2659 static int ravb_mdio_release(struct ravb_private *priv) 2660 { 2661 /* Unregister mdio bus */ 2662 mdiobus_unregister(priv->mii_bus); 2663 2664 /* Free bitbang info */ 2665 free_mdio_bitbang(priv->mii_bus); 2666 2667 return 0; 2668 } 2669 2670 static const struct ravb_hw_info ravb_gen3_hw_info = { 2671 .rx_ring_free = ravb_rx_ring_free_rcar, 2672 .rx_ring_format = ravb_rx_ring_format_rcar, 2673 .alloc_rx_desc = ravb_alloc_rx_desc_rcar, 2674 .receive = ravb_rx_rcar, 2675 .set_rate = ravb_set_rate_rcar, 2676 .set_feature = ravb_set_features_rcar, 2677 .dmac_init = ravb_dmac_init_rcar, 2678 .emac_init = ravb_emac_init_rcar, 2679 .gstrings_stats = ravb_gstrings_stats, 2680 .gstrings_size = sizeof(ravb_gstrings_stats), 2681 .net_hw_features = NETIF_F_RXCSUM, 2682 .net_features = NETIF_F_RXCSUM, 2683 .stats_len = ARRAY_SIZE(ravb_gstrings_stats), 2684 .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, 2685 .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 2686 .rx_max_buf_size = SZ_2K, 2687 .internal_delay = 1, 2688 .tx_counters = 1, 2689 .multi_irqs = 1, 2690 .irq_en_dis = 1, 2691 .ccc_gac = 1, 2692 .nc_queues = 1, 2693 .magic_pkt = 1, 2694 }; 2695 2696 static const struct ravb_hw_info ravb_gen2_hw_info = { 2697 .rx_ring_free = ravb_rx_ring_free_rcar, 2698 .rx_ring_format = ravb_rx_ring_format_rcar, 2699 .alloc_rx_desc = ravb_alloc_rx_desc_rcar, 2700 .receive = ravb_rx_rcar, 2701 .set_rate = ravb_set_rate_rcar, 2702 .set_feature = ravb_set_features_rcar, 2703 .dmac_init = ravb_dmac_init_rcar, 2704 .emac_init = ravb_emac_init_rcar, 2705 .gstrings_stats = ravb_gstrings_stats, 2706 .gstrings_size = sizeof(ravb_gstrings_stats), 2707 .net_hw_features = NETIF_F_RXCSUM, 2708 .net_features = NETIF_F_RXCSUM, 2709 .stats_len = ARRAY_SIZE(ravb_gstrings_stats), 2710 .max_rx_len = RX_BUF_SZ + 
RAVB_ALIGN - 1, 2711 .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 2712 .rx_max_buf_size = SZ_2K, 2713 .aligned_tx = 1, 2714 .gptp = 1, 2715 .nc_queues = 1, 2716 .magic_pkt = 1, 2717 }; 2718 2719 static const struct ravb_hw_info ravb_rzv2m_hw_info = { 2720 .rx_ring_free = ravb_rx_ring_free_rcar, 2721 .rx_ring_format = ravb_rx_ring_format_rcar, 2722 .alloc_rx_desc = ravb_alloc_rx_desc_rcar, 2723 .receive = ravb_rx_rcar, 2724 .set_rate = ravb_set_rate_rcar, 2725 .set_feature = ravb_set_features_rcar, 2726 .dmac_init = ravb_dmac_init_rcar, 2727 .emac_init = ravb_emac_init_rcar, 2728 .gstrings_stats = ravb_gstrings_stats, 2729 .gstrings_size = sizeof(ravb_gstrings_stats), 2730 .net_hw_features = NETIF_F_RXCSUM, 2731 .net_features = NETIF_F_RXCSUM, 2732 .stats_len = ARRAY_SIZE(ravb_gstrings_stats), 2733 .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, 2734 .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 2735 .rx_max_buf_size = SZ_2K, 2736 .multi_irqs = 1, 2737 .err_mgmt_irqs = 1, 2738 .gptp = 1, 2739 .gptp_ref_clk = 1, 2740 .nc_queues = 1, 2741 .magic_pkt = 1, 2742 }; 2743 2744 static const struct ravb_hw_info gbeth_hw_info = { 2745 .rx_ring_free = ravb_rx_ring_free_gbeth, 2746 .rx_ring_format = ravb_rx_ring_format_gbeth, 2747 .alloc_rx_desc = ravb_alloc_rx_desc_gbeth, 2748 .receive = ravb_rx_gbeth, 2749 .set_rate = ravb_set_rate_gbeth, 2750 .set_feature = ravb_set_features_gbeth, 2751 .dmac_init = ravb_dmac_init_gbeth, 2752 .emac_init = ravb_emac_init_gbeth, 2753 .gstrings_stats = ravb_gstrings_stats_gbeth, 2754 .gstrings_size = sizeof(ravb_gstrings_stats_gbeth), 2755 .net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM, 2756 .net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM, 2757 .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth), 2758 .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN), 2759 .tccr_mask = TCCR_TSRQ0, 2760 .rx_max_buf_size = SZ_8K, 2761 .aligned_tx = 1, 2762 .tx_counters = 1, 2763 .carrier_counters = 1, 2764 .half_duplex = 1, 2765 }; 2766 2767 static const struct of_device_id ravb_match_table[] = { 2768 { .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info }, 2769 { .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info }, 2770 { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info }, 2771 { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info }, 2772 { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info }, 2773 { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info }, 2774 { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info }, 2775 { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info }, 2776 { } 2777 }; 2778 MODULE_DEVICE_TABLE(of, ravb_match_table); 2779 2780 static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name, 2781 const char *ch, int *irq, irq_handler_t handler) 2782 { 2783 struct platform_device *pdev = priv->pdev; 2784 struct net_device *ndev = priv->ndev; 2785 struct device *dev = &pdev->dev; 2786 const char *dev_name; 2787 unsigned long flags; 2788 int error, irq_num; 2789 2790 if (irq_name) { 2791 dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch); 2792 if (!dev_name) 2793 return -ENOMEM; 2794 2795 irq_num = platform_get_irq_byname(pdev, irq_name); 2796 flags = 0; 2797 } else { 2798 dev_name = ndev->name; 2799 irq_num = platform_get_irq(pdev, 0); 2800 flags = IRQF_SHARED; 2801 } 2802 if (irq_num < 0) 2803 return irq_num; 2804 2805 if (irq) 2806 *irq = irq_num; 2807 2808 error = 
devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev); 2809 if (error) 2810 netdev_err(ndev, "cannot request IRQ %s\n", dev_name); 2811 2812 return error; 2813 } 2814 2815 static int ravb_setup_irqs(struct ravb_private *priv) 2816 { 2817 const struct ravb_hw_info *info = priv->info; 2818 struct net_device *ndev = priv->ndev; 2819 const char *irq_name, *emac_irq_name; 2820 int error; 2821 2822 if (!info->multi_irqs) 2823 return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt); 2824 2825 if (info->err_mgmt_irqs) { 2826 irq_name = "dia"; 2827 emac_irq_name = "line3"; 2828 } else { 2829 irq_name = "ch22"; 2830 emac_irq_name = "ch24"; 2831 } 2832 2833 error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt); 2834 if (error) 2835 return error; 2836 2837 error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq, 2838 ravb_emac_interrupt); 2839 if (error) 2840 return error; 2841 2842 if (info->err_mgmt_irqs) { 2843 error = ravb_setup_irq(priv, "err_a", "err_a", NULL, ravb_multi_interrupt); 2844 if (error) 2845 return error; 2846 2847 error = ravb_setup_irq(priv, "mgmt_a", "mgmt_a", NULL, ravb_multi_interrupt); 2848 if (error) 2849 return error; 2850 } 2851 2852 error = ravb_setup_irq(priv, "ch0", "ch0:rx_be", NULL, ravb_be_interrupt); 2853 if (error) 2854 return error; 2855 2856 error = ravb_setup_irq(priv, "ch1", "ch1:rx_nc", NULL, ravb_nc_interrupt); 2857 if (error) 2858 return error; 2859 2860 error = ravb_setup_irq(priv, "ch18", "ch18:tx_be", NULL, ravb_be_interrupt); 2861 if (error) 2862 return error; 2863 2864 return ravb_setup_irq(priv, "ch19", "ch19:tx_nc", NULL, ravb_nc_interrupt); 2865 } 2866 2867 static int ravb_probe(struct platform_device *pdev) 2868 { 2869 struct device_node *np = pdev->dev.of_node; 2870 const struct ravb_hw_info *info; 2871 struct reset_control *rstc; 2872 struct ravb_private *priv; 2873 struct net_device *ndev; 2874 struct resource *res; 2875 int error, q; 2876 2877 if (!np) { 2878 dev_err(&pdev->dev, 2879 "this driver is required to be instantiated from device tree\n"); 2880 return -EINVAL; 2881 } 2882 2883 rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); 2884 if (IS_ERR(rstc)) 2885 return dev_err_probe(&pdev->dev, PTR_ERR(rstc), 2886 "failed to get cpg reset\n"); 2887 2888 ndev = alloc_etherdev_mqs(sizeof(struct ravb_private), 2889 NUM_TX_QUEUE, NUM_RX_QUEUE); 2890 if (!ndev) 2891 return -ENOMEM; 2892 2893 info = of_device_get_match_data(&pdev->dev); 2894 2895 ndev->features = info->net_features; 2896 ndev->hw_features = info->net_hw_features; 2897 2898 error = reset_control_deassert(rstc); 2899 if (error) 2900 goto out_free_netdev; 2901 2902 SET_NETDEV_DEV(ndev, &pdev->dev); 2903 2904 priv = netdev_priv(ndev); 2905 priv->info = info; 2906 priv->rstc = rstc; 2907 priv->ndev = ndev; 2908 priv->pdev = pdev; 2909 priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; 2910 priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE; 2911 if (info->nc_queues) { 2912 priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; 2913 priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; 2914 } 2915 2916 error = ravb_setup_irqs(priv); 2917 if (error) 2918 goto out_reset_assert; 2919 2920 priv->clk = devm_clk_get(&pdev->dev, NULL); 2921 if (IS_ERR(priv->clk)) { 2922 error = PTR_ERR(priv->clk); 2923 goto out_reset_assert; 2924 } 2925 2926 if (info->gptp_ref_clk) { 2927 priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp"); 2928 if (IS_ERR(priv->gptp_clk)) { 2929 error = PTR_ERR(priv->gptp_clk); 2930 goto out_reset_assert; 2931 } 2932 } 2933 2934 
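	/* The external reference clock ("refclk") is optional:
	 * devm_clk_get_optional() returns NULL when it is not provided, in
	 * which case the clk_prepare()/clk_enable() calls on it are
	 * harmless no-ops.
	 */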
priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk"); 2935 if (IS_ERR(priv->refclk)) { 2936 error = PTR_ERR(priv->refclk); 2937 goto out_reset_assert; 2938 } 2939 clk_prepare(priv->refclk); 2940 2941 platform_set_drvdata(pdev, ndev); 2942 pm_runtime_set_autosuspend_delay(&pdev->dev, 100); 2943 pm_runtime_use_autosuspend(&pdev->dev); 2944 pm_runtime_enable(&pdev->dev); 2945 error = pm_runtime_resume_and_get(&pdev->dev); 2946 if (error < 0) 2947 goto out_rpm_disable; 2948 2949 priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 2950 if (IS_ERR(priv->addr)) { 2951 error = PTR_ERR(priv->addr); 2952 goto out_rpm_put; 2953 } 2954 2955 /* The Ether-specific entries in the device structure. */ 2956 ndev->base_addr = res->start; 2957 2958 spin_lock_init(&priv->lock); 2959 INIT_WORK(&priv->work, ravb_tx_timeout_work); 2960 2961 error = of_get_phy_mode(np, &priv->phy_interface); 2962 if (error && error != -ENODEV) 2963 goto out_rpm_put; 2964 2965 priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link"); 2966 priv->avb_link_active_low = 2967 of_property_read_bool(np, "renesas,ether-link-active-low"); 2968 2969 ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); 2970 ndev->min_mtu = ETH_MIN_MTU; 2971 2972 /* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer 2973 * Use two descriptor to handle such situation. First descriptor to 2974 * handle aligned data buffer and second descriptor to handle the 2975 * overflow data because of alignment. 2976 */ 2977 priv->num_tx_desc = info->aligned_tx ? 2 : 1; 2978 2979 /* Set function */ 2980 ndev->netdev_ops = &ravb_netdev_ops; 2981 ndev->ethtool_ops = &ravb_ethtool_ops; 2982 2983 error = ravb_compute_gti(ndev); 2984 if (error) 2985 goto out_rpm_put; 2986 2987 ravb_parse_delay_mode(np, ndev); 2988 2989 /* Allocate descriptor base address table */ 2990 priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; 2991 priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, 2992 &priv->desc_bat_dma, GFP_KERNEL); 2993 if (!priv->desc_bat) { 2994 dev_err(&pdev->dev, 2995 "Cannot allocate desc base address table (size %d bytes)\n", 2996 priv->desc_bat_size); 2997 error = -ENOMEM; 2998 goto out_rpm_put; 2999 } 3000 for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++) 3001 priv->desc_bat[q].die_dt = DT_EOS; 3002 3003 /* Initialise HW timestamp list */ 3004 INIT_LIST_HEAD(&priv->ts_skb_list); 3005 3006 /* Debug message level */ 3007 priv->msg_enable = RAVB_DEF_MSG_ENABLE; 3008 3009 /* Set config mode as this is needed for PHY initialization. */ 3010 error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); 3011 if (error) 3012 goto out_rpm_put; 3013 3014 /* Read and set MAC address */ 3015 ravb_read_mac_address(np, ndev); 3016 if (!is_valid_ether_addr(ndev->dev_addr)) { 3017 dev_warn(&pdev->dev, 3018 "no valid MAC address supplied, using a random one\n"); 3019 eth_hw_addr_random(ndev); 3020 } 3021 3022 /* MDIO bus init */ 3023 error = ravb_mdio_init(priv); 3024 if (error) { 3025 dev_err(&pdev->dev, "failed to initialize MDIO\n"); 3026 goto out_reset_mode; 3027 } 3028 3029 /* Undo previous switch to config opmode. 
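	 * CONFIG mode was entered above for the MAC address read-out and
	 * the MDIO bus setup; drop back to RESET here and let ravb_open()
	 * reconfigure the hardware when the interface is brought up.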
*/ 3030 error = ravb_set_opmode(ndev, CCC_OPC_RESET); 3031 if (error) 3032 goto out_mdio_release; 3033 3034 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll); 3035 if (info->nc_queues) 3036 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll); 3037 3038 /* Network device register */ 3039 error = register_netdev(ndev); 3040 if (error) 3041 goto out_napi_del; 3042 3043 device_set_wakeup_capable(&pdev->dev, 1); 3044 3045 /* Print device information */ 3046 netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", 3047 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); 3048 3049 pm_runtime_mark_last_busy(&pdev->dev); 3050 pm_runtime_put_autosuspend(&pdev->dev); 3051 3052 return 0; 3053 3054 out_napi_del: 3055 if (info->nc_queues) 3056 netif_napi_del(&priv->napi[RAVB_NC]); 3057 3058 netif_napi_del(&priv->napi[RAVB_BE]); 3059 out_mdio_release: 3060 ravb_mdio_release(priv); 3061 out_reset_mode: 3062 ravb_set_opmode(ndev, CCC_OPC_RESET); 3063 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, 3064 priv->desc_bat_dma); 3065 out_rpm_put: 3066 pm_runtime_put(&pdev->dev); 3067 out_rpm_disable: 3068 pm_runtime_disable(&pdev->dev); 3069 pm_runtime_dont_use_autosuspend(&pdev->dev); 3070 clk_unprepare(priv->refclk); 3071 out_reset_assert: 3072 reset_control_assert(rstc); 3073 out_free_netdev: 3074 free_netdev(ndev); 3075 return error; 3076 } 3077 3078 static void ravb_remove(struct platform_device *pdev) 3079 { 3080 struct net_device *ndev = platform_get_drvdata(pdev); 3081 struct ravb_private *priv = netdev_priv(ndev); 3082 const struct ravb_hw_info *info = priv->info; 3083 struct device *dev = &priv->pdev->dev; 3084 int error; 3085 3086 error = pm_runtime_resume_and_get(dev); 3087 if (error < 0) 3088 return; 3089 3090 unregister_netdev(ndev); 3091 if (info->nc_queues) 3092 netif_napi_del(&priv->napi[RAVB_NC]); 3093 netif_napi_del(&priv->napi[RAVB_BE]); 3094 3095 ravb_mdio_release(priv); 3096 3097 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, 3098 priv->desc_bat_dma); 3099 3100 pm_runtime_put_sync_suspend(&pdev->dev); 3101 pm_runtime_disable(&pdev->dev); 3102 pm_runtime_dont_use_autosuspend(dev); 3103 clk_unprepare(priv->refclk); 3104 reset_control_assert(priv->rstc); 3105 free_netdev(ndev); 3106 platform_set_drvdata(pdev, NULL); 3107 } 3108 3109 static int ravb_wol_setup(struct net_device *ndev) 3110 { 3111 struct ravb_private *priv = netdev_priv(ndev); 3112 const struct ravb_hw_info *info = priv->info; 3113 3114 /* Disable interrupts by clearing the interrupt masks. */ 3115 ravb_write(ndev, 0, RIC0); 3116 ravb_write(ndev, 0, RIC2); 3117 ravb_write(ndev, 0, TIC); 3118 3119 /* Only allow ECI interrupts */ 3120 synchronize_irq(priv->emac_irq); 3121 if (info->nc_queues) 3122 napi_disable(&priv->napi[RAVB_NC]); 3123 napi_disable(&priv->napi[RAVB_BE]); 3124 ravb_write(ndev, ECSIPR_MPDIP, ECSIPR); 3125 3126 /* Enable MagicPacket */ 3127 ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); 3128 3129 if (priv->info->ccc_gac) 3130 ravb_ptp_stop(ndev); 3131 3132 return enable_irq_wake(priv->emac_irq); 3133 } 3134 3135 static int ravb_wol_restore(struct net_device *ndev) 3136 { 3137 struct ravb_private *priv = netdev_priv(ndev); 3138 const struct ravb_hw_info *info = priv->info; 3139 int error; 3140 3141 /* Set reset mode to rearm the WoL logic. */ 3142 error = ravb_set_opmode(ndev, CCC_OPC_RESET); 3143 if (error) 3144 return error; 3145 3146 /* Set AVB config mode. 
*/ 3147 error = ravb_set_config_mode(ndev); 3148 if (error) 3149 return error; 3150 3151 if (priv->info->ccc_gac) 3152 ravb_ptp_init(ndev, priv->pdev); 3153 3154 if (info->nc_queues) 3155 napi_enable(&priv->napi[RAVB_NC]); 3156 napi_enable(&priv->napi[RAVB_BE]); 3157 3158 /* Disable MagicPacket */ 3159 ravb_modify(ndev, ECMR, ECMR_MPDE, 0); 3160 3161 ravb_close(ndev); 3162 3163 return disable_irq_wake(priv->emac_irq); 3164 } 3165 3166 static int ravb_suspend(struct device *dev) 3167 { 3168 struct net_device *ndev = dev_get_drvdata(dev); 3169 struct ravb_private *priv = netdev_priv(ndev); 3170 int ret; 3171 3172 if (!netif_running(ndev)) 3173 goto reset_assert; 3174 3175 netif_device_detach(ndev); 3176 3177 if (priv->wol_enabled) 3178 return ravb_wol_setup(ndev); 3179 3180 ret = ravb_close(ndev); 3181 if (ret) 3182 return ret; 3183 3184 ret = pm_runtime_force_suspend(&priv->pdev->dev); 3185 if (ret) 3186 return ret; 3187 3188 reset_assert: 3189 return reset_control_assert(priv->rstc); 3190 } 3191 3192 static int ravb_resume(struct device *dev) 3193 { 3194 struct net_device *ndev = dev_get_drvdata(dev); 3195 struct ravb_private *priv = netdev_priv(ndev); 3196 int ret; 3197 3198 ret = reset_control_deassert(priv->rstc); 3199 if (ret) 3200 return ret; 3201 3202 if (!netif_running(ndev)) 3203 return 0; 3204 3205 /* If WoL is enabled restore the interface. */ 3206 if (priv->wol_enabled) { 3207 ret = ravb_wol_restore(ndev); 3208 if (ret) 3209 return ret; 3210 } else { 3211 ret = pm_runtime_force_resume(dev); 3212 if (ret) 3213 return ret; 3214 } 3215 3216 /* Reopening the interface will restore the device to the working state. */ 3217 ret = ravb_open(ndev); 3218 if (ret < 0) 3219 goto out_rpm_put; 3220 3221 ravb_set_rx_mode(ndev); 3222 netif_device_attach(ndev); 3223 3224 return 0; 3225 3226 out_rpm_put: 3227 if (!priv->wol_enabled) { 3228 pm_runtime_mark_last_busy(dev); 3229 pm_runtime_put_autosuspend(dev); 3230 } 3231 3232 return ret; 3233 } 3234 3235 static int ravb_runtime_suspend(struct device *dev) 3236 { 3237 struct net_device *ndev = dev_get_drvdata(dev); 3238 struct ravb_private *priv = netdev_priv(ndev); 3239 3240 clk_disable(priv->refclk); 3241 3242 return 0; 3243 } 3244 3245 static int ravb_runtime_resume(struct device *dev) 3246 { 3247 struct net_device *ndev = dev_get_drvdata(dev); 3248 struct ravb_private *priv = netdev_priv(ndev); 3249 3250 return clk_enable(priv->refclk); 3251 } 3252 3253 static const struct dev_pm_ops ravb_dev_pm_ops = { 3254 SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume) 3255 RUNTIME_PM_OPS(ravb_runtime_suspend, ravb_runtime_resume, NULL) 3256 }; 3257 3258 static struct platform_driver ravb_driver = { 3259 .probe = ravb_probe, 3260 .remove_new = ravb_remove, 3261 .driver = { 3262 .name = "ravb", 3263 .pm = pm_ptr(&ravb_dev_pm_ops), 3264 .of_match_table = ravb_match_table, 3265 }, 3266 }; 3267 3268 module_platform_driver(ravb_driver); 3269 3270 MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai"); 3271 MODULE_DESCRIPTION("Renesas Ethernet AVB driver"); 3272 MODULE_LICENSE("GPL v2"); 3273
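/* Illustrative sketch (not part of the driver): Wake-on-LAN as handled by
 * ravb_get_wol()/ravb_set_wol() is the standard ethtool WoL interface.
 * From userspace it is normally enabled with `ethtool -s eth0 wol g`,
 * which boils down to an ioctl along these lines ("eth0" and the helper
 * name are only examples; fd is assumed to be an open socket):
 *
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	static int enable_magic_packet_wol(int fd)
 *	{
 *		struct ethtool_wolinfo wol = {
 *			.cmd	 = ETHTOOL_SWOL,
 *			.wolopts = WAKE_MAGIC,
 *		};
 *		struct ifreq ifr;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&wol;
 *
 *		return ioctl(fd, SIOCETHTOOL, &ifr);
 *	}
 */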