1 /* Renesas Ethernet AVB device driver 2 * 3 * Copyright (C) 2014-2015 Renesas Electronics Corporation 4 * Copyright (C) 2015 Renesas Solutions Corp. 5 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> 6 * 7 * Based on the SuperH Ethernet driver 8 * 9 * This program is free software; you can redistribute it and/or modify it 10 * under the terms and conditions of the GNU General Public License version 2, 11 * as published by the Free Software Foundation. 12 */ 13 14 #include <linux/cache.h> 15 #include <linux/clk.h> 16 #include <linux/delay.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/err.h> 19 #include <linux/etherdevice.h> 20 #include <linux/ethtool.h> 21 #include <linux/if_vlan.h> 22 #include <linux/kernel.h> 23 #include <linux/list.h> 24 #include <linux/module.h> 25 #include <linux/net_tstamp.h> 26 #include <linux/of.h> 27 #include <linux/of_device.h> 28 #include <linux/of_irq.h> 29 #include <linux/of_mdio.h> 30 #include <linux/of_net.h> 31 #include <linux/pm_runtime.h> 32 #include <linux/slab.h> 33 #include <linux/spinlock.h> 34 #include <linux/sys_soc.h> 35 36 #include <asm/div64.h> 37 38 #include "ravb.h" 39 40 #define RAVB_DEF_MSG_ENABLE \ 41 (NETIF_MSG_LINK | \ 42 NETIF_MSG_TIMER | \ 43 NETIF_MSG_RX_ERR | \ 44 NETIF_MSG_TX_ERR) 45 46 static const char *ravb_rx_irqs[NUM_RX_QUEUE] = { 47 "ch0", /* RAVB_BE */ 48 "ch1", /* RAVB_NC */ 49 }; 50 51 static const char *ravb_tx_irqs[NUM_TX_QUEUE] = { 52 "ch18", /* RAVB_BE */ 53 "ch19", /* RAVB_NC */ 54 }; 55 56 void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, 57 u32 set) 58 { 59 ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg); 60 } 61 62 int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value) 63 { 64 int i; 65 66 for (i = 0; i < 10000; i++) { 67 if ((ravb_read(ndev, reg) & mask) == value) 68 return 0; 69 udelay(10); 70 } 71 return -ETIMEDOUT; 72 } 73 74 static int ravb_config(struct net_device *ndev) 75 { 76 int error; 77 78 /* Set config mode */ 79 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); 80 /* Check if the operating mode is changed to the config mode */ 81 error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG); 82 if (error) 83 netdev_err(ndev, "failed to switch device to config mode\n"); 84 85 return error; 86 } 87 88 static void ravb_set_duplex(struct net_device *ndev) 89 { 90 struct ravb_private *priv = netdev_priv(ndev); 91 92 ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0); 93 } 94 95 static void ravb_set_rate(struct net_device *ndev) 96 { 97 struct ravb_private *priv = netdev_priv(ndev); 98 99 switch (priv->speed) { 100 case 100: /* 100BASE */ 101 ravb_write(ndev, GECMR_SPEED_100, GECMR); 102 break; 103 case 1000: /* 1000BASE */ 104 ravb_write(ndev, GECMR_SPEED_1000, GECMR); 105 break; 106 } 107 } 108 109 static void ravb_set_buffer_align(struct sk_buff *skb) 110 { 111 u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1); 112 113 if (reserve) 114 skb_reserve(skb, RAVB_ALIGN - reserve); 115 } 116 117 /* Get MAC address from the MAC address registers 118 * 119 * Ethernet AVB device doesn't have ROM for MAC address. 120 * This function gets the MAC address that was used by a bootloader. 121 */ 122 static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac) 123 { 124 if (mac) { 125 ether_addr_copy(ndev->dev_addr, mac); 126 } else { 127 u32 mahr = ravb_read(ndev, MAHR); 128 u32 malr = ravb_read(ndev, MALR); 129 130 ndev->dev_addr[0] = (mahr >> 24) & 0xFF; 131 ndev->dev_addr[1] = (mahr >> 16) & 0xFF; 132 ndev->dev_addr[2] = (mahr >> 8) & 0xFF; 133 ndev->dev_addr[3] = (mahr >> 0) & 0xFF; 134 ndev->dev_addr[4] = (malr >> 8) & 0xFF; 135 ndev->dev_addr[5] = (malr >> 0) & 0xFF; 136 } 137 } 138 139 static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set) 140 { 141 struct ravb_private *priv = container_of(ctrl, struct ravb_private, 142 mdiobb); 143 144 ravb_modify(priv->ndev, PIR, mask, set ? mask : 0); 145 } 146 147 /* MDC pin control */ 148 static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level) 149 { 150 ravb_mdio_ctrl(ctrl, PIR_MDC, level); 151 } 152 153 /* Data I/O pin control */ 154 static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output) 155 { 156 ravb_mdio_ctrl(ctrl, PIR_MMD, output); 157 } 158 159 /* Set data bit */ 160 static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value) 161 { 162 ravb_mdio_ctrl(ctrl, PIR_MDO, value); 163 } 164 165 /* Get data bit */ 166 static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl) 167 { 168 struct ravb_private *priv = container_of(ctrl, struct ravb_private, 169 mdiobb); 170 171 return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0; 172 } 173 174 /* MDIO bus control struct */ 175 static struct mdiobb_ops bb_ops = { 176 .owner = THIS_MODULE, 177 .set_mdc = ravb_set_mdc, 178 .set_mdio_dir = ravb_set_mdio_dir, 179 .set_mdio_data = ravb_set_mdio_data, 180 .get_mdio_data = ravb_get_mdio_data, 181 }; 182 183 /* Free TX skb function for AVB-IP */ 184 static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) 185 { 186 struct ravb_private *priv = netdev_priv(ndev); 187 struct net_device_stats *stats = &priv->stats[q]; 188 struct ravb_tx_desc *desc; 189 int free_num = 0; 190 int entry; 191 u32 size; 192 193 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { 194 bool txed; 195 196 entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * 197 NUM_TX_DESC); 198 desc = &priv->tx_ring[q][entry]; 199 txed = desc->die_dt == DT_FEMPTY; 200 if (free_txed_only && !txed) 201 break; 202 /* Descriptor type must be checked before all other reads */ 203 dma_rmb(); 204 size = le16_to_cpu(desc->ds_tagl) & TX_DS; 205 /* Free the original skb. */ 206 if (priv->tx_skb[q][entry / NUM_TX_DESC]) { 207 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), 208 size, DMA_TO_DEVICE); 209 /* Last packet descriptor? */ 210 if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { 211 entry /= NUM_TX_DESC; 212 dev_kfree_skb_any(priv->tx_skb[q][entry]); 213 priv->tx_skb[q][entry] = NULL; 214 if (txed) 215 stats->tx_packets++; 216 } 217 free_num++; 218 } 219 if (txed) 220 stats->tx_bytes += size; 221 desc->die_dt = DT_EEMPTY; 222 } 223 return free_num; 224 } 225 226 /* Free skb's and DMA buffers for Ethernet AVB */ 227 static void ravb_ring_free(struct net_device *ndev, int q) 228 { 229 struct ravb_private *priv = netdev_priv(ndev); 230 int ring_size; 231 int i; 232 233 if (priv->rx_ring[q]) { 234 for (i = 0; i < priv->num_rx_ring[q]; i++) { 235 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; 236 237 if (!dma_mapping_error(ndev->dev.parent, 238 le32_to_cpu(desc->dptr))) 239 dma_unmap_single(ndev->dev.parent, 240 le32_to_cpu(desc->dptr), 241 priv->rx_buf_sz, 242 DMA_FROM_DEVICE); 243 } 244 ring_size = sizeof(struct ravb_ex_rx_desc) * 245 (priv->num_rx_ring[q] + 1); 246 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], 247 priv->rx_desc_dma[q]); 248 priv->rx_ring[q] = NULL; 249 } 250 251 if (priv->tx_ring[q]) { 252 ravb_tx_free(ndev, q, false); 253 254 ring_size = sizeof(struct ravb_tx_desc) * 255 (priv->num_tx_ring[q] * NUM_TX_DESC + 1); 256 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], 257 priv->tx_desc_dma[q]); 258 priv->tx_ring[q] = NULL; 259 } 260 261 /* Free RX skb ringbuffer */ 262 if (priv->rx_skb[q]) { 263 for (i = 0; i < priv->num_rx_ring[q]; i++) 264 dev_kfree_skb(priv->rx_skb[q][i]); 265 } 266 kfree(priv->rx_skb[q]); 267 priv->rx_skb[q] = NULL; 268 269 /* Free aligned TX buffers */ 270 kfree(priv->tx_align[q]); 271 priv->tx_align[q] = NULL; 272 273 /* Free TX skb ringbuffer. 274 * SKBs are freed by ravb_tx_free() call above. 275 */ 276 kfree(priv->tx_skb[q]); 277 priv->tx_skb[q] = NULL; 278 } 279 280 /* Format skb and descriptor buffer for Ethernet AVB */ 281 static void ravb_ring_format(struct net_device *ndev, int q) 282 { 283 struct ravb_private *priv = netdev_priv(ndev); 284 struct ravb_ex_rx_desc *rx_desc; 285 struct ravb_tx_desc *tx_desc; 286 struct ravb_desc *desc; 287 int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; 288 int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * 289 NUM_TX_DESC; 290 dma_addr_t dma_addr; 291 int i; 292 293 priv->cur_rx[q] = 0; 294 priv->cur_tx[q] = 0; 295 priv->dirty_rx[q] = 0; 296 priv->dirty_tx[q] = 0; 297 298 memset(priv->rx_ring[q], 0, rx_ring_size); 299 /* Build RX ring buffer */ 300 for (i = 0; i < priv->num_rx_ring[q]; i++) { 301 /* RX descriptor */ 302 rx_desc = &priv->rx_ring[q][i]; 303 rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz); 304 dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, 305 priv->rx_buf_sz, 306 DMA_FROM_DEVICE); 307 /* We just set the data size to 0 for a failed mapping which 308 * should prevent DMA from happening... 309 */ 310 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 311 rx_desc->ds_cc = cpu_to_le16(0); 312 rx_desc->dptr = cpu_to_le32(dma_addr); 313 rx_desc->die_dt = DT_FEMPTY; 314 } 315 rx_desc = &priv->rx_ring[q][i]; 316 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); 317 rx_desc->die_dt = DT_LINKFIX; /* type */ 318 319 memset(priv->tx_ring[q], 0, tx_ring_size); 320 /* Build TX ring buffer */ 321 for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; 322 i++, tx_desc++) { 323 tx_desc->die_dt = DT_EEMPTY; 324 tx_desc++; 325 tx_desc->die_dt = DT_EEMPTY; 326 } 327 tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); 328 tx_desc->die_dt = DT_LINKFIX; /* type */ 329 330 /* RX descriptor base address for best effort */ 331 desc = &priv->desc_bat[RX_QUEUE_OFFSET + q]; 332 desc->die_dt = DT_LINKFIX; /* type */ 333 desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); 334 335 /* TX descriptor base address for best effort */ 336 desc = &priv->desc_bat[q]; 337 desc->die_dt = DT_LINKFIX; /* type */ 338 desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); 339 } 340 341 /* Init skb and descriptor buffer for Ethernet AVB */ 342 static int ravb_ring_init(struct net_device *ndev, int q) 343 { 344 struct ravb_private *priv = netdev_priv(ndev); 345 struct sk_buff *skb; 346 int ring_size; 347 int i; 348 349 /* +16 gets room from the status from the card. */ 350 priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) + 351 ETH_HLEN + VLAN_HLEN; 352 353 /* Allocate RX and TX skb rings */ 354 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], 355 sizeof(*priv->rx_skb[q]), GFP_KERNEL); 356 priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q], 357 sizeof(*priv->tx_skb[q]), GFP_KERNEL); 358 if (!priv->rx_skb[q] || !priv->tx_skb[q]) 359 goto error; 360 361 for (i = 0; i < priv->num_rx_ring[q]; i++) { 362 skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1); 363 if (!skb) 364 goto error; 365 ravb_set_buffer_align(skb); 366 priv->rx_skb[q][i] = skb; 367 } 368 369 /* Allocate rings for the aligned buffers */ 370 priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + 371 DPTR_ALIGN - 1, GFP_KERNEL); 372 if (!priv->tx_align[q]) 373 goto error; 374 375 /* Allocate all RX descriptors. */ 376 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); 377 priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, 378 &priv->rx_desc_dma[q], 379 GFP_KERNEL); 380 if (!priv->rx_ring[q]) 381 goto error; 382 383 priv->dirty_rx[q] = 0; 384 385 /* Allocate all TX descriptors. */ 386 ring_size = sizeof(struct ravb_tx_desc) * 387 (priv->num_tx_ring[q] * NUM_TX_DESC + 1); 388 priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, 389 &priv->tx_desc_dma[q], 390 GFP_KERNEL); 391 if (!priv->tx_ring[q]) 392 goto error; 393 394 return 0; 395 396 error: 397 ravb_ring_free(ndev, q); 398 399 return -ENOMEM; 400 } 401 402 /* E-MAC init function */ 403 static void ravb_emac_init(struct net_device *ndev) 404 { 405 struct ravb_private *priv = netdev_priv(ndev); 406 407 /* Receive frame limit set register */ 408 ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); 409 410 /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */ 411 ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | 412 (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) | 413 ECMR_TE | ECMR_RE, ECMR); 414 415 ravb_set_rate(ndev); 416 417 /* Set MAC address */ 418 ravb_write(ndev, 419 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | 420 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); 421 ravb_write(ndev, 422 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); 423 424 /* E-MAC status register clear */ 425 ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR); 426 427 /* E-MAC interrupt enable register */ 428 ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR); 429 } 430 431 /* Device init function for Ethernet AVB */ 432 static int ravb_dmac_init(struct net_device *ndev) 433 { 434 struct ravb_private *priv = netdev_priv(ndev); 435 int error; 436 437 /* Set CONFIG mode */ 438 error = ravb_config(ndev); 439 if (error) 440 return error; 441 442 error = ravb_ring_init(ndev, RAVB_BE); 443 if (error) 444 return error; 445 error = ravb_ring_init(ndev, RAVB_NC); 446 if (error) { 447 ravb_ring_free(ndev, RAVB_BE); 448 return error; 449 } 450 451 /* Descriptor format */ 452 ravb_ring_format(ndev, RAVB_BE); 453 ravb_ring_format(ndev, RAVB_NC); 454 455 #if defined(__LITTLE_ENDIAN) 456 ravb_modify(ndev, CCC, CCC_BOC, 0); 457 #else 458 ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC); 459 #endif 460 461 /* Set AVB RX */ 462 ravb_write(ndev, 463 RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR); 464 465 /* Set FIFO size */ 466 ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC); 467 468 /* Timestamp enable */ 469 ravb_write(ndev, TCCR_TFEN, TCCR); 470 471 /* Interrupt init: */ 472 if (priv->chip_id == RCAR_GEN3) { 473 /* Clear DIL.DPLx */ 474 ravb_write(ndev, 0, DIL); 475 /* Set queue specific interrupt */ 476 ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE); 477 } 478 /* Frame receive */ 479 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); 480 /* Disable FIFO full warning */ 481 ravb_write(ndev, 0, RIC1); 482 /* Receive FIFO full error, descriptor empty */ 483 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); 484 /* Frame transmitted, timestamp FIFO updated */ 485 ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC); 486 487 /* Setting the control will start the AVB-DMAC process. */ 488 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION); 489 490 return 0; 491 } 492 493 static void ravb_get_tx_tstamp(struct net_device *ndev) 494 { 495 struct ravb_private *priv = netdev_priv(ndev); 496 struct ravb_tstamp_skb *ts_skb, *ts_skb2; 497 struct skb_shared_hwtstamps shhwtstamps; 498 struct sk_buff *skb; 499 struct timespec64 ts; 500 u16 tag, tfa_tag; 501 int count; 502 u32 tfa2; 503 504 count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8; 505 while (count--) { 506 tfa2 = ravb_read(ndev, TFA2); 507 tfa_tag = (tfa2 & TFA2_TST) >> 16; 508 ts.tv_nsec = (u64)ravb_read(ndev, TFA0); 509 ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) | 510 ravb_read(ndev, TFA1); 511 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 512 shhwtstamps.hwtstamp = timespec64_to_ktime(ts); 513 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, 514 list) { 515 skb = ts_skb->skb; 516 tag = ts_skb->tag; 517 list_del(&ts_skb->list); 518 kfree(ts_skb); 519 if (tag == tfa_tag) { 520 skb_tstamp_tx(skb, &shhwtstamps); 521 break; 522 } 523 } 524 ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR); 525 } 526 } 527 528 static void ravb_rx_csum(struct sk_buff *skb) 529 { 530 u8 *hw_csum; 531 532 /* The hardware checksum is 2 bytes appended to packet data */ 533 if (unlikely(skb->len < 2)) 534 return; 535 hw_csum = skb_tail_pointer(skb) - 2; 536 skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); 537 skb->ip_summed = CHECKSUM_COMPLETE; 538 skb_trim(skb, skb->len - 2); 539 } 540 541 /* Packet receive function for Ethernet AVB */ 542 static bool ravb_rx(struct net_device *ndev, int *quota, int q) 543 { 544 struct ravb_private *priv = netdev_priv(ndev); 545 int entry = priv->cur_rx[q] % priv->num_rx_ring[q]; 546 int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) - 547 priv->cur_rx[q]; 548 struct net_device_stats *stats = &priv->stats[q]; 549 struct ravb_ex_rx_desc *desc; 550 struct sk_buff *skb; 551 dma_addr_t dma_addr; 552 struct timespec64 ts; 553 u8 desc_status; 554 u16 pkt_len; 555 int limit; 556 557 boguscnt = min(boguscnt, *quota); 558 limit = boguscnt; 559 desc = &priv->rx_ring[q][entry]; 560 while (desc->die_dt != DT_FEMPTY) { 561 /* Descriptor type must be checked before all other reads */ 562 dma_rmb(); 563 desc_status = desc->msc; 564 pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; 565 566 if (--boguscnt < 0) 567 break; 568 569 /* We use 0-byte descriptors to mark the DMA mapping errors */ 570 if (!pkt_len) 571 continue; 572 573 if (desc_status & MSC_MC) 574 stats->multicast++; 575 576 if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | 577 MSC_CEEF)) { 578 stats->rx_errors++; 579 if (desc_status & MSC_CRC) 580 stats->rx_crc_errors++; 581 if (desc_status & MSC_RFE) 582 stats->rx_frame_errors++; 583 if (desc_status & (MSC_RTLF | MSC_RTSF)) 584 stats->rx_length_errors++; 585 if (desc_status & MSC_CEEF) 586 stats->rx_missed_errors++; 587 } else { 588 u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE; 589 590 skb = priv->rx_skb[q][entry]; 591 priv->rx_skb[q][entry] = NULL; 592 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), 593 priv->rx_buf_sz, 594 DMA_FROM_DEVICE); 595 get_ts &= (q == RAVB_NC) ? 596 RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : 597 ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; 598 if (get_ts) { 599 struct skb_shared_hwtstamps *shhwtstamps; 600 601 shhwtstamps = skb_hwtstamps(skb); 602 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 603 ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) << 604 32) | le32_to_cpu(desc->ts_sl); 605 ts.tv_nsec = le32_to_cpu(desc->ts_n); 606 shhwtstamps->hwtstamp = timespec64_to_ktime(ts); 607 } 608 609 skb_put(skb, pkt_len); 610 skb->protocol = eth_type_trans(skb, ndev); 611 if (ndev->features & NETIF_F_RXCSUM) 612 ravb_rx_csum(skb); 613 napi_gro_receive(&priv->napi[q], skb); 614 stats->rx_packets++; 615 stats->rx_bytes += pkt_len; 616 } 617 618 entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q]; 619 desc = &priv->rx_ring[q][entry]; 620 } 621 622 /* Refill the RX ring buffers. */ 623 for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { 624 entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; 625 desc = &priv->rx_ring[q][entry]; 626 desc->ds_cc = cpu_to_le16(priv->rx_buf_sz); 627 628 if (!priv->rx_skb[q][entry]) { 629 skb = netdev_alloc_skb(ndev, 630 priv->rx_buf_sz + 631 RAVB_ALIGN - 1); 632 if (!skb) 633 break; /* Better luck next round. */ 634 ravb_set_buffer_align(skb); 635 dma_addr = dma_map_single(ndev->dev.parent, skb->data, 636 le16_to_cpu(desc->ds_cc), 637 DMA_FROM_DEVICE); 638 skb_checksum_none_assert(skb); 639 /* We just set the data size to 0 for a failed mapping 640 * which should prevent DMA from happening... 641 */ 642 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 643 desc->ds_cc = cpu_to_le16(0); 644 desc->dptr = cpu_to_le32(dma_addr); 645 priv->rx_skb[q][entry] = skb; 646 } 647 /* Descriptor type must be set after all the above writes */ 648 dma_wmb(); 649 desc->die_dt = DT_FEMPTY; 650 } 651 652 *quota -= limit - (++boguscnt); 653 654 return boguscnt <= 0; 655 } 656 657 static void ravb_rcv_snd_disable(struct net_device *ndev) 658 { 659 /* Disable TX and RX */ 660 ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0); 661 } 662 663 static void ravb_rcv_snd_enable(struct net_device *ndev) 664 { 665 /* Enable TX and RX */ 666 ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); 667 } 668 669 /* function for waiting dma process finished */ 670 static int ravb_stop_dma(struct net_device *ndev) 671 { 672 int error; 673 674 /* Wait for stopping the hardware TX process */ 675 error = ravb_wait(ndev, TCCR, 676 TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0); 677 if (error) 678 return error; 679 680 error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3, 681 0); 682 if (error) 683 return error; 684 685 /* Stop the E-MAC's RX/TX processes. */ 686 ravb_rcv_snd_disable(ndev); 687 688 /* Wait for stopping the RX DMA process */ 689 error = ravb_wait(ndev, CSR, CSR_RPO, 0); 690 if (error) 691 return error; 692 693 /* Stop AVB-DMAC process */ 694 return ravb_config(ndev); 695 } 696 697 /* E-MAC interrupt handler */ 698 static void ravb_emac_interrupt_unlocked(struct net_device *ndev) 699 { 700 struct ravb_private *priv = netdev_priv(ndev); 701 u32 ecsr, psr; 702 703 ecsr = ravb_read(ndev, ECSR); 704 ravb_write(ndev, ecsr, ECSR); /* clear interrupt */ 705 706 if (ecsr & ECSR_MPD) 707 pm_wakeup_event(&priv->pdev->dev, 0); 708 if (ecsr & ECSR_ICD) 709 ndev->stats.tx_carrier_errors++; 710 if (ecsr & ECSR_LCHNG) { 711 /* Link changed */ 712 if (priv->no_avb_link) 713 return; 714 psr = ravb_read(ndev, PSR); 715 if (priv->avb_link_active_low) 716 psr ^= PSR_LMON; 717 if (!(psr & PSR_LMON)) { 718 /* DIsable RX and TX */ 719 ravb_rcv_snd_disable(ndev); 720 } else { 721 /* Enable RX and TX */ 722 ravb_rcv_snd_enable(ndev); 723 } 724 } 725 } 726 727 static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id) 728 { 729 struct net_device *ndev = dev_id; 730 struct ravb_private *priv = netdev_priv(ndev); 731 732 spin_lock(&priv->lock); 733 ravb_emac_interrupt_unlocked(ndev); 734 mmiowb(); 735 spin_unlock(&priv->lock); 736 return IRQ_HANDLED; 737 } 738 739 /* Error interrupt handler */ 740 static void ravb_error_interrupt(struct net_device *ndev) 741 { 742 struct ravb_private *priv = netdev_priv(ndev); 743 u32 eis, ris2; 744 745 eis = ravb_read(ndev, EIS); 746 ravb_write(ndev, ~EIS_QFS, EIS); 747 if (eis & EIS_QFS) { 748 ris2 = ravb_read(ndev, RIS2); 749 ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2); 750 751 /* Receive Descriptor Empty int */ 752 if (ris2 & RIS2_QFF0) 753 priv->stats[RAVB_BE].rx_over_errors++; 754 755 /* Receive Descriptor Empty int */ 756 if (ris2 & RIS2_QFF1) 757 priv->stats[RAVB_NC].rx_over_errors++; 758 759 /* Receive FIFO Overflow int */ 760 if (ris2 & RIS2_RFFF) 761 priv->rx_fifo_errors++; 762 } 763 } 764 765 static bool ravb_queue_interrupt(struct net_device *ndev, int q) 766 { 767 struct ravb_private *priv = netdev_priv(ndev); 768 u32 ris0 = ravb_read(ndev, RIS0); 769 u32 ric0 = ravb_read(ndev, RIC0); 770 u32 tis = ravb_read(ndev, TIS); 771 u32 tic = ravb_read(ndev, TIC); 772 773 if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) { 774 if (napi_schedule_prep(&priv->napi[q])) { 775 /* Mask RX and TX interrupts */ 776 if (priv->chip_id == RCAR_GEN2) { 777 ravb_write(ndev, ric0 & ~BIT(q), RIC0); 778 ravb_write(ndev, tic & ~BIT(q), TIC); 779 } else { 780 ravb_write(ndev, BIT(q), RID0); 781 ravb_write(ndev, BIT(q), TID); 782 } 783 __napi_schedule(&priv->napi[q]); 784 } else { 785 netdev_warn(ndev, 786 "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n", 787 ris0, ric0); 788 netdev_warn(ndev, 789 " tx status 0x%08x, tx mask 0x%08x.\n", 790 tis, tic); 791 } 792 return true; 793 } 794 return false; 795 } 796 797 static bool ravb_timestamp_interrupt(struct net_device *ndev) 798 { 799 u32 tis = ravb_read(ndev, TIS); 800 801 if (tis & TIS_TFUF) { 802 ravb_write(ndev, ~TIS_TFUF, TIS); 803 ravb_get_tx_tstamp(ndev); 804 return true; 805 } 806 return false; 807 } 808 809 static irqreturn_t ravb_interrupt(int irq, void *dev_id) 810 { 811 struct net_device *ndev = dev_id; 812 struct ravb_private *priv = netdev_priv(ndev); 813 irqreturn_t result = IRQ_NONE; 814 u32 iss; 815 816 spin_lock(&priv->lock); 817 /* Get interrupt status */ 818 iss = ravb_read(ndev, ISS); 819 820 /* Received and transmitted interrupts */ 821 if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) { 822 int q; 823 824 /* Timestamp updated */ 825 if (ravb_timestamp_interrupt(ndev)) 826 result = IRQ_HANDLED; 827 828 /* Network control and best effort queue RX/TX */ 829 for (q = RAVB_NC; q >= RAVB_BE; q--) { 830 if (ravb_queue_interrupt(ndev, q)) 831 result = IRQ_HANDLED; 832 } 833 } 834 835 /* E-MAC status summary */ 836 if (iss & ISS_MS) { 837 ravb_emac_interrupt_unlocked(ndev); 838 result = IRQ_HANDLED; 839 } 840 841 /* Error status summary */ 842 if (iss & ISS_ES) { 843 ravb_error_interrupt(ndev); 844 result = IRQ_HANDLED; 845 } 846 847 /* gPTP interrupt status summary */ 848 if (iss & ISS_CGIS) { 849 ravb_ptp_interrupt(ndev); 850 result = IRQ_HANDLED; 851 } 852 853 mmiowb(); 854 spin_unlock(&priv->lock); 855 return result; 856 } 857 858 /* Timestamp/Error/gPTP interrupt handler */ 859 static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id) 860 { 861 struct net_device *ndev = dev_id; 862 struct ravb_private *priv = netdev_priv(ndev); 863 irqreturn_t result = IRQ_NONE; 864 u32 iss; 865 866 spin_lock(&priv->lock); 867 /* Get interrupt status */ 868 iss = ravb_read(ndev, ISS); 869 870 /* Timestamp updated */ 871 if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev)) 872 result = IRQ_HANDLED; 873 874 /* Error status summary */ 875 if (iss & ISS_ES) { 876 ravb_error_interrupt(ndev); 877 result = IRQ_HANDLED; 878 } 879 880 /* gPTP interrupt status summary */ 881 if (iss & ISS_CGIS) { 882 ravb_ptp_interrupt(ndev); 883 result = IRQ_HANDLED; 884 } 885 886 mmiowb(); 887 spin_unlock(&priv->lock); 888 return result; 889 } 890 891 static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q) 892 { 893 struct net_device *ndev = dev_id; 894 struct ravb_private *priv = netdev_priv(ndev); 895 irqreturn_t result = IRQ_NONE; 896 897 spin_lock(&priv->lock); 898 899 /* Network control/Best effort queue RX/TX */ 900 if (ravb_queue_interrupt(ndev, q)) 901 result = IRQ_HANDLED; 902 903 mmiowb(); 904 spin_unlock(&priv->lock); 905 return result; 906 } 907 908 static irqreturn_t ravb_be_interrupt(int irq, void *dev_id) 909 { 910 return ravb_dma_interrupt(irq, dev_id, RAVB_BE); 911 } 912 913 static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id) 914 { 915 return ravb_dma_interrupt(irq, dev_id, RAVB_NC); 916 } 917 918 static int ravb_poll(struct napi_struct *napi, int budget) 919 { 920 struct net_device *ndev = napi->dev; 921 struct ravb_private *priv = netdev_priv(ndev); 922 unsigned long flags; 923 int q = napi - priv->napi; 924 int mask = BIT(q); 925 int quota = budget; 926 u32 ris0, tis; 927 928 for (;;) { 929 tis = ravb_read(ndev, TIS); 930 ris0 = ravb_read(ndev, RIS0); 931 if (!((ris0 & mask) || (tis & mask))) 932 break; 933 934 /* Processing RX Descriptor Ring */ 935 if (ris0 & mask) { 936 /* Clear RX interrupt */ 937 ravb_write(ndev, ~mask, RIS0); 938 if (ravb_rx(ndev, "a, q)) 939 goto out; 940 } 941 /* Processing TX Descriptor Ring */ 942 if (tis & mask) { 943 spin_lock_irqsave(&priv->lock, flags); 944 /* Clear TX interrupt */ 945 ravb_write(ndev, ~mask, TIS); 946 ravb_tx_free(ndev, q, true); 947 netif_wake_subqueue(ndev, q); 948 mmiowb(); 949 spin_unlock_irqrestore(&priv->lock, flags); 950 } 951 } 952 953 napi_complete(napi); 954 955 /* Re-enable RX/TX interrupts */ 956 spin_lock_irqsave(&priv->lock, flags); 957 if (priv->chip_id == RCAR_GEN2) { 958 ravb_modify(ndev, RIC0, mask, mask); 959 ravb_modify(ndev, TIC, mask, mask); 960 } else { 961 ravb_write(ndev, mask, RIE0); 962 ravb_write(ndev, mask, TIE); 963 } 964 mmiowb(); 965 spin_unlock_irqrestore(&priv->lock, flags); 966 967 /* Receive error message handling */ 968 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; 969 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; 970 if (priv->rx_over_errors != ndev->stats.rx_over_errors) 971 ndev->stats.rx_over_errors = priv->rx_over_errors; 972 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) 973 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; 974 out: 975 return budget - quota; 976 } 977 978 /* PHY state control function */ 979 static void ravb_adjust_link(struct net_device *ndev) 980 { 981 struct ravb_private *priv = netdev_priv(ndev); 982 struct phy_device *phydev = ndev->phydev; 983 bool new_state = false; 984 985 if (phydev->link) { 986 if (phydev->duplex != priv->duplex) { 987 new_state = true; 988 priv->duplex = phydev->duplex; 989 ravb_set_duplex(ndev); 990 } 991 992 if (phydev->speed != priv->speed) { 993 new_state = true; 994 priv->speed = phydev->speed; 995 ravb_set_rate(ndev); 996 } 997 if (!priv->link) { 998 ravb_modify(ndev, ECMR, ECMR_TXF, 0); 999 new_state = true; 1000 priv->link = phydev->link; 1001 if (priv->no_avb_link) 1002 ravb_rcv_snd_enable(ndev); 1003 } 1004 } else if (priv->link) { 1005 new_state = true; 1006 priv->link = 0; 1007 priv->speed = 0; 1008 priv->duplex = -1; 1009 if (priv->no_avb_link) 1010 ravb_rcv_snd_disable(ndev); 1011 } 1012 1013 if (new_state && netif_msg_link(priv)) 1014 phy_print_status(phydev); 1015 } 1016 1017 static const struct soc_device_attribute r8a7795es10[] = { 1018 { .soc_id = "r8a7795", .revision = "ES1.0", }, 1019 { /* sentinel */ } 1020 }; 1021 1022 /* PHY init function */ 1023 static int ravb_phy_init(struct net_device *ndev) 1024 { 1025 struct device_node *np = ndev->dev.parent->of_node; 1026 struct ravb_private *priv = netdev_priv(ndev); 1027 struct phy_device *phydev; 1028 struct device_node *pn; 1029 int err; 1030 1031 priv->link = 0; 1032 priv->speed = 0; 1033 priv->duplex = -1; 1034 1035 /* Try connecting to PHY */ 1036 pn = of_parse_phandle(np, "phy-handle", 0); 1037 if (!pn) { 1038 /* In the case of a fixed PHY, the DT node associated 1039 * to the PHY is the Ethernet MAC DT node. 1040 */ 1041 if (of_phy_is_fixed_link(np)) { 1042 err = of_phy_register_fixed_link(np); 1043 if (err) 1044 return err; 1045 } 1046 pn = of_node_get(np); 1047 } 1048 phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, 1049 priv->phy_interface); 1050 of_node_put(pn); 1051 if (!phydev) { 1052 netdev_err(ndev, "failed to connect PHY\n"); 1053 err = -ENOENT; 1054 goto err_deregister_fixed_link; 1055 } 1056 1057 /* This driver only support 10/100Mbit speeds on R-Car H3 ES1.0 1058 * at this time. 1059 */ 1060 if (soc_device_match(r8a7795es10)) { 1061 err = phy_set_max_speed(phydev, SPEED_100); 1062 if (err) { 1063 netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n"); 1064 goto err_phy_disconnect; 1065 } 1066 1067 netdev_info(ndev, "limited PHY to 100Mbit/s\n"); 1068 } 1069 1070 /* 10BASE is not supported */ 1071 phydev->supported &= ~PHY_10BT_FEATURES; 1072 1073 phy_attached_info(phydev); 1074 1075 return 0; 1076 1077 err_phy_disconnect: 1078 phy_disconnect(phydev); 1079 err_deregister_fixed_link: 1080 if (of_phy_is_fixed_link(np)) 1081 of_phy_deregister_fixed_link(np); 1082 1083 return err; 1084 } 1085 1086 /* PHY control start function */ 1087 static int ravb_phy_start(struct net_device *ndev) 1088 { 1089 int error; 1090 1091 error = ravb_phy_init(ndev); 1092 if (error) 1093 return error; 1094 1095 phy_start(ndev->phydev); 1096 1097 return 0; 1098 } 1099 1100 static int ravb_get_link_ksettings(struct net_device *ndev, 1101 struct ethtool_link_ksettings *cmd) 1102 { 1103 struct ravb_private *priv = netdev_priv(ndev); 1104 unsigned long flags; 1105 1106 if (!ndev->phydev) 1107 return -ENODEV; 1108 1109 spin_lock_irqsave(&priv->lock, flags); 1110 phy_ethtool_ksettings_get(ndev->phydev, cmd); 1111 spin_unlock_irqrestore(&priv->lock, flags); 1112 1113 return 0; 1114 } 1115 1116 static int ravb_set_link_ksettings(struct net_device *ndev, 1117 const struct ethtool_link_ksettings *cmd) 1118 { 1119 struct ravb_private *priv = netdev_priv(ndev); 1120 unsigned long flags; 1121 int error; 1122 1123 if (!ndev->phydev) 1124 return -ENODEV; 1125 1126 spin_lock_irqsave(&priv->lock, flags); 1127 1128 /* Disable TX and RX */ 1129 ravb_rcv_snd_disable(ndev); 1130 1131 error = phy_ethtool_ksettings_set(ndev->phydev, cmd); 1132 if (error) 1133 goto error_exit; 1134 1135 if (cmd->base.duplex == DUPLEX_FULL) 1136 priv->duplex = 1; 1137 else 1138 priv->duplex = 0; 1139 1140 ravb_set_duplex(ndev); 1141 1142 error_exit: 1143 mdelay(1); 1144 1145 /* Enable TX and RX */ 1146 ravb_rcv_snd_enable(ndev); 1147 1148 mmiowb(); 1149 spin_unlock_irqrestore(&priv->lock, flags); 1150 1151 return error; 1152 } 1153 1154 static int ravb_nway_reset(struct net_device *ndev) 1155 { 1156 struct ravb_private *priv = netdev_priv(ndev); 1157 int error = -ENODEV; 1158 unsigned long flags; 1159 1160 if (ndev->phydev) { 1161 spin_lock_irqsave(&priv->lock, flags); 1162 error = phy_start_aneg(ndev->phydev); 1163 spin_unlock_irqrestore(&priv->lock, flags); 1164 } 1165 1166 return error; 1167 } 1168 1169 static u32 ravb_get_msglevel(struct net_device *ndev) 1170 { 1171 struct ravb_private *priv = netdev_priv(ndev); 1172 1173 return priv->msg_enable; 1174 } 1175 1176 static void ravb_set_msglevel(struct net_device *ndev, u32 value) 1177 { 1178 struct ravb_private *priv = netdev_priv(ndev); 1179 1180 priv->msg_enable = value; 1181 } 1182 1183 static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = { 1184 "rx_queue_0_current", 1185 "tx_queue_0_current", 1186 "rx_queue_0_dirty", 1187 "tx_queue_0_dirty", 1188 "rx_queue_0_packets", 1189 "tx_queue_0_packets", 1190 "rx_queue_0_bytes", 1191 "tx_queue_0_bytes", 1192 "rx_queue_0_mcast_packets", 1193 "rx_queue_0_errors", 1194 "rx_queue_0_crc_errors", 1195 "rx_queue_0_frame_errors", 1196 "rx_queue_0_length_errors", 1197 "rx_queue_0_missed_errors", 1198 "rx_queue_0_over_errors", 1199 1200 "rx_queue_1_current", 1201 "tx_queue_1_current", 1202 "rx_queue_1_dirty", 1203 "tx_queue_1_dirty", 1204 "rx_queue_1_packets", 1205 "tx_queue_1_packets", 1206 "rx_queue_1_bytes", 1207 "tx_queue_1_bytes", 1208 "rx_queue_1_mcast_packets", 1209 "rx_queue_1_errors", 1210 "rx_queue_1_crc_errors", 1211 "rx_queue_1_frame_errors", 1212 "rx_queue_1_length_errors", 1213 "rx_queue_1_missed_errors", 1214 "rx_queue_1_over_errors", 1215 }; 1216 1217 #define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats) 1218 1219 static int ravb_get_sset_count(struct net_device *netdev, int sset) 1220 { 1221 switch (sset) { 1222 case ETH_SS_STATS: 1223 return RAVB_STATS_LEN; 1224 default: 1225 return -EOPNOTSUPP; 1226 } 1227 } 1228 1229 static void ravb_get_ethtool_stats(struct net_device *ndev, 1230 struct ethtool_stats *stats, u64 *data) 1231 { 1232 struct ravb_private *priv = netdev_priv(ndev); 1233 int i = 0; 1234 int q; 1235 1236 /* Device-specific stats */ 1237 for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) { 1238 struct net_device_stats *stats = &priv->stats[q]; 1239 1240 data[i++] = priv->cur_rx[q]; 1241 data[i++] = priv->cur_tx[q]; 1242 data[i++] = priv->dirty_rx[q]; 1243 data[i++] = priv->dirty_tx[q]; 1244 data[i++] = stats->rx_packets; 1245 data[i++] = stats->tx_packets; 1246 data[i++] = stats->rx_bytes; 1247 data[i++] = stats->tx_bytes; 1248 data[i++] = stats->multicast; 1249 data[i++] = stats->rx_errors; 1250 data[i++] = stats->rx_crc_errors; 1251 data[i++] = stats->rx_frame_errors; 1252 data[i++] = stats->rx_length_errors; 1253 data[i++] = stats->rx_missed_errors; 1254 data[i++] = stats->rx_over_errors; 1255 } 1256 } 1257 1258 static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data) 1259 { 1260 switch (stringset) { 1261 case ETH_SS_STATS: 1262 memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats)); 1263 break; 1264 } 1265 } 1266 1267 static void ravb_get_ringparam(struct net_device *ndev, 1268 struct ethtool_ringparam *ring) 1269 { 1270 struct ravb_private *priv = netdev_priv(ndev); 1271 1272 ring->rx_max_pending = BE_RX_RING_MAX; 1273 ring->tx_max_pending = BE_TX_RING_MAX; 1274 ring->rx_pending = priv->num_rx_ring[RAVB_BE]; 1275 ring->tx_pending = priv->num_tx_ring[RAVB_BE]; 1276 } 1277 1278 static int ravb_set_ringparam(struct net_device *ndev, 1279 struct ethtool_ringparam *ring) 1280 { 1281 struct ravb_private *priv = netdev_priv(ndev); 1282 int error; 1283 1284 if (ring->tx_pending > BE_TX_RING_MAX || 1285 ring->rx_pending > BE_RX_RING_MAX || 1286 ring->tx_pending < BE_TX_RING_MIN || 1287 ring->rx_pending < BE_RX_RING_MIN) 1288 return -EINVAL; 1289 if (ring->rx_mini_pending || ring->rx_jumbo_pending) 1290 return -EINVAL; 1291 1292 if (netif_running(ndev)) { 1293 netif_device_detach(ndev); 1294 /* Stop PTP Clock driver */ 1295 if (priv->chip_id == RCAR_GEN2) 1296 ravb_ptp_stop(ndev); 1297 /* Wait for DMA stopping */ 1298 error = ravb_stop_dma(ndev); 1299 if (error) { 1300 netdev_err(ndev, 1301 "cannot set ringparam! Any AVB processes are still running?\n"); 1302 return error; 1303 } 1304 synchronize_irq(ndev->irq); 1305 1306 /* Free all the skb's in the RX queue and the DMA buffers. */ 1307 ravb_ring_free(ndev, RAVB_BE); 1308 ravb_ring_free(ndev, RAVB_NC); 1309 } 1310 1311 /* Set new parameters */ 1312 priv->num_rx_ring[RAVB_BE] = ring->rx_pending; 1313 priv->num_tx_ring[RAVB_BE] = ring->tx_pending; 1314 1315 if (netif_running(ndev)) { 1316 error = ravb_dmac_init(ndev); 1317 if (error) { 1318 netdev_err(ndev, 1319 "%s: ravb_dmac_init() failed, error %d\n", 1320 __func__, error); 1321 return error; 1322 } 1323 1324 ravb_emac_init(ndev); 1325 1326 /* Initialise PTP Clock driver */ 1327 if (priv->chip_id == RCAR_GEN2) 1328 ravb_ptp_init(ndev, priv->pdev); 1329 1330 netif_device_attach(ndev); 1331 } 1332 1333 return 0; 1334 } 1335 1336 static int ravb_get_ts_info(struct net_device *ndev, 1337 struct ethtool_ts_info *info) 1338 { 1339 struct ravb_private *priv = netdev_priv(ndev); 1340 1341 info->so_timestamping = 1342 SOF_TIMESTAMPING_TX_SOFTWARE | 1343 SOF_TIMESTAMPING_RX_SOFTWARE | 1344 SOF_TIMESTAMPING_SOFTWARE | 1345 SOF_TIMESTAMPING_TX_HARDWARE | 1346 SOF_TIMESTAMPING_RX_HARDWARE | 1347 SOF_TIMESTAMPING_RAW_HARDWARE; 1348 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 1349 info->rx_filters = 1350 (1 << HWTSTAMP_FILTER_NONE) | 1351 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 1352 (1 << HWTSTAMP_FILTER_ALL); 1353 info->phc_index = ptp_clock_index(priv->ptp.clock); 1354 1355 return 0; 1356 } 1357 1358 static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 1359 { 1360 struct ravb_private *priv = netdev_priv(ndev); 1361 1362 wol->supported = WAKE_MAGIC; 1363 wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0; 1364 } 1365 1366 static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 1367 { 1368 struct ravb_private *priv = netdev_priv(ndev); 1369 1370 if (wol->wolopts & ~WAKE_MAGIC) 1371 return -EOPNOTSUPP; 1372 1373 priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); 1374 1375 device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled); 1376 1377 return 0; 1378 } 1379 1380 static const struct ethtool_ops ravb_ethtool_ops = { 1381 .nway_reset = ravb_nway_reset, 1382 .get_msglevel = ravb_get_msglevel, 1383 .set_msglevel = ravb_set_msglevel, 1384 .get_link = ethtool_op_get_link, 1385 .get_strings = ravb_get_strings, 1386 .get_ethtool_stats = ravb_get_ethtool_stats, 1387 .get_sset_count = ravb_get_sset_count, 1388 .get_ringparam = ravb_get_ringparam, 1389 .set_ringparam = ravb_set_ringparam, 1390 .get_ts_info = ravb_get_ts_info, 1391 .get_link_ksettings = ravb_get_link_ksettings, 1392 .set_link_ksettings = ravb_set_link_ksettings, 1393 .get_wol = ravb_get_wol, 1394 .set_wol = ravb_set_wol, 1395 }; 1396 1397 static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, 1398 struct net_device *ndev, struct device *dev, 1399 const char *ch) 1400 { 1401 char *name; 1402 int error; 1403 1404 name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch); 1405 if (!name) 1406 return -ENOMEM; 1407 error = request_irq(irq, handler, 0, name, ndev); 1408 if (error) 1409 netdev_err(ndev, "cannot request IRQ %s\n", name); 1410 1411 return error; 1412 } 1413 1414 /* Network device open function for Ethernet AVB */ 1415 static int ravb_open(struct net_device *ndev) 1416 { 1417 struct ravb_private *priv = netdev_priv(ndev); 1418 struct platform_device *pdev = priv->pdev; 1419 struct device *dev = &pdev->dev; 1420 int error; 1421 1422 napi_enable(&priv->napi[RAVB_BE]); 1423 napi_enable(&priv->napi[RAVB_NC]); 1424 1425 if (priv->chip_id == RCAR_GEN2) { 1426 error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, 1427 ndev->name, ndev); 1428 if (error) { 1429 netdev_err(ndev, "cannot request IRQ\n"); 1430 goto out_napi_off; 1431 } 1432 } else { 1433 error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev, 1434 dev, "ch22:multi"); 1435 if (error) 1436 goto out_napi_off; 1437 error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev, 1438 dev, "ch24:emac"); 1439 if (error) 1440 goto out_free_irq; 1441 error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt, 1442 ndev, dev, "ch0:rx_be"); 1443 if (error) 1444 goto out_free_irq_emac; 1445 error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt, 1446 ndev, dev, "ch18:tx_be"); 1447 if (error) 1448 goto out_free_irq_be_rx; 1449 error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt, 1450 ndev, dev, "ch1:rx_nc"); 1451 if (error) 1452 goto out_free_irq_be_tx; 1453 error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt, 1454 ndev, dev, "ch19:tx_nc"); 1455 if (error) 1456 goto out_free_irq_nc_rx; 1457 } 1458 1459 /* Device init */ 1460 error = ravb_dmac_init(ndev); 1461 if (error) 1462 goto out_free_irq_nc_tx; 1463 ravb_emac_init(ndev); 1464 1465 /* Initialise PTP Clock driver */ 1466 if (priv->chip_id == RCAR_GEN2) 1467 ravb_ptp_init(ndev, priv->pdev); 1468 1469 netif_tx_start_all_queues(ndev); 1470 1471 /* PHY control start */ 1472 error = ravb_phy_start(ndev); 1473 if (error) 1474 goto out_ptp_stop; 1475 1476 return 0; 1477 1478 out_ptp_stop: 1479 /* Stop PTP Clock driver */ 1480 if (priv->chip_id == RCAR_GEN2) 1481 ravb_ptp_stop(ndev); 1482 out_free_irq_nc_tx: 1483 if (priv->chip_id == RCAR_GEN2) 1484 goto out_free_irq; 1485 free_irq(priv->tx_irqs[RAVB_NC], ndev); 1486 out_free_irq_nc_rx: 1487 free_irq(priv->rx_irqs[RAVB_NC], ndev); 1488 out_free_irq_be_tx: 1489 free_irq(priv->tx_irqs[RAVB_BE], ndev); 1490 out_free_irq_be_rx: 1491 free_irq(priv->rx_irqs[RAVB_BE], ndev); 1492 out_free_irq_emac: 1493 free_irq(priv->emac_irq, ndev); 1494 out_free_irq: 1495 free_irq(ndev->irq, ndev); 1496 out_napi_off: 1497 napi_disable(&priv->napi[RAVB_NC]); 1498 napi_disable(&priv->napi[RAVB_BE]); 1499 return error; 1500 } 1501 1502 /* Timeout function for Ethernet AVB */ 1503 static void ravb_tx_timeout(struct net_device *ndev) 1504 { 1505 struct ravb_private *priv = netdev_priv(ndev); 1506 1507 netif_err(priv, tx_err, ndev, 1508 "transmit timed out, status %08x, resetting...\n", 1509 ravb_read(ndev, ISS)); 1510 1511 /* tx_errors count up */ 1512 ndev->stats.tx_errors++; 1513 1514 schedule_work(&priv->work); 1515 } 1516 1517 static void ravb_tx_timeout_work(struct work_struct *work) 1518 { 1519 struct ravb_private *priv = container_of(work, struct ravb_private, 1520 work); 1521 struct net_device *ndev = priv->ndev; 1522 1523 netif_tx_stop_all_queues(ndev); 1524 1525 /* Stop PTP Clock driver */ 1526 if (priv->chip_id == RCAR_GEN2) 1527 ravb_ptp_stop(ndev); 1528 1529 /* Wait for DMA stopping */ 1530 ravb_stop_dma(ndev); 1531 1532 ravb_ring_free(ndev, RAVB_BE); 1533 ravb_ring_free(ndev, RAVB_NC); 1534 1535 /* Device init */ 1536 ravb_dmac_init(ndev); 1537 ravb_emac_init(ndev); 1538 1539 /* Initialise PTP Clock driver */ 1540 if (priv->chip_id == RCAR_GEN2) 1541 ravb_ptp_init(ndev, priv->pdev); 1542 1543 netif_tx_start_all_queues(ndev); 1544 } 1545 1546 /* Packet transmit function for Ethernet AVB */ 1547 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) 1548 { 1549 struct ravb_private *priv = netdev_priv(ndev); 1550 u16 q = skb_get_queue_mapping(skb); 1551 struct ravb_tstamp_skb *ts_skb; 1552 struct ravb_tx_desc *desc; 1553 unsigned long flags; 1554 u32 dma_addr; 1555 void *buffer; 1556 u32 entry; 1557 u32 len; 1558 1559 spin_lock_irqsave(&priv->lock, flags); 1560 if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * 1561 NUM_TX_DESC) { 1562 netif_err(priv, tx_queued, ndev, 1563 "still transmitting with the full ring!\n"); 1564 netif_stop_subqueue(ndev, q); 1565 spin_unlock_irqrestore(&priv->lock, flags); 1566 return NETDEV_TX_BUSY; 1567 } 1568 1569 if (skb_put_padto(skb, ETH_ZLEN)) 1570 goto exit; 1571 1572 entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC); 1573 priv->tx_skb[q][entry / NUM_TX_DESC] = skb; 1574 1575 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + 1576 entry / NUM_TX_DESC * DPTR_ALIGN; 1577 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; 1578 /* Zero length DMA descriptors are problematic as they seem to 1579 * terminate DMA transfers. Avoid them by simply using a length of 1580 * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN. 1581 * 1582 * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of 1583 * data by the call to skb_put_padto() above this is safe with 1584 * respect to both the length of the first DMA descriptor (len) 1585 * overflowing the available data and the length of the second DMA 1586 * descriptor (skb->len - len) being negative. 1587 */ 1588 if (len == 0) 1589 len = DPTR_ALIGN; 1590 1591 memcpy(buffer, skb->data, len); 1592 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); 1593 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 1594 goto drop; 1595 1596 desc = &priv->tx_ring[q][entry]; 1597 desc->ds_tagl = cpu_to_le16(len); 1598 desc->dptr = cpu_to_le32(dma_addr); 1599 1600 buffer = skb->data + len; 1601 len = skb->len - len; 1602 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); 1603 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 1604 goto unmap; 1605 1606 desc++; 1607 desc->ds_tagl = cpu_to_le16(len); 1608 desc->dptr = cpu_to_le32(dma_addr); 1609 1610 /* TX timestamp required */ 1611 if (q == RAVB_NC) { 1612 ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); 1613 if (!ts_skb) { 1614 desc--; 1615 dma_unmap_single(ndev->dev.parent, dma_addr, len, 1616 DMA_TO_DEVICE); 1617 goto unmap; 1618 } 1619 ts_skb->skb = skb; 1620 ts_skb->tag = priv->ts_skb_tag++; 1621 priv->ts_skb_tag &= 0x3ff; 1622 list_add_tail(&ts_skb->list, &priv->ts_skb_list); 1623 1624 /* TAG and timestamp required flag */ 1625 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1626 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; 1627 desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12); 1628 } 1629 1630 skb_tx_timestamp(skb); 1631 /* Descriptor type must be set after all the above writes */ 1632 dma_wmb(); 1633 desc->die_dt = DT_FEND; 1634 desc--; 1635 desc->die_dt = DT_FSTART; 1636 1637 ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q); 1638 1639 priv->cur_tx[q] += NUM_TX_DESC; 1640 if (priv->cur_tx[q] - priv->dirty_tx[q] > 1641 (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && 1642 !ravb_tx_free(ndev, q, true)) 1643 netif_stop_subqueue(ndev, q); 1644 1645 exit: 1646 mmiowb(); 1647 spin_unlock_irqrestore(&priv->lock, flags); 1648 return NETDEV_TX_OK; 1649 1650 unmap: 1651 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), 1652 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); 1653 drop: 1654 dev_kfree_skb_any(skb); 1655 priv->tx_skb[q][entry / NUM_TX_DESC] = NULL; 1656 goto exit; 1657 } 1658 1659 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, 1660 void *accel_priv, select_queue_fallback_t fallback) 1661 { 1662 /* If skb needs TX timestamp, it is handled in network control queue */ 1663 return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC : 1664 RAVB_BE; 1665 1666 } 1667 1668 static struct net_device_stats *ravb_get_stats(struct net_device *ndev) 1669 { 1670 struct ravb_private *priv = netdev_priv(ndev); 1671 struct net_device_stats *nstats, *stats0, *stats1; 1672 1673 nstats = &ndev->stats; 1674 stats0 = &priv->stats[RAVB_BE]; 1675 stats1 = &priv->stats[RAVB_NC]; 1676 1677 nstats->tx_dropped += ravb_read(ndev, TROCR); 1678 ravb_write(ndev, 0, TROCR); /* (write clear) */ 1679 nstats->collisions += ravb_read(ndev, CDCR); 1680 ravb_write(ndev, 0, CDCR); /* (write clear) */ 1681 nstats->tx_carrier_errors += ravb_read(ndev, LCCR); 1682 ravb_write(ndev, 0, LCCR); /* (write clear) */ 1683 1684 nstats->tx_carrier_errors += ravb_read(ndev, CERCR); 1685 ravb_write(ndev, 0, CERCR); /* (write clear) */ 1686 nstats->tx_carrier_errors += ravb_read(ndev, CEECR); 1687 ravb_write(ndev, 0, CEECR); /* (write clear) */ 1688 1689 nstats->rx_packets = stats0->rx_packets + stats1->rx_packets; 1690 nstats->tx_packets = stats0->tx_packets + stats1->tx_packets; 1691 nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes; 1692 nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes; 1693 nstats->multicast = stats0->multicast + stats1->multicast; 1694 nstats->rx_errors = stats0->rx_errors + stats1->rx_errors; 1695 nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors; 1696 nstats->rx_frame_errors = 1697 stats0->rx_frame_errors + stats1->rx_frame_errors; 1698 nstats->rx_length_errors = 1699 stats0->rx_length_errors + stats1->rx_length_errors; 1700 nstats->rx_missed_errors = 1701 stats0->rx_missed_errors + stats1->rx_missed_errors; 1702 nstats->rx_over_errors = 1703 stats0->rx_over_errors + stats1->rx_over_errors; 1704 1705 return nstats; 1706 } 1707 1708 /* Update promiscuous bit */ 1709 static void ravb_set_rx_mode(struct net_device *ndev) 1710 { 1711 struct ravb_private *priv = netdev_priv(ndev); 1712 unsigned long flags; 1713 1714 spin_lock_irqsave(&priv->lock, flags); 1715 ravb_modify(ndev, ECMR, ECMR_PRM, 1716 ndev->flags & IFF_PROMISC ? ECMR_PRM : 0); 1717 mmiowb(); 1718 spin_unlock_irqrestore(&priv->lock, flags); 1719 } 1720 1721 /* Device close function for Ethernet AVB */ 1722 static int ravb_close(struct net_device *ndev) 1723 { 1724 struct device_node *np = ndev->dev.parent->of_node; 1725 struct ravb_private *priv = netdev_priv(ndev); 1726 struct ravb_tstamp_skb *ts_skb, *ts_skb2; 1727 1728 netif_tx_stop_all_queues(ndev); 1729 1730 /* Disable interrupts by clearing the interrupt masks. */ 1731 ravb_write(ndev, 0, RIC0); 1732 ravb_write(ndev, 0, RIC2); 1733 ravb_write(ndev, 0, TIC); 1734 1735 /* Stop PTP Clock driver */ 1736 if (priv->chip_id == RCAR_GEN2) 1737 ravb_ptp_stop(ndev); 1738 1739 /* Set the config mode to stop the AVB-DMAC's processes */ 1740 if (ravb_stop_dma(ndev) < 0) 1741 netdev_err(ndev, 1742 "device will be stopped after h/w processes are done.\n"); 1743 1744 /* Clear the timestamp list */ 1745 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { 1746 list_del(&ts_skb->list); 1747 kfree(ts_skb); 1748 } 1749 1750 /* PHY disconnect */ 1751 if (ndev->phydev) { 1752 phy_stop(ndev->phydev); 1753 phy_disconnect(ndev->phydev); 1754 if (of_phy_is_fixed_link(np)) 1755 of_phy_deregister_fixed_link(np); 1756 } 1757 1758 if (priv->chip_id != RCAR_GEN2) { 1759 free_irq(priv->tx_irqs[RAVB_NC], ndev); 1760 free_irq(priv->rx_irqs[RAVB_NC], ndev); 1761 free_irq(priv->tx_irqs[RAVB_BE], ndev); 1762 free_irq(priv->rx_irqs[RAVB_BE], ndev); 1763 free_irq(priv->emac_irq, ndev); 1764 } 1765 free_irq(ndev->irq, ndev); 1766 1767 napi_disable(&priv->napi[RAVB_NC]); 1768 napi_disable(&priv->napi[RAVB_BE]); 1769 1770 /* Free all the skb's in the RX queue and the DMA buffers. */ 1771 ravb_ring_free(ndev, RAVB_BE); 1772 ravb_ring_free(ndev, RAVB_NC); 1773 1774 return 0; 1775 } 1776 1777 static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req) 1778 { 1779 struct ravb_private *priv = netdev_priv(ndev); 1780 struct hwtstamp_config config; 1781 1782 config.flags = 0; 1783 config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : 1784 HWTSTAMP_TX_OFF; 1785 if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT) 1786 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 1787 else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL) 1788 config.rx_filter = HWTSTAMP_FILTER_ALL; 1789 else 1790 config.rx_filter = HWTSTAMP_FILTER_NONE; 1791 1792 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? 1793 -EFAULT : 0; 1794 } 1795 1796 /* Control hardware time stamping */ 1797 static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req) 1798 { 1799 struct ravb_private *priv = netdev_priv(ndev); 1800 struct hwtstamp_config config; 1801 u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED; 1802 u32 tstamp_tx_ctrl; 1803 1804 if (copy_from_user(&config, req->ifr_data, sizeof(config))) 1805 return -EFAULT; 1806 1807 /* Reserved for future extensions */ 1808 if (config.flags) 1809 return -EINVAL; 1810 1811 switch (config.tx_type) { 1812 case HWTSTAMP_TX_OFF: 1813 tstamp_tx_ctrl = 0; 1814 break; 1815 case HWTSTAMP_TX_ON: 1816 tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED; 1817 break; 1818 default: 1819 return -ERANGE; 1820 } 1821 1822 switch (config.rx_filter) { 1823 case HWTSTAMP_FILTER_NONE: 1824 tstamp_rx_ctrl = 0; 1825 break; 1826 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1827 tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; 1828 break; 1829 default: 1830 config.rx_filter = HWTSTAMP_FILTER_ALL; 1831 tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL; 1832 } 1833 1834 priv->tstamp_tx_ctrl = tstamp_tx_ctrl; 1835 priv->tstamp_rx_ctrl = tstamp_rx_ctrl; 1836 1837 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? 1838 -EFAULT : 0; 1839 } 1840 1841 /* ioctl to device function */ 1842 static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) 1843 { 1844 struct phy_device *phydev = ndev->phydev; 1845 1846 if (!netif_running(ndev)) 1847 return -EINVAL; 1848 1849 if (!phydev) 1850 return -ENODEV; 1851 1852 switch (cmd) { 1853 case SIOCGHWTSTAMP: 1854 return ravb_hwtstamp_get(ndev, req); 1855 case SIOCSHWTSTAMP: 1856 return ravb_hwtstamp_set(ndev, req); 1857 } 1858 1859 return phy_mii_ioctl(phydev, req, cmd); 1860 } 1861 1862 static int ravb_change_mtu(struct net_device *ndev, int new_mtu) 1863 { 1864 if (netif_running(ndev)) 1865 return -EBUSY; 1866 1867 ndev->mtu = new_mtu; 1868 netdev_update_features(ndev); 1869 1870 return 0; 1871 } 1872 1873 static void ravb_set_rx_csum(struct net_device *ndev, bool enable) 1874 { 1875 struct ravb_private *priv = netdev_priv(ndev); 1876 unsigned long flags; 1877 1878 spin_lock_irqsave(&priv->lock, flags); 1879 1880 /* Disable TX and RX */ 1881 ravb_rcv_snd_disable(ndev); 1882 1883 /* Modify RX Checksum setting */ 1884 ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0); 1885 1886 /* Enable TX and RX */ 1887 ravb_rcv_snd_enable(ndev); 1888 1889 spin_unlock_irqrestore(&priv->lock, flags); 1890 } 1891 1892 static int ravb_set_features(struct net_device *ndev, 1893 netdev_features_t features) 1894 { 1895 netdev_features_t changed = ndev->features ^ features; 1896 1897 if (changed & NETIF_F_RXCSUM) 1898 ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM); 1899 1900 ndev->features = features; 1901 1902 return 0; 1903 } 1904 1905 static const struct net_device_ops ravb_netdev_ops = { 1906 .ndo_open = ravb_open, 1907 .ndo_stop = ravb_close, 1908 .ndo_start_xmit = ravb_start_xmit, 1909 .ndo_select_queue = ravb_select_queue, 1910 .ndo_get_stats = ravb_get_stats, 1911 .ndo_set_rx_mode = ravb_set_rx_mode, 1912 .ndo_tx_timeout = ravb_tx_timeout, 1913 .ndo_do_ioctl = ravb_do_ioctl, 1914 .ndo_change_mtu = ravb_change_mtu, 1915 .ndo_validate_addr = eth_validate_addr, 1916 .ndo_set_mac_address = eth_mac_addr, 1917 .ndo_set_features = ravb_set_features, 1918 }; 1919 1920 /* MDIO bus init function */ 1921 static int ravb_mdio_init(struct ravb_private *priv) 1922 { 1923 struct platform_device *pdev = priv->pdev; 1924 struct device *dev = &pdev->dev; 1925 int error; 1926 1927 /* Bitbang init */ 1928 priv->mdiobb.ops = &bb_ops; 1929 1930 /* MII controller setting */ 1931 priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); 1932 if (!priv->mii_bus) 1933 return -ENOMEM; 1934 1935 /* Hook up MII support for ethtool */ 1936 priv->mii_bus->name = "ravb_mii"; 1937 priv->mii_bus->parent = dev; 1938 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 1939 pdev->name, pdev->id); 1940 1941 /* Register MDIO bus */ 1942 error = of_mdiobus_register(priv->mii_bus, dev->of_node); 1943 if (error) 1944 goto out_free_bus; 1945 1946 return 0; 1947 1948 out_free_bus: 1949 free_mdio_bitbang(priv->mii_bus); 1950 return error; 1951 } 1952 1953 /* MDIO bus release function */ 1954 static int ravb_mdio_release(struct ravb_private *priv) 1955 { 1956 /* Unregister mdio bus */ 1957 mdiobus_unregister(priv->mii_bus); 1958 1959 /* Free bitbang info */ 1960 free_mdio_bitbang(priv->mii_bus); 1961 1962 return 0; 1963 } 1964 1965 static const struct of_device_id ravb_match_table[] = { 1966 { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 }, 1967 { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 }, 1968 { .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 }, 1969 { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 }, 1970 { .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 }, 1971 { } 1972 }; 1973 MODULE_DEVICE_TABLE(of, ravb_match_table); 1974 1975 static int ravb_set_gti(struct net_device *ndev) 1976 { 1977 struct ravb_private *priv = netdev_priv(ndev); 1978 struct device *dev = ndev->dev.parent; 1979 unsigned long rate; 1980 uint64_t inc; 1981 1982 rate = clk_get_rate(priv->clk); 1983 if (!rate) 1984 return -EINVAL; 1985 1986 inc = 1000000000ULL << 20; 1987 do_div(inc, rate); 1988 1989 if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) { 1990 dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n", 1991 inc, GTI_TIV_MIN, GTI_TIV_MAX); 1992 return -EINVAL; 1993 } 1994 1995 ravb_write(ndev, inc, GTI); 1996 1997 return 0; 1998 } 1999 2000 static void ravb_set_config_mode(struct net_device *ndev) 2001 { 2002 struct ravb_private *priv = netdev_priv(ndev); 2003 2004 if (priv->chip_id == RCAR_GEN2) { 2005 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); 2006 /* Set CSEL value */ 2007 ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); 2008 } else { 2009 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG | 2010 CCC_GAC | CCC_CSEL_HPB); 2011 } 2012 } 2013 2014 /* Set tx and rx clock internal delay modes */ 2015 static void ravb_set_delay_mode(struct net_device *ndev) 2016 { 2017 struct ravb_private *priv = netdev_priv(ndev); 2018 int set = 0; 2019 2020 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || 2021 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) 2022 set |= APSR_DM_RDM; 2023 2024 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || 2025 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) 2026 set |= APSR_DM_TDM; 2027 2028 ravb_modify(ndev, APSR, APSR_DM, set); 2029 } 2030 2031 static int ravb_probe(struct platform_device *pdev) 2032 { 2033 struct device_node *np = pdev->dev.of_node; 2034 struct ravb_private *priv; 2035 enum ravb_chip_id chip_id; 2036 struct net_device *ndev; 2037 int error, irq, q; 2038 struct resource *res; 2039 int i; 2040 2041 if (!np) { 2042 dev_err(&pdev->dev, 2043 "this driver is required to be instantiated from device tree\n"); 2044 return -EINVAL; 2045 } 2046 2047 /* Get base address */ 2048 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2049 if (!res) { 2050 dev_err(&pdev->dev, "invalid resource\n"); 2051 return -EINVAL; 2052 } 2053 2054 ndev = alloc_etherdev_mqs(sizeof(struct ravb_private), 2055 NUM_TX_QUEUE, NUM_RX_QUEUE); 2056 if (!ndev) 2057 return -ENOMEM; 2058 2059 ndev->features = NETIF_F_RXCSUM; 2060 ndev->hw_features = NETIF_F_RXCSUM; 2061 2062 pm_runtime_enable(&pdev->dev); 2063 pm_runtime_get_sync(&pdev->dev); 2064 2065 /* The Ether-specific entries in the device structure. */ 2066 ndev->base_addr = res->start; 2067 2068 chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev); 2069 2070 if (chip_id == RCAR_GEN3) 2071 irq = platform_get_irq_byname(pdev, "ch22"); 2072 else 2073 irq = platform_get_irq(pdev, 0); 2074 if (irq < 0) { 2075 error = irq; 2076 goto out_release; 2077 } 2078 ndev->irq = irq; 2079 2080 SET_NETDEV_DEV(ndev, &pdev->dev); 2081 2082 priv = netdev_priv(ndev); 2083 priv->ndev = ndev; 2084 priv->pdev = pdev; 2085 priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; 2086 priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE; 2087 priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; 2088 priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; 2089 priv->addr = devm_ioremap_resource(&pdev->dev, res); 2090 if (IS_ERR(priv->addr)) { 2091 error = PTR_ERR(priv->addr); 2092 goto out_release; 2093 } 2094 2095 spin_lock_init(&priv->lock); 2096 INIT_WORK(&priv->work, ravb_tx_timeout_work); 2097 2098 priv->phy_interface = of_get_phy_mode(np); 2099 2100 priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link"); 2101 priv->avb_link_active_low = 2102 of_property_read_bool(np, "renesas,ether-link-active-low"); 2103 2104 if (chip_id == RCAR_GEN3) { 2105 irq = platform_get_irq_byname(pdev, "ch24"); 2106 if (irq < 0) { 2107 error = irq; 2108 goto out_release; 2109 } 2110 priv->emac_irq = irq; 2111 for (i = 0; i < NUM_RX_QUEUE; i++) { 2112 irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]); 2113 if (irq < 0) { 2114 error = irq; 2115 goto out_release; 2116 } 2117 priv->rx_irqs[i] = irq; 2118 } 2119 for (i = 0; i < NUM_TX_QUEUE; i++) { 2120 irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]); 2121 if (irq < 0) { 2122 error = irq; 2123 goto out_release; 2124 } 2125 priv->tx_irqs[i] = irq; 2126 } 2127 } 2128 2129 priv->chip_id = chip_id; 2130 2131 priv->clk = devm_clk_get(&pdev->dev, NULL); 2132 if (IS_ERR(priv->clk)) { 2133 error = PTR_ERR(priv->clk); 2134 goto out_release; 2135 } 2136 2137 ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); 2138 ndev->min_mtu = ETH_MIN_MTU; 2139 2140 /* Set function */ 2141 ndev->netdev_ops = &ravb_netdev_ops; 2142 ndev->ethtool_ops = &ravb_ethtool_ops; 2143 2144 /* Set AVB config mode */ 2145 ravb_set_config_mode(ndev); 2146 2147 /* Set GTI value */ 2148 error = ravb_set_gti(ndev); 2149 if (error) 2150 goto out_release; 2151 2152 /* Request GTI loading */ 2153 ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); 2154 2155 if (priv->chip_id != RCAR_GEN2) 2156 ravb_set_delay_mode(ndev); 2157 2158 /* Allocate descriptor base address table */ 2159 priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; 2160 priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, 2161 &priv->desc_bat_dma, GFP_KERNEL); 2162 if (!priv->desc_bat) { 2163 dev_err(&pdev->dev, 2164 "Cannot allocate desc base address table (size %d bytes)\n", 2165 priv->desc_bat_size); 2166 error = -ENOMEM; 2167 goto out_release; 2168 } 2169 for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++) 2170 priv->desc_bat[q].die_dt = DT_EOS; 2171 ravb_write(ndev, priv->desc_bat_dma, DBAT); 2172 2173 /* Initialise HW timestamp list */ 2174 INIT_LIST_HEAD(&priv->ts_skb_list); 2175 2176 /* Initialise PTP Clock driver */ 2177 if (chip_id != RCAR_GEN2) 2178 ravb_ptp_init(ndev, pdev); 2179 2180 /* Debug message level */ 2181 priv->msg_enable = RAVB_DEF_MSG_ENABLE; 2182 2183 /* Read and set MAC address */ 2184 ravb_read_mac_address(ndev, of_get_mac_address(np)); 2185 if (!is_valid_ether_addr(ndev->dev_addr)) { 2186 dev_warn(&pdev->dev, 2187 "no valid MAC address supplied, using a random one\n"); 2188 eth_hw_addr_random(ndev); 2189 } 2190 2191 /* MDIO bus init */ 2192 error = ravb_mdio_init(priv); 2193 if (error) { 2194 dev_err(&pdev->dev, "failed to initialize MDIO\n"); 2195 goto out_dma_free; 2196 } 2197 2198 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); 2199 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64); 2200 2201 /* Network device register */ 2202 error = register_netdev(ndev); 2203 if (error) 2204 goto out_napi_del; 2205 2206 device_set_wakeup_capable(&pdev->dev, 1); 2207 2208 /* Print device information */ 2209 netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", 2210 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); 2211 2212 platform_set_drvdata(pdev, ndev); 2213 2214 return 0; 2215 2216 out_napi_del: 2217 netif_napi_del(&priv->napi[RAVB_NC]); 2218 netif_napi_del(&priv->napi[RAVB_BE]); 2219 ravb_mdio_release(priv); 2220 out_dma_free: 2221 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, 2222 priv->desc_bat_dma); 2223 2224 /* Stop PTP Clock driver */ 2225 if (chip_id != RCAR_GEN2) 2226 ravb_ptp_stop(ndev); 2227 out_release: 2228 free_netdev(ndev); 2229 2230 pm_runtime_put(&pdev->dev); 2231 pm_runtime_disable(&pdev->dev); 2232 return error; 2233 } 2234 2235 static int ravb_remove(struct platform_device *pdev) 2236 { 2237 struct net_device *ndev = platform_get_drvdata(pdev); 2238 struct ravb_private *priv = netdev_priv(ndev); 2239 2240 /* Stop PTP Clock driver */ 2241 if (priv->chip_id != RCAR_GEN2) 2242 ravb_ptp_stop(ndev); 2243 2244 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, 2245 priv->desc_bat_dma); 2246 /* Set reset mode */ 2247 ravb_write(ndev, CCC_OPC_RESET, CCC); 2248 pm_runtime_put_sync(&pdev->dev); 2249 unregister_netdev(ndev); 2250 netif_napi_del(&priv->napi[RAVB_NC]); 2251 netif_napi_del(&priv->napi[RAVB_BE]); 2252 ravb_mdio_release(priv); 2253 pm_runtime_disable(&pdev->dev); 2254 free_netdev(ndev); 2255 platform_set_drvdata(pdev, NULL); 2256 2257 return 0; 2258 } 2259 2260 static int ravb_wol_setup(struct net_device *ndev) 2261 { 2262 struct ravb_private *priv = netdev_priv(ndev); 2263 2264 /* Disable interrupts by clearing the interrupt masks. */ 2265 ravb_write(ndev, 0, RIC0); 2266 ravb_write(ndev, 0, RIC2); 2267 ravb_write(ndev, 0, TIC); 2268 2269 /* Only allow ECI interrupts */ 2270 synchronize_irq(priv->emac_irq); 2271 napi_disable(&priv->napi[RAVB_NC]); 2272 napi_disable(&priv->napi[RAVB_BE]); 2273 ravb_write(ndev, ECSIPR_MPDIP, ECSIPR); 2274 2275 /* Enable MagicPacket */ 2276 ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); 2277 2278 return enable_irq_wake(priv->emac_irq); 2279 } 2280 2281 static int ravb_wol_restore(struct net_device *ndev) 2282 { 2283 struct ravb_private *priv = netdev_priv(ndev); 2284 int ret; 2285 2286 napi_enable(&priv->napi[RAVB_NC]); 2287 napi_enable(&priv->napi[RAVB_BE]); 2288 2289 /* Disable MagicPacket */ 2290 ravb_modify(ndev, ECMR, ECMR_MPDE, 0); 2291 2292 ret = ravb_close(ndev); 2293 if (ret < 0) 2294 return ret; 2295 2296 return disable_irq_wake(priv->emac_irq); 2297 } 2298 2299 static int __maybe_unused ravb_suspend(struct device *dev) 2300 { 2301 struct net_device *ndev = dev_get_drvdata(dev); 2302 struct ravb_private *priv = netdev_priv(ndev); 2303 int ret; 2304 2305 if (!netif_running(ndev)) 2306 return 0; 2307 2308 netif_device_detach(ndev); 2309 2310 if (priv->wol_enabled) 2311 ret = ravb_wol_setup(ndev); 2312 else 2313 ret = ravb_close(ndev); 2314 2315 return ret; 2316 } 2317 2318 static int __maybe_unused ravb_resume(struct device *dev) 2319 { 2320 struct net_device *ndev = dev_get_drvdata(dev); 2321 struct ravb_private *priv = netdev_priv(ndev); 2322 int ret = 0; 2323 2324 /* If WoL is enabled set reset mode to rearm the WoL logic */ 2325 if (priv->wol_enabled) 2326 ravb_write(ndev, CCC_OPC_RESET, CCC); 2327 2328 /* All register have been reset to default values. 2329 * Restore all registers which where setup at probe time and 2330 * reopen device if it was running before system suspended. 2331 */ 2332 2333 /* Set AVB config mode */ 2334 ravb_set_config_mode(ndev); 2335 2336 /* Set GTI value */ 2337 ret = ravb_set_gti(ndev); 2338 if (ret) 2339 return ret; 2340 2341 /* Request GTI loading */ 2342 ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); 2343 2344 if (priv->chip_id != RCAR_GEN2) 2345 ravb_set_delay_mode(ndev); 2346 2347 /* Restore descriptor base address table */ 2348 ravb_write(ndev, priv->desc_bat_dma, DBAT); 2349 2350 if (netif_running(ndev)) { 2351 if (priv->wol_enabled) { 2352 ret = ravb_wol_restore(ndev); 2353 if (ret) 2354 return ret; 2355 } 2356 ret = ravb_open(ndev); 2357 if (ret < 0) 2358 return ret; 2359 netif_device_attach(ndev); 2360 } 2361 2362 return ret; 2363 } 2364 2365 static int __maybe_unused ravb_runtime_nop(struct device *dev) 2366 { 2367 /* Runtime PM callback shared between ->runtime_suspend() 2368 * and ->runtime_resume(). Simply returns success. 2369 * 2370 * This driver re-initializes all registers after 2371 * pm_runtime_get_sync() anyway so there is no need 2372 * to save and restore registers here. 2373 */ 2374 return 0; 2375 } 2376 2377 static const struct dev_pm_ops ravb_dev_pm_ops = { 2378 SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume) 2379 SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL) 2380 }; 2381 2382 static struct platform_driver ravb_driver = { 2383 .probe = ravb_probe, 2384 .remove = ravb_remove, 2385 .driver = { 2386 .name = "ravb", 2387 .pm = &ravb_dev_pm_ops, 2388 .of_match_table = ravb_match_table, 2389 }, 2390 }; 2391 2392 module_platform_driver(ravb_driver); 2393 2394 MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai"); 2395 MODULE_DESCRIPTION("Renesas Ethernet AVB driver"); 2396 MODULE_LICENSE("GPL v2"); 2397