/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>

#include <asm/cacheflush.h>

#include "fec.h"

static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);

#define DRIVER_NAME	"fec"

#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))

/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0

static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		.name = "imx25-fec",
		.driver_data = FEC_QUIRK_USE_GASKET,
	}, {
		.name = "imx27-fec",
		.driver_data = 0,
	}, {
		.name = "imx28-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
				FEC_QUIRK_SINGLE_MDIO,
	}, {
		.name = "imx6q-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
	}, {
		.name = "mvf600-fec",
		.driver_data = FEC_QUIRK_ENET_MAC,
	}, {
		.name = "imx6sx-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
				FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);

enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
	IMX6Q_FEC,
	MVF600_FEC,
	IMX6SX_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero, assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined (CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1522
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1536

/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2

#define COPYBREAK_DEFAULT	256

#define TSO_HEADER_SIZE		128
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	(addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))

static int mii_cnt;

static inline
struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
				      struct fec_enet_private *fep,
				      int queue_id)
{
	struct bufdesc *new_bd = bdp + 1;
	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
	struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
	struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
	struct bufdesc_ex *ex_base;
	struct bufdesc *base;
	int ring_size;

	if (bdp >= txq->tx_bd_base) {
		base = txq->tx_bd_base;
		ring_size = txq->tx_ring_size;
		ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
	} else {
		base = rxq->rx_bd_base;
		ring_size = rxq->rx_ring_size;
		ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
	}

	if (fep->bufdesc_ex)
		return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
			ex_base : ex_new_bd);
	else
		return (new_bd >= (base + ring_size)) ?
			base : new_bd;
}

static inline
struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
				      struct fec_enet_private *fep,
				      int queue_id)
{
	struct bufdesc *new_bd = bdp - 1;
	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
	struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
	struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
	struct bufdesc_ex *ex_base;
	struct bufdesc *base;
	int ring_size;

	if (bdp >= txq->tx_bd_base) {
		base = txq->tx_bd_base;
		ring_size = txq->tx_ring_size;
		ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
	} else {
		base = rxq->rx_bd_base;
		ring_size = rxq->rx_ring_size;
		ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
	}

	if (fep->bufdesc_ex)
		return (struct bufdesc *)((ex_new_bd < ex_base) ?
			(ex_new_bd + ring_size) : ex_new_bd);
	else
		return (new_bd < base) ? (new_bd + ring_size) : new_bd;
}

static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
				 struct fec_enet_private *fep)
{
	return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
}

static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
					struct fec_enet_priv_tx_q *txq)
{
	int entries;

	entries = ((const char *)txq->dirty_tx -
			(const char *)txq->cur_tx) / fep->bufdesc_size - 1;

	return entries > 0 ? entries : entries + txq->tx_ring_size;
}
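
/* For illustration (example figures only): with tx_ring_size = 512 and
 * dirty_tx == cur_tx, the pointer difference above yields -1, so the
 * helper returns -1 + 512 = 511 free entries.  The "- 1" keeps one slot
 * permanently unused so cur_tx can never catch up with dirty_tx.
 */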

static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}

static void swap_buffer2(void *dst_buf, void *src_buf, int len)
{
	int i;
	unsigned int *src = src_buf;
	unsigned int *dst = dst_buf;

	for (i = 0; i < len; i += 4, src++, dst++)
		*dst = swab32p(src);
}

static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

	txq = fep->tx_queue[0];
	bdp = txq->tx_bd_base;

	do {
		pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
			index,
			bdp == txq->cur_tx ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
			txq->tx_skbuff[index]);
		bdp = fec_enet_get_nextdesc(bdp, fep, 0);
		index++;
	} while (bdp != txq->tx_bd_base);
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}

static int
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = txq->cur_tx;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned short queue = skb_get_queue_mapping(skb);
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
	unsigned int index;
	void *bufaddr;
	dma_addr_t addr;
	int i;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
		ebdp = (struct bufdesc_ex *)bdp;

		status = bdp->cbd_sc;
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_shinfo(skb)->frags[frag].size;

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
				estatus |= FEC_TX_BD_FTYPE(queue);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
			ebdp->cbd_bdu = 0;
			ebdp->cbd_esc = estatus;
		}

		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
		if (((unsigned long) bufaddr) & fep->tx_align ||
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];

			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);
		}

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

		bdp->cbd_bufaddr = addr;
		bdp->cbd_datlen = frag_len;
		bdp->cbd_sc = status;
	}

	txq->cur_tx = bdp;

	return 0;

dma_mapping_error:
	bdp = txq->cur_tx;
	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				bdp->cbd_datlen, DMA_TO_DEVICE);
	}
	return NETDEV_TX_OK;
}

static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
	dma_addr_t addr;
	unsigned short status;
	unsigned short buflen;
	unsigned short queue;
	unsigned int estatus = 0;
	unsigned int index;
	int entries_free;
	int ret;

	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->cur_tx;
	status = bdp->cbd_sc;
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	buflen = skb_headlen(skb);

	queue = skb_get_queue_mapping(skb);
	index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
	if (((unsigned long) bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);
	}

	/* Push the data cache so the CPM does not get stale memory data.
	 */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (nr_frags) {
		ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
		if (ret)
			return ret;
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(queue);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = estatus;
	}

	last_bdp = txq->cur_tx;
	index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	bdp->cbd_datlen = buflen;
	bdp->cbd_bufaddr = addr;

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);

	skb_tx_timestamp(skb);

	txq->cur_tx = bdp;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));

	return 0;
}

static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short queue = skb_get_queue_mapping(skb);
	unsigned short status;
	unsigned int estatus = 0;
	dma_addr_t addr;

	status = bdp->cbd_sc;
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	if (((unsigned long) data) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);
	}

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_BUSY;
	}

	bdp->cbd_datlen = size;
	bdp->cbd_bufaddr = addr;

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(queue);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = estatus;
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
			ebdp->cbd_esc |= BD_ENET_TX_INT;
	}

	bdp->cbd_sc = status;

	return 0;
}

static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short queue = skb_get_queue_mapping(skb);
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = bdp->cbd_sc;
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long)bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_BUSY;
		}
	}

	bdp->cbd_bufaddr = dmabuf;
	bdp->cbd_datlen = hdr_len;

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(queue);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = estatus;
	}

	bdp->cbd_sc = status;

	return 0;
}
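
/* A rough sketch of how the TSO submit path below uses the two helpers
 * above: for each gso_size-sized segment, fec_enet_txq_put_hdr_tso()
 * points one BD at a per-slot buffer inside txq->tso_hdrs (the rebuilt
 * MAC + IP + TCP headers), and fec_enet_txq_put_data_tso() then maps the
 * segment payload into one or more following BDs.  IS_TSO_HEADER() lets
 * the TX completion path skip dma_unmap for the preallocated header area.
 */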

static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len, data_left;
	struct bufdesc *bdp = txq->cur_tx;
	unsigned short queue = skb_get_queue_mapping(skb);
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, fep, queue);
			index = fec_enet_get_bd_index(txq->tx_bd_base,
						      bdp, fep);
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
	}

	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	skb_tx_timestamp(skb);
	txq->cur_tx = bdp;

	/* Trigger transmission start */
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
		writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	return ret;
}

static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
	if (ret)
		return ret;

	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}

/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->rx_bd_base;

		for (i = 0; i < rxq->rx_ring_size; i++) {

			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = BD_ENET_RX_EMPTY;
			else
				bdp->cbd_sc = 0;
			bdp = fec_enet_get_nextdesc(bdp, fep, q);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, fep, q);
		bdp->cbd_sc |= BD_SC_WRAP;

		rxq->cur_rx = rxq->rx_bd_base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->tx_bd_base;
		txq->cur_tx = bdp;

		for (i = 0; i < txq->tx_ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = 0;
			if (txq->tx_skbuff[i]) {
				dev_kfree_skb_any(txq->tx_skbuff[i]);
				txq->tx_skbuff[i] = NULL;
			}
			bdp->cbd_bufaddr = 0;
			bdp = fec_enet_get_nextdesc(bdp, fep, q);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, fep, q);
		bdp->cbd_sc |= BD_SC_WRAP;
		txq->dirty_tx = bdp;
	}
}

static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
}

static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
		writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
	}
}

static void fec_enet_reset_skb(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	int i, j;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];

		for (j = 0; j < txq->tx_ring_size; j++) {
			if (txq->tx_skbuff[j]) {
				dev_kfree_skb_any(txq->tx_skbuff[j]);
				txq->tx_skbuff[j] = NULL;
			}
		}
	}
}

/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC.  The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = 0x2; /* ETHEREN */

	/* Whack a reset.  We should wait for this.
	 * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}

	/*
	 * The enet-mac reset also clears the MAC address registers,
	 * so they need to be reconfigured here.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
	}

	/* Clear any outstanding interrupt. */
	writel(0xffffffff, fep->hwp + FEC_IEVENT);

	fec_enet_bd_init(ndev);

	fec_enet_enable_ring(ndev);

	/* Reset tx SKB buffers. */
	fec_enet_reset_skb(ndev);

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	/* set RX checksum */
	val = readl(fep->hwp + FEC_RACC);
	if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
		val |= FEC_RACC_OPTIONS;
	else
		val &= ~FEC_RACC_OPTIONS;
	writel(val, fep->hwp + FEC_RACC);
#endif

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= (1 << 8);
		else
			rcntl &= ~(1 << 8);

		/* 1G, 100M or 10M */
		if (fep->phy_dev) {
			if (fep->phy_dev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (fep->phy_dev->speed == SPEED_100)
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

#if !defined(CONFIG_M5272)
	/* enable pause frame */
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     fep->phy_dev && fep->phy_dev->pause)) {
		rcntl |= FEC_ENET_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_ENET_FCE;
	}
#endif /* !defined(CONFIG_M5272) */

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	fec_enet_active_rxring(ndev);

	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

	/* Enable interrupts we wish to service */
	if (fep->link)
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	else
		writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);

	/* Init the interrupt coalescing */
	fec_enet_itr_coal_init(ndev);

}

static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset.  We should wait for this.
	 * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		writel(2, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
}


static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fec_dump(ndev);

	ndev->stats.tx_errors++;

	schedule_work(&fep->tx_timeout_work);
}

static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
	rtnl_unlock();
}

static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
	struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int index = 0;
	int entries_free;

	fep = netdev_priv(ndev);

	queue_id = FEC_ENET_GET_QUQUE(queue_id);

	txq = fep->tx_queue[queue_id];
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {

		/* current queue is empty */
		if (bdp == txq->cur_tx)
			break;

		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);

		skb = txq->tx_skbuff[index];
		txq->tx_skbuff[index] = NULL;
		if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
					 bdp->cbd_datlen, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;
		if (!skb) {
			bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
			continue;
		}

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
		}

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			fep->bufdesc_ex) {
			struct skb_shared_hwtstamps shhwtstamps;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
			skb_tstamp_tx(skb, &shhwtstamps);
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);

		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_queue_stopped(ndev)) {
			entries_free = fec_enet_get_free_txdesc_num(fep, txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);
		}
	}

	/* ERR006538: Keep the transmitter going */
	if (bdp != txq->cur_tx &&
	    readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
		writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
}

static void
fec_enet_tx(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u16 queue_id;
	/* First process class A queue, then Class B and Best Effort queue */
	for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
		clear_bit(queue_id, &fep->work_tx);
		fec_enet_tx_queue(ndev, queue_id);
	}
	return;
}

static int
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int off;

	off = ((unsigned long)skb->data) & fep->rx_align;
	if (off)
		skb_reserve(skb, fep->rx_align + 1 - off);

	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
					  FEC_ENET_RX_FRSIZE - fep->rx_align,
					  DMA_FROM_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
		if (net_ratelimit())
			netdev_err(ndev, "Rx DMA memory map failed\n");
		return -ENOMEM;
	}

	return 0;
}

static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
			       struct bufdesc *bdp, u32 length, bool swap)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sk_buff *new_skb;

	if (length > fep->rx_copybreak)
		return false;

	new_skb = netdev_alloc_skb(ndev, length);
	if (!new_skb)
		return false;

	dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_RX_FRSIZE - fep->rx_align,
				DMA_FROM_DEVICE);
	if (!swap)
		memcpy(new_skb->data, (*skb)->data, length);
	else
		swap_buffer2(new_skb->data, (*skb)->data, length);
	*skb = new_skb;

	return true;
}
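
/* Illustrative note on the copybreak path above: frames no longer than
 * fep->rx_copybreak (COPYBREAK_DEFAULT is 256 bytes) are copied into a
 * small freshly allocated skb, so the original DMA buffer stays mapped
 * and can be handed straight back to the hardware; larger frames keep
 * the original skb and a replacement receive buffer is allocated and
 * mapped in fec_enet_rx_queue() instead.
 */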

/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb_new = NULL;
	struct sk_buff *skb;
	ushort pkt_len;
	__u8 *data;
	int pkt_received = 0;
	struct bufdesc_ex *ebdp = NULL;
	bool vlan_packet_rcvd = false;
	u16 vlan_tag;
	int index = 0;
	bool is_copybreak;
	bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif
	queue_id = FEC_ENET_GET_QUQUE(queue_id);
	rxq = fep->rx_queue[queue_id];

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = rxq->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			netdev_err(ndev, "rcv is not +last\n");


		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;

		index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
		skb = rxq->rx_skbuff[index];

		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
						  need_swap);
		if (!is_copybreak) {
			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
			if (unlikely(!skb_new)) {
				ndev->stats.rx_dropped++;
				goto rx_processing_done;
			}
			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
					 FEC_ENET_RX_FRSIZE - fep->rx_align,
					 DMA_FROM_DEVICE);
		}

		prefetch(skb->data - NET_IP_ALIGN);
		skb_put(skb, pkt_len - 4);
		data = skb->data;
		if (!is_copybreak && need_swap)
			swap_buffer(data, pkt_len);

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
			fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;

			skb_copy_to_linear_data_offset(skb, VLAN_HLEN,
						       data, (2 * ETH_ALEN));
			skb_pull(skb, VLAN_HLEN);
		}

		skb->protocol = eth_type_trans(skb, ndev);

		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
			fec_enet_hwtstamp(fep, ebdp->ts,
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
			if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb_checksum_none_assert(skb);
			}
		}

		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);

		napi_gro_receive(&fep->napi, skb);

		if (is_copybreak) {
			dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
						   FEC_ENET_RX_FRSIZE - fep->rx_align,
						   DMA_FROM_DEVICE);
		} else {
			rxq->rx_skbuff[index] = skb_new;
			fec_enet_new_rxbdp(ndev, bdp, skb_new);
		}

rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = BD_ENET_RX_INT;
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
	}
	rxq->cur_rx = bdp;
	return pkt_received;
}

static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	int pkt_received = 0;
	u16 queue_id;
	struct fec_enet_private *fep = netdev_priv(ndev);

	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
		clear_bit(queue_id, &fep->work_rx);
		pkt_received += fec_enet_rx_queue(ndev,
					budget - pkt_received, queue_id);
	}
	return pkt_received;
}

static bool
fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
{
	if (int_events == 0)
		return false;

	if (int_events & FEC_ENET_RXF)
		fep->work_rx |= (1 << 2);
	if (int_events & FEC_ENET_RXF_1)
		fep->work_rx |= (1 << 0);
	if (int_events & FEC_ENET_RXF_2)
		fep->work_rx |= (1 << 1);

	if (int_events & FEC_ENET_TXF)
		fep->work_tx |= (1 << 2);
	if (int_events & FEC_ENET_TXF_1)
		fep->work_tx |= (1 << 0);
	if (int_events & FEC_ENET_TXF_2)
		fep->work_tx |= (1 << 1);

	return true;
}

static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	int_events = readl(fep->hwp + FEC_IEVENT);
	writel(int_events, fep->hwp + FEC_IEVENT);
	fec_enet_collect_events(fep, int_events);

	if (fep->work_tx || fep->work_rx) {
		ret = IRQ_HANDLED;

		if (napi_schedule_prep(&fep->napi)) {
			/* Disable the NAPI interrupts */
			writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
			__napi_schedule(&fep->napi);
		}
	}

	if (int_events & FEC_ENET_MII) {
		ret = IRQ_HANDLED;
		complete(&fep->mdio_done);
	}

	if (fep->ptp_clock)
		fec_ptp_check_pps_event(fep);

	return ret;
}

static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
	int pkts;

	pkts = fec_enet_rx(ndev, budget);

	fec_enet_tx(ndev);

	if (pkts < budget) {
		napi_complete(napi);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}
	return pkts;
}

/* ------------------------------------------------------------------------- */
static void fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	/*
	 * Try to get the MAC address in the following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (mac)
				iap = (unsigned char *) mac;
		}
	}

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		netdev_info(ndev, "Using random MAC address: %pM\n",
			    ndev->dev_addr);
		return;
	}

	memcpy(ndev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using macaddr */
	if (iap == macaddr)
		ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}

/* ------------------------------------------------------------------------- */

/*
 * Phy section
 */
static void fec_enet_adjust_link(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = fep->phy_dev;
	int status_change = 0;

	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
		return;
	}

	/*
	 * If the netdev is down, or is going down, we're not interested
	 * in link state events, so just mark our idea of the link as down
	 * and ignore the event.
	 */
	if (!netif_running(ndev) || !netif_device_present(ndev)) {
		fep->link = 0;
	} else if (phy_dev->link) {
		if (!fep->link) {
			fep->link = phy_dev->link;
			status_change = 1;
		}

		if (fep->full_duplex != phy_dev->duplex) {
			fep->full_duplex = phy_dev->duplex;
			status_change = 1;
		}

		if (phy_dev->speed != fep->speed) {
			fep->speed = phy_dev->speed;
			status_change = 1;
		}

		/* if any of the above changed restart the FEC */
		if (status_change) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_restart(ndev);
			netif_wake_queue(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
		}
	} else {
		if (fep->link) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_stop(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
			fep->link = phy_dev->link;
			status_change = 1;
		}
	}

	if (status_change)
		phy_print_status(phy_dev);
}

static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO read timeout\n");
		return -ETIMEDOUT;
	}

	/* return value */
	return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}
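
/* For illustration (addresses are an example only): reading register 1
 * of the PHY at address 2 writes
 *   FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(2) | FEC_MMFR_RA(1) |
 *   FEC_MMFR_TA
 * to FEC_MII_DATA; the MII event in fec_enet_interrupt() then completes
 * mdio_done and the 16-bit result is read back from the low half of the
 * same register via FEC_MMFR_DATA().
 */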

static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO write timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (enable) {
		ret = clk_prepare_enable(fep->clk_ahb);
		if (ret)
			return ret;
		ret = clk_prepare_enable(fep->clk_ipg);
		if (ret)
			goto failed_clk_ipg;
		if (fep->clk_enet_out) {
			ret = clk_prepare_enable(fep->clk_enet_out);
			if (ret)
				goto failed_clk_enet_out;
		}
		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			ret = clk_prepare_enable(fep->clk_ptp);
			if (ret) {
				mutex_unlock(&fep->ptp_clk_mutex);
				goto failed_clk_ptp;
			} else {
				fep->ptp_clk_on = true;
			}
			mutex_unlock(&fep->ptp_clk_mutex);
		}
		if (fep->clk_ref) {
			ret = clk_prepare_enable(fep->clk_ref);
			if (ret)
				goto failed_clk_ref;
		}
	} else {
		clk_disable_unprepare(fep->clk_ahb);
		clk_disable_unprepare(fep->clk_ipg);
		if (fep->clk_enet_out)
			clk_disable_unprepare(fep->clk_enet_out);
		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			clk_disable_unprepare(fep->clk_ptp);
			fep->ptp_clk_on = false;
			mutex_unlock(&fep->ptp_clk_mutex);
		}
		if (fep->clk_ref)
			clk_disable_unprepare(fep->clk_ref);
	}

	return 0;

failed_clk_ref:
	if (fep->clk_ref)
		clk_disable_unprepare(fep->clk_ref);
failed_clk_ptp:
	if (fep->clk_enet_out)
		clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
	clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
	clk_disable_unprepare(fep->clk_ahb);

	return ret;
}

static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->dev_id;

	fep->phy_dev = NULL;

	if (fep->phy_node) {
		phy_dev = of_phy_connect(ndev, fep->phy_node,
					 &fec_enet_adjust_link, 0,
					 fep->phy_interface);
		if (!phy_dev)
			return -ENODEV;
	} else {
		/* check for attached phy */
		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
			if ((fep->mii_bus->phy_mask & (1 << phy_id)))
				continue;
			if (fep->mii_bus->phy_map[phy_id] == NULL)
				continue;
			if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
				continue;
			if (dev_id--)
				continue;
			strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
			break;
		}

		if (phy_id >= PHY_MAX_ADDR) {
			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
			strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
			phy_id = 0;
		}

		snprintf(phy_name, sizeof(phy_name),
			 PHY_ID_FMT, mdio_bus_id, phy_id);
		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
				      fep->phy_interface);
	}

	if (IS_ERR(phy_dev)) {
		netdev_err(ndev, "could not attach to PHY\n");
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
#if !defined(CONFIG_M5272)
		phy_dev->supported |= SUPPORTED_Pause;
#endif
	}
	else
		phy_dev->supported &= PHY_BASIC_FEATURES;

	phy_dev->advertising = phy_dev->supported;

	fep->phy_dev = phy_dev;
	fep->link = 0;
	fep->full_duplex = 0;

	netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		    fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
		    fep->phy_dev->irq);

	return 0;
}

static int fec_enet_mii_init(struct platform_device *pdev)
{
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *node;
	int err = -ENXIO, i;

	/*
	 * The i.MX28 dual fec interfaces are not equal.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 can not work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
		/* fec1 uses fec0 mii_bus */
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
	}

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
	 * Reference Manual has an error on this; it is corrected in the
	 * i.MX6Q documentation.
	 */
	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
		fep->phy_speed--;
	fep->phy_speed <<= 1;
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
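
	/*
	 * Worked example (figures are illustrative only): with a 66 MHz
	 * ipg clock, DIV_ROUND_UP(66000000, 5000000) = 14; on ENET-MAC
	 * this is decremented to 13 and shifted left by one before being
	 * written, giving MDC = 66 MHz / ((13 + 1) * 2) ~= 2.36 MHz,
	 * which stays below the 2.5 MHz limit.
	 */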
1975 */ 1976 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); 1977 if (fep->quirks & FEC_QUIRK_ENET_MAC) 1978 fep->phy_speed--; 1979 fep->phy_speed <<= 1; 1980 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1981 1982 fep->mii_bus = mdiobus_alloc(); 1983 if (fep->mii_bus == NULL) { 1984 err = -ENOMEM; 1985 goto err_out; 1986 } 1987 1988 fep->mii_bus->name = "fec_enet_mii_bus"; 1989 fep->mii_bus->read = fec_enet_mdio_read; 1990 fep->mii_bus->write = fec_enet_mdio_write; 1991 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 1992 pdev->name, fep->dev_id + 1); 1993 fep->mii_bus->priv = fep; 1994 fep->mii_bus->parent = &pdev->dev; 1995 1996 fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); 1997 if (!fep->mii_bus->irq) { 1998 err = -ENOMEM; 1999 goto err_out_free_mdiobus; 2000 } 2001 2002 for (i = 0; i < PHY_MAX_ADDR; i++) 2003 fep->mii_bus->irq[i] = PHY_POLL; 2004 2005 node = of_get_child_by_name(pdev->dev.of_node, "mdio"); 2006 if (node) { 2007 err = of_mdiobus_register(fep->mii_bus, node); 2008 of_node_put(node); 2009 } else { 2010 err = mdiobus_register(fep->mii_bus); 2011 } 2012 2013 if (err) 2014 goto err_out_free_mdio_irq; 2015 2016 mii_cnt++; 2017 2018 /* save fec0 mii_bus */ 2019 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) 2020 fec0_mii_bus = fep->mii_bus; 2021 2022 return 0; 2023 2024 err_out_free_mdio_irq: 2025 kfree(fep->mii_bus->irq); 2026 err_out_free_mdiobus: 2027 mdiobus_free(fep->mii_bus); 2028 err_out: 2029 return err; 2030 } 2031 2032 static void fec_enet_mii_remove(struct fec_enet_private *fep) 2033 { 2034 if (--mii_cnt == 0) { 2035 mdiobus_unregister(fep->mii_bus); 2036 kfree(fep->mii_bus->irq); 2037 mdiobus_free(fep->mii_bus); 2038 } 2039 } 2040 2041 static int fec_enet_get_settings(struct net_device *ndev, 2042 struct ethtool_cmd *cmd) 2043 { 2044 struct fec_enet_private *fep = netdev_priv(ndev); 2045 struct phy_device *phydev = fep->phy_dev; 2046 2047 if (!phydev) 2048 return -ENODEV; 2049 2050 return phy_ethtool_gset(phydev, cmd); 2051 } 2052 2053 static int fec_enet_set_settings(struct net_device *ndev, 2054 struct ethtool_cmd *cmd) 2055 { 2056 struct fec_enet_private *fep = netdev_priv(ndev); 2057 struct phy_device *phydev = fep->phy_dev; 2058 2059 if (!phydev) 2060 return -ENODEV; 2061 2062 return phy_ethtool_sset(phydev, cmd); 2063 } 2064 2065 static void fec_enet_get_drvinfo(struct net_device *ndev, 2066 struct ethtool_drvinfo *info) 2067 { 2068 struct fec_enet_private *fep = netdev_priv(ndev); 2069 2070 strlcpy(info->driver, fep->pdev->dev.driver->name, 2071 sizeof(info->driver)); 2072 strlcpy(info->version, "Revision: 1.0", sizeof(info->version)); 2073 strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); 2074 } 2075 2076 static int fec_enet_get_ts_info(struct net_device *ndev, 2077 struct ethtool_ts_info *info) 2078 { 2079 struct fec_enet_private *fep = netdev_priv(ndev); 2080 2081 if (fep->bufdesc_ex) { 2082 2083 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 2084 SOF_TIMESTAMPING_RX_SOFTWARE | 2085 SOF_TIMESTAMPING_SOFTWARE | 2086 SOF_TIMESTAMPING_TX_HARDWARE | 2087 SOF_TIMESTAMPING_RX_HARDWARE | 2088 SOF_TIMESTAMPING_RAW_HARDWARE; 2089 if (fep->ptp_clock) 2090 info->phc_index = ptp_clock_index(fep->ptp_clock); 2091 else 2092 info->phc_index = -1; 2093 2094 info->tx_types = (1 << HWTSTAMP_TX_OFF) | 2095 (1 << HWTSTAMP_TX_ON); 2096 2097 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2098 (1 << HWTSTAMP_FILTER_ALL); 2099 return 0; 2100 } else { 2101 return ethtool_op_get_ts_info(ndev, info); 2102 
} 2103 } 2104 2105 #if !defined(CONFIG_M5272) 2106 2107 static void fec_enet_get_pauseparam(struct net_device *ndev, 2108 struct ethtool_pauseparam *pause) 2109 { 2110 struct fec_enet_private *fep = netdev_priv(ndev); 2111 2112 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; 2113 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; 2114 pause->rx_pause = pause->tx_pause; 2115 } 2116 2117 static int fec_enet_set_pauseparam(struct net_device *ndev, 2118 struct ethtool_pauseparam *pause) 2119 { 2120 struct fec_enet_private *fep = netdev_priv(ndev); 2121 2122 if (!fep->phy_dev) 2123 return -ENODEV; 2124 2125 if (pause->tx_pause != pause->rx_pause) { 2126 netdev_info(ndev, 2127 "hardware only support enable/disable both tx and rx"); 2128 return -EINVAL; 2129 } 2130 2131 fep->pause_flag = 0; 2132 2133 /* tx pause must be same as rx pause */ 2134 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; 2135 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; 2136 2137 if (pause->rx_pause || pause->autoneg) { 2138 fep->phy_dev->supported |= ADVERTISED_Pause; 2139 fep->phy_dev->advertising |= ADVERTISED_Pause; 2140 } else { 2141 fep->phy_dev->supported &= ~ADVERTISED_Pause; 2142 fep->phy_dev->advertising &= ~ADVERTISED_Pause; 2143 } 2144 2145 if (pause->autoneg) { 2146 if (netif_running(ndev)) 2147 fec_stop(ndev); 2148 phy_start_aneg(fep->phy_dev); 2149 } 2150 if (netif_running(ndev)) { 2151 napi_disable(&fep->napi); 2152 netif_tx_lock_bh(ndev); 2153 fec_restart(ndev); 2154 netif_wake_queue(ndev); 2155 netif_tx_unlock_bh(ndev); 2156 napi_enable(&fep->napi); 2157 } 2158 2159 return 0; 2160 } 2161 2162 static const struct fec_stat { 2163 char name[ETH_GSTRING_LEN]; 2164 u16 offset; 2165 } fec_stats[] = { 2166 /* RMON TX */ 2167 { "tx_dropped", RMON_T_DROP }, 2168 { "tx_packets", RMON_T_PACKETS }, 2169 { "tx_broadcast", RMON_T_BC_PKT }, 2170 { "tx_multicast", RMON_T_MC_PKT }, 2171 { "tx_crc_errors", RMON_T_CRC_ALIGN }, 2172 { "tx_undersize", RMON_T_UNDERSIZE }, 2173 { "tx_oversize", RMON_T_OVERSIZE }, 2174 { "tx_fragment", RMON_T_FRAG }, 2175 { "tx_jabber", RMON_T_JAB }, 2176 { "tx_collision", RMON_T_COL }, 2177 { "tx_64byte", RMON_T_P64 }, 2178 { "tx_65to127byte", RMON_T_P65TO127 }, 2179 { "tx_128to255byte", RMON_T_P128TO255 }, 2180 { "tx_256to511byte", RMON_T_P256TO511 }, 2181 { "tx_512to1023byte", RMON_T_P512TO1023 }, 2182 { "tx_1024to2047byte", RMON_T_P1024TO2047 }, 2183 { "tx_GTE2048byte", RMON_T_P_GTE2048 }, 2184 { "tx_octets", RMON_T_OCTETS }, 2185 2186 /* IEEE TX */ 2187 { "IEEE_tx_drop", IEEE_T_DROP }, 2188 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, 2189 { "IEEE_tx_1col", IEEE_T_1COL }, 2190 { "IEEE_tx_mcol", IEEE_T_MCOL }, 2191 { "IEEE_tx_def", IEEE_T_DEF }, 2192 { "IEEE_tx_lcol", IEEE_T_LCOL }, 2193 { "IEEE_tx_excol", IEEE_T_EXCOL }, 2194 { "IEEE_tx_macerr", IEEE_T_MACERR }, 2195 { "IEEE_tx_cserr", IEEE_T_CSERR }, 2196 { "IEEE_tx_sqe", IEEE_T_SQE }, 2197 { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, 2198 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, 2199 2200 /* RMON RX */ 2201 { "rx_packets", RMON_R_PACKETS }, 2202 { "rx_broadcast", RMON_R_BC_PKT }, 2203 { "rx_multicast", RMON_R_MC_PKT }, 2204 { "rx_crc_errors", RMON_R_CRC_ALIGN }, 2205 { "rx_undersize", RMON_R_UNDERSIZE }, 2206 { "rx_oversize", RMON_R_OVERSIZE }, 2207 { "rx_fragment", RMON_R_FRAG }, 2208 { "rx_jabber", RMON_R_JAB }, 2209 { "rx_64byte", RMON_R_P64 }, 2210 { "rx_65to127byte", RMON_R_P65TO127 }, 2211 { "rx_128to255byte", RMON_R_P128TO255 }, 2212 { "rx_256to511byte", RMON_R_P256TO511 }, 2213 
{ "rx_512to1023byte", RMON_R_P512TO1023 }, 2214 { "rx_1024to2047byte", RMON_R_P1024TO2047 }, 2215 { "rx_GTE2048byte", RMON_R_P_GTE2048 }, 2216 { "rx_octets", RMON_R_OCTETS }, 2217 2218 /* IEEE RX */ 2219 { "IEEE_rx_drop", IEEE_R_DROP }, 2220 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, 2221 { "IEEE_rx_crc", IEEE_R_CRC }, 2222 { "IEEE_rx_align", IEEE_R_ALIGN }, 2223 { "IEEE_rx_macerr", IEEE_R_MACERR }, 2224 { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, 2225 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, 2226 }; 2227 2228 static void fec_enet_get_ethtool_stats(struct net_device *dev, 2229 struct ethtool_stats *stats, u64 *data) 2230 { 2231 struct fec_enet_private *fep = netdev_priv(dev); 2232 int i; 2233 2234 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2235 data[i] = readl(fep->hwp + fec_stats[i].offset); 2236 } 2237 2238 static void fec_enet_get_strings(struct net_device *netdev, 2239 u32 stringset, u8 *data) 2240 { 2241 int i; 2242 switch (stringset) { 2243 case ETH_SS_STATS: 2244 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2245 memcpy(data + i * ETH_GSTRING_LEN, 2246 fec_stats[i].name, ETH_GSTRING_LEN); 2247 break; 2248 } 2249 } 2250 2251 static int fec_enet_get_sset_count(struct net_device *dev, int sset) 2252 { 2253 switch (sset) { 2254 case ETH_SS_STATS: 2255 return ARRAY_SIZE(fec_stats); 2256 default: 2257 return -EOPNOTSUPP; 2258 } 2259 } 2260 #endif /* !defined(CONFIG_M5272) */ 2261 2262 static int fec_enet_nway_reset(struct net_device *dev) 2263 { 2264 struct fec_enet_private *fep = netdev_priv(dev); 2265 struct phy_device *phydev = fep->phy_dev; 2266 2267 if (!phydev) 2268 return -ENODEV; 2269 2270 return genphy_restart_aneg(phydev); 2271 } 2272 2273 /* ITR clock source is enet system clock (clk_ahb). 2274 * TCTT unit is cycle_ns * 64 cycle 2275 * So, the ICTT value = X us / (cycle_ns * 64) 2276 */ 2277 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) 2278 { 2279 struct fec_enet_private *fep = netdev_priv(ndev); 2280 2281 return us * (fep->itr_clk_rate / 64000) / 1000; 2282 } 2283 2284 /* Set threshold for interrupt coalescing */ 2285 static void fec_enet_itr_coal_set(struct net_device *ndev) 2286 { 2287 struct fec_enet_private *fep = netdev_priv(ndev); 2288 int rx_itr, tx_itr; 2289 2290 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 2291 return; 2292 2293 /* Must be greater than zero to avoid unpredictable behavior */ 2294 if (!fep->rx_time_itr || !fep->rx_pkts_itr || 2295 !fep->tx_time_itr || !fep->tx_pkts_itr) 2296 return; 2297 2298 /* Select enet system clock as Interrupt Coalescing 2299 * timer Clock Source 2300 */ 2301 rx_itr = FEC_ITR_CLK_SEL; 2302 tx_itr = FEC_ITR_CLK_SEL; 2303 2304 /* set ICFT and ICTT */ 2305 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); 2306 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); 2307 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); 2308 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); 2309 2310 rx_itr |= FEC_ITR_EN; 2311 tx_itr |= FEC_ITR_EN; 2312 2313 writel(tx_itr, fep->hwp + FEC_TXIC0); 2314 writel(rx_itr, fep->hwp + FEC_RXIC0); 2315 writel(tx_itr, fep->hwp + FEC_TXIC1); 2316 writel(rx_itr, fep->hwp + FEC_RXIC1); 2317 writel(tx_itr, fep->hwp + FEC_TXIC2); 2318 writel(rx_itr, fep->hwp + FEC_RXIC2); 2319 } 2320 2321 static int 2322 fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) 2323 { 2324 struct fec_enet_private *fep = netdev_priv(ndev); 2325 2326 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 2327 return -EOPNOTSUPP; 2328 2329 ec->rx_coalesce_usecs = fep->rx_time_itr; 2330 
ec->rx_max_coalesced_frames = fep->rx_pkts_itr;

	ec->tx_coalesce_usecs = fep->tx_time_itr;
	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;

	return 0;
}

static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int cycle;

	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
		return -EOPNOTSUPP;

	if (ec->rx_max_coalesced_frames > 255) {
		pr_err("Rx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	if (ec->tx_max_coalesced_frames > 255) {
		pr_err("Tx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
	if (cycle > 0xFFFF) {
		pr_err("Rx coalesced usec exceed hardware limitation\n");
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
	if (cycle > 0xFFFF) {
		pr_err("Tx coalesced usec exceed hardware limitation\n");
		return -EINVAL;
	}

	fep->rx_time_itr = ec->rx_coalesce_usecs;
	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;

	fep->tx_time_itr = ec->tx_coalesce_usecs;
	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;

	fec_enet_itr_coal_set(ndev);

	return 0;
}

static void fec_enet_itr_coal_init(struct net_device *ndev)
{
	struct ethtool_coalesce ec;

	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	fec_enet_set_coalesce(ndev, &ec);
}

static int fec_enet_get_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = fep->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int fec_enet_set_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				const void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		fep->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct ethtool_ops fec_enet_ethtool_ops = {
	.get_settings		= fec_enet_get_settings,
	.set_settings		= fec_enet_set_settings,
	.get_drvinfo		= fec_enet_get_drvinfo,
	.nway_reset		= fec_enet_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= fec_enet_get_coalesce,
	.set_coalesce		= fec_enet_set_coalesce,
#ifndef CONFIG_M5272
	.get_pauseparam		= fec_enet_get_pauseparam,
	.set_pauseparam		= fec_enet_set_pauseparam,
	.get_strings		= fec_enet_get_strings,
	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
	.get_sset_count		= fec_enet_get_sset_count,
#endif
	.get_ts_info		= fec_enet_get_ts_info,
	.get_tunable		= fec_enet_get_tunable,
	.set_tunable		= fec_enet_set_tunable,
};

static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	if
(!netif_running(ndev)) 2457 return -EINVAL; 2458 2459 if (!phydev) 2460 return -ENODEV; 2461 2462 if (fep->bufdesc_ex) { 2463 if (cmd == SIOCSHWTSTAMP) 2464 return fec_ptp_set(ndev, rq); 2465 if (cmd == SIOCGHWTSTAMP) 2466 return fec_ptp_get(ndev, rq); 2467 } 2468 2469 return phy_mii_ioctl(phydev, rq, cmd); 2470 } 2471 2472 static void fec_enet_free_buffers(struct net_device *ndev) 2473 { 2474 struct fec_enet_private *fep = netdev_priv(ndev); 2475 unsigned int i; 2476 struct sk_buff *skb; 2477 struct bufdesc *bdp; 2478 struct fec_enet_priv_tx_q *txq; 2479 struct fec_enet_priv_rx_q *rxq; 2480 unsigned int q; 2481 2482 for (q = 0; q < fep->num_rx_queues; q++) { 2483 rxq = fep->rx_queue[q]; 2484 bdp = rxq->rx_bd_base; 2485 for (i = 0; i < rxq->rx_ring_size; i++) { 2486 skb = rxq->rx_skbuff[i]; 2487 rxq->rx_skbuff[i] = NULL; 2488 if (skb) { 2489 dma_unmap_single(&fep->pdev->dev, 2490 bdp->cbd_bufaddr, 2491 FEC_ENET_RX_FRSIZE - fep->rx_align, 2492 DMA_FROM_DEVICE); 2493 dev_kfree_skb(skb); 2494 } 2495 bdp = fec_enet_get_nextdesc(bdp, fep, q); 2496 } 2497 } 2498 2499 for (q = 0; q < fep->num_tx_queues; q++) { 2500 txq = fep->tx_queue[q]; 2501 bdp = txq->tx_bd_base; 2502 for (i = 0; i < txq->tx_ring_size; i++) { 2503 kfree(txq->tx_bounce[i]); 2504 txq->tx_bounce[i] = NULL; 2505 skb = txq->tx_skbuff[i]; 2506 txq->tx_skbuff[i] = NULL; 2507 dev_kfree_skb(skb); 2508 } 2509 } 2510 } 2511 2512 static void fec_enet_free_queue(struct net_device *ndev) 2513 { 2514 struct fec_enet_private *fep = netdev_priv(ndev); 2515 int i; 2516 struct fec_enet_priv_tx_q *txq; 2517 2518 for (i = 0; i < fep->num_tx_queues; i++) 2519 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { 2520 txq = fep->tx_queue[i]; 2521 dma_free_coherent(NULL, 2522 txq->tx_ring_size * TSO_HEADER_SIZE, 2523 txq->tso_hdrs, 2524 txq->tso_hdrs_dma); 2525 } 2526 2527 for (i = 0; i < fep->num_rx_queues; i++) 2528 if (fep->rx_queue[i]) 2529 kfree(fep->rx_queue[i]); 2530 2531 for (i = 0; i < fep->num_tx_queues; i++) 2532 if (fep->tx_queue[i]) 2533 kfree(fep->tx_queue[i]); 2534 } 2535 2536 static int fec_enet_alloc_queue(struct net_device *ndev) 2537 { 2538 struct fec_enet_private *fep = netdev_priv(ndev); 2539 int i; 2540 int ret = 0; 2541 struct fec_enet_priv_tx_q *txq; 2542 2543 for (i = 0; i < fep->num_tx_queues; i++) { 2544 txq = kzalloc(sizeof(*txq), GFP_KERNEL); 2545 if (!txq) { 2546 ret = -ENOMEM; 2547 goto alloc_failed; 2548 } 2549 2550 fep->tx_queue[i] = txq; 2551 txq->tx_ring_size = TX_RING_SIZE; 2552 fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; 2553 2554 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; 2555 txq->tx_wake_threshold = 2556 (txq->tx_ring_size - txq->tx_stop_threshold) / 2; 2557 2558 txq->tso_hdrs = dma_alloc_coherent(NULL, 2559 txq->tx_ring_size * TSO_HEADER_SIZE, 2560 &txq->tso_hdrs_dma, 2561 GFP_KERNEL); 2562 if (!txq->tso_hdrs) { 2563 ret = -ENOMEM; 2564 goto alloc_failed; 2565 } 2566 } 2567 2568 for (i = 0; i < fep->num_rx_queues; i++) { 2569 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), 2570 GFP_KERNEL); 2571 if (!fep->rx_queue[i]) { 2572 ret = -ENOMEM; 2573 goto alloc_failed; 2574 } 2575 2576 fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; 2577 fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; 2578 } 2579 return ret; 2580 2581 alloc_failed: 2582 fec_enet_free_queue(ndev); 2583 return ret; 2584 } 2585 2586 static int 2587 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) 2588 { 2589 struct fec_enet_private *fep = netdev_priv(ndev); 2590 unsigned int i; 2591 struct sk_buff 
*skb; 2592 struct bufdesc *bdp; 2593 struct fec_enet_priv_rx_q *rxq; 2594 2595 rxq = fep->rx_queue[queue]; 2596 bdp = rxq->rx_bd_base; 2597 for (i = 0; i < rxq->rx_ring_size; i++) { 2598 skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); 2599 if (!skb) 2600 goto err_alloc; 2601 2602 if (fec_enet_new_rxbdp(ndev, bdp, skb)) { 2603 dev_kfree_skb(skb); 2604 goto err_alloc; 2605 } 2606 2607 rxq->rx_skbuff[i] = skb; 2608 bdp->cbd_sc = BD_ENET_RX_EMPTY; 2609 2610 if (fep->bufdesc_ex) { 2611 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 2612 ebdp->cbd_esc = BD_ENET_RX_INT; 2613 } 2614 2615 bdp = fec_enet_get_nextdesc(bdp, fep, queue); 2616 } 2617 2618 /* Set the last buffer to wrap. */ 2619 bdp = fec_enet_get_prevdesc(bdp, fep, queue); 2620 bdp->cbd_sc |= BD_SC_WRAP; 2621 return 0; 2622 2623 err_alloc: 2624 fec_enet_free_buffers(ndev); 2625 return -ENOMEM; 2626 } 2627 2628 static int 2629 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) 2630 { 2631 struct fec_enet_private *fep = netdev_priv(ndev); 2632 unsigned int i; 2633 struct bufdesc *bdp; 2634 struct fec_enet_priv_tx_q *txq; 2635 2636 txq = fep->tx_queue[queue]; 2637 bdp = txq->tx_bd_base; 2638 for (i = 0; i < txq->tx_ring_size; i++) { 2639 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); 2640 if (!txq->tx_bounce[i]) 2641 goto err_alloc; 2642 2643 bdp->cbd_sc = 0; 2644 bdp->cbd_bufaddr = 0; 2645 2646 if (fep->bufdesc_ex) { 2647 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 2648 ebdp->cbd_esc = BD_ENET_TX_INT; 2649 } 2650 2651 bdp = fec_enet_get_nextdesc(bdp, fep, queue); 2652 } 2653 2654 /* Set the last buffer to wrap. */ 2655 bdp = fec_enet_get_prevdesc(bdp, fep, queue); 2656 bdp->cbd_sc |= BD_SC_WRAP; 2657 2658 return 0; 2659 2660 err_alloc: 2661 fec_enet_free_buffers(ndev); 2662 return -ENOMEM; 2663 } 2664 2665 static int fec_enet_alloc_buffers(struct net_device *ndev) 2666 { 2667 struct fec_enet_private *fep = netdev_priv(ndev); 2668 unsigned int i; 2669 2670 for (i = 0; i < fep->num_rx_queues; i++) 2671 if (fec_enet_alloc_rxq_buffers(ndev, i)) 2672 return -ENOMEM; 2673 2674 for (i = 0; i < fep->num_tx_queues; i++) 2675 if (fec_enet_alloc_txq_buffers(ndev, i)) 2676 return -ENOMEM; 2677 return 0; 2678 } 2679 2680 static int 2681 fec_enet_open(struct net_device *ndev) 2682 { 2683 struct fec_enet_private *fep = netdev_priv(ndev); 2684 int ret; 2685 2686 pinctrl_pm_select_default_state(&fep->pdev->dev); 2687 ret = fec_enet_clk_enable(ndev, true); 2688 if (ret) 2689 return ret; 2690 2691 /* I should reset the ring buffers here, but I don't yet know 2692 * a simple way to do that. 
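	 * (In this version the rings are instead rebuilt on every open:
	 * fec_enet_alloc_buffers() below repopulates them, and
	 * fec_enet_free_buffers() tears them down again in fec_enet_close().)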
 */

	ret = fec_enet_alloc_buffers(ndev);
	if (ret)
		goto err_enet_alloc;

	/* Probe and connect to the PHY when opening the interface */
	ret = fec_enet_mii_probe(ndev);
	if (ret)
		goto err_enet_mii_probe;

	fec_restart(ndev);
	napi_enable(&fep->napi);
	phy_start(fep->phy_dev);
	netif_tx_start_all_queues(ndev);

	return 0;

err_enet_mii_probe:
	fec_enet_free_buffers(ndev);
err_enet_alloc:
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	return ret;
}

static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	phy_stop(fep->phy_dev);

	if (netif_device_present(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_disable(ndev);
		fec_stop(ndev);
	}

	phy_disconnect(fep->phy_dev);
	fep->phy_dev = NULL;

	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	fec_enet_free_buffers(ndev);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6	/* #bits in hash */
#define CRC32_POLY	0xEDB88320

static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;

	if (ndev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Clear the filter and add the addresses to the hash registers */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	netdev_for_each_mc_addr(ha, ndev) {
		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < ndev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only the upper 6 bits (HASH_BITS) are used,
		 * which point to a specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}

/* Set a MAC address change in the hardware.
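 *
 * The six address bytes are packed into FEC_ADDR_LOW/FEC_ADDR_HIGH with
 * dev_addr[0] in the most significant byte.  As a purely illustrative
 * example, a made-up MAC of 00:04:9f:01:02:03 would be programmed as
 * FEC_ADDR_LOW = 0x00049f01 and FEC_ADDR_HIGH = 0x02030000, matching the
 * shifts in fec_set_mac_address() below.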
*/ 2818 static int 2819 fec_set_mac_address(struct net_device *ndev, void *p) 2820 { 2821 struct fec_enet_private *fep = netdev_priv(ndev); 2822 struct sockaddr *addr = p; 2823 2824 if (addr) { 2825 if (!is_valid_ether_addr(addr->sa_data)) 2826 return -EADDRNOTAVAIL; 2827 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 2828 } 2829 2830 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | 2831 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), 2832 fep->hwp + FEC_ADDR_LOW); 2833 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), 2834 fep->hwp + FEC_ADDR_HIGH); 2835 return 0; 2836 } 2837 2838 #ifdef CONFIG_NET_POLL_CONTROLLER 2839 /** 2840 * fec_poll_controller - FEC Poll controller function 2841 * @dev: The FEC network adapter 2842 * 2843 * Polled functionality used by netconsole and others in non interrupt mode 2844 * 2845 */ 2846 static void fec_poll_controller(struct net_device *dev) 2847 { 2848 int i; 2849 struct fec_enet_private *fep = netdev_priv(dev); 2850 2851 for (i = 0; i < FEC_IRQ_NUM; i++) { 2852 if (fep->irq[i] > 0) { 2853 disable_irq(fep->irq[i]); 2854 fec_enet_interrupt(fep->irq[i], dev); 2855 enable_irq(fep->irq[i]); 2856 } 2857 } 2858 } 2859 #endif 2860 2861 #define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM 2862 static inline void fec_enet_set_netdev_features(struct net_device *netdev, 2863 netdev_features_t features) 2864 { 2865 struct fec_enet_private *fep = netdev_priv(netdev); 2866 netdev_features_t changed = features ^ netdev->features; 2867 2868 netdev->features = features; 2869 2870 /* Receive checksum has been changed */ 2871 if (changed & NETIF_F_RXCSUM) { 2872 if (features & NETIF_F_RXCSUM) 2873 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 2874 else 2875 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; 2876 } 2877 } 2878 2879 static int fec_set_features(struct net_device *netdev, 2880 netdev_features_t features) 2881 { 2882 struct fec_enet_private *fep = netdev_priv(netdev); 2883 netdev_features_t changed = features ^ netdev->features; 2884 2885 if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { 2886 napi_disable(&fep->napi); 2887 netif_tx_lock_bh(netdev); 2888 fec_stop(netdev); 2889 fec_enet_set_netdev_features(netdev, features); 2890 fec_restart(netdev); 2891 netif_tx_wake_all_queues(netdev); 2892 netif_tx_unlock_bh(netdev); 2893 napi_enable(&fep->napi); 2894 } else { 2895 fec_enet_set_netdev_features(netdev, features); 2896 } 2897 2898 return 0; 2899 } 2900 2901 static const struct net_device_ops fec_netdev_ops = { 2902 .ndo_open = fec_enet_open, 2903 .ndo_stop = fec_enet_close, 2904 .ndo_start_xmit = fec_enet_start_xmit, 2905 .ndo_set_rx_mode = set_multicast_list, 2906 .ndo_change_mtu = eth_change_mtu, 2907 .ndo_validate_addr = eth_validate_addr, 2908 .ndo_tx_timeout = fec_timeout, 2909 .ndo_set_mac_address = fec_set_mac_address, 2910 .ndo_do_ioctl = fec_enet_ioctl, 2911 #ifdef CONFIG_NET_POLL_CONTROLLER 2912 .ndo_poll_controller = fec_poll_controller, 2913 #endif 2914 .ndo_set_features = fec_set_features, 2915 }; 2916 2917 /* 2918 * XXX: We need to clean up on failure exits here. 
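 * (For instance, fec_enet_alloc_queue() below is neither checked for
 * failure nor unwound when the descriptor ring allocation that follows
 * it fails.)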
2919 * 2920 */ 2921 static int fec_enet_init(struct net_device *ndev) 2922 { 2923 struct fec_enet_private *fep = netdev_priv(ndev); 2924 struct fec_enet_priv_tx_q *txq; 2925 struct fec_enet_priv_rx_q *rxq; 2926 struct bufdesc *cbd_base; 2927 dma_addr_t bd_dma; 2928 int bd_size; 2929 unsigned int i; 2930 2931 #if defined(CONFIG_ARM) 2932 fep->rx_align = 0xf; 2933 fep->tx_align = 0xf; 2934 #else 2935 fep->rx_align = 0x3; 2936 fep->tx_align = 0x3; 2937 #endif 2938 2939 fec_enet_alloc_queue(ndev); 2940 2941 if (fep->bufdesc_ex) 2942 fep->bufdesc_size = sizeof(struct bufdesc_ex); 2943 else 2944 fep->bufdesc_size = sizeof(struct bufdesc); 2945 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * 2946 fep->bufdesc_size; 2947 2948 /* Allocate memory for buffer descriptors. */ 2949 cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, 2950 GFP_KERNEL); 2951 if (!cbd_base) { 2952 return -ENOMEM; 2953 } 2954 2955 memset(cbd_base, 0, bd_size); 2956 2957 /* Get the Ethernet address */ 2958 fec_get_mac(ndev); 2959 /* make sure MAC we just acquired is programmed into the hw */ 2960 fec_set_mac_address(ndev, NULL); 2961 2962 /* Set receive and transmit descriptor base. */ 2963 for (i = 0; i < fep->num_rx_queues; i++) { 2964 rxq = fep->rx_queue[i]; 2965 rxq->index = i; 2966 rxq->rx_bd_base = (struct bufdesc *)cbd_base; 2967 rxq->bd_dma = bd_dma; 2968 if (fep->bufdesc_ex) { 2969 bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; 2970 cbd_base = (struct bufdesc *) 2971 (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); 2972 } else { 2973 bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; 2974 cbd_base += rxq->rx_ring_size; 2975 } 2976 } 2977 2978 for (i = 0; i < fep->num_tx_queues; i++) { 2979 txq = fep->tx_queue[i]; 2980 txq->index = i; 2981 txq->tx_bd_base = (struct bufdesc *)cbd_base; 2982 txq->bd_dma = bd_dma; 2983 if (fep->bufdesc_ex) { 2984 bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; 2985 cbd_base = (struct bufdesc *) 2986 (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); 2987 } else { 2988 bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; 2989 cbd_base += txq->tx_ring_size; 2990 } 2991 } 2992 2993 2994 /* The FEC Ethernet specific entries in the device structure */ 2995 ndev->watchdog_timeo = TX_TIMEOUT; 2996 ndev->netdev_ops = &fec_netdev_ops; 2997 ndev->ethtool_ops = &fec_enet_ethtool_ops; 2998 2999 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 3000 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); 3001 3002 if (fep->quirks & FEC_QUIRK_HAS_VLAN) 3003 /* enable hw VLAN support */ 3004 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3005 3006 if (fep->quirks & FEC_QUIRK_HAS_CSUM) { 3007 ndev->gso_max_segs = FEC_MAX_TSO_SEGS; 3008 3009 /* enable hw accelerator */ 3010 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 3011 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); 3012 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 3013 } 3014 3015 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 3016 fep->tx_align = 0; 3017 fep->rx_align = 0x3f; 3018 } 3019 3020 ndev->hw_features = ndev->features; 3021 3022 fec_restart(ndev); 3023 3024 return 0; 3025 } 3026 3027 #ifdef CONFIG_OF 3028 static void fec_reset_phy(struct platform_device *pdev) 3029 { 3030 int err, phy_reset; 3031 int msec = 1; 3032 struct device_node *np = pdev->dev.of_node; 3033 3034 if (!np) 3035 return; 3036 3037 of_property_read_u32(np, "phy-reset-duration", &msec); 3038 /* A sane reset duration should not be longer than 1s */ 3039 if (msec > 1000) 3040 msec = 1; 3041 3042 phy_reset = 
of_get_named_gpio(np, "phy-reset-gpios", 0); 3043 if (!gpio_is_valid(phy_reset)) 3044 return; 3045 3046 err = devm_gpio_request_one(&pdev->dev, phy_reset, 3047 GPIOF_OUT_INIT_LOW, "phy-reset"); 3048 if (err) { 3049 dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); 3050 return; 3051 } 3052 msleep(msec); 3053 gpio_set_value(phy_reset, 1); 3054 } 3055 #else /* CONFIG_OF */ 3056 static void fec_reset_phy(struct platform_device *pdev) 3057 { 3058 /* 3059 * In case of platform probe, the reset has been done 3060 * by machine code. 3061 */ 3062 } 3063 #endif /* CONFIG_OF */ 3064 3065 static void 3066 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) 3067 { 3068 struct device_node *np = pdev->dev.of_node; 3069 int err; 3070 3071 *num_tx = *num_rx = 1; 3072 3073 if (!np || !of_device_is_available(np)) 3074 return; 3075 3076 /* parse the num of tx and rx queues */ 3077 err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx); 3078 if (err) 3079 *num_tx = 1; 3080 3081 err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx); 3082 if (err) 3083 *num_rx = 1; 3084 3085 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { 3086 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", 3087 *num_tx); 3088 *num_tx = 1; 3089 return; 3090 } 3091 3092 if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { 3093 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", 3094 *num_rx); 3095 *num_rx = 1; 3096 return; 3097 } 3098 3099 } 3100 3101 static int 3102 fec_probe(struct platform_device *pdev) 3103 { 3104 struct fec_enet_private *fep; 3105 struct fec_platform_data *pdata; 3106 struct net_device *ndev; 3107 int i, irq, ret = 0; 3108 struct resource *r; 3109 const struct of_device_id *of_id; 3110 static int dev_id; 3111 struct device_node *np = pdev->dev.of_node, *phy_node; 3112 int num_tx_qs; 3113 int num_rx_qs; 3114 3115 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); 3116 3117 /* Init network device */ 3118 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private), 3119 num_tx_qs, num_rx_qs); 3120 if (!ndev) 3121 return -ENOMEM; 3122 3123 SET_NETDEV_DEV(ndev, &pdev->dev); 3124 3125 /* setup board info structure */ 3126 fep = netdev_priv(ndev); 3127 3128 of_id = of_match_device(fec_dt_ids, &pdev->dev); 3129 if (of_id) 3130 pdev->id_entry = of_id->data; 3131 fep->quirks = pdev->id_entry->driver_data; 3132 3133 fep->netdev = ndev; 3134 fep->num_rx_queues = num_rx_qs; 3135 fep->num_tx_queues = num_tx_qs; 3136 3137 #if !defined(CONFIG_M5272) 3138 /* default enable pause frame auto negotiation */ 3139 if (fep->quirks & FEC_QUIRK_HAS_GBIT) 3140 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 3141 #endif 3142 3143 /* Select default pin state */ 3144 pinctrl_pm_select_default_state(&pdev->dev); 3145 3146 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3147 fep->hwp = devm_ioremap_resource(&pdev->dev, r); 3148 if (IS_ERR(fep->hwp)) { 3149 ret = PTR_ERR(fep->hwp); 3150 goto failed_ioremap; 3151 } 3152 3153 fep->pdev = pdev; 3154 fep->dev_id = dev_id++; 3155 3156 platform_set_drvdata(pdev, ndev); 3157 3158 phy_node = of_parse_phandle(np, "phy-handle", 0); 3159 if (!phy_node && of_phy_is_fixed_link(np)) { 3160 ret = of_phy_register_fixed_link(np); 3161 if (ret < 0) { 3162 dev_err(&pdev->dev, 3163 "broken fixed-link specification\n"); 3164 goto failed_phy; 3165 } 3166 phy_node = of_node_get(np); 3167 } 3168 fep->phy_node = phy_node; 3169 3170 ret = of_get_phy_mode(pdev->dev.of_node); 3171 if (ret < 0) { 3172 pdata = dev_get_platdata(&pdev->dev); 3173 if (pdata) 
3174 fep->phy_interface = pdata->phy; 3175 else 3176 fep->phy_interface = PHY_INTERFACE_MODE_MII; 3177 } else { 3178 fep->phy_interface = ret; 3179 } 3180 3181 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 3182 if (IS_ERR(fep->clk_ipg)) { 3183 ret = PTR_ERR(fep->clk_ipg); 3184 goto failed_clk; 3185 } 3186 3187 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 3188 if (IS_ERR(fep->clk_ahb)) { 3189 ret = PTR_ERR(fep->clk_ahb); 3190 goto failed_clk; 3191 } 3192 3193 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); 3194 3195 /* enet_out is optional, depends on board */ 3196 fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); 3197 if (IS_ERR(fep->clk_enet_out)) 3198 fep->clk_enet_out = NULL; 3199 3200 fep->ptp_clk_on = false; 3201 mutex_init(&fep->ptp_clk_mutex); 3202 3203 /* clk_ref is optional, depends on board */ 3204 fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); 3205 if (IS_ERR(fep->clk_ref)) 3206 fep->clk_ref = NULL; 3207 3208 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; 3209 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); 3210 if (IS_ERR(fep->clk_ptp)) { 3211 fep->clk_ptp = NULL; 3212 fep->bufdesc_ex = false; 3213 } 3214 3215 ret = fec_enet_clk_enable(ndev, true); 3216 if (ret) 3217 goto failed_clk; 3218 3219 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 3220 if (!IS_ERR(fep->reg_phy)) { 3221 ret = regulator_enable(fep->reg_phy); 3222 if (ret) { 3223 dev_err(&pdev->dev, 3224 "Failed to enable phy regulator: %d\n", ret); 3225 goto failed_regulator; 3226 } 3227 } else { 3228 fep->reg_phy = NULL; 3229 } 3230 3231 fec_reset_phy(pdev); 3232 3233 if (fep->bufdesc_ex) 3234 fec_ptp_init(pdev); 3235 3236 ret = fec_enet_init(ndev); 3237 if (ret) 3238 goto failed_init; 3239 3240 for (i = 0; i < FEC_IRQ_NUM; i++) { 3241 irq = platform_get_irq(pdev, i); 3242 if (irq < 0) { 3243 if (i) 3244 break; 3245 ret = irq; 3246 goto failed_irq; 3247 } 3248 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, 3249 0, pdev->name, ndev); 3250 if (ret) 3251 goto failed_irq; 3252 } 3253 3254 init_completion(&fep->mdio_done); 3255 ret = fec_enet_mii_init(pdev); 3256 if (ret) 3257 goto failed_mii_init; 3258 3259 /* Carrier starts down, phylib will bring it up */ 3260 netif_carrier_off(ndev); 3261 fec_enet_clk_enable(ndev, false); 3262 pinctrl_pm_select_sleep_state(&pdev->dev); 3263 3264 ret = register_netdev(ndev); 3265 if (ret) 3266 goto failed_register; 3267 3268 if (fep->bufdesc_ex && fep->ptp_clock) 3269 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); 3270 3271 fep->rx_copybreak = COPYBREAK_DEFAULT; 3272 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 3273 return 0; 3274 3275 failed_register: 3276 fec_enet_mii_remove(fep); 3277 failed_mii_init: 3278 failed_irq: 3279 failed_init: 3280 if (fep->reg_phy) 3281 regulator_disable(fep->reg_phy); 3282 failed_regulator: 3283 fec_enet_clk_enable(ndev, false); 3284 failed_clk: 3285 failed_phy: 3286 of_node_put(phy_node); 3287 failed_ioremap: 3288 free_netdev(ndev); 3289 3290 return ret; 3291 } 3292 3293 static int 3294 fec_drv_remove(struct platform_device *pdev) 3295 { 3296 struct net_device *ndev = platform_get_drvdata(pdev); 3297 struct fec_enet_private *fep = netdev_priv(ndev); 3298 3299 cancel_delayed_work_sync(&fep->time_keep); 3300 cancel_work_sync(&fep->tx_timeout_work); 3301 unregister_netdev(ndev); 3302 fec_enet_mii_remove(fep); 3303 if (fep->reg_phy) 3304 regulator_disable(fep->reg_phy); 3305 if (fep->ptp_clock) 3306 ptp_clock_unregister(fep->ptp_clock); 3307 fec_enet_clk_enable(ndev, false); 3308 
of_node_put(fep->phy_node); 3309 free_netdev(ndev); 3310 3311 return 0; 3312 } 3313 3314 static int __maybe_unused fec_suspend(struct device *dev) 3315 { 3316 struct net_device *ndev = dev_get_drvdata(dev); 3317 struct fec_enet_private *fep = netdev_priv(ndev); 3318 3319 rtnl_lock(); 3320 if (netif_running(ndev)) { 3321 phy_stop(fep->phy_dev); 3322 napi_disable(&fep->napi); 3323 netif_tx_lock_bh(ndev); 3324 netif_device_detach(ndev); 3325 netif_tx_unlock_bh(ndev); 3326 fec_stop(ndev); 3327 fec_enet_clk_enable(ndev, false); 3328 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3329 } 3330 rtnl_unlock(); 3331 3332 if (fep->reg_phy) 3333 regulator_disable(fep->reg_phy); 3334 3335 /* SOC supply clock to phy, when clock is disabled, phy link down 3336 * SOC control phy regulator, when regulator is disabled, phy link down 3337 */ 3338 if (fep->clk_enet_out || fep->reg_phy) 3339 fep->link = 0; 3340 3341 return 0; 3342 } 3343 3344 static int __maybe_unused fec_resume(struct device *dev) 3345 { 3346 struct net_device *ndev = dev_get_drvdata(dev); 3347 struct fec_enet_private *fep = netdev_priv(ndev); 3348 int ret; 3349 3350 if (fep->reg_phy) { 3351 ret = regulator_enable(fep->reg_phy); 3352 if (ret) 3353 return ret; 3354 } 3355 3356 rtnl_lock(); 3357 if (netif_running(ndev)) { 3358 pinctrl_pm_select_default_state(&fep->pdev->dev); 3359 ret = fec_enet_clk_enable(ndev, true); 3360 if (ret) { 3361 rtnl_unlock(); 3362 goto failed_clk; 3363 } 3364 fec_restart(ndev); 3365 netif_tx_lock_bh(ndev); 3366 netif_device_attach(ndev); 3367 netif_tx_unlock_bh(ndev); 3368 napi_enable(&fep->napi); 3369 phy_start(fep->phy_dev); 3370 } 3371 rtnl_unlock(); 3372 3373 return 0; 3374 3375 failed_clk: 3376 if (fep->reg_phy) 3377 regulator_disable(fep->reg_phy); 3378 return ret; 3379 } 3380 3381 static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); 3382 3383 static struct platform_driver fec_driver = { 3384 .driver = { 3385 .name = DRIVER_NAME, 3386 .pm = &fec_pm_ops, 3387 .of_match_table = fec_dt_ids, 3388 }, 3389 .id_table = fec_devtype, 3390 .probe = fec_probe, 3391 .remove = fec_drv_remove, 3392 }; 3393 3394 module_platform_driver(fec_driver); 3395 3396 MODULE_ALIAS("platform:"DRIVER_NAME); 3397 MODULE_LICENSE("GPL"); 3398
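
/*
 * Illustrative sketch only: a device-tree node exercising the properties
 * this driver parses in fec_probe(), fec_reset_phy() and
 * fec_enet_get_queue_num() above.  The unit address, register range,
 * interrupt numbers, clock and GPIO phandles and the regulator label are
 * all invented placeholders, and the compatible string must be one of the
 * entries in fec_dt_ids[].  The clock-names the driver looks up are "ipg",
 * "ahb", "enet_out", "enet_clk_ref" and "ptp"; only a subset is shown.
 *
 *	ethernet@2188000 {
 *		compatible = "fsl,<soc>-fec";
 *		reg = <0x02188000 0x4000>;
 *		interrupts = <0 118 4>, <0 119 4>;
 *		clocks = <&clks 117>, <&clks 118>, <&clks 190>;
 *		clock-names = "ipg", "ahb", "ptp";
 *		phy-mode = "rgmii";
 *		phy-supply = <&reg_eth_phy>;
 *		phy-handle = <&ethphy0>;
 *		phy-reset-gpios = <&gpio1 25 0>;
 *		phy-reset-duration = <10>;
 *		fsl,num-tx-queues = <3>;
 *		fsl,num-rx-queues = <3>;
 *
 *		mdio {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			ethphy0: ethernet-phy@0 {
 *				reg = <0>;
 *			};
 *		};
 *	};
 */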