// SPDX-License-Identifier: GPL-2.0+
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/page_pool/helpers.h>
#include <net/selftests.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/prefetch.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <asm/cacheflush.h>

#include "fec.h"

static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_set(struct net_device *ndev);
static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
				int cpu, struct xdp_buff *xdp,
				u32 dma_sync_len);

#define DRIVER_NAME	"fec"

static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};

#define FEC_ENET_RSEM_V		0x84
#define FEC_ENET_RSFL_V		16
#define FEC_ENET_RAEM_V		0x8
#define FEC_ENET_RAFL_V		0x8
#define FEC_ENET_OPD_V		0xFFF0
#define FEC_MDIO_PM_TIMEOUT	100 /* ms */

#define FEC_ENET_XDP_PASS	0
#define FEC_ENET_XDP_CONSUMED	BIT(0)
#define FEC_ENET_XDP_TX		BIT(1)
#define FEC_ENET_XDP_REDIR	BIT(2)

struct fec_devinfo {
	u32 quirks;
};

static const struct fec_devinfo fec_imx25_info = {
	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx27_info = {
	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
		  FEC_QUIRK_HAS_MDIO_C45,
};

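/* i.MX28: frame data is big-endian in DMA memory and the two MACs share
 * a single MDIO bus on FEC0, hence FEC_QUIRK_SWAP_FRAME and
 * FEC_QUIRK_SINGLE_MDIO below.
 */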
static const struct fec_devinfo fec_imx28_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6q_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_mvf600_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6x_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6ul_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx8mq_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx8qm_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_s32v234_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, },
	{ .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, },
	{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
	{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
	{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
	{ .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. Worst case is 64, so round down by 64.
 */
#define PKT_MAXBUF_SIZE	(round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE	64

/* FEC receive acceleration */
#define FEC_RACC_IPDIS		BIT(1)
#define FEC_RACC_PRODIS		BIT(2)
#define FEC_RACC_SHIFT16	BIT(7)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/* MIB Control Register */
#define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
    defined(CONFIG_ARM64)
#define OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE	0
#endif

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_ST_C45		(0)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_READ_C45	(3 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_OP_ADDR_WRITE	(0)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
/* FEC ECR bits definition */
#define FEC_ECR_RESET	BIT(0)
#define FEC_ECR_ETHEREN	BIT(1)
#define FEC_ECR_MAGICEN	BIT(2)
#define FEC_ECR_SLEEP	BIT(3)
#define FEC_ECR_EN1588	BIT(4)
#define FEC_ECR_BYTESWP	BIT(8)
/* FEC RCR bits definition */
#define FEC_RCR_LOOP	BIT(0)
#define FEC_RCR_HALFDPX	BIT(1)
#define FEC_RCR_MII	BIT(2)
#define FEC_RCR_PROMISC	BIT(3)
#define FEC_RCR_BC_REJ	BIT(4)
#define FEC_RCR_FLOWCTL	BIT(5)
#define FEC_RCR_RMII	BIT(8)
#define FEC_RCR_10BASET	BIT(9)
/* TX WMARK bits */
#define FEC_TXWMRK_STRFWD	BIT(8)

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)

/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))

static int mii_cnt;

static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp >= bd->last) ? bd->base
			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
}

static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp <= bd->base) ? bd->last
			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
}

static int fec_enet_get_bd_index(struct bufdesc *bdp,
				 struct bufdesc_prop *bd)
{
	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}

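/* Free descriptors are those between bd.cur (next to be queued) and
 * dirty_tx (last one cleaned), minus one so that a completely full ring
 * cannot be confused with an empty one.
 */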
static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
	int entries;

	entries = (((const char *)txq->dirty_tx -
			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

	return entries >= 0 ? entries : entries + txq->bd.ring_size;
}

static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}

static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

	txq = fep->tx_queue[0];
	bdp = txq->bd.base;

	do {
		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
			index,
			bdp == txq->bd.cur ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			fec16_to_cpu(bdp->cbd_sc),
			fec32_to_cpu(bdp->cbd_bufaddr),
			fec16_to_cpu(bdp->cbd_datlen),
			txq->tx_buf[index].buf_p);
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		index++;
	} while (bdp != txq->bd.base);
}

/*
 * Coldfire does not support DMA coherent allocations, and has historically used
 * a band-aid with a manual flush in fec_enet_rx_queue.
 */
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t gfp)
{
	return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
}

static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle)
{
	dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL);
}
#else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t gfp)
{
	return dma_alloc_coherent(dev, size, handle, gfp);
}

static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}
#endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */

struct fec_dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
};

static void fec_dmam_release(struct device *dev, void *res)
{
	struct fec_dma_devres *this = res;

	fec_dma_free(dev, this->size, this->vaddr, this->dma_handle);
}

static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t gfp)
{
	struct fec_dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;
	vaddr = fec_dma_alloc(dev, size, handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}
	dr->vaddr = vaddr;
	dr->dma_handle = *handle;
	dr->size = size;
	devres_add(dev, dr);
	return vaddr;
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}

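/* RX buffers come from a page pool so completed buffers can be recycled
 * or handed to XDP without copying. With an XDP program attached the
 * pages are mapped DMA_BIDIRECTIONAL because XDP_TX retransmits out of
 * the same pages; otherwise DMA_FROM_DEVICE is enough.
 */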
static int
fec_enet_create_page_pool(struct fec_enet_private *fep,
			  struct fec_enet_priv_rx_q *rxq, int size)
{
	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = size,
		.nid = dev_to_node(&fep->pdev->dev),
		.dev = &fep->pdev->dev,
		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset = FEC_ENET_XDP_HEADROOM,
		.max_len = FEC_ENET_RX_FRSIZE,
	};
	int err;

	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool)) {
		err = PTR_ERR(rxq->page_pool);
		rxq->page_pool = NULL;
		return err;
	}

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
	if (err < 0)
		goto err_free_pp;

	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err)
		goto err_unregister_rxq;

	return 0;

err_unregister_rxq:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
err_free_pp:
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;
	return err;
}

static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = txq->bd.cur;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
	unsigned int index;
	void *bufaddr;
	dma_addr_t addr;
	int i;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		ebdp = (struct bufdesc_ex *)bdp;

		status = fec16_to_cpu(bdp->cbd_sc);
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

			ebdp->cbd_bdu = 0;
			ebdp->cbd_esc = cpu_to_fec32(estatus);
		}

		bufaddr = skb_frag_address(this_frag);

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		if (((unsigned long) bufaddr) & fep->tx_align ||
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];

			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);
		}

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

		bdp->cbd_bufaddr = cpu_to_fec32(addr);
		bdp->cbd_datlen = cpu_to_fec16(frag_len);
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);
	}

	return bdp;
dma_mapping_error:
	bdp = txq->bd.cur;
	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
	}
	return ERR_PTR(-ENOMEM);
}

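/* Queue one skb: the linear head is mapped here, page fragments via
 * fec_enet_txq_submit_frag_skb() above. Buffers that violate the
 * controller's alignment requirement (tx_align) or need byte swapping
 * are first copied into the per-index tx_bounce[] scratch buffers.
 */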
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
	dma_addr_t addr;
	unsigned short status;
	unsigned short buflen;
	unsigned int estatus = 0;
	unsigned int index;
	int entries_free;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	last_bdp = bdp;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	buflen = skb_headlen(skb);

	index = fec_enet_get_bd_index(bdp, &txq->bd);
	if (((unsigned long) bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);
	}

	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (nr_frags) {
		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
		if (IS_ERR(last_bdp)) {
			dma_unmap_single(&fep->pdev->dev, addr,
					 buflen, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
	bdp->cbd_datlen = cpu_to_fec16(buflen);

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
	/* Save skb pointer */
	txq->tx_buf[index].buf_p = skb;

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	wmb();

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);

	skb_tx_timestamp(skb);

	/* Make sure the update to bdp is performed before txq->bd.cur. */
	wmb();
	txq->bd.cur = bdp;

	/* Trigger transmission start */
	writel(0, txq->bd.reg_desc_active);

	return 0;
}

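/* Software TSO: tso_start()/tso_build_hdr()/tso_build_data() from
 * net/tso.h segment the super-sized skb; each segment gets one header
 * descriptor (from the preallocated tso_hdrs area) followed by data
 * descriptors.
 */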
static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short status;
	unsigned int estatus = 0;
	dma_addr_t addr;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	if (((unsigned long) data) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);
	}

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	bdp->cbd_datlen = cpu_to_fec16(size);
	bdp->cbd_bufaddr = cpu_to_fec32(addr);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}

static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_tcp_all_headers(skb);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long)bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_OK;
		}
	}

	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
	bdp->cbd_datlen = cpu_to_fec16(hdr_len);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}

static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len, total_len, data_left;
	struct bufdesc *bdp = txq->bd.cur;
	struct bufdesc *tmp_bdp;
	struct bufdesc_ex *ebdp;
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	hdr_len = tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
			index = fec_enet_get_bd_index(bdp, &txq->bd);
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Save skb pointer */
	txq->tx_buf[index].buf_p = skb;

	skb_tx_timestamp(skb);
	txq->bd.cur = bdp;

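	/* ERR007885: a TDAR write can be ignored if it races with the
	 * transmitter going idle, so only skip the trigger when several
	 * back-to-back reads all show TDAR still set.
	 */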
	/* Trigger transmission start */
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active))
		writel(0, txq->bd.reg_desc_active);

	return 0;

err_release:
	/* Release all used data descriptors for TSO */
	tmp_bdp = txq->bd.cur;

	while (tmp_bdp != bdp) {
		/* Unmap data buffers */
		if (tmp_bdp->cbd_bufaddr &&
		    !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr)))
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(tmp_bdp->cbd_bufaddr),
					 fec16_to_cpu(tmp_bdp->cbd_datlen),
					 DMA_TO_DEVICE);

		/* Clear standard buffer descriptor fields */
		tmp_bdp->cbd_sc = 0;
		tmp_bdp->cbd_datlen = 0;
		tmp_bdp->cbd_bufaddr = 0;

		/* Handle extended descriptor if enabled */
		if (fep->bufdesc_ex) {
			ebdp = (struct bufdesc_ex *)tmp_bdp;
			ebdp->cbd_esc = 0;
		}

		tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd);
	}

	dev_kfree_skb_any(skb);

	return ret;
}

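/* ndo_start_xmit: pick the ring from the skb's queue mapping and stop
 * the netdev queue once fewer than tx_stop_threshold descriptors
 * remain; fec_enet_tx_queue() wakes it again at tx_wake_threshold,
 * giving the producer and the cleaner some hysteresis.
 */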
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
	if (ret)
		return ret;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}

/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;

		for (i = 0; i < rxq->bd.ring_size; i++) {

			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
			else
				bdp->cbd_sc = cpu_to_fec16(0);
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

		rxq->bd.cur = rxq->bd.base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		txq->bd.cur = bdp;

		for (i = 0; i < txq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = cpu_to_fec16(0);
			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
				if (bdp->cbd_bufaddr &&
				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);
				if (txq->tx_buf[i].buf_p)
					dev_kfree_skb_any(txq->tx_buf[i].buf_p);
			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
				if (bdp->cbd_bufaddr)
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);

				if (txq->tx_buf[i].buf_p)
					xdp_return_frame(txq->tx_buf[i].buf_p);
			} else {
				struct page *page = txq->tx_buf[i].buf_p;

				if (page)
					page_pool_put_page(page->pp, page, 0, false);
			}

			txq->tx_buf[i].buf_p = NULL;
			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
		txq->dirty_tx = bdp;
	}
}

static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
}

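/* Program the per-queue descriptor ring base addresses. Queues 1 and 2
 * exist only on FEC_QUIRK_HAS_MULTI_QUEUES parts: the extra RX queues
 * are fed by VLAN-priority matching (RCMR), the extra TX queues are the
 * AVB class rings with credit-based shaping (IDLE_SLOPE).
 */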
static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
	}
}

/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = FEC_ECR_ETHEREN;

	if (fep->bufdesc_ex)
		fec_ptp_save_state(fep);

	/* Whack a reset. We should wait for this.
	 * On the i.MX6SX SoC the ENET block is on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
	    ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}

	/*
	 * An enet-mac reset also resets the MAC address registers, so we
	 * need to reconfigure them.
	 */
	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
	writel((__force u32)cpu_to_be32(temp_mac[0]),
	       fep->hwp + FEC_ADDR_LOW);
	writel((__force u32)cpu_to_be32(temp_mac[1]),
	       fep->hwp + FEC_ADDR_HIGH);

	/* Clear any outstanding interrupt, except MDIO. */
	writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);

	fec_enet_bd_init(ndev);

	fec_enet_enable_ring(ndev);

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		u32 val = readl(fep->hwp + FEC_RACC);

		/* align IP header */
		val |= FEC_RACC_SHIFT16;
		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
			/* set RX checksum */
			val |= FEC_RACC_OPTIONS;
		else
			val &= ~FEC_RACC_OPTIONS;
		writel(val, fep->hwp + FEC_RACC);
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
	}
#endif

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= FEC_RCR_RMII;
		else
			rcntl &= ~FEC_RCR_RMII;

		/* 1G, 100M or 10M */
		if (ndev->phydev) {
			if (ndev->phydev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (ndev->phydev->speed == SPEED_100)
				rcntl &= ~FEC_RCR_10BASET;
			else
				rcntl |= FEC_RCR_10BASET;
		}
	} else {
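		/* Older parts (e.g. i.MX25) reach an RMII PHY through the
		 * MII gasket, which must be disabled while being
		 * reconfigured.
		 */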
#ifdef FEC_MIIGSK_ENR
		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

#if !defined(CONFIG_M5272)
	/* enable pause frame */
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     ndev->phydev && ndev->phydev->pause)) {
		rcntl |= FEC_RCR_FLOWCTL;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_RCR_FLOWCTL;
	}
#endif /* !defined(CONFIG_M5272) */

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= FEC_ECR_BYTESWP;
		/* enable ENET store and forward mode */
		writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
	}

	if (fep->bufdesc_ex)
		ecntl |= FEC_ECR_EN1588;

	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
	    fep->rgmii_txc_dly)
		ecntl |= FEC_ENET_TXC_DLY;
	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
	    fep->rgmii_rxc_dly)
		ecntl |= FEC_ENET_RXC_DLY;

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	fec_enet_active_rxring(ndev);

	if (fep->bufdesc_ex) {
		fec_ptp_start_cyclecounter(ndev);
		fec_ptp_restore_state(fep);
	}

	/* Enable interrupts we wish to service */
	if (fep->link)
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	else
		writel(0, fep->hwp + FEC_IMASK);

	/* Init the interrupt coalescing */
	if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
		fec_enet_itr_coal_set(ndev);
}

static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
{
	if (!(of_machine_is_compatible("fsl,imx8qm") ||
	      of_machine_is_compatible("fsl,imx8qxp") ||
	      of_machine_is_compatible("fsl,imx8dxl")))
		return 0;

	return imx_scu_get_handle(&fep->ipc_handle);
}

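/* On i.MX8 SCU parts the IPG_STOP signal is owned by system firmware,
 * so stop mode is requested through the SCU IPC service rather than a
 * GPR bit.
 */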
static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
{
	struct device_node *np = fep->pdev->dev.of_node;
	u32 rsrc_id, val;
	int idx;

	if (!np || !fep->ipc_handle)
		return;

	idx = of_alias_get_id(np, "ethernet");
	if (idx < 0)
		idx = 0;
	rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;

	val = enabled ? 1 : 0;
	imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
}

static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
{
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;

	if (stop_gpr->gpr) {
		if (enabled)
			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
					   BIT(stop_gpr->bit),
					   BIT(stop_gpr->bit));
		else
			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
					   BIT(stop_gpr->bit), 0);
	} else if (pdata && pdata->sleep_mode_enable) {
		pdata->sleep_mode_enable(enabled);
	} else {
		fec_enet_ipg_stop_set(fep, enabled);
	}
}

static void fec_irqs_disable(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	writel(0, fep->hwp + FEC_IMASK);
}

static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	writel(0, fep->hwp + FEC_IMASK);
	writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
}

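/* Quiesce the MAC. With Wake-on-LAN armed (FEC_WOL_FLAG_SLEEP_ON) the
 * block is put into magic-packet sleep instead of being reset, so it
 * can still raise the wakeup interrupt.
 */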
static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
	u32 val;

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	if (fep->bufdesc_ex)
		fec_ptp_save_state(fep);

	/* Whack a reset. We should wait for this.
	 * On the i.MX6SX SoC the ENET block is on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
			writel(0, fep->hwp + FEC_ECNTRL);
		} else {
			writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
			udelay(10);
		}
	} else {
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, fep->hwp + FEC_ECNTRL);
	}
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
	    !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}

	if (fep->bufdesc_ex) {
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= FEC_ECR_EN1588;
		writel(val, fep->hwp + FEC_ECNTRL);

		fec_ptp_start_cyclecounter(ndev);
		fec_ptp_restore_state(fep);
	}
}

static void
fec_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fec_dump(ndev);

	ndev->stats.tx_errors++;

	schedule_work(&fep->tx_timeout_work);
}

static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_tx_wake_all_queues(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
	rtnl_unlock();
}

static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
	struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

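/* Reclaim completed TX buffers for one queue: walk from dirty_tx towards
 * bd.cur, stop at the first descriptor the hardware still owns
 * (BD_ENET_TX_READY), and unmap/free each buffer according to its type
 * (skb, XDP ndo frame, or page-pool page from XDP_TX).
 */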
static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
	struct fec_enet_private *fep;
	struct xdp_frame *xdpf;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int index = 0;
	int entries_free;
	struct page *page;
	int frame_len;

	fep = netdev_priv(ndev);

	txq = fep->tx_queue[queue_id];
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

	while (bdp != READ_ONCE(txq->bd.cur)) {
		/* Order the load of bd.cur and cbd_sc */
		rmb();
		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
		if (status & BD_ENET_TX_READY)
			break;

		index = fec_enet_get_bd_index(bdp, &txq->bd);

		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
			skb = txq->tx_buf[index].buf_p;
			if (bdp->cbd_bufaddr &&
			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 fec16_to_cpu(bdp->cbd_datlen),
						 DMA_TO_DEVICE);
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			if (!skb)
				goto tx_buf_done;
		} else {
			/* Tx processing cannot call any XDP (or page pool) APIs if
			 * the "budget" is 0. Because NAPI is called with budget of
			 * 0 (such as netpoll) indicates we may be in an IRQ context,
			 * however, we can't use the page pool from IRQ context.
			 */
			if (unlikely(!budget))
				break;

			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
				xdpf = txq->tx_buf[index].buf_p;
				if (bdp->cbd_bufaddr)
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);
			} else {
				page = txq->tx_buf[index].buf_p;
			}

			bdp->cbd_bufaddr = cpu_to_fec32(0);
			if (unlikely(!txq->tx_buf[index].buf_p)) {
				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
				goto tx_buf_done;
			}

			frame_len = fec16_to_cpu(bdp->cbd_datlen);
		}

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;

			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
				ndev->stats.tx_bytes += skb->len;
			else
				ndev->stats.tx_bytes += frame_len;
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
			/* NOTE: SKBTX_IN_PROGRESS being set does not imply that
			 * we are the ones who should time stamp the packet, so
			 * we still need to check the time stamping enabled flag.
			 */
			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
				     fep->hwts_tx_en) && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps shhwtstamps;
				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
				skb_tstamp_tx(skb, &shhwtstamps);
			}

			/* Free the sk buffer associated with this last transmit */
			napi_consume_skb(skb, budget);
		} else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
			xdp_return_frame_rx_napi(xdpf);
		} else { /* recycle pages of XDP_TX frames */
			/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
			page_pool_put_page(page->pp, page, 0, true);
		}

		txq->tx_buf[index].buf_p = NULL;
		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
		txq->tx_buf[index].type = FEC_TXBUF_T_SKB;

tx_buf_done:
		/* Make sure the update to bdp and tx_buf are performed
		 * before dirty_tx
		 */
		wmb();
		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_tx_queue_stopped(nq)) {
			entries_free = fec_enet_get_free_txdesc_num(txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);
		}
	}

	/* ERR006358: Keep the transmitter going */
	if (bdp != txq->bd.cur &&
	    readl(txq->bd.reg_desc_active) == 0)
		writel(0, txq->bd.reg_desc_active);
}

static void fec_enet_tx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	/* Make sure that AVB queues are processed first. */
	for (i = fep->num_tx_queues - 1; i >= 0; i--)
		fec_enet_tx_queue(ndev, i, budget);
}

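/* Refill one RX descriptor with a fresh page from the page pool; the
 * caller keeps the old page for the skb/XDP path.
 */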
static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
			       struct bufdesc *bdp, int index)
{
	struct page *new_page;
	dma_addr_t phys_addr;

	new_page = page_pool_dev_alloc_pages(rxq->page_pool);
	if (unlikely(!new_page))
		return -ENOMEM;

	rxq->rx_skb_info[index].page = new_page;
	rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
	phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);

	return 0;
}

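/* Run the attached XDP program on one buffer. Returns a FEC_ENET_XDP_*
 * code; on anything but PASS the page has either been recycled here or
 * handed off (TX/REDIRECT), so the RX path must not touch it again.
 */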
static u32
fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
{
	unsigned int sync, len = xdp->data_end - xdp->data;
	u32 ret = FEC_ENET_XDP_PASS;
	struct page *page;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);

	/* Because the program may have called xdp_adjust_tail() or
	 * xdp_adjust_head(), the DMA sync for_device must cover the
	 * maximum length the CPU may have touched.
	 */
	sync = xdp->data_end - xdp->data;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		rxq->stats[RX_XDP_PASS]++;
		ret = FEC_ENET_XDP_PASS;
		break;

	case XDP_REDIRECT:
		rxq->stats[RX_XDP_REDIRECT]++;
		err = xdp_do_redirect(fep->netdev, xdp, prog);
		if (unlikely(err))
			goto xdp_err;

		ret = FEC_ENET_XDP_REDIR;
		break;

	case XDP_TX:
		rxq->stats[RX_XDP_TX]++;
		err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
		if (unlikely(err)) {
			rxq->stats[RX_XDP_TX_ERRORS]++;
			goto xdp_err;
		}

		ret = FEC_ENET_XDP_TX;
		break;

	default:
		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
		fallthrough;

	case XDP_ABORTED:
		fallthrough;	/* handle aborts by dropping packet */

	case XDP_DROP:
		rxq->stats[RX_XDP_DROP]++;
xdp_err:
		ret = FEC_ENET_XDP_CONSUMED;
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(rxq->page_pool, page, sync, true);
		if (act != XDP_DROP)
			trace_xdp_exception(fep->netdev, prog, act);
		break;
	}

	return ret;
}

/* During a receive, the bd_rx.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	ushort pkt_len;
	__u8 *data;
	int pkt_received = 0;
	struct bufdesc_ex *ebdp = NULL;
	bool vlan_packet_rcvd = false;
	u16 vlan_tag;
	int index = 0;
	bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
	u32 data_start = FEC_ENET_XDP_HEADROOM;
	int cpu = smp_processor_id();
	struct xdp_buff xdp;
	struct page *page;
	__fec32 cbd_bufaddr;
	u32 sub_len = 4;

#if !defined(CONFIG_M5272)
	/* If the FEC_QUIRK_HAS_RACC quirk is set, the FEC_RACC_SHIFT16
	 * bit is set by default in the probe function.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		data_start += 2;
		sub_len += 2;
	}
#endif

#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
	/*
	 * Hacky flush of all caches instead of using the DMA API for the TSO
	 * headers.
	 */
	flush_cache_all();
#endif
	rxq = fep->rx_queue[queue_id];

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = rxq->bd.cur;
	xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);

	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);

		/* Check for errors. */
		status ^= BD_ENET_RX_LAST;
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
			   BD_ENET_RX_CL)) {
			ndev->stats.rx_errors++;
			if (status & BD_ENET_RX_OV) {
				/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
				goto rx_processing_done;
			}
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
						| BD_ENET_RX_LAST)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
				if (status & BD_ENET_RX_LAST)
					netdev_err(ndev, "rcv is not +last\n");
			}
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			/* Report late collisions as a frame error. */
			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
		ndev->stats.rx_bytes += pkt_len;

		index = fec_enet_get_bd_index(bdp, &rxq->bd);
		page = rxq->rx_skb_info[index].page;
		cbd_bufaddr = bdp->cbd_bufaddr;
		if (fec_enet_update_cbd(rxq, bdp, index)) {
			ndev->stats.rx_dropped++;
			goto rx_processing_done;
		}

		dma_sync_single_for_cpu(&fep->pdev->dev,
					fec32_to_cpu(cbd_bufaddr),
					pkt_len,
					DMA_FROM_DEVICE);
		prefetch(page_address(page));

		if (xdp_prog) {
			xdp_buff_clear_frags_flag(&xdp);
			/* subtract 16bit shift and FCS */
			xdp_prepare_buff(&xdp, page_address(page),
					 data_start, pkt_len - sub_len, false);
			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
			xdp_result |= ret;
			if (ret != FEC_ENET_XDP_PASS)
				goto rx_processing_done;
		}

		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
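		/* build_skb() wraps the page in place with no copy;
		 * skb_mark_for_recycle() below lets the stack return the
		 * page to the page pool when the skb is freed.
		 */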
		skb = build_skb(page_address(page), PAGE_SIZE);
		if (unlikely(!skb)) {
			page_pool_recycle_direct(rxq->page_pool, page);
			ndev->stats.rx_dropped++;

			netdev_err_once(ndev, "build_skb failed!\n");
			goto rx_processing_done;
		}

		skb_reserve(skb, data_start);
		skb_put(skb, pkt_len - sub_len);
		skb_mark_for_recycle(skb);

		if (unlikely(need_swap)) {
			data = page_address(page) + FEC_ENET_XDP_HEADROOM;
			swap_buffer(data, pkt_len);
		}
		data = skb->data;

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    fep->bufdesc_ex &&
		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;

			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
			skb_pull(skb, VLAN_HLEN);
		}

		skb->protocol = eth_type_trans(skb, ndev);

		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb_checksum_none_assert(skb);
			}
		}

		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);

		skb_record_rx_queue(skb, queue_id);
		napi_gro_receive(&fep->napi, skb);

rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames. On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, rxq->bd.reg_desc_active);
	}
	rxq->bd.cur = bdp;

	if (xdp_result & FEC_ENET_XDP_REDIR)
		xdp_do_flush();

	return pkt_received;
}

static int fec_enet_rx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i, done = 0;

	/* Make sure that AVB queues are processed first. */
	for (i = fep->num_rx_queues - 1; i >= 0; i--)
		done += fec_enet_rx_queue(ndev, budget - done, i);

	return done;
}

static bool fec_enet_collect_events(struct fec_enet_private *fep)
{
	uint int_events;

	int_events = readl(fep->hwp + FEC_IEVENT);

	/* Don't clear MDIO events, we poll for those */
	int_events &= ~FEC_ENET_MII;

	writel(int_events, fep->hwp + FEC_IEVENT);

	return int_events != 0;
}

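/* Top half: events are acked in fec_enet_collect_events(); when NAPI
 * can be scheduled, all interrupts are masked until fec_enet_rx_napi()
 * re-enables them after the poll completes.
 */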
*/ 1938 for (i = fep->num_rx_queues - 1; i >= 0; i--) 1939 done += fec_enet_rx_queue(ndev, budget - done, i); 1940 1941 return done; 1942 } 1943 1944 static bool fec_enet_collect_events(struct fec_enet_private *fep) 1945 { 1946 uint int_events; 1947 1948 int_events = readl(fep->hwp + FEC_IEVENT); 1949 1950 /* Don't clear MDIO events, we poll for those */ 1951 int_events &= ~FEC_ENET_MII; 1952 1953 writel(int_events, fep->hwp + FEC_IEVENT); 1954 1955 return int_events != 0; 1956 } 1957 1958 static irqreturn_t 1959 fec_enet_interrupt(int irq, void *dev_id) 1960 { 1961 struct net_device *ndev = dev_id; 1962 struct fec_enet_private *fep = netdev_priv(ndev); 1963 irqreturn_t ret = IRQ_NONE; 1964 1965 if (fec_enet_collect_events(fep) && fep->link) { 1966 ret = IRQ_HANDLED; 1967 1968 if (napi_schedule_prep(&fep->napi)) { 1969 /* Disable interrupts */ 1970 writel(0, fep->hwp + FEC_IMASK); 1971 __napi_schedule(&fep->napi); 1972 } 1973 } 1974 1975 return ret; 1976 } 1977 1978 static int fec_enet_rx_napi(struct napi_struct *napi, int budget) 1979 { 1980 struct net_device *ndev = napi->dev; 1981 struct fec_enet_private *fep = netdev_priv(ndev); 1982 int done = 0; 1983 1984 do { 1985 done += fec_enet_rx(ndev, budget - done); 1986 fec_enet_tx(ndev, budget); 1987 } while ((done < budget) && fec_enet_collect_events(fep)); 1988 1989 if (done < budget) { 1990 napi_complete_done(napi, done); 1991 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1992 } 1993 1994 return done; 1995 } 1996 1997 /* ------------------------------------------------------------------------- */ 1998 static int fec_get_mac(struct net_device *ndev) 1999 { 2000 struct fec_enet_private *fep = netdev_priv(ndev); 2001 unsigned char *iap, tmpaddr[ETH_ALEN]; 2002 int ret; 2003 2004 /* 2005 * Try to get the MAC address in the following order: 2006 * 2007 * 1) module parameter via the kernel command line, in the form 2008 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 2009 */ 2010 iap = macaddr; 2011 2012 /* 2013 * 2) from device tree data 2014 */ 2015 if (!is_valid_ether_addr(iap)) { 2016 struct device_node *np = fep->pdev->dev.of_node; 2017 if (np) { 2018 ret = of_get_mac_address(np, tmpaddr); 2019 if (!ret) 2020 iap = tmpaddr; 2021 else if (ret == -EPROBE_DEFER) 2022 return ret; 2023 } 2024 } 2025 2026 /* 2027 * 3) from flash or fuse (via platform data) 2028 */ 2029 if (!is_valid_ether_addr(iap)) { 2030 #ifdef CONFIG_M5272 2031 if (FEC_FLASHMAC) 2032 iap = (unsigned char *)FEC_FLASHMAC; 2033 #else 2034 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); 2035 2036 if (pdata) 2037 iap = (unsigned char *)&pdata->mac; 2038 #endif 2039 } 2040 2041 /* 2042 * 4) FEC MAC registers set by the bootloader 2043 */ 2044 if (!is_valid_ether_addr(iap)) { 2045 *((__be32 *) &tmpaddr[0]) = 2046 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); 2047 *((__be16 *) &tmpaddr[4]) = 2048 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 2049 iap = &tmpaddr[0]; 2050 } 2051 2052 /* 2053 * 5) random MAC address 2054 */ 2055 if (!is_valid_ether_addr(iap)) { 2056 /* Report it and use a random ethernet address instead */ 2057 dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap); 2058 eth_hw_addr_random(ndev); 2059 dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n", 2060 ndev->dev_addr); 2061 return 0; 2062 } 2063 2064 /* Adjust MAC if using macaddr */ 2065 eth_hw_addr_gen(ndev, iap, iap == macaddr ?
fep->dev_id : 0); 2066 2067 return 0; 2068 } 2069 2070 /* ------------------------------------------------------------------------- */ 2071 2072 /* 2073 * Phy section 2074 */ 2075 2076 /* The LPI Sleep Ts count is based on the tx clock (clk_ref). 2077 * The LPI sleep cnt value = X us / cycle_ns (e.g. a 50 MHz clk_ref gives 50 counts per microsecond). 2078 */ 2079 static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us) 2080 { 2081 struct fec_enet_private *fep = netdev_priv(ndev); 2082 2083 return us * (fep->clk_ref_rate / 1000) / 1000; 2084 } 2085 2086 static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer, 2087 bool enable) 2088 { 2089 struct fec_enet_private *fep = netdev_priv(ndev); 2090 unsigned int sleep_cycle, wake_cycle; 2091 2092 if (enable) { 2093 sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer); 2094 wake_cycle = sleep_cycle; 2095 } else { 2096 sleep_cycle = 0; 2097 wake_cycle = 0; 2098 } 2099 2100 writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP); 2101 writel(wake_cycle, fep->hwp + FEC_LPI_WAKE); 2102 2103 return 0; 2104 } 2105 2106 static void fec_enet_adjust_link(struct net_device *ndev) 2107 { 2108 struct fec_enet_private *fep = netdev_priv(ndev); 2109 struct phy_device *phy_dev = ndev->phydev; 2110 int status_change = 0; 2111 2112 /* 2113 * If the netdev is down, or is going down, we're not interested 2114 * in link state events, so just mark our idea of the link as down 2115 * and ignore the event. 2116 */ 2117 if (!netif_running(ndev) || !netif_device_present(ndev)) { 2118 fep->link = 0; 2119 } else if (phy_dev->link) { 2120 if (!fep->link) { 2121 fep->link = phy_dev->link; 2122 status_change = 1; 2123 } 2124 2125 if (fep->full_duplex != phy_dev->duplex) { 2126 fep->full_duplex = phy_dev->duplex; 2127 status_change = 1; 2128 } 2129 2130 if (phy_dev->speed != fep->speed) { 2131 fep->speed = phy_dev->speed; 2132 status_change = 1; 2133 } 2134 2135 /* if any of the above changed, restart the FEC */ 2136 if (status_change) { 2137 netif_stop_queue(ndev); 2138 napi_disable(&fep->napi); 2139 netif_tx_lock_bh(ndev); 2140 fec_restart(ndev); 2141 netif_tx_wake_all_queues(ndev); 2142 netif_tx_unlock_bh(ndev); 2143 napi_enable(&fep->napi); 2144 } 2145 if (fep->quirks & FEC_QUIRK_HAS_EEE) 2146 fec_enet_eee_mode_set(ndev, 2147 phy_dev->eee_cfg.tx_lpi_timer, 2148 phy_dev->enable_tx_lpi); 2149 } else { 2150 if (fep->link) { 2151 netif_stop_queue(ndev); 2152 napi_disable(&fep->napi); 2153 netif_tx_lock_bh(ndev); 2154 fec_stop(ndev); 2155 netif_tx_unlock_bh(ndev); 2156 napi_enable(&fep->napi); 2157 fep->link = phy_dev->link; 2158 status_change = 1; 2159 } 2160 } 2161 2162 if (status_change) 2163 phy_print_status(phy_dev); 2164 } 2165 2166 static int fec_enet_mdio_wait(struct fec_enet_private *fep) 2167 { 2168 uint ievent; 2169 int ret; 2170 2171 ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent, 2172 ievent & FEC_ENET_MII, 2, 30000); 2173 2174 if (!ret) 2175 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); 2176 2177 return ret; 2178 } 2179 2180 static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) 2181 { 2182 struct fec_enet_private *fep = bus->priv; 2183 struct device *dev = &fep->pdev->dev; 2184 int ret = 0, frame_start, frame_addr, frame_op; 2185 2186 ret = pm_runtime_resume_and_get(dev); 2187 if (ret < 0) 2188 return ret; 2189 2190 /* C22 read */ 2191 frame_op = FEC_MMFR_OP_READ; 2192 frame_start = FEC_MMFR_ST; 2193 frame_addr = regnum; 2194 2195 /* start a read op */ 2196 writel(frame_start | frame_op | 2197 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | 2198 FEC_MMFR_TA, fep->hwp +
FEC_MII_DATA); 2199 2200 /* wait for end of transfer */ 2201 ret = fec_enet_mdio_wait(fep); 2202 if (ret) { 2203 netdev_err(fep->netdev, "MDIO read timeout\n"); 2204 goto out; 2205 } 2206 2207 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 2208 2209 out: 2210 pm_runtime_mark_last_busy(dev); 2211 pm_runtime_put_autosuspend(dev); 2212 2213 return ret; 2214 } 2215 2216 static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id, 2217 int devad, int regnum) 2218 { 2219 struct fec_enet_private *fep = bus->priv; 2220 struct device *dev = &fep->pdev->dev; 2221 int ret = 0, frame_start, frame_op; 2222 2223 ret = pm_runtime_resume_and_get(dev); 2224 if (ret < 0) 2225 return ret; 2226 2227 frame_start = FEC_MMFR_ST_C45; 2228 2229 /* write address */ 2230 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | 2231 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2232 FEC_MMFR_TA | (regnum & 0xFFFF), 2233 fep->hwp + FEC_MII_DATA); 2234 2235 /* wait for end of transfer */ 2236 ret = fec_enet_mdio_wait(fep); 2237 if (ret) { 2238 netdev_err(fep->netdev, "MDIO address write timeout\n"); 2239 goto out; 2240 } 2241 2242 frame_op = FEC_MMFR_OP_READ_C45; 2243 2244 /* start a read op */ 2245 writel(frame_start | frame_op | 2246 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2247 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 2248 2249 /* wait for end of transfer */ 2250 ret = fec_enet_mdio_wait(fep); 2251 if (ret) { 2252 netdev_err(fep->netdev, "MDIO read timeout\n"); 2253 goto out; 2254 } 2255 2256 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 2257 2258 out: 2259 pm_runtime_mark_last_busy(dev); 2260 pm_runtime_put_autosuspend(dev); 2261 2262 return ret; 2263 } 2264 2265 static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, 2266 u16 value) 2267 { 2268 struct fec_enet_private *fep = bus->priv; 2269 struct device *dev = &fep->pdev->dev; 2270 int ret, frame_start, frame_addr; 2271 2272 ret = pm_runtime_resume_and_get(dev); 2273 if (ret < 0) 2274 return ret; 2275 2276 /* C22 write */ 2277 frame_start = FEC_MMFR_ST; 2278 frame_addr = regnum; 2279 2280 /* start a write op */ 2281 writel(frame_start | FEC_MMFR_OP_WRITE | 2282 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | 2283 FEC_MMFR_TA | FEC_MMFR_DATA(value), 2284 fep->hwp + FEC_MII_DATA); 2285 2286 /* wait for end of transfer */ 2287 ret = fec_enet_mdio_wait(fep); 2288 if (ret) 2289 netdev_err(fep->netdev, "MDIO write timeout\n"); 2290 2291 pm_runtime_mark_last_busy(dev); 2292 pm_runtime_put_autosuspend(dev); 2293 2294 return ret; 2295 } 2296 2297 static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id, 2298 int devad, int regnum, u16 value) 2299 { 2300 struct fec_enet_private *fep = bus->priv; 2301 struct device *dev = &fep->pdev->dev; 2302 int ret, frame_start; 2303 2304 ret = pm_runtime_resume_and_get(dev); 2305 if (ret < 0) 2306 return ret; 2307 2308 frame_start = FEC_MMFR_ST_C45; 2309 2310 /* write address */ 2311 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | 2312 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2313 FEC_MMFR_TA | (regnum & 0xFFFF), 2314 fep->hwp + FEC_MII_DATA); 2315 2316 /* wait for end of transfer */ 2317 ret = fec_enet_mdio_wait(fep); 2318 if (ret) { 2319 netdev_err(fep->netdev, "MDIO address write timeout\n"); 2320 goto out; 2321 } 2322 2323 /* start a write op */ 2324 writel(frame_start | FEC_MMFR_OP_WRITE | 2325 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2326 FEC_MMFR_TA | FEC_MMFR_DATA(value), 2327 fep->hwp + FEC_MII_DATA); 2328 2329 /* wait for end of transfer */ 2330 ret = fec_enet_mdio_wait(fep); 2331 if 
(ret) 2332 netdev_err(fep->netdev, "MDIO write timeout\n"); 2333 2334 out: 2335 pm_runtime_mark_last_busy(dev); 2336 pm_runtime_put_autosuspend(dev); 2337 2338 return ret; 2339 } 2340 2341 static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) 2342 { 2343 struct fec_enet_private *fep = netdev_priv(ndev); 2344 struct phy_device *phy_dev = ndev->phydev; 2345 2346 if (phy_dev) { 2347 phy_reset_after_clk_enable(phy_dev); 2348 } else if (fep->phy_node) { 2349 /* 2350 * If the PHY is still not bound to the MAC, but there is an 2351 * OF PHY node and a matching PHY device instance already, 2352 * use the OF PHY node to obtain the PHY device instance, 2353 * and then use that PHY device instance when triggering 2354 * the PHY reset. 2355 */ 2356 phy_dev = of_phy_find_device(fep->phy_node); 2357 phy_reset_after_clk_enable(phy_dev); 2358 put_device(&phy_dev->mdio.dev); 2359 } 2360 } 2361 2362 static int fec_enet_clk_enable(struct net_device *ndev, bool enable) 2363 { 2364 struct fec_enet_private *fep = netdev_priv(ndev); 2365 int ret; 2366 2367 if (enable) { 2368 ret = clk_prepare_enable(fep->clk_enet_out); 2369 if (ret) 2370 return ret; 2371 2372 if (fep->clk_ptp) { 2373 mutex_lock(&fep->ptp_clk_mutex); 2374 ret = clk_prepare_enable(fep->clk_ptp); 2375 if (ret) { 2376 mutex_unlock(&fep->ptp_clk_mutex); 2377 goto failed_clk_ptp; 2378 } else { 2379 fep->ptp_clk_on = true; 2380 } 2381 mutex_unlock(&fep->ptp_clk_mutex); 2382 } 2383 2384 ret = clk_prepare_enable(fep->clk_ref); 2385 if (ret) 2386 goto failed_clk_ref; 2387 2388 ret = clk_prepare_enable(fep->clk_2x_txclk); 2389 if (ret) 2390 goto failed_clk_2x_txclk; 2391 2392 fec_enet_phy_reset_after_clk_enable(ndev); 2393 } else { 2394 clk_disable_unprepare(fep->clk_enet_out); 2395 if (fep->clk_ptp) { 2396 mutex_lock(&fep->ptp_clk_mutex); 2397 clk_disable_unprepare(fep->clk_ptp); 2398 fep->ptp_clk_on = false; 2399 mutex_unlock(&fep->ptp_clk_mutex); 2400 } 2401 clk_disable_unprepare(fep->clk_ref); 2402 clk_disable_unprepare(fep->clk_2x_txclk); 2403 } 2404 2405 return 0; 2406 2407 failed_clk_2x_txclk: 2408 if (fep->clk_ref) 2409 clk_disable_unprepare(fep->clk_ref); 2410 failed_clk_ref: 2411 if (fep->clk_ptp) { 2412 mutex_lock(&fep->ptp_clk_mutex); 2413 clk_disable_unprepare(fep->clk_ptp); 2414 fep->ptp_clk_on = false; 2415 mutex_unlock(&fep->ptp_clk_mutex); 2416 } 2417 failed_clk_ptp: 2418 clk_disable_unprepare(fep->clk_enet_out); 2419 2420 return ret; 2421 } 2422 2423 static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep, 2424 struct device_node *np) 2425 { 2426 u32 rgmii_tx_delay, rgmii_rx_delay; 2427 2428 /* For rgmii tx internal delay, valid values are 0ps and 2000ps */ 2429 if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) { 2430 if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) { 2431 dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps"); 2432 return -EINVAL; 2433 } else if (rgmii_tx_delay == 2000) { 2434 fep->rgmii_txc_dly = true; 2435 } 2436 } 2437 2438 /* For rgmii rx internal delay, valid values are 0ps and 2000ps */ 2439 if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) { 2440 if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) { 2441 dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps"); 2442 return -EINVAL; 2443 } else if (rgmii_rx_delay == 2000) { 2444 fep->rgmii_rxc_dly = true; 2445 } 2446 } 2447 2448 return 0; 2449 } 2450 2451 static int fec_enet_mii_probe(struct net_device *ndev) 2452 { 2453 struct
fec_enet_private *fep = netdev_priv(ndev); 2454 struct phy_device *phy_dev = NULL; 2455 char mdio_bus_id[MII_BUS_ID_SIZE]; 2456 char phy_name[MII_BUS_ID_SIZE + 3]; 2457 int phy_id; 2458 int dev_id = fep->dev_id; 2459 2460 if (fep->phy_node) { 2461 phy_dev = of_phy_connect(ndev, fep->phy_node, 2462 &fec_enet_adjust_link, 0, 2463 fep->phy_interface); 2464 if (!phy_dev) { 2465 netdev_err(ndev, "Unable to connect to phy\n"); 2466 return -ENODEV; 2467 } 2468 } else { 2469 /* check for attached phy */ 2470 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { 2471 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) 2472 continue; 2473 if (dev_id--) 2474 continue; 2475 strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); 2476 break; 2477 } 2478 2479 if (phy_id >= PHY_MAX_ADDR) { 2480 netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); 2481 strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); 2482 phy_id = 0; 2483 } 2484 2485 snprintf(phy_name, sizeof(phy_name), 2486 PHY_ID_FMT, mdio_bus_id, phy_id); 2487 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 2488 fep->phy_interface); 2489 } 2490 2491 if (IS_ERR(phy_dev)) { 2492 netdev_err(ndev, "could not attach to PHY\n"); 2493 return PTR_ERR(phy_dev); 2494 } 2495 2496 /* mask with MAC supported features */ 2497 if (fep->quirks & FEC_QUIRK_HAS_GBIT) { 2498 phy_set_max_speed(phy_dev, 1000); 2499 phy_remove_link_mode(phy_dev, 2500 ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 2501 #if !defined(CONFIG_M5272) 2502 phy_support_sym_pause(phy_dev); 2503 #endif 2504 } 2505 else 2506 phy_set_max_speed(phy_dev, 100); 2507 2508 if (fep->quirks & FEC_QUIRK_HAS_EEE) 2509 phy_support_eee(phy_dev); 2510 2511 fep->link = 0; 2512 fep->full_duplex = 0; 2513 2514 phy_attached_info(phy_dev); 2515 2516 return 0; 2517 } 2518 2519 static int fec_enet_mii_init(struct platform_device *pdev) 2520 { 2521 static struct mii_bus *fec0_mii_bus; 2522 struct net_device *ndev = platform_get_drvdata(pdev); 2523 struct fec_enet_private *fep = netdev_priv(ndev); 2524 bool suppress_preamble = false; 2525 struct phy_device *phydev; 2526 struct device_node *node; 2527 int err = -ENXIO; 2528 u32 mii_speed, holdtime; 2529 u32 bus_freq; 2530 int addr; 2531 2532 /* 2533 * The i.MX28 dual fec interfaces are not equal. 2534 * Here are the differences: 2535 * 2536 * - fec0 supports MII & RMII modes while fec1 only supports RMII 2537 * - fec0 acts as the 1588 time master while fec1 is slave 2538 * - external phys can only be configured by fec0 2539 * 2540 * That is to say fec1 cannot work independently. It only works 2541 * when fec0 is working. The reason behind this design is that the 2542 * second interface is added primarily for Switch mode. 2543 * 2544 * Because of the last point above, both PHYs are attached to the 2545 * fec0 MDIO interface in the board design, and need to be 2546 * configured by the fec0 mii_bus.
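 * The FEC_QUIRK_SINGLE_MDIO path below implements this: fec1 does not
 * register an MDIO bus of its own and simply reuses the mii_bus that
 * fec0 has already registered.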
2547 */ 2548 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { 2549 /* fec1 uses fec0 mii_bus */ 2550 if (mii_cnt && fec0_mii_bus) { 2551 fep->mii_bus = fec0_mii_bus; 2552 mii_cnt++; 2553 return 0; 2554 } 2555 return -ENOENT; 2556 } 2557 2558 bus_freq = 2500000; /* 2.5MHz by default */ 2559 node = of_get_child_by_name(pdev->dev.of_node, "mdio"); 2560 if (node) { 2561 of_property_read_u32(node, "clock-frequency", &bus_freq); 2562 suppress_preamble = of_property_read_bool(node, 2563 "suppress-preamble"); 2564 } 2565 2566 /* 2567 * Set MII speed (= clk_get_rate() / 2 * phy_speed) 2568 * 2569 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while 2570 * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 2571 * Reference Manual has an error here, which is corrected in the 2572 * i.MX6Q documentation (e.g. a 66 MHz clock with the default 2.5 MHz bus_freq gives MII_SPEED = 13 on ENET-MAC, i.e. MDC = 66 MHz / ((13 + 1) x 2), about 2.36 MHz). 2573 */ 2574 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2); 2575 if (fep->quirks & FEC_QUIRK_ENET_MAC) 2576 mii_speed--; 2577 if (mii_speed > 63) { 2578 dev_err(&pdev->dev, 2579 "fec clock (%lu) too fast to get right mii speed\n", 2580 clk_get_rate(fep->clk_ipg)); 2581 err = -EINVAL; 2582 goto err_out; 2583 } 2584 2585 /* 2586 * The i.MX28 and i.MX6 types have another field in the MSCR (aka 2587 * MII_SPEED) register that defines the MDIO output hold time. Earlier 2588 * versions are RAZ there, so just ignore the difference and write the 2589 * register always. 2590 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns. 2591 * HOLDTIME + 1 is the number of clk cycles the fec is holding the 2592 * output. 2593 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). 2594 * Given that ceil(clkrate / 5000000) <= 64, the calculation for 2595 * holdtime cannot result in a value greater than 3 (e.g. at 66 MHz the result is 0, i.e. one hold cycle of about 15 ns, already above the 10 ns minimum). 2596 */ 2597 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; 2598 2599 fep->phy_speed = mii_speed << 1 | holdtime << 8; 2600 2601 if (suppress_preamble) 2602 fep->phy_speed |= BIT(7); 2603 2604 if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) { 2605 /* Clear MMFR to avoid generating an MII event when writing MSCR.
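 * (A stale non-zero MMFR left over from the bootloader would otherwise
 * raise a spurious MII event as soon as MSCR is programmed.)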
* 2606 MII event generation condition: 2607 * - writing MSCR: 2608 * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero & 2609 * mscr_reg_data_in[7:0] != 0 2610 * - writing MMFR: 2611 * - mscr[7:0]_not_zero 2612 */ 2613 writel(0, fep->hwp + FEC_MII_DATA); 2614 } 2615 2616 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 2617 2618 /* Clear any pending transaction complete indication */ 2619 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); 2620 2621 fep->mii_bus = mdiobus_alloc(); 2622 if (fep->mii_bus == NULL) { 2623 err = -ENOMEM; 2624 goto err_out; 2625 } 2626 2627 fep->mii_bus->name = "fec_enet_mii_bus"; 2628 fep->mii_bus->read = fec_enet_mdio_read_c22; 2629 fep->mii_bus->write = fec_enet_mdio_write_c22; 2630 if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) { 2631 fep->mii_bus->read_c45 = fec_enet_mdio_read_c45; 2632 fep->mii_bus->write_c45 = fec_enet_mdio_write_c45; 2633 } 2634 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2635 pdev->name, fep->dev_id + 1); 2636 fep->mii_bus->priv = fep; 2637 fep->mii_bus->parent = &pdev->dev; 2638 2639 err = of_mdiobus_register(fep->mii_bus, node); 2640 if (err) 2641 goto err_out_free_mdiobus; 2642 of_node_put(node); 2643 2644 /* find all the PHY devices on the bus and set mac_managed_pm to true */ 2645 for (addr = 0; addr < PHY_MAX_ADDR; addr++) { 2646 phydev = mdiobus_get_phy(fep->mii_bus, addr); 2647 if (phydev) 2648 phydev->mac_managed_pm = true; 2649 } 2650 2651 mii_cnt++; 2652 2653 /* save fec0 mii_bus */ 2654 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) 2655 fec0_mii_bus = fep->mii_bus; 2656 2657 return 0; 2658 2659 err_out_free_mdiobus: 2660 mdiobus_free(fep->mii_bus); 2661 err_out: 2662 of_node_put(node); 2663 return err; 2664 } 2665 2666 static void fec_enet_mii_remove(struct fec_enet_private *fep) 2667 { 2668 if (--mii_cnt == 0) { 2669 mdiobus_unregister(fep->mii_bus); 2670 mdiobus_free(fep->mii_bus); 2671 } 2672 } 2673 2674 static void fec_enet_get_drvinfo(struct net_device *ndev, 2675 struct ethtool_drvinfo *info) 2676 { 2677 struct fec_enet_private *fep = netdev_priv(ndev); 2678 2679 strscpy(info->driver, fep->pdev->dev.driver->name, 2680 sizeof(info->driver)); 2681 strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); 2682 } 2683 2684 static int fec_enet_get_regs_len(struct net_device *ndev) 2685 { 2686 struct fec_enet_private *fep = netdev_priv(ndev); 2687 struct resource *r; 2688 int s = 0; 2689 2690 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); 2691 if (r) 2692 s = resource_size(r); 2693 2694 return s; 2695 } 2696 2697 /* List of registers that can safely be read to dump them with ethtool */ 2698 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2699 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 2700 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) 2701 static __u32 fec_enet_register_version = 2; 2702 static u32 fec_enet_register_offset[] = { 2703 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2704 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2705 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1, 2706 FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH, 2707 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, 2708 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1, 2709 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2, 2710 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0, 2711 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL,
FEC_R_FIFO_RSEM, 2712 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2, 2713 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1, 2714 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME, 2715 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 2716 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 2717 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 2718 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 2719 RMON_T_P_GTE2048, RMON_T_OCTETS, 2720 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, 2721 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 2722 IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 2723 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 2724 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 2725 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 2726 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 2727 RMON_R_P_GTE2048, RMON_R_OCTETS, 2728 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 2729 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2730 }; 2731 /* for i.MX6ul */ 2732 static u32 fec_enet_register_offset_6ul[] = { 2733 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2734 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2735 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0, 2736 FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, 2737 FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0, 2738 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, 2739 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, 2740 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 2741 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 2742 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 2743 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 2744 RMON_T_P_GTE2048, RMON_T_OCTETS, 2745 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, 2746 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 2747 IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 2748 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 2749 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 2750 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 2751 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 2752 RMON_R_P_GTE2048, RMON_R_OCTETS, 2753 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 2754 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2755 }; 2756 #else 2757 static __u32 fec_enet_register_version = 1; 2758 static u32 fec_enet_register_offset[] = { 2759 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, 2760 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, 2761 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED, 2762 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL, 2763 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, 2764 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0, 2765 FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0, 2766 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0, 2767 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2 2768 }; 2769 #endif 2770 2771 static void fec_enet_get_regs(struct net_device *ndev, 2772 struct ethtool_regs *regs, void *regbuf) 2773 { 2774 struct fec_enet_private *fep = netdev_priv(ndev); 2775 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; 2776 
struct device *dev = &fep->pdev->dev; 2777 u32 *buf = (u32 *)regbuf; 2778 u32 i, off; 2779 int ret; 2780 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2781 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 2782 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) 2783 u32 *reg_list; 2784 u32 reg_cnt; 2785 2786 if (!of_machine_is_compatible("fsl,imx6ul")) { 2787 reg_list = fec_enet_register_offset; 2788 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); 2789 } else { 2790 reg_list = fec_enet_register_offset_6ul; 2791 reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul); 2792 } 2793 #else 2794 /* coldfire */ 2795 static u32 *reg_list = fec_enet_register_offset; 2796 static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); 2797 #endif 2798 ret = pm_runtime_resume_and_get(dev); 2799 if (ret < 0) 2800 return; 2801 2802 regs->version = fec_enet_register_version; 2803 2804 memset(buf, 0, regs->len); 2805 2806 for (i = 0; i < reg_cnt; i++) { 2807 off = reg_list[i]; 2808 2809 if ((off == FEC_R_BOUND || off == FEC_R_FSTART) && 2810 !(fep->quirks & FEC_QUIRK_HAS_FRREG)) 2811 continue; 2812 2813 off >>= 2; 2814 buf[off] = readl(&theregs[off]); 2815 } 2816 2817 pm_runtime_mark_last_busy(dev); 2818 pm_runtime_put_autosuspend(dev); 2819 } 2820 2821 static int fec_enet_get_ts_info(struct net_device *ndev, 2822 struct kernel_ethtool_ts_info *info) 2823 { 2824 struct fec_enet_private *fep = netdev_priv(ndev); 2825 2826 if (fep->bufdesc_ex) { 2827 2828 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 2829 SOF_TIMESTAMPING_TX_HARDWARE | 2830 SOF_TIMESTAMPING_RX_HARDWARE | 2831 SOF_TIMESTAMPING_RAW_HARDWARE; 2832 if (fep->ptp_clock) 2833 info->phc_index = ptp_clock_index(fep->ptp_clock); 2834 2835 info->tx_types = (1 << HWTSTAMP_TX_OFF) | 2836 (1 << HWTSTAMP_TX_ON); 2837 2838 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2839 (1 << HWTSTAMP_FILTER_ALL); 2840 return 0; 2841 } else { 2842 return ethtool_op_get_ts_info(ndev, info); 2843 } 2844 } 2845 2846 #if !defined(CONFIG_M5272) 2847 2848 static void fec_enet_get_pauseparam(struct net_device *ndev, 2849 struct ethtool_pauseparam *pause) 2850 { 2851 struct fec_enet_private *fep = netdev_priv(ndev); 2852 2853 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; 2854 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; 2855 pause->rx_pause = pause->tx_pause; 2856 } 2857 2858 static int fec_enet_set_pauseparam(struct net_device *ndev, 2859 struct ethtool_pauseparam *pause) 2860 { 2861 struct fec_enet_private *fep = netdev_priv(ndev); 2862 2863 if (!ndev->phydev) 2864 return -ENODEV; 2865 2866 if (pause->tx_pause != pause->rx_pause) { 2867 netdev_info(ndev, 2868 "hardware only supports enabling/disabling both tx and rx"); 2869 return -EINVAL; 2870 } 2871 2872 fep->pause_flag = 0; 2873 2874 /* tx pause must be the same as rx pause */ 2875 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; 2876 fep->pause_flag |= pause->autoneg ?
FEC_PAUSE_FLAG_AUTONEG : 0; 2877 2878 phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause, 2879 pause->autoneg); 2880 2881 if (pause->autoneg) { 2882 if (netif_running(ndev)) 2883 fec_stop(ndev); 2884 phy_start_aneg(ndev->phydev); 2885 } 2886 if (netif_running(ndev)) { 2887 napi_disable(&fep->napi); 2888 netif_tx_lock_bh(ndev); 2889 fec_restart(ndev); 2890 netif_tx_wake_all_queues(ndev); 2891 netif_tx_unlock_bh(ndev); 2892 napi_enable(&fep->napi); 2893 } 2894 2895 return 0; 2896 } 2897 2898 static const struct fec_stat { 2899 char name[ETH_GSTRING_LEN]; 2900 u16 offset; 2901 } fec_stats[] = { 2902 /* RMON TX */ 2903 { "tx_dropped", RMON_T_DROP }, 2904 { "tx_packets", RMON_T_PACKETS }, 2905 { "tx_broadcast", RMON_T_BC_PKT }, 2906 { "tx_multicast", RMON_T_MC_PKT }, 2907 { "tx_crc_errors", RMON_T_CRC_ALIGN }, 2908 { "tx_undersize", RMON_T_UNDERSIZE }, 2909 { "tx_oversize", RMON_T_OVERSIZE }, 2910 { "tx_fragment", RMON_T_FRAG }, 2911 { "tx_jabber", RMON_T_JAB }, 2912 { "tx_collision", RMON_T_COL }, 2913 { "tx_64byte", RMON_T_P64 }, 2914 { "tx_65to127byte", RMON_T_P65TO127 }, 2915 { "tx_128to255byte", RMON_T_P128TO255 }, 2916 { "tx_256to511byte", RMON_T_P256TO511 }, 2917 { "tx_512to1023byte", RMON_T_P512TO1023 }, 2918 { "tx_1024to2047byte", RMON_T_P1024TO2047 }, 2919 { "tx_GTE2048byte", RMON_T_P_GTE2048 }, 2920 { "tx_octets", RMON_T_OCTETS }, 2921 2922 /* IEEE TX */ 2923 { "IEEE_tx_drop", IEEE_T_DROP }, 2924 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, 2925 { "IEEE_tx_1col", IEEE_T_1COL }, 2926 { "IEEE_tx_mcol", IEEE_T_MCOL }, 2927 { "IEEE_tx_def", IEEE_T_DEF }, 2928 { "IEEE_tx_lcol", IEEE_T_LCOL }, 2929 { "IEEE_tx_excol", IEEE_T_EXCOL }, 2930 { "IEEE_tx_macerr", IEEE_T_MACERR }, 2931 { "IEEE_tx_cserr", IEEE_T_CSERR }, 2932 { "IEEE_tx_sqe", IEEE_T_SQE }, 2933 { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, 2934 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, 2935 2936 /* RMON RX */ 2937 { "rx_packets", RMON_R_PACKETS }, 2938 { "rx_broadcast", RMON_R_BC_PKT }, 2939 { "rx_multicast", RMON_R_MC_PKT }, 2940 { "rx_crc_errors", RMON_R_CRC_ALIGN }, 2941 { "rx_undersize", RMON_R_UNDERSIZE }, 2942 { "rx_oversize", RMON_R_OVERSIZE }, 2943 { "rx_fragment", RMON_R_FRAG }, 2944 { "rx_jabber", RMON_R_JAB }, 2945 { "rx_64byte", RMON_R_P64 }, 2946 { "rx_65to127byte", RMON_R_P65TO127 }, 2947 { "rx_128to255byte", RMON_R_P128TO255 }, 2948 { "rx_256to511byte", RMON_R_P256TO511 }, 2949 { "rx_512to1023byte", RMON_R_P512TO1023 }, 2950 { "rx_1024to2047byte", RMON_R_P1024TO2047 }, 2951 { "rx_GTE2048byte", RMON_R_P_GTE2048 }, 2952 { "rx_octets", RMON_R_OCTETS }, 2953 2954 /* IEEE RX */ 2955 { "IEEE_rx_drop", IEEE_R_DROP }, 2956 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, 2957 { "IEEE_rx_crc", IEEE_R_CRC }, 2958 { "IEEE_rx_align", IEEE_R_ALIGN }, 2959 { "IEEE_rx_macerr", IEEE_R_MACERR }, 2960 { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, 2961 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, 2962 }; 2963 2964 #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64)) 2965 2966 static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = { 2967 "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */ 2968 "rx_xdp_pass", /* RX_XDP_PASS, */ 2969 "rx_xdp_drop", /* RX_XDP_DROP, */ 2970 "rx_xdp_tx", /* RX_XDP_TX, */ 2971 "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */ 2972 "tx_xdp_xmit", /* TX_XDP_XMIT, */ 2973 "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */ 2974 }; 2975 2976 static void fec_enet_update_ethtool_stats(struct net_device *dev) 2977 { 2978 struct fec_enet_private *fep = netdev_priv(dev); 2979 int i; 2980 2981 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2982 
fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); 2983 } 2984 2985 static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data) 2986 { 2987 u64 xdp_stats[XDP_STATS_TOTAL] = { 0 }; 2988 struct fec_enet_priv_rx_q *rxq; 2989 int i, j; 2990 2991 for (i = fep->num_rx_queues - 1; i >= 0; i--) { 2992 rxq = fep->rx_queue[i]; 2993 2994 for (j = 0; j < XDP_STATS_TOTAL; j++) 2995 xdp_stats[j] += rxq->stats[j]; 2996 } 2997 2998 memcpy(data, xdp_stats, sizeof(xdp_stats)); 2999 } 3000 3001 static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data) 3002 { 3003 #ifdef CONFIG_PAGE_POOL_STATS 3004 struct page_pool_stats stats = {}; 3005 struct fec_enet_priv_rx_q *rxq; 3006 int i; 3007 3008 for (i = fep->num_rx_queues - 1; i >= 0; i--) { 3009 rxq = fep->rx_queue[i]; 3010 3011 if (!rxq->page_pool) 3012 continue; 3013 3014 page_pool_get_stats(rxq->page_pool, &stats); 3015 } 3016 3017 page_pool_ethtool_stats_get(data, &stats); 3018 #endif 3019 } 3020 3021 static void fec_enet_get_ethtool_stats(struct net_device *dev, 3022 struct ethtool_stats *stats, u64 *data) 3023 { 3024 struct fec_enet_private *fep = netdev_priv(dev); 3025 3026 if (netif_running(dev)) 3027 fec_enet_update_ethtool_stats(dev); 3028 3029 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); 3030 data += FEC_STATS_SIZE / sizeof(u64); 3031 3032 fec_enet_get_xdp_stats(fep, data); 3033 data += XDP_STATS_TOTAL; 3034 3035 fec_enet_page_pool_stats(fep, data); 3036 } 3037 3038 static void fec_enet_get_strings(struct net_device *netdev, 3039 u32 stringset, u8 *data) 3040 { 3041 int i; 3042 switch (stringset) { 3043 case ETH_SS_STATS: 3044 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) { 3045 ethtool_puts(&data, fec_stats[i].name); 3046 } 3047 for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) { 3048 ethtool_puts(&data, fec_xdp_stat_strs[i]); 3049 } 3050 page_pool_ethtool_stats_get_strings(data); 3051 3052 break; 3053 case ETH_SS_TEST: 3054 net_selftest_get_strings(data); 3055 break; 3056 } 3057 } 3058 3059 static int fec_enet_get_sset_count(struct net_device *dev, int sset) 3060 { 3061 int count; 3062 3063 switch (sset) { 3064 case ETH_SS_STATS: 3065 count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL; 3066 count += page_pool_ethtool_stats_get_count(); 3067 return count; 3068 3069 case ETH_SS_TEST: 3070 return net_selftest_get_count(); 3071 default: 3072 return -EOPNOTSUPP; 3073 } 3074 } 3075 3076 static void fec_enet_clear_ethtool_stats(struct net_device *dev) 3077 { 3078 struct fec_enet_private *fep = netdev_priv(dev); 3079 struct fec_enet_priv_rx_q *rxq; 3080 int i, j; 3081 3082 /* Disable MIB statistics counters */ 3083 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT); 3084 3085 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 3086 writel(0, fep->hwp + fec_stats[i].offset); 3087 3088 for (i = fep->num_rx_queues - 1; i >= 0; i--) { 3089 rxq = fep->rx_queue[i]; 3090 for (j = 0; j < XDP_STATS_TOTAL; j++) 3091 rxq->stats[j] = 0; 3092 } 3093 3094 /* Don't disable MIB statistics counters */ 3095 writel(0, fep->hwp + FEC_MIB_CTRLSTAT); 3096 } 3097 3098 #else /* !defined(CONFIG_M5272) */ 3099 #define FEC_STATS_SIZE 0 3100 static inline void fec_enet_update_ethtool_stats(struct net_device *dev) 3101 { 3102 } 3103 3104 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev) 3105 { 3106 } 3107 #endif /* !defined(CONFIG_M5272) */ 3108 3109 /* ITR clock source is enet system clock (clk_ahb). 
3110 * The TCTT unit is cycle_ns * 64 cycles, 3111 * so the ICTT value = X us / (cycle_ns * 64) (e.g. with a 66 MHz clk_ahb, cycle_ns * 64 is roughly 0.97 us, so ICTT is about one count per requested microsecond). 3112 */ 3113 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) 3114 { 3115 struct fec_enet_private *fep = netdev_priv(ndev); 3116 3117 return us * (fep->itr_clk_rate / 64000) / 1000; 3118 } 3119 3120 /* Set threshold for interrupt coalescing */ 3121 static void fec_enet_itr_coal_set(struct net_device *ndev) 3122 { 3123 struct fec_enet_private *fep = netdev_priv(ndev); 3124 int rx_itr, tx_itr; 3125 3126 /* Must be greater than zero to avoid unpredictable behavior */ 3127 if (!fep->rx_time_itr || !fep->rx_pkts_itr || 3128 !fep->tx_time_itr || !fep->tx_pkts_itr) 3129 return; 3130 3131 /* Select enet system clock as Interrupt Coalescing 3132 * timer Clock Source 3133 */ 3134 rx_itr = FEC_ITR_CLK_SEL; 3135 tx_itr = FEC_ITR_CLK_SEL; 3136 3137 /* set ICFT and ICTT */ 3138 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); 3139 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); 3140 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); 3141 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); 3142 3143 rx_itr |= FEC_ITR_EN; 3144 tx_itr |= FEC_ITR_EN; 3145 3146 writel(tx_itr, fep->hwp + FEC_TXIC0); 3147 writel(rx_itr, fep->hwp + FEC_RXIC0); 3148 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { 3149 writel(tx_itr, fep->hwp + FEC_TXIC1); 3150 writel(rx_itr, fep->hwp + FEC_RXIC1); 3151 writel(tx_itr, fep->hwp + FEC_TXIC2); 3152 writel(rx_itr, fep->hwp + FEC_RXIC2); 3153 } 3154 } 3155 3156 static int fec_enet_get_coalesce(struct net_device *ndev, 3157 struct ethtool_coalesce *ec, 3158 struct kernel_ethtool_coalesce *kernel_coal, 3159 struct netlink_ext_ack *extack) 3160 { 3161 struct fec_enet_private *fep = netdev_priv(ndev); 3162 3163 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) 3164 return -EOPNOTSUPP; 3165 3166 ec->rx_coalesce_usecs = fep->rx_time_itr; 3167 ec->rx_max_coalesced_frames = fep->rx_pkts_itr; 3168 3169 ec->tx_coalesce_usecs = fep->tx_time_itr; 3170 ec->tx_max_coalesced_frames = fep->tx_pkts_itr; 3171 3172 return 0; 3173 } 3174 3175 static int fec_enet_set_coalesce(struct net_device *ndev, 3176 struct ethtool_coalesce *ec, 3177 struct kernel_ethtool_coalesce *kernel_coal, 3178 struct netlink_ext_ack *extack) 3179 { 3180 struct fec_enet_private *fep = netdev_priv(ndev); 3181 struct device *dev = &fep->pdev->dev; 3182 unsigned int cycle; 3183 3184 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) 3185 return -EOPNOTSUPP; 3186 3187 if (ec->rx_max_coalesced_frames > 255) { 3188 dev_err(dev, "Rx coalesced frames exceed hardware limitation\n"); 3189 return -EINVAL; 3190 } 3191 3192 if (ec->tx_max_coalesced_frames > 255) { 3193 dev_err(dev, "Tx coalesced frames exceed hardware limitation\n"); 3194 return -EINVAL; 3195 } 3196 3197 cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); 3198 if (cycle > 0xFFFF) { 3199 dev_err(dev, "Rx coalesced usecs exceed hardware limitation\n"); 3200 return -EINVAL; 3201 } 3202 3203 cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); 3204 if (cycle > 0xFFFF) { 3205 dev_err(dev, "Tx coalesced usecs exceed hardware limitation\n"); 3206 return -EINVAL; 3207 } 3208 3209 fep->rx_time_itr = ec->rx_coalesce_usecs; 3210 fep->rx_pkts_itr = ec->rx_max_coalesced_frames; 3211 3212 fep->tx_time_itr = ec->tx_coalesce_usecs; 3213 fep->tx_pkts_itr = ec->tx_max_coalesced_frames; 3214 3215 fec_enet_itr_coal_set(ndev); 3216 3217 return 0; 3218 } 3219 3220 static int 3221 fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
3222 { 3223 struct fec_enet_private *fep = netdev_priv(ndev); 3224 3225 if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) 3226 return -EOPNOTSUPP; 3227 3228 if (!netif_running(ndev)) 3229 return -ENETDOWN; 3230 3231 return phy_ethtool_get_eee(ndev->phydev, edata); 3232 } 3233 3234 static int 3235 fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata) 3236 { 3237 struct fec_enet_private *fep = netdev_priv(ndev); 3238 3239 if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) 3240 return -EOPNOTSUPP; 3241 3242 if (!netif_running(ndev)) 3243 return -ENETDOWN; 3244 3245 return phy_ethtool_set_eee(ndev->phydev, edata); 3246 } 3247 3248 static void 3249 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 3250 { 3251 struct fec_enet_private *fep = netdev_priv(ndev); 3252 3253 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { 3254 wol->supported = WAKE_MAGIC; 3255 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0; 3256 } else { 3257 wol->supported = wol->wolopts = 0; 3258 } 3259 } 3260 3261 static int 3262 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 3263 { 3264 struct fec_enet_private *fep = netdev_priv(ndev); 3265 3266 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) 3267 return -EINVAL; 3268 3269 if (wol->wolopts & ~WAKE_MAGIC) 3270 return -EINVAL; 3271 3272 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); 3273 if (device_may_wakeup(&ndev->dev)) 3274 fep->wol_flag |= FEC_WOL_FLAG_ENABLE; 3275 else 3276 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); 3277 3278 return 0; 3279 } 3280 3281 static const struct ethtool_ops fec_enet_ethtool_ops = { 3282 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 3283 ETHTOOL_COALESCE_MAX_FRAMES, 3284 .get_drvinfo = fec_enet_get_drvinfo, 3285 .get_regs_len = fec_enet_get_regs_len, 3286 .get_regs = fec_enet_get_regs, 3287 .nway_reset = phy_ethtool_nway_reset, 3288 .get_link = ethtool_op_get_link, 3289 .get_coalesce = fec_enet_get_coalesce, 3290 .set_coalesce = fec_enet_set_coalesce, 3291 #ifndef CONFIG_M5272 3292 .get_pauseparam = fec_enet_get_pauseparam, 3293 .set_pauseparam = fec_enet_set_pauseparam, 3294 .get_strings = fec_enet_get_strings, 3295 .get_ethtool_stats = fec_enet_get_ethtool_stats, 3296 .get_sset_count = fec_enet_get_sset_count, 3297 #endif 3298 .get_ts_info = fec_enet_get_ts_info, 3299 .get_wol = fec_enet_get_wol, 3300 .set_wol = fec_enet_set_wol, 3301 .get_eee = fec_enet_get_eee, 3302 .set_eee = fec_enet_set_eee, 3303 .get_link_ksettings = phy_ethtool_get_link_ksettings, 3304 .set_link_ksettings = phy_ethtool_set_link_ksettings, 3305 .self_test = net_selftest, 3306 }; 3307 3308 static void fec_enet_free_buffers(struct net_device *ndev) 3309 { 3310 struct fec_enet_private *fep = netdev_priv(ndev); 3311 unsigned int i; 3312 struct fec_enet_priv_tx_q *txq; 3313 struct fec_enet_priv_rx_q *rxq; 3314 unsigned int q; 3315 3316 for (q = 0; q < fep->num_rx_queues; q++) { 3317 rxq = fep->rx_queue[q]; 3318 for (i = 0; i < rxq->bd.ring_size; i++) 3319 page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false); 3320 3321 for (i = 0; i < XDP_STATS_TOTAL; i++) 3322 rxq->stats[i] = 0; 3323 3324 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) 3325 xdp_rxq_info_unreg(&rxq->xdp_rxq); 3326 page_pool_destroy(rxq->page_pool); 3327 rxq->page_pool = NULL; 3328 } 3329 3330 for (q = 0; q < fep->num_tx_queues; q++) { 3331 txq = fep->tx_queue[q]; 3332 for (i = 0; i < txq->bd.ring_size; i++) { 3333 kfree(txq->tx_bounce[i]); 3334 txq->tx_bounce[i] = NULL; 3335 3336 if (!txq->tx_buf[i].buf_p) { 3337 
txq->tx_buf[i].type = FEC_TXBUF_T_SKB; 3338 continue; 3339 } 3340 3341 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { 3342 dev_kfree_skb(txq->tx_buf[i].buf_p); 3343 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) { 3344 xdp_return_frame(txq->tx_buf[i].buf_p); 3345 } else { 3346 struct page *page = txq->tx_buf[i].buf_p; 3347 3348 page_pool_put_page(page->pp, page, 0, false); 3349 } 3350 3351 txq->tx_buf[i].buf_p = NULL; 3352 txq->tx_buf[i].type = FEC_TXBUF_T_SKB; 3353 } 3354 } 3355 } 3356 3357 static void fec_enet_free_queue(struct net_device *ndev) 3358 { 3359 struct fec_enet_private *fep = netdev_priv(ndev); 3360 int i; 3361 struct fec_enet_priv_tx_q *txq; 3362 3363 for (i = 0; i < fep->num_tx_queues; i++) 3364 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { 3365 txq = fep->tx_queue[i]; 3366 fec_dma_free(&fep->pdev->dev, 3367 txq->bd.ring_size * TSO_HEADER_SIZE, 3368 txq->tso_hdrs, txq->tso_hdrs_dma); 3369 } 3370 3371 for (i = 0; i < fep->num_rx_queues; i++) 3372 kfree(fep->rx_queue[i]); 3373 for (i = 0; i < fep->num_tx_queues; i++) 3374 kfree(fep->tx_queue[i]); 3375 } 3376 3377 static int fec_enet_alloc_queue(struct net_device *ndev) 3378 { 3379 struct fec_enet_private *fep = netdev_priv(ndev); 3380 int i; 3381 int ret = 0; 3382 struct fec_enet_priv_tx_q *txq; 3383 3384 for (i = 0; i < fep->num_tx_queues; i++) { 3385 txq = kzalloc(sizeof(*txq), GFP_KERNEL); 3386 if (!txq) { 3387 ret = -ENOMEM; 3388 goto alloc_failed; 3389 } 3390 3391 fep->tx_queue[i] = txq; 3392 txq->bd.ring_size = TX_RING_SIZE; 3393 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; 3394 3395 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; 3396 txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS; 3397 3398 txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev, 3399 txq->bd.ring_size * TSO_HEADER_SIZE, 3400 &txq->tso_hdrs_dma, GFP_KERNEL); 3401 if (!txq->tso_hdrs) { 3402 ret = -ENOMEM; 3403 goto alloc_failed; 3404 } 3405 } 3406 3407 for (i = 0; i < fep->num_rx_queues; i++) { 3408 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), 3409 GFP_KERNEL); 3410 if (!fep->rx_queue[i]) { 3411 ret = -ENOMEM; 3412 goto alloc_failed; 3413 } 3414 3415 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; 3416 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; 3417 } 3418 return ret; 3419 3420 alloc_failed: 3421 fec_enet_free_queue(ndev); 3422 return ret; 3423 } 3424 3425 static int 3426 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) 3427 { 3428 struct fec_enet_private *fep = netdev_priv(ndev); 3429 struct fec_enet_priv_rx_q *rxq; 3430 dma_addr_t phys_addr; 3431 struct bufdesc *bdp; 3432 struct page *page; 3433 int i, err; 3434 3435 rxq = fep->rx_queue[queue]; 3436 bdp = rxq->bd.base; 3437 3438 err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size); 3439 if (err < 0) { 3440 netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err); 3441 return err; 3442 } 3443 3444 for (i = 0; i < rxq->bd.ring_size; i++) { 3445 page = page_pool_dev_alloc_pages(rxq->page_pool); 3446 if (!page) 3447 goto err_alloc; 3448 3449 phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM; 3450 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); 3451 3452 rxq->rx_skb_info[i].page = page; 3453 rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM; 3454 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); 3455 3456 if (fep->bufdesc_ex) { 3457 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 3458 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 3459 } 3460 3461 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 3462 } 
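	/* Each descriptor above points FEC_ENET_XDP_HEADROOM bytes into its
	 * page so an attached XDP program has room to grow headers with
	 * bpf_xdp_adjust_head().
	 */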
3463 3464 /* Set the last buffer to wrap. */ 3465 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); 3466 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 3467 return 0; 3468 3469 err_alloc: 3470 fec_enet_free_buffers(ndev); 3471 return -ENOMEM; 3472 } 3473 3474 static int 3475 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) 3476 { 3477 struct fec_enet_private *fep = netdev_priv(ndev); 3478 unsigned int i; 3479 struct bufdesc *bdp; 3480 struct fec_enet_priv_tx_q *txq; 3481 3482 txq = fep->tx_queue[queue]; 3483 bdp = txq->bd.base; 3484 for (i = 0; i < txq->bd.ring_size; i++) { 3485 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); 3486 if (!txq->tx_bounce[i]) 3487 goto err_alloc; 3488 3489 bdp->cbd_sc = cpu_to_fec16(0); 3490 bdp->cbd_bufaddr = cpu_to_fec32(0); 3491 3492 if (fep->bufdesc_ex) { 3493 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 3494 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); 3495 } 3496 3497 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 3498 } 3499 3500 /* Set the last buffer to wrap. */ 3501 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); 3502 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 3503 3504 return 0; 3505 3506 err_alloc: 3507 fec_enet_free_buffers(ndev); 3508 return -ENOMEM; 3509 } 3510 3511 static int fec_enet_alloc_buffers(struct net_device *ndev) 3512 { 3513 struct fec_enet_private *fep = netdev_priv(ndev); 3514 unsigned int i; 3515 3516 for (i = 0; i < fep->num_rx_queues; i++) 3517 if (fec_enet_alloc_rxq_buffers(ndev, i)) 3518 return -ENOMEM; 3519 3520 for (i = 0; i < fep->num_tx_queues; i++) 3521 if (fec_enet_alloc_txq_buffers(ndev, i)) 3522 return -ENOMEM; 3523 return 0; 3524 } 3525 3526 static int 3527 fec_enet_open(struct net_device *ndev) 3528 { 3529 struct fec_enet_private *fep = netdev_priv(ndev); 3530 int ret; 3531 bool reset_again; 3532 3533 ret = pm_runtime_resume_and_get(&fep->pdev->dev); 3534 if (ret < 0) 3535 return ret; 3536 3537 pinctrl_pm_select_default_state(&fep->pdev->dev); 3538 ret = fec_enet_clk_enable(ndev, true); 3539 if (ret) 3540 goto clk_enable; 3541 3542 /* During the first fec_enet_open call the PHY is not yet probed, so 3543 * the phy_reset_after_clk_enable() call within fec_enet_clk_enable() 3544 * fails. As we need this reset in order to be sure the PHY is working 3545 * correctly, check here whether we need to reset again later, once 3546 * the PHY has been probed. 3547 */ 3548 if (ndev->phydev && ndev->phydev->drv) 3549 reset_again = false; 3550 else 3551 reset_again = true; 3552 3553 /* I should reset the ring buffers here, but I don't yet know 3554 * a simple way to do that. 3555 */ 3556 3557 ret = fec_enet_alloc_buffers(ndev); 3558 if (ret) 3559 goto err_enet_alloc; 3560 3561 /* Init MAC prior to mii bus probe */ 3562 fec_restart(ndev); 3563 3564 /* Call phy_reset_after_clk_enable() again if the earlier call failed 3565 * because the PHY was not yet probed.
3566 */ 3567 if (reset_again) 3568 fec_enet_phy_reset_after_clk_enable(ndev); 3569 3570 /* Probe and connect to the PHY when opening the interface */ 3571 ret = fec_enet_mii_probe(ndev); 3572 if (ret) 3573 goto err_enet_mii_probe; 3574 3575 if (fep->quirks & FEC_QUIRK_ERR006687) 3576 imx6q_cpuidle_fec_irqs_used(); 3577 3578 if (fep->quirks & FEC_QUIRK_HAS_PMQOS) 3579 cpu_latency_qos_add_request(&fep->pm_qos_req, 0); 3580 3581 napi_enable(&fep->napi); 3582 phy_start(ndev->phydev); 3583 netif_tx_start_all_queues(ndev); 3584 3585 device_set_wakeup_enable(&ndev->dev, fep->wol_flag & 3586 FEC_WOL_FLAG_ENABLE); 3587 3588 return 0; 3589 3590 err_enet_mii_probe: 3591 fec_enet_free_buffers(ndev); 3592 err_enet_alloc: 3593 fec_enet_clk_enable(ndev, false); 3594 clk_enable: 3595 pm_runtime_mark_last_busy(&fep->pdev->dev); 3596 pm_runtime_put_autosuspend(&fep->pdev->dev); 3597 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3598 return ret; 3599 } 3600 3601 static int 3602 fec_enet_close(struct net_device *ndev) 3603 { 3604 struct fec_enet_private *fep = netdev_priv(ndev); 3605 3606 phy_stop(ndev->phydev); 3607 3608 if (netif_device_present(ndev)) { 3609 napi_disable(&fep->napi); 3610 netif_tx_disable(ndev); 3611 fec_stop(ndev); 3612 } 3613 3614 phy_disconnect(ndev->phydev); 3615 3616 if (fep->quirks & FEC_QUIRK_ERR006687) 3617 imx6q_cpuidle_fec_irqs_unused(); 3618 3619 fec_enet_update_ethtool_stats(ndev); 3620 3621 fec_enet_clk_enable(ndev, false); 3622 if (fep->quirks & FEC_QUIRK_HAS_PMQOS) 3623 cpu_latency_qos_remove_request(&fep->pm_qos_req); 3624 3625 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3626 pm_runtime_mark_last_busy(&fep->pdev->dev); 3627 pm_runtime_put_autosuspend(&fep->pdev->dev); 3628 3629 fec_enet_free_buffers(ndev); 3630 3631 return 0; 3632 } 3633 3634 /* Set or clear the multicast filter for this adaptor. 3635 * Skeleton taken from sunlance driver. 3636 * The CPM Ethernet implementation allows Multicast as well as individual 3637 * MAC address filtering. Some of the drivers check to make sure it is 3638 * a group multicast address, and discard those that are not. I guess I 3639 * will do the same for now, but just remove the test if you want 3640 * individual filtering as well (do the upper net layers want or support 3641 * this kind of feature?).
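 * The filter itself is a 64-bit hash: the upper FEC_HASH_BITS of the
 * little-endian CRC-32 of each address select one bit across the
 * GRP_HASH_TABLE_HIGH/LOW register pair (see set_multicast_list below).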
3642 */ 3643 3644 #define FEC_HASH_BITS 6 /* #bits in hash */ 3645 3646 static void set_multicast_list(struct net_device *ndev) 3647 { 3648 struct fec_enet_private *fep = netdev_priv(ndev); 3649 struct netdev_hw_addr *ha; 3650 unsigned int crc, tmp; 3651 unsigned char hash; 3652 unsigned int hash_high = 0, hash_low = 0; 3653 3654 if (ndev->flags & IFF_PROMISC) { 3655 tmp = readl(fep->hwp + FEC_R_CNTRL); 3656 tmp |= 0x8; 3657 writel(tmp, fep->hwp + FEC_R_CNTRL); 3658 return; 3659 } 3660 3661 tmp = readl(fep->hwp + FEC_R_CNTRL); 3662 tmp &= ~0x8; 3663 writel(tmp, fep->hwp + FEC_R_CNTRL); 3664 3665 if (ndev->flags & IFF_ALLMULTI) { 3666 /* Catch all multicast addresses, so set the 3667 * filter to all 1's 3668 */ 3669 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 3670 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 3671 3672 return; 3673 } 3674 3675 /* Add the addresses to the hash registers */ 3676 netdev_for_each_mc_addr(ha, ndev) { 3677 /* calculate the crc32 value of the MAC address */ 3678 crc = ether_crc_le(ndev->addr_len, ha->addr); 3679 3680 /* only the upper 6 bits (FEC_HASH_BITS) are used; 3681 * they select a specific bit in the hash registers (e.g. hash 39 sets bit 7 of the high register) 3682 */ 3683 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; 3684 3685 if (hash > 31) 3686 hash_high |= 1 << (hash - 32); 3687 else 3688 hash_low |= 1 << hash; 3689 } 3690 3691 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 3692 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 3693 } 3694 3695 /* Set a MAC change in hardware. */ 3696 static int 3697 fec_set_mac_address(struct net_device *ndev, void *p) 3698 { 3699 struct fec_enet_private *fep = netdev_priv(ndev); 3700 struct sockaddr *addr = p; 3701 3702 if (addr) { 3703 if (!is_valid_ether_addr(addr->sa_data)) 3704 return -EADDRNOTAVAIL; 3705 eth_hw_addr_set(ndev, addr->sa_data); 3706 } 3707 3708 /* Check netif status here to avoid a system hang in a case like: 3709 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; 3710 * once ethx is down, all FEC clocks are gated off and any register 3711 * access would hang the system.
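 * The same hang would be triggered by the modern equivalent,
 * "ip link set ethx address ...", on a downed interface.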
3712 */ 3713 if (!netif_running(ndev)) 3714 return 0; 3715 3716 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | 3717 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), 3718 fep->hwp + FEC_ADDR_LOW); 3719 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), 3720 fep->hwp + FEC_ADDR_HIGH); 3721 return 0; 3722 } 3723 3724 static inline void fec_enet_set_netdev_features(struct net_device *netdev, 3725 netdev_features_t features) 3726 { 3727 struct fec_enet_private *fep = netdev_priv(netdev); 3728 netdev_features_t changed = features ^ netdev->features; 3729 3730 netdev->features = features; 3731 3732 /* Receive checksum has been changed */ 3733 if (changed & NETIF_F_RXCSUM) { 3734 if (features & NETIF_F_RXCSUM) 3735 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 3736 else 3737 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; 3738 } 3739 } 3740 3741 static int fec_set_features(struct net_device *netdev, 3742 netdev_features_t features) 3743 { 3744 struct fec_enet_private *fep = netdev_priv(netdev); 3745 netdev_features_t changed = features ^ netdev->features; 3746 3747 if (netif_running(netdev) && changed & NETIF_F_RXCSUM) { 3748 napi_disable(&fep->napi); 3749 netif_tx_lock_bh(netdev); 3750 fec_stop(netdev); 3751 fec_enet_set_netdev_features(netdev, features); 3752 fec_restart(netdev); 3753 netif_tx_wake_all_queues(netdev); 3754 netif_tx_unlock_bh(netdev); 3755 napi_enable(&fep->napi); 3756 } else { 3757 fec_enet_set_netdev_features(netdev, features); 3758 } 3759 3760 return 0; 3761 } 3762 3763 static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, 3764 struct net_device *sb_dev) 3765 { 3766 struct fec_enet_private *fep = netdev_priv(ndev); 3767 u16 vlan_tag = 0; 3768 3769 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 3770 return netdev_pick_tx(ndev, skb, NULL); 3771 3772 /* VLAN is present in the payload. */ 3773 if (eth_type_vlan(skb->protocol)) { 3774 struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb); 3775 3776 vlan_tag = ntohs(vhdr->h_vlan_TCI); 3777 /* VLAN is present in the skb but not yet pushed in the payload. */ 3778 } else if (skb_vlan_tag_present(skb)) { 3779 vlan_tag = skb->vlan_tci; 3780 } else { 3781 return vlan_tag; 3782 } 3783 3784 return fec_enet_vlan_pri_to_queue[vlan_tag >> 13]; 3785 } 3786 3787 static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf) 3788 { 3789 struct fec_enet_private *fep = netdev_priv(dev); 3790 bool is_run = netif_running(dev); 3791 struct bpf_prog *old_prog; 3792 3793 switch (bpf->command) { 3794 case XDP_SETUP_PROG: 3795 /* There is no need to support the SoCs that require the 3796 * frame swap, because the performance would be no 3797 * better than skb mode.
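 * (Those SoCs would still have to byte-swap every frame in software
 * via swap_buffer(), exactly as the skb path does.)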
3798 */ 3799 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 3800 return -EOPNOTSUPP; 3801 3802 if (!bpf->prog) 3803 xdp_features_clear_redirect_target(dev); 3804 3805 if (is_run) { 3806 napi_disable(&fep->napi); 3807 netif_tx_disable(dev); 3808 } 3809 3810 old_prog = xchg(&fep->xdp_prog, bpf->prog); 3811 if (old_prog) 3812 bpf_prog_put(old_prog); 3813 3814 fec_restart(dev); 3815 3816 if (is_run) { 3817 napi_enable(&fep->napi); 3818 netif_tx_start_all_queues(dev); 3819 } 3820 3821 if (bpf->prog) 3822 xdp_features_set_redirect_target(dev, false); 3823 3824 return 0; 3825 3826 case XDP_SETUP_XSK_POOL: 3827 return -EOPNOTSUPP; 3828 3829 default: 3830 return -EOPNOTSUPP; 3831 } 3832 } 3833 3834 static int 3835 fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index) 3836 { 3837 if (unlikely(index < 0)) 3838 return 0; 3839 3840 return (index % fep->num_tx_queues); 3841 } 3842 3843 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, 3844 struct fec_enet_priv_tx_q *txq, 3845 void *frame, u32 dma_sync_len, 3846 bool ndo_xmit) 3847 { 3848 unsigned int index, status, estatus; 3849 struct bufdesc *bdp; 3850 dma_addr_t dma_addr; 3851 int entries_free; 3852 u16 frame_len; 3853 3854 entries_free = fec_enet_get_free_txdesc_num(txq); 3855 if (entries_free < MAX_SKB_FRAGS + 1) { 3856 netdev_err_once(fep->netdev, "NOT enough BD for SG!\n"); 3857 return -EBUSY; 3858 } 3859 3860 /* Fill in a Tx ring entry */ 3861 bdp = txq->bd.cur; 3862 status = fec16_to_cpu(bdp->cbd_sc); 3863 status &= ~BD_ENET_TX_STATS; 3864 3865 index = fec_enet_get_bd_index(bdp, &txq->bd); 3866 3867 if (ndo_xmit) { 3868 struct xdp_frame *xdpf = frame; 3869 3870 dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data, 3871 xdpf->len, DMA_TO_DEVICE); 3872 if (dma_mapping_error(&fep->pdev->dev, dma_addr)) 3873 return -ENOMEM; 3874 3875 frame_len = xdpf->len; 3876 txq->tx_buf[index].buf_p = xdpf; 3877 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO; 3878 } else { 3879 struct xdp_buff *xdpb = frame; 3880 struct page *page; 3881 3882 page = virt_to_page(xdpb->data); 3883 dma_addr = page_pool_get_dma_addr(page) + 3884 (xdpb->data - xdpb->data_hard_start); 3885 dma_sync_single_for_device(&fep->pdev->dev, dma_addr, 3886 dma_sync_len, DMA_BIDIRECTIONAL); 3887 frame_len = xdpb->data_end - xdpb->data; 3888 txq->tx_buf[index].buf_p = page; 3889 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX; 3890 } 3891 3892 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 3893 if (fep->bufdesc_ex) 3894 estatus = BD_ENET_TX_INT; 3895 3896 bdp->cbd_bufaddr = cpu_to_fec32(dma_addr); 3897 bdp->cbd_datlen = cpu_to_fec16(frame_len); 3898 3899 if (fep->bufdesc_ex) { 3900 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 3901 3902 if (fep->quirks & FEC_QUIRK_HAS_AVB) 3903 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 3904 3905 ebdp->cbd_bdu = 0; 3906 ebdp->cbd_esc = cpu_to_fec32(estatus); 3907 } 3908 3909 /* Make sure the updates to the rest of the descriptor are performed 3910 * before transferring ownership. 3911 */ 3912 dma_wmb(); 3913 3914 /* Send it on its way. Tell FEC it's ready, interrupt when done, 3915 * it's the last BD of the frame, and to put the CRC on the end. 3916 */ 3917 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); 3918 bdp->cbd_sc = cpu_to_fec16(status); 3919 3920 /* If this was the last BD in the ring, start at the beginning again. */ 3921 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 3922 3923 /* Make sure the updates to bdp are performed before txq->bd.cur is advanced.
static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
				   struct fec_enet_priv_tx_q *txq,
				   void *frame, u32 dma_sync_len,
				   bool ndo_xmit)
{
	unsigned int index, status, estatus;
	struct bufdesc *bdp;
	dma_addr_t dma_addr;
	int entries_free;
	u16 frame_len;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
		return -EBUSY;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	index = fec_enet_get_bd_index(bdp, &txq->bd);

	if (ndo_xmit) {
		struct xdp_frame *xdpf = frame;

		dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dma_addr))
			return -ENOMEM;

		frame_len = xdpf->len;
		txq->tx_buf[index].buf_p = xdpf;
		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
	} else {
		struct xdp_buff *xdpb = frame;
		struct page *page;

		page = virt_to_page(xdpb->data);
		dma_addr = page_pool_get_dma_addr(page) +
			   (xdpb->data - xdpb->data_hard_start);
		dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
					   dma_sync_len, DMA_BIDIRECTIONAL);
		frame_len = xdpb->data_end - xdpb->data;
		txq->tx_buf[index].buf_p = page;
		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
	}

	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
	if (fep->bufdesc_ex)
		estatus = BD_ENET_TX_INT;

	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
	bdp->cbd_datlen = cpu_to_fec16(frame_len);

	if (fep->bufdesc_ex) {
		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Make sure the updates to the rest of the descriptor are performed
	 * before transferring ownership.
	 */
	dma_wmb();

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

	/* Make sure the update to bdp is performed before updating
	 * txq->bd.cur.
	 */
	dma_wmb();

	txq->bd.cur = bdp;

	/* Trigger transmission start */
	writel(0, txq->bd.reg_desc_active);

	return 0;
}

static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
				int cpu, struct xdp_buff *xdp,
				u32 dma_sync_len)
{
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int queue, ret;

	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(fep->netdev, queue);

	__netif_tx_lock(nq, cpu);

	/* Avoid tx timeout as XDP shares the queue with kernel stack */
	txq_trans_cond_update(nq);
	ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);

	__netif_tx_unlock(nq);

	return ret;
}

static int fec_enet_xdp_xmit(struct net_device *dev,
			     int num_frames,
			     struct xdp_frame **frames,
			     u32 flags)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	int cpu = smp_processor_id();
	unsigned int sent_frames = 0;
	struct netdev_queue *nq;
	unsigned int queue;
	int i;

	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(fep->netdev, queue);

	__netif_tx_lock(nq, cpu);

	/* Avoid tx timeout as XDP shares the queue with kernel stack */
	txq_trans_cond_update(nq);
	for (i = 0; i < num_frames; i++) {
		if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
			break;
		sent_frames++;
	}

	__netif_tx_unlock(nq);

	return sent_frames;
}

static int fec_hwtstamp_get(struct net_device *ndev,
			    struct kernel_hwtstamp_config *config)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!netif_running(ndev))
		return -EINVAL;

	if (!fep->bufdesc_ex)
		return -EOPNOTSUPP;

	fec_ptp_get(ndev, config);

	return 0;
}

static int fec_hwtstamp_set(struct net_device *ndev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!netif_running(ndev))
		return -EINVAL;

	if (!fep->bufdesc_ex)
		return -EOPNOTSUPP;

	return fec_ptp_set(ndev, config, extack);
}

static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_select_queue	= fec_enet_select_queue,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_features	= fec_set_features,
	.ndo_bpf		= fec_enet_bpf,
	.ndo_xdp_xmit		= fec_enet_xdp_xmit,
	.ndo_hwtstamp_get	= fec_hwtstamp_get,
	.ndo_hwtstamp_set	= fec_hwtstamp_set,
};

static const unsigned short offset_des_active_rxq[] = {
	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
};

static const unsigned short offset_des_active_txq[] = {
	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
};

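/* Per-queue "descriptor active" doorbell register offsets, indexed by queue
 * id. fec_enet_init() stores the matching register address in each ring's
 * bd.reg_desc_active; the driver writes 0 to it to kick the DMA engine into
 * rescanning that ring's descriptors.
 */
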
/*
 * XXX:  We need to clean up on failure exits here.
 *
 */
static int fec_enet_init(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *cbd_base;
	dma_addr_t bd_dma;
	int bd_size;
	unsigned int i;
	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned dsize_log2 = __fls(dsize);
	int ret;

	WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	fep->rx_align = 0xf;
	fep->tx_align = 0xf;
#else
	fep->rx_align = 0x3;
	fep->tx_align = 0x3;
#endif
	fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
	fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
	fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
	fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;

	/* Check mask of the streaming and coherent API */
	ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
	if (ret < 0) {
		dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
		return ret;
	}

	ret = fec_enet_alloc_queue(ndev);
	if (ret)
		return ret;

	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;

	/* Allocate memory for buffer descriptors. */
	cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma,
				  GFP_KERNEL);
	if (!cbd_base) {
		ret = -ENOMEM;
		goto free_queue_mem;
	}

	/* Get the Ethernet address */
	ret = fec_get_mac(ndev);
	if (ret)
		goto free_queue_mem;

	/* Set receive and transmit descriptor base. */
	for (i = 0; i < fep->num_rx_queues; i++) {
		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
		unsigned size = dsize * rxq->bd.ring_size;

		rxq->bd.qid = i;
		rxq->bd.base = cbd_base;
		rxq->bd.cur = cbd_base;
		rxq->bd.dma = bd_dma;
		rxq->bd.dsize = dsize;
		rxq->bd.dsize_log2 = dsize_log2;
		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
		unsigned size = dsize * txq->bd.ring_size;

		txq->bd.qid = i;
		txq->bd.base = cbd_base;
		txq->bd.cur = cbd_base;
		txq->bd.dma = bd_dma;
		txq->bd.dsize = dsize;
		txq->bd.dsize_log2 = dsize_log2;
		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}

	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;

	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);

	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
		/* enable hw VLAN support */
		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
		netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);

		/* enable hw accelerator */
		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
	}

	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
		fep->tx_align = 0;
		fep->rx_align = 0x3f;
	}

	ndev->hw_features = ndev->features;

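	/* XDP is only advertised on SoCs that do not need the RX frame swap
	 * workaround; fec_enet_bpf() rejects program setup on those parts
	 * for the same reason.
	 */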
	if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
		ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
				     NETDEV_XDP_ACT_REDIRECT;

	fec_restart(ndev);

	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
		fec_enet_clear_ethtool_stats(ndev);
	else
		fec_enet_update_ethtool_stats(ndev);

	return 0;

free_queue_mem:
	fec_enet_free_queue(ndev);
	return ret;
}

static void fec_enet_deinit(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	netif_napi_del(&fep->napi);
	fec_enet_free_queue(ndev);
}

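/* fec_reset_phy() below drives an optional PHY reset line described by the
 * legacy device-tree properties parsed there. A hypothetical node using
 * them might look like the following (GPIO and timing values are purely
 * illustrative and board specific):
 *
 *	&fec {
 *		phy-reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 *		phy-reset-duration = <10>;	// ms the reset is asserted
 *		phy-reset-post-delay = <5>;	// ms to wait after release
 *	};
 */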
#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
	struct gpio_desc *phy_reset;
	int msec = 1, phy_post_delay = 0;
	struct device_node *np = pdev->dev.of_node;
	int err;

	if (!np)
		return 0;

	err = of_property_read_u32(np, "phy-reset-duration", &msec);
	/* A sane reset duration should not be longer than 1s */
	if (!err && msec > 1000)
		msec = 1;

	err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
	/* A valid post-reset delay should be no longer than 1s */
	if (!err && phy_post_delay > 1000)
		return -EINVAL;

	phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset",
					    GPIOD_OUT_HIGH);
	if (IS_ERR(phy_reset))
		return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset),
				     "failed to get phy-reset-gpios\n");

	if (!phy_reset)
		return 0;

	if (msec > 20)
		msleep(msec);
	else
		usleep_range(msec * 1000, msec * 1000 + 1000);

	gpiod_set_value_cansleep(phy_reset, 0);

	if (!phy_post_delay)
		return 0;

	if (phy_post_delay > 20)
		msleep(phy_post_delay);
	else
		usleep_range(phy_post_delay * 1000,
			     phy_post_delay * 1000 + 1000);

	return 0;
}
#else /* CONFIG_OF */
static int fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In the platform-probe case, the reset has already been done
	 * by the board setup code.
	 */
	return 0;
}
#endif /* CONFIG_OF */

static void
fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
{
	struct device_node *np = pdev->dev.of_node;

	*num_tx = *num_rx = 1;

	if (!np || !of_device_is_available(np))
		return;

	/* parse the num of tx and rx queues */
	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);

	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);

	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
			 *num_tx);
		*num_tx = 1;
		return;
	}

	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
			 *num_rx);
		*num_rx = 1;
		return;
	}
}

static int fec_enet_get_irq_cnt(struct platform_device *pdev)
{
	int irq_cnt = platform_irq_count(pdev);

	if (irq_cnt > FEC_IRQ_NUM)
		irq_cnt = FEC_IRQ_NUM;	/* last for pps */
	else if (irq_cnt == 2)
		irq_cnt = 1;	/* last for pps */
	else if (irq_cnt <= 0)
		irq_cnt = 1;	/* At least 1 irq is needed */
	return irq_cnt;
}

static void fec_enet_get_wakeup_irq(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2)
		fep->wake_irq = fep->irq[2];
	else
		fep->wake_irq = fep->irq[0];
}

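/* fec_enet_init_stop_mode() below expects an "fsl,stop-mode" property of
 * the form <&gpr reg bit>: a phandle to the syscon/GPR block, the register
 * offset within it, and the bit that gates stop mode. An illustrative,
 * board-specific example:
 *
 *	fsl,stop-mode = <&gpr 0x34 27>;
 */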
static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
				   struct device_node *np)
{
	struct device_node *gpr_np;
	u32 out_val[3];
	int ret = 0;

	gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
	if (!gpr_np)
		return 0;

	ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
					 ARRAY_SIZE(out_val));
	if (ret) {
		dev_dbg(&fep->pdev->dev, "no stop mode property\n");
		goto out;
	}

	fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
	if (IS_ERR(fep->stop_gpr.gpr)) {
		dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
		ret = PTR_ERR(fep->stop_gpr.gpr);
		fep->stop_gpr.gpr = NULL;
		goto out;
	}

	fep->stop_gpr.reg = out_val[1];
	fep->stop_gpr.bit = out_val[2];

out:
	of_node_put(gpr_np);

	return ret;
}

static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	phy_interface_t interface;
	struct net_device *ndev;
	int i, irq, ret = 0;
	static int dev_id;
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;
	char irq_name[8];
	int irq_cnt;
	const struct fec_devinfo *dev_info;

	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

	dev_info = device_get_match_data(&pdev->dev);
	if (!dev_info)
		dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data;
	if (dev_info)
		fep->quirks = dev_info->quirks;

	fep->netdev = ndev;
	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	fep->hwp = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(fep->hwp)) {
		ret = PTR_ERR(fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	platform_set_drvdata(pdev, ndev);

	if ((of_machine_is_compatible("fsl,imx6q") ||
	     of_machine_is_compatible("fsl,imx6dl")) &&
	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
		fep->quirks |= FEC_QUIRK_ERR006687;

	ret = fec_enet_ipc_handle_init(fep);
	if (ret)
		goto failed_ipc_init;

	if (of_property_read_bool(np, "fsl,magic-packet"))
		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;

	ret = fec_enet_init_stop_mode(fep, np);
	if (ret)
		goto failed_stop_mode;

	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n");
			goto failed_phy;
		}
		phy_node = of_node_get(np);
	}
	fep->phy_node = phy_node;

	ret = of_get_phy_mode(pdev->dev.of_node, &interface);
	if (ret) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = interface;
	}

	ret = fec_enet_parse_rgmii_delay(fep, np);
	if (ret)
		goto failed_rgmii_delay;

	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg)) {
		ret = PTR_ERR(fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb)) {
		ret = PTR_ERR(fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
	if (IS_ERR(fep->clk_enet_out)) {
		ret = PTR_ERR(fep->clk_enet_out);
		goto failed_clk;
	}

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
	if (IS_ERR(fep->clk_ref)) {
		ret = PTR_ERR(fep->clk_ref);
		goto failed_clk;
	}
	fep->clk_ref_rate = clk_get_rate(fep->clk_ref);

	/* clk_2x_txclk is optional, depends on board */
	if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
		fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
		if (IS_ERR(fep->clk_2x_txclk))
			fep->clk_2x_txclk = NULL;
	}

	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	if (IS_ERR(fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = false;
	}

	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;
	ret = clk_prepare_enable(fep->clk_ahb);
	if (ret)
		goto failed_clk_ahb;

	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			goto failed_regulator;
		}
	} else {
		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto failed_regulator;
		}
		fep->reg_phy = NULL;
	}

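	/* Runtime PM: mark the device active with a usage reference held so
	 * the rest of probe can touch the hardware; the reference is dropped
	 * via pm_runtime_put_autosuspend() at the end of probe.
	 */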
	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = fec_reset_phy(pdev);
	if (ret)
		goto failed_reset;

	irq_cnt = fec_enet_get_irq_cnt(pdev);
	if (fep->bufdesc_ex)
		fec_ptp_init(pdev, irq_cnt);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	for (i = 0; i < irq_cnt; i++) {
		snprintf(irq_name, sizeof(irq_name), "int%d", i);
		irq = platform_get_irq_byname_optional(pdev, irq_name);
		if (irq < 0)
			irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	/* Decide which interrupt line is wakeup capable */
	fec_enet_get_wakeup_irq(pdev);

	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
	fec_enet_deinit(ndev);
failed_init:
	fec_ptp_stop(pdev);
failed_reset:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_regulator:
	clk_disable_unprepare(fep->clk_ahb);
failed_clk_ahb:
	clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
	fec_enet_clk_enable(ndev, false);
failed_clk:
failed_rgmii_delay:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(phy_node);
failed_stop_mode:
failed_ipc_init:
failed_phy:
	dev_id--;
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

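/* Device removal undoes probe in reverse: resume the device so register
 * access is safe, stop deferred work and PTP, unregister the netdev, and
 * only then release clocks, regulators and runtime-PM state.
 */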
static void
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *np = pdev->dev.of_node;
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		dev_err(&pdev->dev,
			"Failed to resume device in remove callback (%pe)\n",
			ERR_PTR(ret));

	cancel_work_sync(&fep->tx_timeout_work);
	fec_ptp_stop(pdev);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(fep->phy_node);

	/* If pm_runtime_get_sync() failed, the clks are still off, so skip
	 * disabling them again.
	 */
	if (ret >= 0) {
		clk_disable_unprepare(fep->clk_ahb);
		clk_disable_unprepare(fep->clk_ipg);
	}
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	fec_enet_deinit(ndev);
	free_netdev(ndev);
}

static int fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(ndev->phydev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
			fec_irqs_disable(ndev);
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
		} else {
			fec_irqs_disable_except_wakeup(ndev);
			if (fep->wake_irq > 0) {
				disable_irq(fep->wake_irq);
				enable_irq_wake(fep->wake_irq);
			}
			fec_enet_stop_mode(fep, true);
		}
		/* It's safe to disable clocks since interrupts are masked */
		fec_enet_clk_enable(ndev, false);

		fep->rpm_active = !pm_runtime_status_suspended(dev);
		if (fep->rpm_active) {
			ret = pm_runtime_force_suspend(dev);
			if (ret < 0) {
				rtnl_unlock();
				return ret;
			}
		}
	}
	rtnl_unlock();

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* The SoC supplies the PHY clock and controls the PHY regulator;
	 * disabling either one takes the PHY link down.
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}

static int fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->rpm_active)
			pm_runtime_force_resume(dev);

		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			fec_enet_stop_mode(fep, false);
			if (fep->wake_irq) {
				disable_irq_wake(fep->wake_irq);
				enable_irq(fep->wake_irq);
			}

			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_init_hw(ndev->phydev);
		phy_start(ndev->phydev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}

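/* Runtime PM only gates the clocks: fec_runtime_suspend() drops the AHB
 * and ipg clocks, and fec_runtime_resume() brings them back, unwinding
 * the AHB clock if enabling the ipg clock fails.
 */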
static int fec_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	clk_disable_unprepare(fep->clk_ahb);
	clk_disable_unprepare(fep->clk_ipg);

	return 0;
}

static int fec_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(fep->clk_ahb);
	if (ret)
		return ret;
	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;

	return 0;

failed_clk_ipg:
	clk_disable_unprepare(fep->clk_ahb);
	return ret;
}

static const struct dev_pm_ops fec_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= pm_ptr(&fec_pm_ops),
		.of_match_table = fec_dt_ids,
		.suppress_bind_attrs = true,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

module_platform_driver(fec_driver);

MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
MODULE_LICENSE("GPL");