1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. 4 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) 5 * 6 * Right now, I am very wasteful with the buffers. I allocate memory 7 * pages and then divide them into 2K frame buffers. This way I know I 8 * have buffers large enough to hold one frame within one buffer descriptor. 9 * Once I get this working, I will use 64 or 128 byte CPM buffers, which 10 * will be much more memory efficient and will easily handle lots of 11 * small packets. 12 * 13 * Much better multiple PHY support by Magnus Damm. 14 * Copyright (c) 2000 Ericsson Radio Systems AB. 15 * 16 * Support for FEC controller of ColdFire processors. 17 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) 18 * 19 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) 20 * Copyright (c) 2004-2006 Macq Electronique SA. 21 * 22 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. 23 */ 24 25 #include <linux/bitops.h> 26 #include <linux/bpf.h> 27 #include <linux/bpf_trace.h> 28 #include <linux/cacheflush.h> 29 #include <linux/clk.h> 30 #include <linux/crc32.h> 31 #include <linux/delay.h> 32 #include <linux/errno.h> 33 #include <linux/etherdevice.h> 34 #include <linux/fec.h> 35 #include <linux/filter.h> 36 #include <linux/gpio/consumer.h> 37 #include <linux/icmp.h> 38 #include <linux/if_vlan.h> 39 #include <linux/in.h> 40 #include <linux/interrupt.h> 41 #include <linux/io.h> 42 #include <linux/ioport.h> 43 #include <linux/ip.h> 44 #include <linux/irq.h> 45 #include <linux/kernel.h> 46 #include <linux/mdio.h> 47 #include <linux/mfd/syscon.h> 48 #include <linux/module.h> 49 #include <linux/netdevice.h> 50 #include <linux/of.h> 51 #include <linux/of_mdio.h> 52 #include <linux/of_net.h> 53 #include <linux/phy.h> 54 #include <linux/pinctrl/consumer.h> 55 #include <linux/platform_device.h> 56 #include <linux/pm_runtime.h> 57 #include <linux/prefetch.h> 58 #include <linux/property.h> 59 #include <linux/ptrace.h> 60 #include <linux/regmap.h> 61 #include <linux/regulator/consumer.h> 62 #include <linux/skbuff.h> 63 #include <linux/slab.h> 64 #include <linux/spinlock.h> 65 #include <linux/string.h> 66 #include <linux/tcp.h> 67 #include <linux/udp.h> 68 #include <linux/workqueue.h> 69 #include <net/ip.h> 70 #include <net/page_pool/helpers.h> 71 #include <net/selftests.h> 72 #include <net/tso.h> 73 #include <soc/imx/cpuidle.h> 74 75 #include "fec.h" 76 77 static void set_multicast_list(struct net_device *ndev); 78 static void fec_enet_itr_coal_set(struct net_device *ndev); 79 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, 80 int cpu, struct xdp_buff *xdp, 81 u32 dma_sync_len); 82 83 #define DRIVER_NAME "fec" 84 85 static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2}; 86 87 #define FEC_ENET_RSEM_V 0x84 88 #define FEC_ENET_RSFL_V 16 89 #define FEC_ENET_RAEM_V 0x8 90 #define FEC_ENET_RAFL_V 0x8 91 #define FEC_ENET_OPD_V 0xFFF0 92 #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ 93 94 #define FEC_ENET_XDP_PASS 0 95 #define FEC_ENET_XDP_CONSUMED BIT(0) 96 #define FEC_ENET_XDP_TX BIT(1) 97 #define FEC_ENET_XDP_REDIR BIT(2) 98 99 struct fec_devinfo { 100 u32 quirks; 101 }; 102 103 static const struct fec_devinfo fec_imx25_info = { 104 .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR | 105 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45, 106 }; 107 108 static const struct fec_devinfo fec_imx27_info = { 109 .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG | 110 FEC_QUIRK_HAS_MDIO_C45, 111 }; 112 113 static const struct fec_devinfo fec_imx28_info = { 114 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | 115 FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC | 116 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII | 117 FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45, 118 }; 119 120 static const struct fec_devinfo fec_imx6q_info = { 121 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 122 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 123 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | 124 FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII | 125 FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45, 126 }; 127 128 static const struct fec_devinfo fec_mvf600_info = { 129 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC | 130 FEC_QUIRK_HAS_MDIO_C45, 131 }; 132 133 static const struct fec_devinfo fec_imx6sx_info = { 134 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 135 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 136 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 137 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 138 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | 139 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | 140 FEC_QUIRK_HAS_MDIO_C45, 141 }; 142 143 static const struct fec_devinfo fec_imx6ul_info = { 144 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 145 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 146 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 | 147 FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC | 148 FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII | 149 FEC_QUIRK_HAS_MDIO_C45, 150 }; 151 152 static const struct fec_devinfo fec_imx8mq_info = { 153 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 154 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 155 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 156 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 157 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | 158 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | 159 FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 | 160 FEC_QUIRK_HAS_MDIO_C45, 161 }; 162 163 static const struct fec_devinfo fec_imx8qm_info = { 164 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 165 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 166 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 167 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 168 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | 169 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | 170 FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45, 171 }; 172 173 static const struct fec_devinfo fec_s32v234_info = { 174 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 175 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 176 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 177 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 178 FEC_QUIRK_HAS_MDIO_C45, 179 }; 180 181 static struct platform_device_id fec_devtype[] = { 182 { 183 /* keep it for coldfire */ 184 .name = DRIVER_NAME, 185 .driver_data = 0, 186 }, { 187 /* sentinel */ 188 } 189 }; 190 MODULE_DEVICE_TABLE(platform, fec_devtype); 191 192 static const struct of_device_id fec_dt_ids[] = { 193 { .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, }, 194 { .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, }, 195 { .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, }, 196 { .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, }, 197 { .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, }, 198 { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, }, 199 { .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, }, 200 { .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, }, 201 { .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, }, 202 { .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, }, 203 { /* sentinel */ } 204 }; 205 MODULE_DEVICE_TABLE(of, fec_dt_ids); 206 207 static unsigned char macaddr[ETH_ALEN]; 208 module_param_array(macaddr, byte, NULL, 0); 209 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); 210 211 #if defined(CONFIG_M5272) 212 /* 213 * Some hardware gets it MAC address out of local flash memory. 214 * if this is non-zero then assume it is the address to get MAC from. 215 */ 216 #if defined(CONFIG_NETtel) 217 #define FEC_FLASHMAC 0xf0006006 218 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) 219 #define FEC_FLASHMAC 0xf0006000 220 #elif defined(CONFIG_CANCam) 221 #define FEC_FLASHMAC 0xf0020000 222 #elif defined (CONFIG_M5272C3) 223 #define FEC_FLASHMAC (0xffe04000 + 4) 224 #elif defined(CONFIG_MOD5272) 225 #define FEC_FLASHMAC 0xffc0406b 226 #else 227 #define FEC_FLASHMAC 0 228 #endif 229 #endif /* CONFIG_M5272 */ 230 231 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. 232 * 233 * 2048 byte skbufs are allocated. However, alignment requirements 234 * varies between FEC variants. Worst case is 64, so round down by 64. 235 */ 236 #define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64)) 237 #define PKT_MINBUF_SIZE 64 238 239 /* FEC receive acceleration */ 240 #define FEC_RACC_IPDIS BIT(1) 241 #define FEC_RACC_PRODIS BIT(2) 242 #define FEC_RACC_SHIFT16 BIT(7) 243 #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) 244 245 /* MIB Control Register */ 246 #define FEC_MIB_CTRLSTAT_DISABLE BIT(31) 247 248 /* 249 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame 250 * size bits. Other FEC hardware does not, so we need to take that into 251 * account when setting it. 252 */ 253 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 254 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 255 defined(CONFIG_ARM64) 256 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 257 #else 258 #define OPT_FRAME_SIZE 0 259 #endif 260 261 /* FEC MII MMFR bits definition */ 262 #define FEC_MMFR_ST (1 << 30) 263 #define FEC_MMFR_ST_C45 (0) 264 #define FEC_MMFR_OP_READ (2 << 28) 265 #define FEC_MMFR_OP_READ_C45 (3 << 28) 266 #define FEC_MMFR_OP_WRITE (1 << 28) 267 #define FEC_MMFR_OP_ADDR_WRITE (0) 268 #define FEC_MMFR_PA(v) ((v & 0x1f) << 23) 269 #define FEC_MMFR_RA(v) ((v & 0x1f) << 18) 270 #define FEC_MMFR_TA (2 << 16) 271 #define FEC_MMFR_DATA(v) (v & 0xffff) 272 /* FEC ECR bits definition */ 273 #define FEC_ECR_RESET BIT(0) 274 #define FEC_ECR_ETHEREN BIT(1) 275 #define FEC_ECR_MAGICEN BIT(2) 276 #define FEC_ECR_SLEEP BIT(3) 277 #define FEC_ECR_EN1588 BIT(4) 278 #define FEC_ECR_SPEED BIT(5) 279 #define FEC_ECR_BYTESWP BIT(8) 280 /* FEC RCR bits definition */ 281 #define FEC_RCR_LOOP BIT(0) 282 #define FEC_RCR_DRT BIT(1) 283 #define FEC_RCR_MII BIT(2) 284 #define FEC_RCR_PROMISC BIT(3) 285 #define FEC_RCR_BC_REJ BIT(4) 286 #define FEC_RCR_FLOWCTL BIT(5) 287 #define FEC_RCR_RGMII BIT(6) 288 #define FEC_RCR_RMII BIT(8) 289 #define FEC_RCR_10BASET BIT(9) 290 #define FEC_RCR_NLC BIT(30) 291 /* TX WMARK bits */ 292 #define FEC_TXWMRK_STRFWD BIT(8) 293 294 #define FEC_MII_TIMEOUT 30000 /* us */ 295 296 /* Transmitter timeout */ 297 #define TX_TIMEOUT (2 * HZ) 298 299 #define FEC_PAUSE_FLAG_AUTONEG 0x1 300 #define FEC_PAUSE_FLAG_ENABLE 0x2 301 #define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0) 302 #define FEC_WOL_FLAG_ENABLE (0x1 << 1) 303 #define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2) 304 305 /* Max number of allowed TCP segments for software TSO */ 306 #define FEC_MAX_TSO_SEGS 100 307 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) 308 309 #define IS_TSO_HEADER(txq, addr) \ 310 ((addr >= txq->tso_hdrs_dma) && \ 311 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE)) 312 313 static int mii_cnt; 314 315 static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, 316 struct bufdesc_prop *bd) 317 { 318 return (bdp >= bd->last) ? bd->base 319 : (struct bufdesc *)(((void *)bdp) + bd->dsize); 320 } 321 322 static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, 323 struct bufdesc_prop *bd) 324 { 325 return (bdp <= bd->base) ? bd->last 326 : (struct bufdesc *)(((void *)bdp) - bd->dsize); 327 } 328 329 static int fec_enet_get_bd_index(struct bufdesc *bdp, 330 struct bufdesc_prop *bd) 331 { 332 return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2; 333 } 334 335 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) 336 { 337 int entries; 338 339 entries = (((const char *)txq->dirty_tx - 340 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; 341 342 return entries >= 0 ? entries : entries + txq->bd.ring_size; 343 } 344 345 static void swap_buffer(void *bufaddr, int len) 346 { 347 int i; 348 unsigned int *buf = bufaddr; 349 350 for (i = 0; i < len; i += 4, buf++) 351 swab32s(buf); 352 } 353 354 static void fec_dump(struct net_device *ndev) 355 { 356 struct fec_enet_private *fep = netdev_priv(ndev); 357 struct bufdesc *bdp; 358 struct fec_enet_priv_tx_q *txq; 359 int index = 0; 360 361 netdev_info(ndev, "TX ring dump\n"); 362 pr_info("Nr SC addr len SKB\n"); 363 364 txq = fep->tx_queue[0]; 365 bdp = txq->bd.base; 366 367 do { 368 pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n", 369 index, 370 bdp == txq->bd.cur ? 'S' : ' ', 371 bdp == txq->dirty_tx ? 'H' : ' ', 372 fec16_to_cpu(bdp->cbd_sc), 373 fec32_to_cpu(bdp->cbd_bufaddr), 374 fec16_to_cpu(bdp->cbd_datlen), 375 txq->tx_buf[index].buf_p); 376 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 377 index++; 378 } while (bdp != txq->bd.base); 379 } 380 381 /* 382 * Coldfire does not support DMA coherent allocations, and has historically used 383 * a band-aid with a manual flush in fec_enet_rx_queue. 384 */ 385 #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) 386 static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 387 gfp_t gfp) 388 { 389 return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp); 390 } 391 392 static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr, 393 dma_addr_t handle) 394 { 395 dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL); 396 } 397 #else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ 398 static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 399 gfp_t gfp) 400 { 401 return dma_alloc_coherent(dev, size, handle, gfp); 402 } 403 404 static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr, 405 dma_addr_t handle) 406 { 407 dma_free_coherent(dev, size, cpu_addr, handle); 408 } 409 #endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ 410 411 struct fec_dma_devres { 412 size_t size; 413 void *vaddr; 414 dma_addr_t dma_handle; 415 }; 416 417 static void fec_dmam_release(struct device *dev, void *res) 418 { 419 struct fec_dma_devres *this = res; 420 421 fec_dma_free(dev, this->size, this->vaddr, this->dma_handle); 422 } 423 424 static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle, 425 gfp_t gfp) 426 { 427 struct fec_dma_devres *dr; 428 void *vaddr; 429 430 dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp); 431 if (!dr) 432 return NULL; 433 vaddr = fec_dma_alloc(dev, size, handle, gfp); 434 if (!vaddr) { 435 devres_free(dr); 436 return NULL; 437 } 438 dr->vaddr = vaddr; 439 dr->dma_handle = *handle; 440 dr->size = size; 441 devres_add(dev, dr); 442 return vaddr; 443 } 444 445 static inline bool is_ipv4_pkt(struct sk_buff *skb) 446 { 447 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; 448 } 449 450 static int 451 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) 452 { 453 /* Only run for packets requiring a checksum. */ 454 if (skb->ip_summed != CHECKSUM_PARTIAL) 455 return 0; 456 457 if (unlikely(skb_cow_head(skb, 0))) 458 return -1; 459 460 if (is_ipv4_pkt(skb)) 461 ip_hdr(skb)->check = 0; 462 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; 463 464 return 0; 465 } 466 467 static int 468 fec_enet_create_page_pool(struct fec_enet_private *fep, 469 struct fec_enet_priv_rx_q *rxq, int size) 470 { 471 struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); 472 struct page_pool_params pp_params = { 473 .order = 0, 474 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 475 .pool_size = size, 476 .nid = dev_to_node(&fep->pdev->dev), 477 .dev = &fep->pdev->dev, 478 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, 479 .offset = FEC_ENET_XDP_HEADROOM, 480 .max_len = FEC_ENET_RX_FRSIZE, 481 }; 482 int err; 483 484 rxq->page_pool = page_pool_create(&pp_params); 485 if (IS_ERR(rxq->page_pool)) { 486 err = PTR_ERR(rxq->page_pool); 487 rxq->page_pool = NULL; 488 return err; 489 } 490 491 err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0); 492 if (err < 0) 493 goto err_free_pp; 494 495 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, 496 rxq->page_pool); 497 if (err) 498 goto err_unregister_rxq; 499 500 return 0; 501 502 err_unregister_rxq: 503 xdp_rxq_info_unreg(&rxq->xdp_rxq); 504 err_free_pp: 505 page_pool_destroy(rxq->page_pool); 506 rxq->page_pool = NULL; 507 return err; 508 } 509 510 static struct bufdesc * 511 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, 512 struct sk_buff *skb, 513 struct net_device *ndev) 514 { 515 struct fec_enet_private *fep = netdev_priv(ndev); 516 struct bufdesc *bdp = txq->bd.cur; 517 struct bufdesc_ex *ebdp; 518 int nr_frags = skb_shinfo(skb)->nr_frags; 519 int frag, frag_len; 520 unsigned short status; 521 unsigned int estatus = 0; 522 skb_frag_t *this_frag; 523 unsigned int index; 524 void *bufaddr; 525 dma_addr_t addr; 526 int i; 527 528 for (frag = 0; frag < nr_frags; frag++) { 529 this_frag = &skb_shinfo(skb)->frags[frag]; 530 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 531 ebdp = (struct bufdesc_ex *)bdp; 532 533 status = fec16_to_cpu(bdp->cbd_sc); 534 status &= ~BD_ENET_TX_STATS; 535 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 536 frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]); 537 538 /* Handle the last BD specially */ 539 if (frag == nr_frags - 1) { 540 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 541 if (fep->bufdesc_ex) { 542 estatus |= BD_ENET_TX_INT; 543 if (unlikely(skb_shinfo(skb)->tx_flags & 544 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) 545 estatus |= BD_ENET_TX_TS; 546 } 547 } 548 549 if (fep->bufdesc_ex) { 550 if (fep->quirks & FEC_QUIRK_HAS_AVB) 551 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 552 if (skb->ip_summed == CHECKSUM_PARTIAL) 553 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 554 555 ebdp->cbd_bdu = 0; 556 ebdp->cbd_esc = cpu_to_fec32(estatus); 557 } 558 559 bufaddr = skb_frag_address(this_frag); 560 561 index = fec_enet_get_bd_index(bdp, &txq->bd); 562 if (((unsigned long) bufaddr) & fep->tx_align || 563 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 564 memcpy(txq->tx_bounce[index], bufaddr, frag_len); 565 bufaddr = txq->tx_bounce[index]; 566 567 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 568 swap_buffer(bufaddr, frag_len); 569 } 570 571 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, 572 DMA_TO_DEVICE); 573 if (dma_mapping_error(&fep->pdev->dev, addr)) { 574 if (net_ratelimit()) 575 netdev_err(ndev, "Tx DMA memory map failed\n"); 576 goto dma_mapping_error; 577 } 578 579 bdp->cbd_bufaddr = cpu_to_fec32(addr); 580 bdp->cbd_datlen = cpu_to_fec16(frag_len); 581 /* Make sure the updates to rest of the descriptor are 582 * performed before transferring ownership. 583 */ 584 wmb(); 585 bdp->cbd_sc = cpu_to_fec16(status); 586 } 587 588 return bdp; 589 dma_mapping_error: 590 bdp = txq->bd.cur; 591 for (i = 0; i < frag; i++) { 592 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 593 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), 594 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); 595 } 596 return ERR_PTR(-ENOMEM); 597 } 598 599 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, 600 struct sk_buff *skb, struct net_device *ndev) 601 { 602 struct fec_enet_private *fep = netdev_priv(ndev); 603 int nr_frags = skb_shinfo(skb)->nr_frags; 604 struct bufdesc *bdp, *last_bdp; 605 void *bufaddr; 606 dma_addr_t addr; 607 unsigned short status; 608 unsigned short buflen; 609 unsigned int estatus = 0; 610 unsigned int index; 611 int entries_free; 612 613 entries_free = fec_enet_get_free_txdesc_num(txq); 614 if (entries_free < MAX_SKB_FRAGS + 1) { 615 dev_kfree_skb_any(skb); 616 if (net_ratelimit()) 617 netdev_err(ndev, "NOT enough BD for SG!\n"); 618 return NETDEV_TX_OK; 619 } 620 621 /* Protocol checksum off-load for TCP and UDP. */ 622 if (fec_enet_clear_csum(skb, ndev)) { 623 dev_kfree_skb_any(skb); 624 return NETDEV_TX_OK; 625 } 626 627 /* Fill in a Tx ring entry */ 628 bdp = txq->bd.cur; 629 last_bdp = bdp; 630 status = fec16_to_cpu(bdp->cbd_sc); 631 status &= ~BD_ENET_TX_STATS; 632 633 /* Set buffer length and buffer pointer */ 634 bufaddr = skb->data; 635 buflen = skb_headlen(skb); 636 637 index = fec_enet_get_bd_index(bdp, &txq->bd); 638 if (((unsigned long) bufaddr) & fep->tx_align || 639 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 640 memcpy(txq->tx_bounce[index], skb->data, buflen); 641 bufaddr = txq->tx_bounce[index]; 642 643 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 644 swap_buffer(bufaddr, buflen); 645 } 646 647 /* Push the data cache so the CPM does not get stale memory data. */ 648 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); 649 if (dma_mapping_error(&fep->pdev->dev, addr)) { 650 dev_kfree_skb_any(skb); 651 if (net_ratelimit()) 652 netdev_err(ndev, "Tx DMA memory map failed\n"); 653 return NETDEV_TX_OK; 654 } 655 656 if (nr_frags) { 657 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); 658 if (IS_ERR(last_bdp)) { 659 dma_unmap_single(&fep->pdev->dev, addr, 660 buflen, DMA_TO_DEVICE); 661 dev_kfree_skb_any(skb); 662 return NETDEV_TX_OK; 663 } 664 } else { 665 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 666 if (fep->bufdesc_ex) { 667 estatus = BD_ENET_TX_INT; 668 if (unlikely(skb_shinfo(skb)->tx_flags & 669 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) 670 estatus |= BD_ENET_TX_TS; 671 } 672 } 673 bdp->cbd_bufaddr = cpu_to_fec32(addr); 674 bdp->cbd_datlen = cpu_to_fec16(buflen); 675 676 if (fep->bufdesc_ex) { 677 678 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 679 680 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 681 fep->hwts_tx_en)) 682 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 683 684 if (fep->quirks & FEC_QUIRK_HAS_AVB) 685 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 686 687 if (skb->ip_summed == CHECKSUM_PARTIAL) 688 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 689 690 ebdp->cbd_bdu = 0; 691 ebdp->cbd_esc = cpu_to_fec32(estatus); 692 } 693 694 index = fec_enet_get_bd_index(last_bdp, &txq->bd); 695 /* Save skb pointer */ 696 txq->tx_buf[index].buf_p = skb; 697 698 /* Make sure the updates to rest of the descriptor are performed before 699 * transferring ownership. 700 */ 701 wmb(); 702 703 /* Send it on its way. Tell FEC it's ready, interrupt when done, 704 * it's the last BD of the frame, and to put the CRC on the end. 705 */ 706 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); 707 bdp->cbd_sc = cpu_to_fec16(status); 708 709 /* If this was the last BD in the ring, start at the beginning again. */ 710 bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); 711 712 skb_tx_timestamp(skb); 713 714 /* Make sure the update to bdp is performed before txq->bd.cur. */ 715 wmb(); 716 txq->bd.cur = bdp; 717 718 /* Trigger transmission start */ 719 if (!(fep->quirks & FEC_QUIRK_ERR007885) || 720 !readl(txq->bd.reg_desc_active) || 721 !readl(txq->bd.reg_desc_active) || 722 !readl(txq->bd.reg_desc_active) || 723 !readl(txq->bd.reg_desc_active)) 724 writel(0, txq->bd.reg_desc_active); 725 726 return 0; 727 } 728 729 static int 730 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, 731 struct net_device *ndev, 732 struct bufdesc *bdp, int index, char *data, 733 int size, bool last_tcp, bool is_last) 734 { 735 struct fec_enet_private *fep = netdev_priv(ndev); 736 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); 737 unsigned short status; 738 unsigned int estatus = 0; 739 dma_addr_t addr; 740 741 status = fec16_to_cpu(bdp->cbd_sc); 742 status &= ~BD_ENET_TX_STATS; 743 744 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 745 746 if (((unsigned long) data) & fep->tx_align || 747 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 748 memcpy(txq->tx_bounce[index], data, size); 749 data = txq->tx_bounce[index]; 750 751 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 752 swap_buffer(data, size); 753 } 754 755 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); 756 if (dma_mapping_error(&fep->pdev->dev, addr)) { 757 dev_kfree_skb_any(skb); 758 if (net_ratelimit()) 759 netdev_err(ndev, "Tx DMA memory map failed\n"); 760 return NETDEV_TX_OK; 761 } 762 763 bdp->cbd_datlen = cpu_to_fec16(size); 764 bdp->cbd_bufaddr = cpu_to_fec32(addr); 765 766 if (fep->bufdesc_ex) { 767 if (fep->quirks & FEC_QUIRK_HAS_AVB) 768 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 769 if (skb->ip_summed == CHECKSUM_PARTIAL) 770 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 771 ebdp->cbd_bdu = 0; 772 ebdp->cbd_esc = cpu_to_fec32(estatus); 773 } 774 775 /* Handle the last BD specially */ 776 if (last_tcp) 777 status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC); 778 if (is_last) { 779 status |= BD_ENET_TX_INTR; 780 if (fep->bufdesc_ex) 781 ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT); 782 } 783 784 bdp->cbd_sc = cpu_to_fec16(status); 785 786 return 0; 787 } 788 789 static int 790 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, 791 struct sk_buff *skb, struct net_device *ndev, 792 struct bufdesc *bdp, int index) 793 { 794 struct fec_enet_private *fep = netdev_priv(ndev); 795 int hdr_len = skb_tcp_all_headers(skb); 796 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); 797 void *bufaddr; 798 unsigned long dmabuf; 799 unsigned short status; 800 unsigned int estatus = 0; 801 802 status = fec16_to_cpu(bdp->cbd_sc); 803 status &= ~BD_ENET_TX_STATS; 804 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 805 806 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; 807 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; 808 if (((unsigned long)bufaddr) & fep->tx_align || 809 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 810 memcpy(txq->tx_bounce[index], skb->data, hdr_len); 811 bufaddr = txq->tx_bounce[index]; 812 813 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 814 swap_buffer(bufaddr, hdr_len); 815 816 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, 817 hdr_len, DMA_TO_DEVICE); 818 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { 819 dev_kfree_skb_any(skb); 820 if (net_ratelimit()) 821 netdev_err(ndev, "Tx DMA memory map failed\n"); 822 return NETDEV_TX_OK; 823 } 824 } 825 826 bdp->cbd_bufaddr = cpu_to_fec32(dmabuf); 827 bdp->cbd_datlen = cpu_to_fec16(hdr_len); 828 829 if (fep->bufdesc_ex) { 830 if (fep->quirks & FEC_QUIRK_HAS_AVB) 831 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 832 if (skb->ip_summed == CHECKSUM_PARTIAL) 833 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 834 ebdp->cbd_bdu = 0; 835 ebdp->cbd_esc = cpu_to_fec32(estatus); 836 } 837 838 bdp->cbd_sc = cpu_to_fec16(status); 839 840 return 0; 841 } 842 843 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, 844 struct sk_buff *skb, 845 struct net_device *ndev) 846 { 847 struct fec_enet_private *fep = netdev_priv(ndev); 848 int hdr_len, total_len, data_left; 849 struct bufdesc *bdp = txq->bd.cur; 850 struct bufdesc *tmp_bdp; 851 struct bufdesc_ex *ebdp; 852 struct tso_t tso; 853 unsigned int index = 0; 854 int ret; 855 856 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) { 857 dev_kfree_skb_any(skb); 858 if (net_ratelimit()) 859 netdev_err(ndev, "NOT enough BD for TSO!\n"); 860 return NETDEV_TX_OK; 861 } 862 863 /* Protocol checksum off-load for TCP and UDP. */ 864 if (fec_enet_clear_csum(skb, ndev)) { 865 dev_kfree_skb_any(skb); 866 return NETDEV_TX_OK; 867 } 868 869 /* Initialize the TSO handler, and prepare the first payload */ 870 hdr_len = tso_start(skb, &tso); 871 872 total_len = skb->len - hdr_len; 873 while (total_len > 0) { 874 char *hdr; 875 876 index = fec_enet_get_bd_index(bdp, &txq->bd); 877 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 878 total_len -= data_left; 879 880 /* prepare packet headers: MAC + IP + TCP */ 881 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; 882 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 883 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); 884 if (ret) 885 goto err_release; 886 887 while (data_left > 0) { 888 int size; 889 890 size = min_t(int, tso.size, data_left); 891 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 892 index = fec_enet_get_bd_index(bdp, &txq->bd); 893 ret = fec_enet_txq_put_data_tso(txq, skb, ndev, 894 bdp, index, 895 tso.data, size, 896 size == data_left, 897 total_len == 0); 898 if (ret) 899 goto err_release; 900 901 data_left -= size; 902 tso_build_data(skb, &tso, size); 903 } 904 905 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 906 } 907 908 /* Save skb pointer */ 909 txq->tx_buf[index].buf_p = skb; 910 911 skb_tx_timestamp(skb); 912 txq->bd.cur = bdp; 913 914 /* Trigger transmission start */ 915 if (!(fep->quirks & FEC_QUIRK_ERR007885) || 916 !readl(txq->bd.reg_desc_active) || 917 !readl(txq->bd.reg_desc_active) || 918 !readl(txq->bd.reg_desc_active) || 919 !readl(txq->bd.reg_desc_active)) 920 writel(0, txq->bd.reg_desc_active); 921 922 return 0; 923 924 err_release: 925 /* Release all used data descriptors for TSO */ 926 tmp_bdp = txq->bd.cur; 927 928 while (tmp_bdp != bdp) { 929 /* Unmap data buffers */ 930 if (tmp_bdp->cbd_bufaddr && 931 !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr))) 932 dma_unmap_single(&fep->pdev->dev, 933 fec32_to_cpu(tmp_bdp->cbd_bufaddr), 934 fec16_to_cpu(tmp_bdp->cbd_datlen), 935 DMA_TO_DEVICE); 936 937 /* Clear standard buffer descriptor fields */ 938 tmp_bdp->cbd_sc = 0; 939 tmp_bdp->cbd_datlen = 0; 940 tmp_bdp->cbd_bufaddr = 0; 941 942 /* Handle extended descriptor if enabled */ 943 if (fep->bufdesc_ex) { 944 ebdp = (struct bufdesc_ex *)tmp_bdp; 945 ebdp->cbd_esc = 0; 946 } 947 948 tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd); 949 } 950 951 dev_kfree_skb_any(skb); 952 953 return ret; 954 } 955 956 static netdev_tx_t 957 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 958 { 959 struct fec_enet_private *fep = netdev_priv(ndev); 960 int entries_free; 961 unsigned short queue; 962 struct fec_enet_priv_tx_q *txq; 963 struct netdev_queue *nq; 964 int ret; 965 966 queue = skb_get_queue_mapping(skb); 967 txq = fep->tx_queue[queue]; 968 nq = netdev_get_tx_queue(ndev, queue); 969 970 if (skb_is_gso(skb)) 971 ret = fec_enet_txq_submit_tso(txq, skb, ndev); 972 else 973 ret = fec_enet_txq_submit_skb(txq, skb, ndev); 974 if (ret) 975 return ret; 976 977 entries_free = fec_enet_get_free_txdesc_num(txq); 978 if (entries_free <= txq->tx_stop_threshold) 979 netif_tx_stop_queue(nq); 980 981 return NETDEV_TX_OK; 982 } 983 984 /* Init RX & TX buffer descriptors 985 */ 986 static void fec_enet_bd_init(struct net_device *dev) 987 { 988 struct fec_enet_private *fep = netdev_priv(dev); 989 struct fec_enet_priv_tx_q *txq; 990 struct fec_enet_priv_rx_q *rxq; 991 struct bufdesc *bdp; 992 unsigned int i; 993 unsigned int q; 994 995 for (q = 0; q < fep->num_rx_queues; q++) { 996 /* Initialize the receive buffer descriptors. */ 997 rxq = fep->rx_queue[q]; 998 bdp = rxq->bd.base; 999 1000 for (i = 0; i < rxq->bd.ring_size; i++) { 1001 1002 /* Initialize the BD for every fragment in the page. */ 1003 if (bdp->cbd_bufaddr) 1004 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); 1005 else 1006 bdp->cbd_sc = cpu_to_fec16(0); 1007 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 1008 } 1009 1010 /* Set the last buffer to wrap */ 1011 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); 1012 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 1013 1014 rxq->bd.cur = rxq->bd.base; 1015 } 1016 1017 for (q = 0; q < fep->num_tx_queues; q++) { 1018 /* ...and the same for transmit */ 1019 txq = fep->tx_queue[q]; 1020 bdp = txq->bd.base; 1021 txq->bd.cur = bdp; 1022 1023 for (i = 0; i < txq->bd.ring_size; i++) { 1024 /* Initialize the BD for every fragment in the page. */ 1025 bdp->cbd_sc = cpu_to_fec16(0); 1026 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { 1027 if (bdp->cbd_bufaddr && 1028 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) 1029 dma_unmap_single(&fep->pdev->dev, 1030 fec32_to_cpu(bdp->cbd_bufaddr), 1031 fec16_to_cpu(bdp->cbd_datlen), 1032 DMA_TO_DEVICE); 1033 if (txq->tx_buf[i].buf_p) 1034 dev_kfree_skb_any(txq->tx_buf[i].buf_p); 1035 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) { 1036 if (bdp->cbd_bufaddr) 1037 dma_unmap_single(&fep->pdev->dev, 1038 fec32_to_cpu(bdp->cbd_bufaddr), 1039 fec16_to_cpu(bdp->cbd_datlen), 1040 DMA_TO_DEVICE); 1041 1042 if (txq->tx_buf[i].buf_p) 1043 xdp_return_frame(txq->tx_buf[i].buf_p); 1044 } else { 1045 struct page *page = txq->tx_buf[i].buf_p; 1046 1047 if (page) 1048 page_pool_put_page(pp_page_to_nmdesc(page)->pp, 1049 page, 0, 1050 false); 1051 } 1052 1053 txq->tx_buf[i].buf_p = NULL; 1054 /* restore default tx buffer type: FEC_TXBUF_T_SKB */ 1055 txq->tx_buf[i].type = FEC_TXBUF_T_SKB; 1056 bdp->cbd_bufaddr = cpu_to_fec32(0); 1057 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1058 } 1059 1060 /* Set the last buffer to wrap */ 1061 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); 1062 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 1063 txq->dirty_tx = bdp; 1064 } 1065 } 1066 1067 static void fec_enet_active_rxring(struct net_device *ndev) 1068 { 1069 struct fec_enet_private *fep = netdev_priv(ndev); 1070 int i; 1071 1072 for (i = 0; i < fep->num_rx_queues; i++) 1073 writel(0, fep->rx_queue[i]->bd.reg_desc_active); 1074 } 1075 1076 static void fec_enet_enable_ring(struct net_device *ndev) 1077 { 1078 struct fec_enet_private *fep = netdev_priv(ndev); 1079 struct fec_enet_priv_tx_q *txq; 1080 struct fec_enet_priv_rx_q *rxq; 1081 int i; 1082 1083 for (i = 0; i < fep->num_rx_queues; i++) { 1084 rxq = fep->rx_queue[i]; 1085 writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); 1086 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); 1087 1088 /* enable DMA1/2 */ 1089 if (i) 1090 writel(RCMR_MATCHEN | RCMR_CMP(i), 1091 fep->hwp + FEC_RCMR(i)); 1092 } 1093 1094 for (i = 0; i < fep->num_tx_queues; i++) { 1095 txq = fep->tx_queue[i]; 1096 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); 1097 1098 /* enable DMA1/2 */ 1099 if (i) 1100 writel(DMA_CLASS_EN | IDLE_SLOPE(i), 1101 fep->hwp + FEC_DMA_CFG(i)); 1102 } 1103 } 1104 1105 /* Whack a reset. We should wait for this. 1106 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC 1107 * instead of reset MAC itself. 1108 */ 1109 static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol) 1110 { 1111 u32 val; 1112 1113 if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1114 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || 1115 ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { 1116 writel(0, fep->hwp + FEC_ECNTRL); 1117 } else { 1118 writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL); 1119 udelay(10); 1120 } 1121 } else { 1122 val = readl(fep->hwp + FEC_ECNTRL); 1123 val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 1124 writel(val, fep->hwp + FEC_ECNTRL); 1125 } 1126 } 1127 1128 static void fec_set_hw_mac_addr(struct net_device *ndev) 1129 { 1130 struct fec_enet_private *fep = netdev_priv(ndev); 1131 1132 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | 1133 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), 1134 fep->hwp + FEC_ADDR_LOW); 1135 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), 1136 fep->hwp + FEC_ADDR_HIGH); 1137 } 1138 1139 /* 1140 * This function is called to start or restart the FEC during a link 1141 * change, transmit timeout, or to reconfigure the FEC. The network 1142 * packet processing for this device must be stopped before this call. 1143 */ 1144 static void 1145 fec_restart(struct net_device *ndev) 1146 { 1147 struct fec_enet_private *fep = netdev_priv(ndev); 1148 u32 rcntl = OPT_FRAME_SIZE | FEC_RCR_MII; 1149 u32 ecntl = FEC_ECR_ETHEREN; 1150 1151 if (fep->bufdesc_ex) 1152 fec_ptp_save_state(fep); 1153 1154 fec_ctrl_reset(fep, false); 1155 1156 /* 1157 * enet-mac reset will reset mac address registers too, 1158 * so need to reconfigure it. 1159 */ 1160 fec_set_hw_mac_addr(ndev); 1161 1162 /* Clear any outstanding interrupt, except MDIO. */ 1163 writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT); 1164 1165 fec_enet_bd_init(ndev); 1166 1167 fec_enet_enable_ring(ndev); 1168 1169 /* Enable MII mode */ 1170 if (fep->full_duplex == DUPLEX_FULL) { 1171 /* FD enable */ 1172 writel(0x04, fep->hwp + FEC_X_CNTRL); 1173 } else { 1174 /* No Rcv on Xmit */ 1175 rcntl |= FEC_RCR_DRT; 1176 writel(0x0, fep->hwp + FEC_X_CNTRL); 1177 } 1178 1179 /* Set MII speed */ 1180 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1181 1182 #if !defined(CONFIG_M5272) 1183 if (fep->quirks & FEC_QUIRK_HAS_RACC) { 1184 u32 val = readl(fep->hwp + FEC_RACC); 1185 1186 /* align IP header */ 1187 val |= FEC_RACC_SHIFT16; 1188 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) 1189 /* set RX checksum */ 1190 val |= FEC_RACC_OPTIONS; 1191 else 1192 val &= ~FEC_RACC_OPTIONS; 1193 writel(val, fep->hwp + FEC_RACC); 1194 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); 1195 } 1196 #endif 1197 1198 /* 1199 * The phy interface and speed need to get configured 1200 * differently on enet-mac. 1201 */ 1202 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 1203 /* Enable flow control and length check */ 1204 rcntl |= FEC_RCR_NLC | FEC_RCR_FLOWCTL; 1205 1206 /* RGMII, RMII or MII */ 1207 if (phy_interface_mode_is_rgmii(fep->phy_interface)) 1208 rcntl |= FEC_RCR_RGMII; 1209 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 1210 rcntl |= FEC_RCR_RMII; 1211 else 1212 rcntl &= ~FEC_RCR_RMII; 1213 1214 /* 1G, 100M or 10M */ 1215 if (ndev->phydev) { 1216 if (ndev->phydev->speed == SPEED_1000) 1217 ecntl |= FEC_ECR_SPEED; 1218 else if (ndev->phydev->speed == SPEED_100) 1219 rcntl &= ~FEC_RCR_10BASET; 1220 else 1221 rcntl |= FEC_RCR_10BASET; 1222 } 1223 } else { 1224 #ifdef FEC_MIIGSK_ENR 1225 if (fep->quirks & FEC_QUIRK_USE_GASKET) { 1226 u32 cfgr; 1227 /* disable the gasket and wait */ 1228 writel(0, fep->hwp + FEC_MIIGSK_ENR); 1229 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 1230 udelay(1); 1231 1232 /* 1233 * configure the gasket: 1234 * RMII, 50 MHz, no loopback, no echo 1235 * MII, 25 MHz, no loopback, no echo 1236 */ 1237 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 1238 ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; 1239 if (ndev->phydev && ndev->phydev->speed == SPEED_10) 1240 cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; 1241 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); 1242 1243 /* re-enable the gasket */ 1244 writel(2, fep->hwp + FEC_MIIGSK_ENR); 1245 } 1246 #endif 1247 } 1248 1249 #if !defined(CONFIG_M5272) 1250 /* enable pause frame*/ 1251 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || 1252 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && 1253 ndev->phydev && ndev->phydev->pause)) { 1254 rcntl |= FEC_RCR_FLOWCTL; 1255 1256 /* set FIFO threshold parameter to reduce overrun */ 1257 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); 1258 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); 1259 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); 1260 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); 1261 1262 /* OPD */ 1263 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); 1264 } else { 1265 rcntl &= ~FEC_RCR_FLOWCTL; 1266 } 1267 #endif /* !defined(CONFIG_M5272) */ 1268 1269 writel(rcntl, fep->hwp + FEC_R_CNTRL); 1270 1271 /* Setup multicast filter. */ 1272 set_multicast_list(ndev); 1273 #ifndef CONFIG_M5272 1274 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); 1275 writel(0, fep->hwp + FEC_HASH_TABLE_LOW); 1276 #endif 1277 1278 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 1279 /* enable ENET endian swap */ 1280 ecntl |= FEC_ECR_BYTESWP; 1281 /* enable ENET store and forward mode */ 1282 writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK); 1283 } 1284 1285 if (fep->bufdesc_ex) 1286 ecntl |= FEC_ECR_EN1588; 1287 1288 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && 1289 fep->rgmii_txc_dly) 1290 ecntl |= FEC_ENET_TXC_DLY; 1291 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && 1292 fep->rgmii_rxc_dly) 1293 ecntl |= FEC_ENET_RXC_DLY; 1294 1295 #ifndef CONFIG_M5272 1296 /* Enable the MIB statistic event counters */ 1297 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); 1298 #endif 1299 1300 /* And last, enable the transmit and receive processing */ 1301 writel(ecntl, fep->hwp + FEC_ECNTRL); 1302 fec_enet_active_rxring(ndev); 1303 1304 if (fep->bufdesc_ex) { 1305 fec_ptp_start_cyclecounter(ndev); 1306 fec_ptp_restore_state(fep); 1307 } 1308 1309 /* Enable interrupts we wish to service */ 1310 if (fep->link) 1311 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1312 else 1313 writel(0, fep->hwp + FEC_IMASK); 1314 1315 /* Init the interrupt coalescing */ 1316 if (fep->quirks & FEC_QUIRK_HAS_COALESCE) 1317 fec_enet_itr_coal_set(ndev); 1318 } 1319 1320 static int fec_enet_ipc_handle_init(struct fec_enet_private *fep) 1321 { 1322 if (!(of_machine_is_compatible("fsl,imx8qm") || 1323 of_machine_is_compatible("fsl,imx8qxp") || 1324 of_machine_is_compatible("fsl,imx8dxl"))) 1325 return 0; 1326 1327 return imx_scu_get_handle(&fep->ipc_handle); 1328 } 1329 1330 static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled) 1331 { 1332 struct device_node *np = fep->pdev->dev.of_node; 1333 u32 rsrc_id, val; 1334 int idx; 1335 1336 if (!np || !fep->ipc_handle) 1337 return; 1338 1339 idx = of_alias_get_id(np, "ethernet"); 1340 if (idx < 0) 1341 idx = 0; 1342 rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0; 1343 1344 val = enabled ? 1 : 0; 1345 imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val); 1346 } 1347 1348 static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) 1349 { 1350 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 1351 struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr; 1352 1353 if (stop_gpr->gpr) { 1354 if (enabled) 1355 regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, 1356 BIT(stop_gpr->bit), 1357 BIT(stop_gpr->bit)); 1358 else 1359 regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, 1360 BIT(stop_gpr->bit), 0); 1361 } else if (pdata && pdata->sleep_mode_enable) { 1362 pdata->sleep_mode_enable(enabled); 1363 } else { 1364 fec_enet_ipg_stop_set(fep, enabled); 1365 } 1366 } 1367 1368 static void fec_irqs_disable(struct net_device *ndev) 1369 { 1370 struct fec_enet_private *fep = netdev_priv(ndev); 1371 1372 writel(0, fep->hwp + FEC_IMASK); 1373 } 1374 1375 static void fec_irqs_disable_except_wakeup(struct net_device *ndev) 1376 { 1377 struct fec_enet_private *fep = netdev_priv(ndev); 1378 1379 writel(0, fep->hwp + FEC_IMASK); 1380 writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); 1381 } 1382 1383 static void 1384 fec_stop(struct net_device *ndev) 1385 { 1386 struct fec_enet_private *fep = netdev_priv(ndev); 1387 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII; 1388 u32 val; 1389 1390 /* We cannot expect a graceful transmit stop without link !!! */ 1391 if (fep->link) { 1392 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ 1393 udelay(10); 1394 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) 1395 netdev_err(ndev, "Graceful transmit stop did not complete!\n"); 1396 } 1397 1398 if (fep->bufdesc_ex) 1399 fec_ptp_save_state(fep); 1400 1401 fec_ctrl_reset(fep, true); 1402 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1403 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1404 1405 /* We have to keep ENET enabled to have MII interrupt stay working */ 1406 if (fep->quirks & FEC_QUIRK_ENET_MAC && 1407 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1408 writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL); 1409 writel(rmii_mode, fep->hwp + FEC_R_CNTRL); 1410 } 1411 1412 if (fep->bufdesc_ex) { 1413 val = readl(fep->hwp + FEC_ECNTRL); 1414 val |= FEC_ECR_EN1588; 1415 writel(val, fep->hwp + FEC_ECNTRL); 1416 1417 fec_ptp_start_cyclecounter(ndev); 1418 fec_ptp_restore_state(fep); 1419 } 1420 } 1421 1422 static void 1423 fec_timeout(struct net_device *ndev, unsigned int txqueue) 1424 { 1425 struct fec_enet_private *fep = netdev_priv(ndev); 1426 1427 fec_dump(ndev); 1428 1429 ndev->stats.tx_errors++; 1430 1431 schedule_work(&fep->tx_timeout_work); 1432 } 1433 1434 static void fec_enet_timeout_work(struct work_struct *work) 1435 { 1436 struct fec_enet_private *fep = 1437 container_of(work, struct fec_enet_private, tx_timeout_work); 1438 struct net_device *ndev = fep->netdev; 1439 1440 rtnl_lock(); 1441 if (netif_device_present(ndev) || netif_running(ndev)) { 1442 napi_disable(&fep->napi); 1443 netif_tx_lock_bh(ndev); 1444 fec_restart(ndev); 1445 netif_tx_wake_all_queues(ndev); 1446 netif_tx_unlock_bh(ndev); 1447 napi_enable(&fep->napi); 1448 } 1449 rtnl_unlock(); 1450 } 1451 1452 static void 1453 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, 1454 struct skb_shared_hwtstamps *hwtstamps) 1455 { 1456 unsigned long flags; 1457 u64 ns; 1458 1459 spin_lock_irqsave(&fep->tmreg_lock, flags); 1460 ns = timecounter_cyc2time(&fep->tc, ts); 1461 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 1462 1463 memset(hwtstamps, 0, sizeof(*hwtstamps)); 1464 hwtstamps->hwtstamp = ns_to_ktime(ns); 1465 } 1466 1467 static void 1468 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget) 1469 { 1470 struct fec_enet_private *fep; 1471 struct xdp_frame *xdpf; 1472 struct bufdesc *bdp; 1473 unsigned short status; 1474 struct sk_buff *skb; 1475 struct fec_enet_priv_tx_q *txq; 1476 struct netdev_queue *nq; 1477 int index = 0; 1478 int entries_free; 1479 struct page *page; 1480 int frame_len; 1481 1482 fep = netdev_priv(ndev); 1483 1484 txq = fep->tx_queue[queue_id]; 1485 /* get next bdp of dirty_tx */ 1486 nq = netdev_get_tx_queue(ndev, queue_id); 1487 bdp = txq->dirty_tx; 1488 1489 /* get next bdp of dirty_tx */ 1490 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1491 1492 while (bdp != READ_ONCE(txq->bd.cur)) { 1493 /* Order the load of bd.cur and cbd_sc */ 1494 rmb(); 1495 status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); 1496 if (status & BD_ENET_TX_READY) 1497 break; 1498 1499 index = fec_enet_get_bd_index(bdp, &txq->bd); 1500 1501 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { 1502 skb = txq->tx_buf[index].buf_p; 1503 if (bdp->cbd_bufaddr && 1504 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) 1505 dma_unmap_single(&fep->pdev->dev, 1506 fec32_to_cpu(bdp->cbd_bufaddr), 1507 fec16_to_cpu(bdp->cbd_datlen), 1508 DMA_TO_DEVICE); 1509 bdp->cbd_bufaddr = cpu_to_fec32(0); 1510 if (!skb) 1511 goto tx_buf_done; 1512 } else { 1513 /* Tx processing cannot call any XDP (or page pool) APIs if 1514 * the "budget" is 0. Because NAPI is called with budget of 1515 * 0 (such as netpoll) indicates we may be in an IRQ context, 1516 * however, we can't use the page pool from IRQ context. 1517 */ 1518 if (unlikely(!budget)) 1519 break; 1520 1521 if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) { 1522 xdpf = txq->tx_buf[index].buf_p; 1523 if (bdp->cbd_bufaddr) 1524 dma_unmap_single(&fep->pdev->dev, 1525 fec32_to_cpu(bdp->cbd_bufaddr), 1526 fec16_to_cpu(bdp->cbd_datlen), 1527 DMA_TO_DEVICE); 1528 } else { 1529 page = txq->tx_buf[index].buf_p; 1530 } 1531 1532 bdp->cbd_bufaddr = cpu_to_fec32(0); 1533 if (unlikely(!txq->tx_buf[index].buf_p)) { 1534 txq->tx_buf[index].type = FEC_TXBUF_T_SKB; 1535 goto tx_buf_done; 1536 } 1537 1538 frame_len = fec16_to_cpu(bdp->cbd_datlen); 1539 } 1540 1541 /* Check for errors. */ 1542 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1543 BD_ENET_TX_RL | BD_ENET_TX_UN | 1544 BD_ENET_TX_CSL)) { 1545 ndev->stats.tx_errors++; 1546 if (status & BD_ENET_TX_HB) /* No heartbeat */ 1547 ndev->stats.tx_heartbeat_errors++; 1548 if (status & BD_ENET_TX_LC) /* Late collision */ 1549 ndev->stats.tx_window_errors++; 1550 if (status & BD_ENET_TX_RL) /* Retrans limit */ 1551 ndev->stats.tx_aborted_errors++; 1552 if (status & BD_ENET_TX_UN) /* Underrun */ 1553 ndev->stats.tx_fifo_errors++; 1554 if (status & BD_ENET_TX_CSL) /* Carrier lost */ 1555 ndev->stats.tx_carrier_errors++; 1556 } else { 1557 ndev->stats.tx_packets++; 1558 1559 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) 1560 ndev->stats.tx_bytes += skb->len; 1561 else 1562 ndev->stats.tx_bytes += frame_len; 1563 } 1564 1565 /* Deferred means some collisions occurred during transmit, 1566 * but we eventually sent the packet OK. 1567 */ 1568 if (status & BD_ENET_TX_DEF) 1569 ndev->stats.collisions++; 1570 1571 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { 1572 /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who 1573 * are to time stamp the packet, so we still need to check time 1574 * stamping enabled flag. 1575 */ 1576 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS && 1577 fep->hwts_tx_en) && fep->bufdesc_ex) { 1578 struct skb_shared_hwtstamps shhwtstamps; 1579 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1580 1581 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); 1582 skb_tstamp_tx(skb, &shhwtstamps); 1583 } 1584 1585 /* Free the sk buffer associated with this last transmit */ 1586 napi_consume_skb(skb, budget); 1587 } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) { 1588 xdp_return_frame_rx_napi(xdpf); 1589 } else { /* recycle pages of XDP_TX frames */ 1590 /* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */ 1591 page_pool_put_page(pp_page_to_nmdesc(page)->pp, page, 1592 0, true); 1593 } 1594 1595 txq->tx_buf[index].buf_p = NULL; 1596 /* restore default tx buffer type: FEC_TXBUF_T_SKB */ 1597 txq->tx_buf[index].type = FEC_TXBUF_T_SKB; 1598 1599 tx_buf_done: 1600 /* Make sure the update to bdp and tx_buf are performed 1601 * before dirty_tx 1602 */ 1603 wmb(); 1604 txq->dirty_tx = bdp; 1605 1606 /* Update pointer to next buffer descriptor to be transmitted */ 1607 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1608 1609 /* Since we have freed up a buffer, the ring is no longer full 1610 */ 1611 if (netif_tx_queue_stopped(nq)) { 1612 entries_free = fec_enet_get_free_txdesc_num(txq); 1613 if (entries_free >= txq->tx_wake_threshold) 1614 netif_tx_wake_queue(nq); 1615 } 1616 } 1617 1618 /* ERR006358: Keep the transmitter going */ 1619 if (bdp != txq->bd.cur && 1620 readl(txq->bd.reg_desc_active) == 0) 1621 writel(0, txq->bd.reg_desc_active); 1622 } 1623 1624 static void fec_enet_tx(struct net_device *ndev, int budget) 1625 { 1626 struct fec_enet_private *fep = netdev_priv(ndev); 1627 int i; 1628 1629 /* Make sure that AVB queues are processed first. */ 1630 for (i = fep->num_tx_queues - 1; i >= 0; i--) 1631 fec_enet_tx_queue(ndev, i, budget); 1632 } 1633 1634 static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, 1635 struct bufdesc *bdp, int index) 1636 { 1637 struct page *new_page; 1638 dma_addr_t phys_addr; 1639 1640 new_page = page_pool_dev_alloc_pages(rxq->page_pool); 1641 if (unlikely(!new_page)) 1642 return -ENOMEM; 1643 1644 rxq->rx_skb_info[index].page = new_page; 1645 rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM; 1646 phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM; 1647 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); 1648 1649 return 0; 1650 } 1651 1652 static u32 1653 fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog, 1654 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu) 1655 { 1656 unsigned int sync, len = xdp->data_end - xdp->data; 1657 u32 ret = FEC_ENET_XDP_PASS; 1658 struct page *page; 1659 int err; 1660 u32 act; 1661 1662 act = bpf_prog_run_xdp(prog, xdp); 1663 1664 /* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover 1665 * max len CPU touch 1666 */ 1667 sync = xdp->data_end - xdp->data; 1668 sync = max(sync, len); 1669 1670 switch (act) { 1671 case XDP_PASS: 1672 rxq->stats[RX_XDP_PASS]++; 1673 ret = FEC_ENET_XDP_PASS; 1674 break; 1675 1676 case XDP_REDIRECT: 1677 rxq->stats[RX_XDP_REDIRECT]++; 1678 err = xdp_do_redirect(fep->netdev, xdp, prog); 1679 if (unlikely(err)) 1680 goto xdp_err; 1681 1682 ret = FEC_ENET_XDP_REDIR; 1683 break; 1684 1685 case XDP_TX: 1686 rxq->stats[RX_XDP_TX]++; 1687 err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync); 1688 if (unlikely(err)) { 1689 rxq->stats[RX_XDP_TX_ERRORS]++; 1690 goto xdp_err; 1691 } 1692 1693 ret = FEC_ENET_XDP_TX; 1694 break; 1695 1696 default: 1697 bpf_warn_invalid_xdp_action(fep->netdev, prog, act); 1698 fallthrough; 1699 1700 case XDP_ABORTED: 1701 fallthrough; /* handle aborts by dropping packet */ 1702 1703 case XDP_DROP: 1704 rxq->stats[RX_XDP_DROP]++; 1705 xdp_err: 1706 ret = FEC_ENET_XDP_CONSUMED; 1707 page = virt_to_head_page(xdp->data); 1708 page_pool_put_page(rxq->page_pool, page, sync, true); 1709 if (act != XDP_DROP) 1710 trace_xdp_exception(fep->netdev, prog, act); 1711 break; 1712 } 1713 1714 return ret; 1715 } 1716 1717 static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb) 1718 { 1719 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { 1720 const struct vlan_ethhdr *vlan_header = skb_vlan_eth_hdr(skb); 1721 const u16 vlan_tag = ntohs(vlan_header->h_vlan_TCI); 1722 1723 /* Push and remove the vlan tag */ 1724 1725 memmove(skb->data + VLAN_HLEN, skb->data, ETH_ALEN * 2); 1726 skb_pull(skb, VLAN_HLEN); 1727 __vlan_hwaccel_put_tag(skb, 1728 htons(ETH_P_8021Q), 1729 vlan_tag); 1730 } 1731 } 1732 1733 /* During a receive, the bd_rx.cur points to the current incoming buffer. 1734 * When we update through the ring, if the next incoming buffer has 1735 * not been given to the system, we just set the empty indicator, 1736 * effectively tossing the packet. 1737 */ 1738 static int 1739 fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget) 1740 { 1741 struct fec_enet_private *fep = netdev_priv(ndev); 1742 struct fec_enet_priv_rx_q *rxq; 1743 struct bufdesc *bdp; 1744 unsigned short status; 1745 struct sk_buff *skb; 1746 ushort pkt_len; 1747 int pkt_received = 0; 1748 struct bufdesc_ex *ebdp = NULL; 1749 int index = 0; 1750 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; 1751 struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); 1752 u32 ret, xdp_result = FEC_ENET_XDP_PASS; 1753 u32 data_start = FEC_ENET_XDP_HEADROOM; 1754 int cpu = smp_processor_id(); 1755 struct xdp_buff xdp; 1756 struct page *page; 1757 __fec32 cbd_bufaddr; 1758 u32 sub_len = 4; 1759 1760 #if !defined(CONFIG_M5272) 1761 /*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of 1762 * FEC_RACC_SHIFT16 is set by default in the probe function. 1763 */ 1764 if (fep->quirks & FEC_QUIRK_HAS_RACC) { 1765 data_start += 2; 1766 sub_len += 2; 1767 } 1768 #endif 1769 1770 #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) 1771 /* 1772 * Hacky flush of all caches instead of using the DMA API for the TSO 1773 * headers. 1774 */ 1775 flush_cache_all(); 1776 #endif 1777 rxq = fep->rx_queue[queue_id]; 1778 1779 /* First, grab all of the stats for the incoming packet. 1780 * These get messed up if we get called due to a busy condition. 1781 */ 1782 bdp = rxq->bd.cur; 1783 xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq); 1784 1785 while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { 1786 1787 if (pkt_received >= budget) 1788 break; 1789 pkt_received++; 1790 1791 writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT); 1792 1793 /* Check for errors. */ 1794 status ^= BD_ENET_RX_LAST; 1795 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | 1796 BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST | 1797 BD_ENET_RX_CL)) { 1798 ndev->stats.rx_errors++; 1799 if (status & BD_ENET_RX_OV) { 1800 /* FIFO overrun */ 1801 ndev->stats.rx_fifo_errors++; 1802 goto rx_processing_done; 1803 } 1804 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH 1805 | BD_ENET_RX_LAST)) { 1806 /* Frame too long or too short. */ 1807 ndev->stats.rx_length_errors++; 1808 if (status & BD_ENET_RX_LAST) 1809 netdev_err(ndev, "rcv is not +last\n"); 1810 } 1811 if (status & BD_ENET_RX_CR) /* CRC Error */ 1812 ndev->stats.rx_crc_errors++; 1813 /* Report late collisions as a frame error. */ 1814 if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL)) 1815 ndev->stats.rx_frame_errors++; 1816 goto rx_processing_done; 1817 } 1818 1819 /* Process the incoming frame. */ 1820 ndev->stats.rx_packets++; 1821 pkt_len = fec16_to_cpu(bdp->cbd_datlen); 1822 ndev->stats.rx_bytes += pkt_len; 1823 1824 index = fec_enet_get_bd_index(bdp, &rxq->bd); 1825 page = rxq->rx_skb_info[index].page; 1826 cbd_bufaddr = bdp->cbd_bufaddr; 1827 if (fec_enet_update_cbd(rxq, bdp, index)) { 1828 ndev->stats.rx_dropped++; 1829 goto rx_processing_done; 1830 } 1831 1832 dma_sync_single_for_cpu(&fep->pdev->dev, 1833 fec32_to_cpu(cbd_bufaddr), 1834 pkt_len, 1835 DMA_FROM_DEVICE); 1836 prefetch(page_address(page)); 1837 1838 if (xdp_prog) { 1839 xdp_buff_clear_frags_flag(&xdp); 1840 /* subtract 16bit shift and FCS */ 1841 xdp_prepare_buff(&xdp, page_address(page), 1842 data_start, pkt_len - sub_len, false); 1843 ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu); 1844 xdp_result |= ret; 1845 if (ret != FEC_ENET_XDP_PASS) 1846 goto rx_processing_done; 1847 } 1848 1849 /* The packet length includes FCS, but we don't want to 1850 * include that when passing upstream as it messes up 1851 * bridging applications. 1852 */ 1853 skb = build_skb(page_address(page), PAGE_SIZE); 1854 if (unlikely(!skb)) { 1855 page_pool_recycle_direct(rxq->page_pool, page); 1856 ndev->stats.rx_dropped++; 1857 1858 netdev_err_once(ndev, "build_skb failed!\n"); 1859 goto rx_processing_done; 1860 } 1861 1862 skb_reserve(skb, data_start); 1863 skb_put(skb, pkt_len - sub_len); 1864 skb_mark_for_recycle(skb); 1865 1866 if (unlikely(need_swap)) { 1867 u8 *data; 1868 1869 data = page_address(page) + FEC_ENET_XDP_HEADROOM; 1870 swap_buffer(data, pkt_len); 1871 } 1872 1873 /* Extract the enhanced buffer descriptor */ 1874 ebdp = NULL; 1875 if (fep->bufdesc_ex) 1876 ebdp = (struct bufdesc_ex *)bdp; 1877 1878 /* If this is a VLAN packet remove the VLAN Tag */ 1879 if (fep->bufdesc_ex && 1880 (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) 1881 fec_enet_rx_vlan(ndev, skb); 1882 1883 skb->protocol = eth_type_trans(skb, ndev); 1884 1885 /* Get receive timestamp from the skb */ 1886 if (fep->hwts_rx_en && fep->bufdesc_ex) 1887 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), 1888 skb_hwtstamps(skb)); 1889 1890 if (fep->bufdesc_ex && 1891 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { 1892 if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) { 1893 /* don't check it */ 1894 skb->ip_summed = CHECKSUM_UNNECESSARY; 1895 } else { 1896 skb_checksum_none_assert(skb); 1897 } 1898 } 1899 1900 skb_record_rx_queue(skb, queue_id); 1901 napi_gro_receive(&fep->napi, skb); 1902 1903 rx_processing_done: 1904 /* Clear the status flags for this buffer */ 1905 status &= ~BD_ENET_RX_STATS; 1906 1907 /* Mark the buffer empty */ 1908 status |= BD_ENET_RX_EMPTY; 1909 1910 if (fep->bufdesc_ex) { 1911 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1912 1913 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 1914 ebdp->cbd_prot = 0; 1915 ebdp->cbd_bdu = 0; 1916 } 1917 /* Make sure the updates to rest of the descriptor are 1918 * performed before transferring ownership. 1919 */ 1920 wmb(); 1921 bdp->cbd_sc = cpu_to_fec16(status); 1922 1923 /* Update BD pointer to next entry */ 1924 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 1925 1926 /* Doing this here will keep the FEC running while we process 1927 * incoming frames. On a heavily loaded network, we should be 1928 * able to keep up at the expense of system resources. 1929 */ 1930 writel(0, rxq->bd.reg_desc_active); 1931 } 1932 rxq->bd.cur = bdp; 1933 1934 if (xdp_result & FEC_ENET_XDP_REDIR) 1935 xdp_do_flush(); 1936 1937 return pkt_received; 1938 } 1939 1940 static int fec_enet_rx(struct net_device *ndev, int budget) 1941 { 1942 struct fec_enet_private *fep = netdev_priv(ndev); 1943 int i, done = 0; 1944 1945 /* Make sure that AVB queues are processed first. */ 1946 for (i = fep->num_rx_queues - 1; i >= 0; i--) 1947 done += fec_enet_rx_queue(ndev, i, budget - done); 1948 1949 return done; 1950 } 1951 1952 static bool fec_enet_collect_events(struct fec_enet_private *fep) 1953 { 1954 uint int_events; 1955 1956 int_events = readl(fep->hwp + FEC_IEVENT); 1957 1958 /* Don't clear MDIO events, we poll for those */ 1959 int_events &= ~FEC_ENET_MII; 1960 1961 writel(int_events, fep->hwp + FEC_IEVENT); 1962 1963 return int_events != 0; 1964 } 1965 1966 static irqreturn_t 1967 fec_enet_interrupt(int irq, void *dev_id) 1968 { 1969 struct net_device *ndev = dev_id; 1970 struct fec_enet_private *fep = netdev_priv(ndev); 1971 irqreturn_t ret = IRQ_NONE; 1972 1973 if (fec_enet_collect_events(fep) && fep->link) { 1974 ret = IRQ_HANDLED; 1975 1976 if (napi_schedule_prep(&fep->napi)) { 1977 /* Disable interrupts */ 1978 writel(0, fep->hwp + FEC_IMASK); 1979 __napi_schedule(&fep->napi); 1980 } 1981 } 1982 1983 return ret; 1984 } 1985 1986 static int fec_enet_rx_napi(struct napi_struct *napi, int budget) 1987 { 1988 struct net_device *ndev = napi->dev; 1989 struct fec_enet_private *fep = netdev_priv(ndev); 1990 int done = 0; 1991 1992 do { 1993 done += fec_enet_rx(ndev, budget - done); 1994 fec_enet_tx(ndev, budget); 1995 } while ((done < budget) && fec_enet_collect_events(fep)); 1996 1997 if (done < budget) { 1998 napi_complete_done(napi, done); 1999 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 2000 } 2001 2002 return done; 2003 } 2004 2005 /* ------------------------------------------------------------------------- */ 2006 static int fec_get_mac(struct net_device *ndev) 2007 { 2008 struct fec_enet_private *fep = netdev_priv(ndev); 2009 unsigned char *iap, tmpaddr[ETH_ALEN]; 2010 int ret; 2011 2012 /* 2013 * try to get mac address in following order: 2014 * 2015 * 1) module parameter via kernel command line in form 2016 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 2017 */ 2018 iap = macaddr; 2019 2020 /* 2021 * 2) from device tree data 2022 */ 2023 if (!is_valid_ether_addr(iap)) { 2024 struct device_node *np = fep->pdev->dev.of_node; 2025 if (np) { 2026 ret = of_get_mac_address(np, tmpaddr); 2027 if (!ret) 2028 iap = tmpaddr; 2029 else if (ret == -EPROBE_DEFER) 2030 return ret; 2031 } 2032 } 2033 2034 /* 2035 * 3) from flash or fuse (via platform data) 2036 */ 2037 if (!is_valid_ether_addr(iap)) { 2038 #ifdef CONFIG_M5272 2039 if (FEC_FLASHMAC) 2040 iap = (unsigned char *)FEC_FLASHMAC; 2041 #else 2042 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); 2043 2044 if (pdata) 2045 iap = (unsigned char *)&pdata->mac; 2046 #endif 2047 } 2048 2049 /* 2050 * 4) FEC mac registers set by bootloader 2051 */ 2052 if (!is_valid_ether_addr(iap)) { 2053 *((__be32 *) &tmpaddr[0]) = 2054 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); 2055 *((__be16 *) &tmpaddr[4]) = 2056 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 2057 iap = &tmpaddr[0]; 2058 } 2059 2060 /* 2061 * 5) random mac address 2062 */ 2063 if (!is_valid_ether_addr(iap)) { 2064 /* Report it and use a random ethernet address instead */ 2065 dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap); 2066 eth_hw_addr_random(ndev); 2067 dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n", 2068 ndev->dev_addr); 2069 return 0; 2070 } 2071 2072 /* Adjust MAC if using macaddr */ 2073 eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0); 2074 2075 return 0; 2076 } 2077 2078 /* ------------------------------------------------------------------------- */ 2079 2080 /* 2081 * Phy section 2082 */ 2083 2084 /* LPI Sleep Ts count base on tx clk (clk_ref). 2085 * The lpi sleep cnt value = X us / (cycle_ns). 2086 */ 2087 static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us) 2088 { 2089 struct fec_enet_private *fep = netdev_priv(ndev); 2090 2091 return us * (fep->clk_ref_rate / 1000) / 1000; 2092 } 2093 2094 static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer, 2095 bool enable) 2096 { 2097 struct fec_enet_private *fep = netdev_priv(ndev); 2098 unsigned int sleep_cycle, wake_cycle; 2099 2100 if (enable) { 2101 sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer); 2102 wake_cycle = sleep_cycle; 2103 } else { 2104 sleep_cycle = 0; 2105 wake_cycle = 0; 2106 } 2107 2108 writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP); 2109 writel(wake_cycle, fep->hwp + FEC_LPI_WAKE); 2110 2111 return 0; 2112 } 2113 2114 static void fec_enet_adjust_link(struct net_device *ndev) 2115 { 2116 struct fec_enet_private *fep = netdev_priv(ndev); 2117 struct phy_device *phy_dev = ndev->phydev; 2118 int status_change = 0; 2119 2120 /* 2121 * If the netdev is down, or is going down, we're not interested 2122 * in link state events, so just mark our idea of the link as down 2123 * and ignore the event. 2124 */ 2125 if (!netif_running(ndev) || !netif_device_present(ndev)) { 2126 fep->link = 0; 2127 } else if (phy_dev->link) { 2128 if (!fep->link) { 2129 fep->link = phy_dev->link; 2130 status_change = 1; 2131 } 2132 2133 if (fep->full_duplex != phy_dev->duplex) { 2134 fep->full_duplex = phy_dev->duplex; 2135 status_change = 1; 2136 } 2137 2138 if (phy_dev->speed != fep->speed) { 2139 fep->speed = phy_dev->speed; 2140 status_change = 1; 2141 } 2142 2143 /* if any of the above changed restart the FEC */ 2144 if (status_change) { 2145 netif_stop_queue(ndev); 2146 napi_disable(&fep->napi); 2147 netif_tx_lock_bh(ndev); 2148 fec_restart(ndev); 2149 netif_tx_wake_all_queues(ndev); 2150 netif_tx_unlock_bh(ndev); 2151 napi_enable(&fep->napi); 2152 } 2153 if (fep->quirks & FEC_QUIRK_HAS_EEE) 2154 fec_enet_eee_mode_set(ndev, 2155 phy_dev->eee_cfg.tx_lpi_timer, 2156 phy_dev->enable_tx_lpi); 2157 } else { 2158 if (fep->link) { 2159 netif_stop_queue(ndev); 2160 napi_disable(&fep->napi); 2161 netif_tx_lock_bh(ndev); 2162 fec_stop(ndev); 2163 netif_tx_unlock_bh(ndev); 2164 napi_enable(&fep->napi); 2165 fep->link = phy_dev->link; 2166 status_change = 1; 2167 } 2168 } 2169 2170 if (status_change) 2171 phy_print_status(phy_dev); 2172 } 2173 2174 static int fec_enet_mdio_wait(struct fec_enet_private *fep) 2175 { 2176 uint ievent; 2177 int ret; 2178 2179 ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent, 2180 ievent & FEC_ENET_MII, 2, 30000); 2181 2182 if (!ret) 2183 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); 2184 2185 return ret; 2186 } 2187 2188 static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) 2189 { 2190 struct fec_enet_private *fep = bus->priv; 2191 struct device *dev = &fep->pdev->dev; 2192 int ret = 0, frame_start, frame_addr, frame_op; 2193 2194 ret = pm_runtime_resume_and_get(dev); 2195 if (ret < 0) 2196 return ret; 2197 2198 /* C22 read */ 2199 frame_op = FEC_MMFR_OP_READ; 2200 frame_start = FEC_MMFR_ST; 2201 frame_addr = regnum; 2202 2203 /* start a read op */ 2204 writel(frame_start | frame_op | 2205 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | 2206 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 2207 2208 /* wait for end of transfer */ 2209 ret = fec_enet_mdio_wait(fep); 2210 if (ret) { 2211 netdev_err(fep->netdev, "MDIO read timeout\n"); 2212 goto out; 2213 } 2214 2215 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 2216 2217 out: 2218 pm_runtime_mark_last_busy(dev); 2219 pm_runtime_put_autosuspend(dev); 2220 2221 return ret; 2222 } 2223 2224 static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id, 2225 int devad, int regnum) 2226 { 2227 struct fec_enet_private *fep = bus->priv; 2228 struct device *dev = &fep->pdev->dev; 2229 int ret = 0, frame_start, frame_op; 2230 2231 ret = pm_runtime_resume_and_get(dev); 2232 if (ret < 0) 2233 return ret; 2234 2235 frame_start = FEC_MMFR_ST_C45; 2236 2237 /* write address */ 2238 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | 2239 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2240 FEC_MMFR_TA | (regnum & 0xFFFF), 2241 fep->hwp + FEC_MII_DATA); 2242 2243 /* wait for end of transfer */ 2244 ret = fec_enet_mdio_wait(fep); 2245 if (ret) { 2246 netdev_err(fep->netdev, "MDIO address write timeout\n"); 2247 goto out; 2248 } 2249 2250 frame_op = FEC_MMFR_OP_READ_C45; 2251 2252 /* start a read op */ 2253 writel(frame_start | frame_op | 2254 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2255 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 2256 2257 /* wait for end of transfer */ 2258 ret = fec_enet_mdio_wait(fep); 2259 if (ret) { 2260 netdev_err(fep->netdev, "MDIO read timeout\n"); 2261 goto out; 2262 } 2263 2264 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 2265 2266 out: 2267 pm_runtime_mark_last_busy(dev); 2268 pm_runtime_put_autosuspend(dev); 2269 2270 return ret; 2271 } 2272 2273 static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, 2274 u16 value) 2275 { 2276 struct fec_enet_private *fep = bus->priv; 2277 struct device *dev = &fep->pdev->dev; 2278 int ret, frame_start, frame_addr; 2279 2280 ret = pm_runtime_resume_and_get(dev); 2281 if (ret < 0) 2282 return ret; 2283 2284 /* C22 write */ 2285 frame_start = FEC_MMFR_ST; 2286 frame_addr = regnum; 2287 2288 /* start a write op */ 2289 writel(frame_start | FEC_MMFR_OP_WRITE | 2290 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | 2291 FEC_MMFR_TA | FEC_MMFR_DATA(value), 2292 fep->hwp + FEC_MII_DATA); 2293 2294 /* wait for end of transfer */ 2295 ret = fec_enet_mdio_wait(fep); 2296 if (ret) 2297 netdev_err(fep->netdev, "MDIO write timeout\n"); 2298 2299 pm_runtime_mark_last_busy(dev); 2300 pm_runtime_put_autosuspend(dev); 2301 2302 return ret; 2303 } 2304 2305 static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id, 2306 int devad, int regnum, u16 value) 2307 { 2308 struct fec_enet_private *fep = bus->priv; 2309 struct device *dev = &fep->pdev->dev; 2310 int ret, frame_start; 2311 2312 ret = pm_runtime_resume_and_get(dev); 2313 if (ret < 0) 2314 return ret; 2315 2316 frame_start = FEC_MMFR_ST_C45; 2317 2318 /* write address */ 2319 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | 2320 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2321 FEC_MMFR_TA | (regnum & 0xFFFF), 2322 fep->hwp + FEC_MII_DATA); 2323 2324 /* wait for end of transfer */ 2325 ret = fec_enet_mdio_wait(fep); 2326 if (ret) { 2327 netdev_err(fep->netdev, "MDIO address write timeout\n"); 2328 goto out; 2329 } 2330 2331 /* start a write op */ 2332 writel(frame_start | FEC_MMFR_OP_WRITE | 2333 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2334 FEC_MMFR_TA | FEC_MMFR_DATA(value), 2335 fep->hwp + FEC_MII_DATA); 2336 2337 /* wait for end of transfer */ 2338 ret = fec_enet_mdio_wait(fep); 2339 if (ret) 2340 netdev_err(fep->netdev, "MDIO write timeout\n"); 2341 2342 out: 2343 pm_runtime_mark_last_busy(dev); 2344 pm_runtime_put_autosuspend(dev); 2345 2346 return ret; 2347 } 2348 2349 static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) 2350 { 2351 struct fec_enet_private *fep = netdev_priv(ndev); 2352 struct phy_device *phy_dev = ndev->phydev; 2353 2354 if (phy_dev) { 2355 phy_reset_after_clk_enable(phy_dev); 2356 } else if (fep->phy_node) { 2357 /* 2358 * If the PHY still is not bound to the MAC, but there is 2359 * OF PHY node and a matching PHY device instance already, 2360 * use the OF PHY node to obtain the PHY device instance, 2361 * and then use that PHY device instance when triggering 2362 * the PHY reset. 2363 */ 2364 phy_dev = of_phy_find_device(fep->phy_node); 2365 phy_reset_after_clk_enable(phy_dev); 2366 put_device(&phy_dev->mdio.dev); 2367 } 2368 } 2369 2370 static int fec_enet_clk_enable(struct net_device *ndev, bool enable) 2371 { 2372 struct fec_enet_private *fep = netdev_priv(ndev); 2373 int ret; 2374 2375 if (enable) { 2376 ret = clk_prepare_enable(fep->clk_enet_out); 2377 if (ret) 2378 return ret; 2379 2380 if (fep->clk_ptp) { 2381 mutex_lock(&fep->ptp_clk_mutex); 2382 ret = clk_prepare_enable(fep->clk_ptp); 2383 if (ret) { 2384 mutex_unlock(&fep->ptp_clk_mutex); 2385 goto failed_clk_ptp; 2386 } else { 2387 fep->ptp_clk_on = true; 2388 } 2389 mutex_unlock(&fep->ptp_clk_mutex); 2390 } 2391 2392 ret = clk_prepare_enable(fep->clk_ref); 2393 if (ret) 2394 goto failed_clk_ref; 2395 2396 ret = clk_prepare_enable(fep->clk_2x_txclk); 2397 if (ret) 2398 goto failed_clk_2x_txclk; 2399 2400 fec_enet_phy_reset_after_clk_enable(ndev); 2401 } else { 2402 clk_disable_unprepare(fep->clk_enet_out); 2403 if (fep->clk_ptp) { 2404 mutex_lock(&fep->ptp_clk_mutex); 2405 clk_disable_unprepare(fep->clk_ptp); 2406 fep->ptp_clk_on = false; 2407 mutex_unlock(&fep->ptp_clk_mutex); 2408 } 2409 clk_disable_unprepare(fep->clk_ref); 2410 clk_disable_unprepare(fep->clk_2x_txclk); 2411 } 2412 2413 return 0; 2414 2415 failed_clk_2x_txclk: 2416 if (fep->clk_ref) 2417 clk_disable_unprepare(fep->clk_ref); 2418 failed_clk_ref: 2419 if (fep->clk_ptp) { 2420 mutex_lock(&fep->ptp_clk_mutex); 2421 clk_disable_unprepare(fep->clk_ptp); 2422 fep->ptp_clk_on = false; 2423 mutex_unlock(&fep->ptp_clk_mutex); 2424 } 2425 failed_clk_ptp: 2426 clk_disable_unprepare(fep->clk_enet_out); 2427 2428 return ret; 2429 } 2430 2431 static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep, 2432 struct device_node *np) 2433 { 2434 u32 rgmii_tx_delay, rgmii_rx_delay; 2435 2436 /* For rgmii tx internal delay, valid values are 0ps and 2000ps */ 2437 if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) { 2438 if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) { 2439 dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps"); 2440 return -EINVAL; 2441 } else if (rgmii_tx_delay == 2000) { 2442 fep->rgmii_txc_dly = true; 2443 } 2444 } 2445 2446 /* For rgmii rx internal delay, valid values are 0ps and 2000ps */ 2447 if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) { 2448 if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) { 2449 dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps"); 2450 return -EINVAL; 2451 } else if (rgmii_rx_delay == 2000) { 2452 fep->rgmii_rxc_dly = true; 2453 } 2454 } 2455 2456 return 0; 2457 } 2458 2459 static int fec_enet_mii_probe(struct net_device *ndev) 2460 { 2461 struct fec_enet_private *fep = netdev_priv(ndev); 2462 struct phy_device *phy_dev = NULL; 2463 char mdio_bus_id[MII_BUS_ID_SIZE]; 2464 char phy_name[MII_BUS_ID_SIZE + 3]; 2465 int phy_id; 2466 int dev_id = fep->dev_id; 2467 2468 if (fep->phy_node) { 2469 phy_dev = of_phy_connect(ndev, fep->phy_node, 2470 &fec_enet_adjust_link, 0, 2471 fep->phy_interface); 2472 if (!phy_dev) { 2473 netdev_err(ndev, "Unable to connect to phy\n"); 2474 return -ENODEV; 2475 } 2476 } else { 2477 /* check for attached phy */ 2478 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { 2479 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) 2480 continue; 2481 if (dev_id--) 2482 continue; 2483 strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); 2484 break; 2485 } 2486 2487 if (phy_id >= PHY_MAX_ADDR) { 2488 netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); 2489 strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); 2490 phy_id = 0; 2491 } 2492 2493 snprintf(phy_name, sizeof(phy_name), 2494 PHY_ID_FMT, mdio_bus_id, phy_id); 2495 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 2496 fep->phy_interface); 2497 } 2498 2499 if (IS_ERR(phy_dev)) { 2500 netdev_err(ndev, "could not attach to PHY\n"); 2501 return PTR_ERR(phy_dev); 2502 } 2503 2504 /* mask with MAC supported features */ 2505 if (fep->quirks & FEC_QUIRK_HAS_GBIT) { 2506 phy_set_max_speed(phy_dev, 1000); 2507 phy_remove_link_mode(phy_dev, 2508 ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 2509 #if !defined(CONFIG_M5272) 2510 phy_support_sym_pause(phy_dev); 2511 #endif 2512 } 2513 else 2514 phy_set_max_speed(phy_dev, 100); 2515 2516 if (fep->quirks & FEC_QUIRK_HAS_EEE) 2517 phy_support_eee(phy_dev); 2518 2519 fep->link = 0; 2520 fep->full_duplex = 0; 2521 2522 phy_attached_info(phy_dev); 2523 2524 return 0; 2525 } 2526 2527 static int fec_enet_mii_init(struct platform_device *pdev) 2528 { 2529 static struct mii_bus *fec0_mii_bus; 2530 struct net_device *ndev = platform_get_drvdata(pdev); 2531 struct fec_enet_private *fep = netdev_priv(ndev); 2532 bool suppress_preamble = false; 2533 struct phy_device *phydev; 2534 struct device_node *node; 2535 int err = -ENXIO; 2536 u32 mii_speed, holdtime; 2537 u32 bus_freq; 2538 int addr; 2539 2540 /* 2541 * The i.MX28 dual fec interfaces are not equal. 2542 * Here are the differences: 2543 * 2544 * - fec0 supports MII & RMII modes while fec1 only supports RMII 2545 * - fec0 acts as the 1588 time master while fec1 is slave 2546 * - external phys can only be configured by fec0 2547 * 2548 * That is to say fec1 can not work independently. It only works 2549 * when fec0 is working. The reason behind this design is that the 2550 * second interface is added primarily for Switch mode. 2551 * 2552 * Because of the last point above, both phys are attached on fec0 2553 * mdio interface in board design, and need to be configured by 2554 * fec0 mii_bus. 2555 */ 2556 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { 2557 /* fec1 uses fec0 mii_bus */ 2558 if (mii_cnt && fec0_mii_bus) { 2559 fep->mii_bus = fec0_mii_bus; 2560 mii_cnt++; 2561 return 0; 2562 } 2563 return -ENOENT; 2564 } 2565 2566 bus_freq = 2500000; /* 2.5MHz by default */ 2567 node = of_get_child_by_name(pdev->dev.of_node, "mdio"); 2568 if (node) { 2569 of_property_read_u32(node, "clock-frequency", &bus_freq); 2570 suppress_preamble = of_property_read_bool(node, 2571 "suppress-preamble"); 2572 } 2573 2574 /* 2575 * Set MII speed (= clk_get_rate() / 2 * phy_speed) 2576 * 2577 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while 2578 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 2579 * Reference Manual has an error on this, and gets fixed on i.MX6Q 2580 * document. 2581 */ 2582 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2); 2583 if (fep->quirks & FEC_QUIRK_ENET_MAC) 2584 mii_speed--; 2585 if (mii_speed > 63) { 2586 dev_err(&pdev->dev, 2587 "fec clock (%lu) too fast to get right mii speed\n", 2588 clk_get_rate(fep->clk_ipg)); 2589 err = -EINVAL; 2590 goto err_out; 2591 } 2592 2593 /* 2594 * The i.MX28 and i.MX6 types have another filed in the MSCR (aka 2595 * MII_SPEED) register that defines the MDIO output hold time. Earlier 2596 * versions are RAZ there, so just ignore the difference and write the 2597 * register always. 2598 * The minimal hold time according to IEE802.3 (clause 22) is 10 ns. 2599 * HOLDTIME + 1 is the number of clk cycles the fec is holding the 2600 * output. 2601 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). 2602 * Given that ceil(clkrate / 5000000) <= 64, the calculation for 2603 * holdtime cannot result in a value greater than 3. 2604 */ 2605 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; 2606 2607 fep->phy_speed = mii_speed << 1 | holdtime << 8; 2608 2609 if (suppress_preamble) 2610 fep->phy_speed |= BIT(7); 2611 2612 if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) { 2613 /* Clear MMFR to avoid to generate MII event by writing MSCR. 2614 * MII event generation condition: 2615 * - writing MSCR: 2616 * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero & 2617 * mscr_reg_data_in[7:0] != 0 2618 * - writing MMFR: 2619 * - mscr[7:0]_not_zero 2620 */ 2621 writel(0, fep->hwp + FEC_MII_DATA); 2622 } 2623 2624 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 2625 2626 /* Clear any pending transaction complete indication */ 2627 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); 2628 2629 fep->mii_bus = mdiobus_alloc(); 2630 if (fep->mii_bus == NULL) { 2631 err = -ENOMEM; 2632 goto err_out; 2633 } 2634 2635 fep->mii_bus->name = "fec_enet_mii_bus"; 2636 fep->mii_bus->read = fec_enet_mdio_read_c22; 2637 fep->mii_bus->write = fec_enet_mdio_write_c22; 2638 if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) { 2639 fep->mii_bus->read_c45 = fec_enet_mdio_read_c45; 2640 fep->mii_bus->write_c45 = fec_enet_mdio_write_c45; 2641 } 2642 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2643 pdev->name, fep->dev_id + 1); 2644 fep->mii_bus->priv = fep; 2645 fep->mii_bus->parent = &pdev->dev; 2646 2647 err = of_mdiobus_register(fep->mii_bus, node); 2648 if (err) 2649 goto err_out_free_mdiobus; 2650 of_node_put(node); 2651 2652 /* find all the PHY devices on the bus and set mac_managed_pm to true */ 2653 for (addr = 0; addr < PHY_MAX_ADDR; addr++) { 2654 phydev = mdiobus_get_phy(fep->mii_bus, addr); 2655 if (phydev) 2656 phydev->mac_managed_pm = true; 2657 } 2658 2659 mii_cnt++; 2660 2661 /* save fec0 mii_bus */ 2662 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) 2663 fec0_mii_bus = fep->mii_bus; 2664 2665 return 0; 2666 2667 err_out_free_mdiobus: 2668 mdiobus_free(fep->mii_bus); 2669 err_out: 2670 of_node_put(node); 2671 return err; 2672 } 2673 2674 static void fec_enet_mii_remove(struct fec_enet_private *fep) 2675 { 2676 if (--mii_cnt == 0) { 2677 mdiobus_unregister(fep->mii_bus); 2678 mdiobus_free(fep->mii_bus); 2679 } 2680 } 2681 2682 static void fec_enet_get_drvinfo(struct net_device *ndev, 2683 struct ethtool_drvinfo *info) 2684 { 2685 struct fec_enet_private *fep = netdev_priv(ndev); 2686 2687 strscpy(info->driver, fep->pdev->dev.driver->name, 2688 sizeof(info->driver)); 2689 strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); 2690 } 2691 2692 static int fec_enet_get_regs_len(struct net_device *ndev) 2693 { 2694 struct fec_enet_private *fep = netdev_priv(ndev); 2695 struct resource *r; 2696 int s = 0; 2697 2698 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); 2699 if (r) 2700 s = resource_size(r); 2701 2702 return s; 2703 } 2704 2705 /* List of registers that can be safety be read to dump them with ethtool */ 2706 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2707 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 2708 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) 2709 static __u32 fec_enet_register_version = 2; 2710 static u32 fec_enet_register_offset[] = { 2711 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2712 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2713 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1, 2714 FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH, 2715 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, 2716 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1, 2717 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2, 2718 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0, 2719 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, 2720 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2, 2721 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1, 2722 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME, 2723 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 2724 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 2725 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 2726 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 2727 RMON_T_P_GTE2048, RMON_T_OCTETS, 2728 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, 2729 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 2730 IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 2731 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 2732 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 2733 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 2734 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 2735 RMON_R_P_GTE2048, RMON_R_OCTETS, 2736 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 2737 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2738 }; 2739 /* for i.MX6ul */ 2740 static u32 fec_enet_register_offset_6ul[] = { 2741 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2742 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2743 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0, 2744 FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, 2745 FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0, 2746 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, 2747 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, 2748 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 2749 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 2750 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 2751 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 2752 RMON_T_P_GTE2048, RMON_T_OCTETS, 2753 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, 2754 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 2755 IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 2756 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 2757 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 2758 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 2759 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 2760 RMON_R_P_GTE2048, RMON_R_OCTETS, 2761 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 2762 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2763 }; 2764 #else 2765 static __u32 fec_enet_register_version = 1; 2766 static u32 fec_enet_register_offset[] = { 2767 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, 2768 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, 2769 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED, 2770 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL, 2771 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, 2772 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0, 2773 FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0, 2774 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0, 2775 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2 2776 }; 2777 #endif 2778 2779 static void fec_enet_get_regs(struct net_device *ndev, 2780 struct ethtool_regs *regs, void *regbuf) 2781 { 2782 struct fec_enet_private *fep = netdev_priv(ndev); 2783 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; 2784 struct device *dev = &fep->pdev->dev; 2785 u32 *buf = (u32 *)regbuf; 2786 u32 i, off; 2787 int ret; 2788 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2789 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 2790 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) 2791 u32 *reg_list; 2792 u32 reg_cnt; 2793 2794 if (!of_machine_is_compatible("fsl,imx6ul")) { 2795 reg_list = fec_enet_register_offset; 2796 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); 2797 } else { 2798 reg_list = fec_enet_register_offset_6ul; 2799 reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul); 2800 } 2801 #else 2802 /* coldfire */ 2803 static u32 *reg_list = fec_enet_register_offset; 2804 static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); 2805 #endif 2806 ret = pm_runtime_resume_and_get(dev); 2807 if (ret < 0) 2808 return; 2809 2810 regs->version = fec_enet_register_version; 2811 2812 memset(buf, 0, regs->len); 2813 2814 for (i = 0; i < reg_cnt; i++) { 2815 off = reg_list[i]; 2816 2817 if ((off == FEC_R_BOUND || off == FEC_R_FSTART) && 2818 !(fep->quirks & FEC_QUIRK_HAS_FRREG)) 2819 continue; 2820 2821 off >>= 2; 2822 buf[off] = readl(&theregs[off]); 2823 } 2824 2825 pm_runtime_mark_last_busy(dev); 2826 pm_runtime_put_autosuspend(dev); 2827 } 2828 2829 static int fec_enet_get_ts_info(struct net_device *ndev, 2830 struct kernel_ethtool_ts_info *info) 2831 { 2832 struct fec_enet_private *fep = netdev_priv(ndev); 2833 2834 if (fep->bufdesc_ex) { 2835 2836 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 2837 SOF_TIMESTAMPING_TX_HARDWARE | 2838 SOF_TIMESTAMPING_RX_HARDWARE | 2839 SOF_TIMESTAMPING_RAW_HARDWARE; 2840 if (fep->ptp_clock) 2841 info->phc_index = ptp_clock_index(fep->ptp_clock); 2842 2843 info->tx_types = (1 << HWTSTAMP_TX_OFF) | 2844 (1 << HWTSTAMP_TX_ON); 2845 2846 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2847 (1 << HWTSTAMP_FILTER_ALL); 2848 return 0; 2849 } else { 2850 return ethtool_op_get_ts_info(ndev, info); 2851 } 2852 } 2853 2854 #if !defined(CONFIG_M5272) 2855 2856 static void fec_enet_get_pauseparam(struct net_device *ndev, 2857 struct ethtool_pauseparam *pause) 2858 { 2859 struct fec_enet_private *fep = netdev_priv(ndev); 2860 2861 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; 2862 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; 2863 pause->rx_pause = pause->tx_pause; 2864 } 2865 2866 static int fec_enet_set_pauseparam(struct net_device *ndev, 2867 struct ethtool_pauseparam *pause) 2868 { 2869 struct fec_enet_private *fep = netdev_priv(ndev); 2870 2871 if (!ndev->phydev) 2872 return -ENODEV; 2873 2874 if (pause->tx_pause != pause->rx_pause) { 2875 netdev_info(ndev, 2876 "hardware only support enable/disable both tx and rx"); 2877 return -EINVAL; 2878 } 2879 2880 fep->pause_flag = 0; 2881 2882 /* tx pause must be same as rx pause */ 2883 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; 2884 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; 2885 2886 phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause, 2887 pause->autoneg); 2888 2889 if (pause->autoneg) { 2890 if (netif_running(ndev)) 2891 fec_stop(ndev); 2892 phy_start_aneg(ndev->phydev); 2893 } 2894 if (netif_running(ndev)) { 2895 napi_disable(&fep->napi); 2896 netif_tx_lock_bh(ndev); 2897 fec_restart(ndev); 2898 netif_tx_wake_all_queues(ndev); 2899 netif_tx_unlock_bh(ndev); 2900 napi_enable(&fep->napi); 2901 } 2902 2903 return 0; 2904 } 2905 2906 static const struct fec_stat { 2907 char name[ETH_GSTRING_LEN]; 2908 u16 offset; 2909 } fec_stats[] = { 2910 /* RMON TX */ 2911 { "tx_dropped", RMON_T_DROP }, 2912 { "tx_packets", RMON_T_PACKETS }, 2913 { "tx_broadcast", RMON_T_BC_PKT }, 2914 { "tx_multicast", RMON_T_MC_PKT }, 2915 { "tx_crc_errors", RMON_T_CRC_ALIGN }, 2916 { "tx_undersize", RMON_T_UNDERSIZE }, 2917 { "tx_oversize", RMON_T_OVERSIZE }, 2918 { "tx_fragment", RMON_T_FRAG }, 2919 { "tx_jabber", RMON_T_JAB }, 2920 { "tx_collision", RMON_T_COL }, 2921 { "tx_64byte", RMON_T_P64 }, 2922 { "tx_65to127byte", RMON_T_P65TO127 }, 2923 { "tx_128to255byte", RMON_T_P128TO255 }, 2924 { "tx_256to511byte", RMON_T_P256TO511 }, 2925 { "tx_512to1023byte", RMON_T_P512TO1023 }, 2926 { "tx_1024to2047byte", RMON_T_P1024TO2047 }, 2927 { "tx_GTE2048byte", RMON_T_P_GTE2048 }, 2928 { "tx_octets", RMON_T_OCTETS }, 2929 2930 /* IEEE TX */ 2931 { "IEEE_tx_drop", IEEE_T_DROP }, 2932 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, 2933 { "IEEE_tx_1col", IEEE_T_1COL }, 2934 { "IEEE_tx_mcol", IEEE_T_MCOL }, 2935 { "IEEE_tx_def", IEEE_T_DEF }, 2936 { "IEEE_tx_lcol", IEEE_T_LCOL }, 2937 { "IEEE_tx_excol", IEEE_T_EXCOL }, 2938 { "IEEE_tx_macerr", IEEE_T_MACERR }, 2939 { "IEEE_tx_cserr", IEEE_T_CSERR }, 2940 { "IEEE_tx_sqe", IEEE_T_SQE }, 2941 { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, 2942 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, 2943 2944 /* RMON RX */ 2945 { "rx_packets", RMON_R_PACKETS }, 2946 { "rx_broadcast", RMON_R_BC_PKT }, 2947 { "rx_multicast", RMON_R_MC_PKT }, 2948 { "rx_crc_errors", RMON_R_CRC_ALIGN }, 2949 { "rx_undersize", RMON_R_UNDERSIZE }, 2950 { "rx_oversize", RMON_R_OVERSIZE }, 2951 { "rx_fragment", RMON_R_FRAG }, 2952 { "rx_jabber", RMON_R_JAB }, 2953 { "rx_64byte", RMON_R_P64 }, 2954 { "rx_65to127byte", RMON_R_P65TO127 }, 2955 { "rx_128to255byte", RMON_R_P128TO255 }, 2956 { "rx_256to511byte", RMON_R_P256TO511 }, 2957 { "rx_512to1023byte", RMON_R_P512TO1023 }, 2958 { "rx_1024to2047byte", RMON_R_P1024TO2047 }, 2959 { "rx_GTE2048byte", RMON_R_P_GTE2048 }, 2960 { "rx_octets", RMON_R_OCTETS }, 2961 2962 /* IEEE RX */ 2963 { "IEEE_rx_drop", IEEE_R_DROP }, 2964 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, 2965 { "IEEE_rx_crc", IEEE_R_CRC }, 2966 { "IEEE_rx_align", IEEE_R_ALIGN }, 2967 { "IEEE_rx_macerr", IEEE_R_MACERR }, 2968 { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, 2969 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, 2970 }; 2971 2972 #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64)) 2973 2974 static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = { 2975 "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */ 2976 "rx_xdp_pass", /* RX_XDP_PASS, */ 2977 "rx_xdp_drop", /* RX_XDP_DROP, */ 2978 "rx_xdp_tx", /* RX_XDP_TX, */ 2979 "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */ 2980 "tx_xdp_xmit", /* TX_XDP_XMIT, */ 2981 "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */ 2982 }; 2983 2984 static void fec_enet_update_ethtool_stats(struct net_device *dev) 2985 { 2986 struct fec_enet_private *fep = netdev_priv(dev); 2987 int i; 2988 2989 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2990 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); 2991 } 2992 2993 static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data) 2994 { 2995 u64 xdp_stats[XDP_STATS_TOTAL] = { 0 }; 2996 struct fec_enet_priv_rx_q *rxq; 2997 int i, j; 2998 2999 for (i = fep->num_rx_queues - 1; i >= 0; i--) { 3000 rxq = fep->rx_queue[i]; 3001 3002 for (j = 0; j < XDP_STATS_TOTAL; j++) 3003 xdp_stats[j] += rxq->stats[j]; 3004 } 3005 3006 memcpy(data, xdp_stats, sizeof(xdp_stats)); 3007 } 3008 3009 static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data) 3010 { 3011 #ifdef CONFIG_PAGE_POOL_STATS 3012 struct page_pool_stats stats = {}; 3013 struct fec_enet_priv_rx_q *rxq; 3014 int i; 3015 3016 for (i = fep->num_rx_queues - 1; i >= 0; i--) { 3017 rxq = fep->rx_queue[i]; 3018 3019 if (!rxq->page_pool) 3020 continue; 3021 3022 page_pool_get_stats(rxq->page_pool, &stats); 3023 } 3024 3025 page_pool_ethtool_stats_get(data, &stats); 3026 #endif 3027 } 3028 3029 static void fec_enet_get_ethtool_stats(struct net_device *dev, 3030 struct ethtool_stats *stats, u64 *data) 3031 { 3032 struct fec_enet_private *fep = netdev_priv(dev); 3033 3034 if (netif_running(dev)) 3035 fec_enet_update_ethtool_stats(dev); 3036 3037 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); 3038 data += FEC_STATS_SIZE / sizeof(u64); 3039 3040 fec_enet_get_xdp_stats(fep, data); 3041 data += XDP_STATS_TOTAL; 3042 3043 fec_enet_page_pool_stats(fep, data); 3044 } 3045 3046 static void fec_enet_get_strings(struct net_device *netdev, 3047 u32 stringset, u8 *data) 3048 { 3049 int i; 3050 switch (stringset) { 3051 case ETH_SS_STATS: 3052 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) { 3053 ethtool_puts(&data, fec_stats[i].name); 3054 } 3055 for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) { 3056 ethtool_puts(&data, fec_xdp_stat_strs[i]); 3057 } 3058 page_pool_ethtool_stats_get_strings(data); 3059 3060 break; 3061 case ETH_SS_TEST: 3062 net_selftest_get_strings(data); 3063 break; 3064 } 3065 } 3066 3067 static int fec_enet_get_sset_count(struct net_device *dev, int sset) 3068 { 3069 int count; 3070 3071 switch (sset) { 3072 case ETH_SS_STATS: 3073 count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL; 3074 count += page_pool_ethtool_stats_get_count(); 3075 return count; 3076 3077 case ETH_SS_TEST: 3078 return net_selftest_get_count(); 3079 default: 3080 return -EOPNOTSUPP; 3081 } 3082 } 3083 3084 static void fec_enet_clear_ethtool_stats(struct net_device *dev) 3085 { 3086 struct fec_enet_private *fep = netdev_priv(dev); 3087 struct fec_enet_priv_rx_q *rxq; 3088 int i, j; 3089 3090 /* Disable MIB statistics counters */ 3091 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT); 3092 3093 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 3094 writel(0, fep->hwp + fec_stats[i].offset); 3095 3096 for (i = fep->num_rx_queues - 1; i >= 0; i--) { 3097 rxq = fep->rx_queue[i]; 3098 for (j = 0; j < XDP_STATS_TOTAL; j++) 3099 rxq->stats[j] = 0; 3100 } 3101 3102 /* Don't disable MIB statistics counters */ 3103 writel(0, fep->hwp + FEC_MIB_CTRLSTAT); 3104 } 3105 3106 #else /* !defined(CONFIG_M5272) */ 3107 #define FEC_STATS_SIZE 0 3108 static inline void fec_enet_update_ethtool_stats(struct net_device *dev) 3109 { 3110 } 3111 3112 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev) 3113 { 3114 } 3115 #endif /* !defined(CONFIG_M5272) */ 3116 3117 /* ITR clock source is enet system clock (clk_ahb). 3118 * TCTT unit is cycle_ns * 64 cycle 3119 * So, the ICTT value = X us / (cycle_ns * 64) 3120 */ 3121 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) 3122 { 3123 struct fec_enet_private *fep = netdev_priv(ndev); 3124 3125 return us * (fep->itr_clk_rate / 64000) / 1000; 3126 } 3127 3128 /* Set threshold for interrupt coalescing */ 3129 static void fec_enet_itr_coal_set(struct net_device *ndev) 3130 { 3131 struct fec_enet_private *fep = netdev_priv(ndev); 3132 u32 rx_itr = 0, tx_itr = 0; 3133 int rx_ictt, tx_ictt; 3134 3135 rx_ictt = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); 3136 tx_ictt = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); 3137 3138 if (rx_ictt > 0 && fep->rx_pkts_itr > 1) { 3139 /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */ 3140 rx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL; 3141 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); 3142 rx_itr |= FEC_ITR_ICTT(rx_ictt); 3143 } 3144 3145 if (tx_ictt > 0 && fep->tx_pkts_itr > 1) { 3146 /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */ 3147 tx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL; 3148 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); 3149 tx_itr |= FEC_ITR_ICTT(tx_ictt); 3150 } 3151 3152 writel(tx_itr, fep->hwp + FEC_TXIC0); 3153 writel(rx_itr, fep->hwp + FEC_RXIC0); 3154 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { 3155 writel(tx_itr, fep->hwp + FEC_TXIC1); 3156 writel(rx_itr, fep->hwp + FEC_RXIC1); 3157 writel(tx_itr, fep->hwp + FEC_TXIC2); 3158 writel(rx_itr, fep->hwp + FEC_RXIC2); 3159 } 3160 } 3161 3162 static int fec_enet_get_coalesce(struct net_device *ndev, 3163 struct ethtool_coalesce *ec, 3164 struct kernel_ethtool_coalesce *kernel_coal, 3165 struct netlink_ext_ack *extack) 3166 { 3167 struct fec_enet_private *fep = netdev_priv(ndev); 3168 3169 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) 3170 return -EOPNOTSUPP; 3171 3172 ec->rx_coalesce_usecs = fep->rx_time_itr; 3173 ec->rx_max_coalesced_frames = fep->rx_pkts_itr; 3174 3175 ec->tx_coalesce_usecs = fep->tx_time_itr; 3176 ec->tx_max_coalesced_frames = fep->tx_pkts_itr; 3177 3178 return 0; 3179 } 3180 3181 static int fec_enet_set_coalesce(struct net_device *ndev, 3182 struct ethtool_coalesce *ec, 3183 struct kernel_ethtool_coalesce *kernel_coal, 3184 struct netlink_ext_ack *extack) 3185 { 3186 struct fec_enet_private *fep = netdev_priv(ndev); 3187 struct device *dev = &fep->pdev->dev; 3188 unsigned int cycle; 3189 3190 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) 3191 return -EOPNOTSUPP; 3192 3193 if (ec->rx_max_coalesced_frames > 255) { 3194 dev_err(dev, "Rx coalesced frames exceed hardware limitation\n"); 3195 return -EINVAL; 3196 } 3197 3198 if (ec->tx_max_coalesced_frames > 255) { 3199 dev_err(dev, "Tx coalesced frame exceed hardware limitation\n"); 3200 return -EINVAL; 3201 } 3202 3203 cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); 3204 if (cycle > 0xFFFF) { 3205 dev_err(dev, "Rx coalesced usec exceed hardware limitation\n"); 3206 return -EINVAL; 3207 } 3208 3209 cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); 3210 if (cycle > 0xFFFF) { 3211 dev_err(dev, "Tx coalesced usec exceed hardware limitation\n"); 3212 return -EINVAL; 3213 } 3214 3215 fep->rx_time_itr = ec->rx_coalesce_usecs; 3216 fep->rx_pkts_itr = ec->rx_max_coalesced_frames; 3217 3218 fep->tx_time_itr = ec->tx_coalesce_usecs; 3219 fep->tx_pkts_itr = ec->tx_max_coalesced_frames; 3220 3221 fec_enet_itr_coal_set(ndev); 3222 3223 return 0; 3224 } 3225 3226 static int 3227 fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata) 3228 { 3229 struct fec_enet_private *fep = netdev_priv(ndev); 3230 3231 if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) 3232 return -EOPNOTSUPP; 3233 3234 if (!netif_running(ndev)) 3235 return -ENETDOWN; 3236 3237 return phy_ethtool_get_eee(ndev->phydev, edata); 3238 } 3239 3240 static int 3241 fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata) 3242 { 3243 struct fec_enet_private *fep = netdev_priv(ndev); 3244 3245 if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) 3246 return -EOPNOTSUPP; 3247 3248 if (!netif_running(ndev)) 3249 return -ENETDOWN; 3250 3251 return phy_ethtool_set_eee(ndev->phydev, edata); 3252 } 3253 3254 static void 3255 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 3256 { 3257 struct fec_enet_private *fep = netdev_priv(ndev); 3258 3259 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { 3260 wol->supported = WAKE_MAGIC; 3261 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0; 3262 } else { 3263 wol->supported = wol->wolopts = 0; 3264 } 3265 } 3266 3267 static int 3268 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 3269 { 3270 struct fec_enet_private *fep = netdev_priv(ndev); 3271 3272 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) 3273 return -EINVAL; 3274 3275 if (wol->wolopts & ~WAKE_MAGIC) 3276 return -EINVAL; 3277 3278 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); 3279 if (device_may_wakeup(&ndev->dev)) 3280 fep->wol_flag |= FEC_WOL_FLAG_ENABLE; 3281 else 3282 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); 3283 3284 return 0; 3285 } 3286 3287 static const struct ethtool_ops fec_enet_ethtool_ops = { 3288 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 3289 ETHTOOL_COALESCE_MAX_FRAMES, 3290 .get_drvinfo = fec_enet_get_drvinfo, 3291 .get_regs_len = fec_enet_get_regs_len, 3292 .get_regs = fec_enet_get_regs, 3293 .nway_reset = phy_ethtool_nway_reset, 3294 .get_link = ethtool_op_get_link, 3295 .get_coalesce = fec_enet_get_coalesce, 3296 .set_coalesce = fec_enet_set_coalesce, 3297 #ifndef CONFIG_M5272 3298 .get_pauseparam = fec_enet_get_pauseparam, 3299 .set_pauseparam = fec_enet_set_pauseparam, 3300 .get_strings = fec_enet_get_strings, 3301 .get_ethtool_stats = fec_enet_get_ethtool_stats, 3302 .get_sset_count = fec_enet_get_sset_count, 3303 #endif 3304 .get_ts_info = fec_enet_get_ts_info, 3305 .get_wol = fec_enet_get_wol, 3306 .set_wol = fec_enet_set_wol, 3307 .get_eee = fec_enet_get_eee, 3308 .set_eee = fec_enet_set_eee, 3309 .get_link_ksettings = phy_ethtool_get_link_ksettings, 3310 .set_link_ksettings = phy_ethtool_set_link_ksettings, 3311 .self_test = net_selftest, 3312 }; 3313 3314 static void fec_enet_free_buffers(struct net_device *ndev) 3315 { 3316 struct fec_enet_private *fep = netdev_priv(ndev); 3317 unsigned int i; 3318 struct fec_enet_priv_tx_q *txq; 3319 struct fec_enet_priv_rx_q *rxq; 3320 unsigned int q; 3321 3322 for (q = 0; q < fep->num_rx_queues; q++) { 3323 rxq = fep->rx_queue[q]; 3324 for (i = 0; i < rxq->bd.ring_size; i++) 3325 page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false); 3326 3327 for (i = 0; i < XDP_STATS_TOTAL; i++) 3328 rxq->stats[i] = 0; 3329 3330 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) 3331 xdp_rxq_info_unreg(&rxq->xdp_rxq); 3332 page_pool_destroy(rxq->page_pool); 3333 rxq->page_pool = NULL; 3334 } 3335 3336 for (q = 0; q < fep->num_tx_queues; q++) { 3337 txq = fep->tx_queue[q]; 3338 for (i = 0; i < txq->bd.ring_size; i++) { 3339 kfree(txq->tx_bounce[i]); 3340 txq->tx_bounce[i] = NULL; 3341 3342 if (!txq->tx_buf[i].buf_p) { 3343 txq->tx_buf[i].type = FEC_TXBUF_T_SKB; 3344 continue; 3345 } 3346 3347 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { 3348 dev_kfree_skb(txq->tx_buf[i].buf_p); 3349 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) { 3350 xdp_return_frame(txq->tx_buf[i].buf_p); 3351 } else { 3352 struct page *page = txq->tx_buf[i].buf_p; 3353 3354 page_pool_put_page(pp_page_to_nmdesc(page)->pp, 3355 page, 0, false); 3356 } 3357 3358 txq->tx_buf[i].buf_p = NULL; 3359 txq->tx_buf[i].type = FEC_TXBUF_T_SKB; 3360 } 3361 } 3362 } 3363 3364 static void fec_enet_free_queue(struct net_device *ndev) 3365 { 3366 struct fec_enet_private *fep = netdev_priv(ndev); 3367 int i; 3368 struct fec_enet_priv_tx_q *txq; 3369 3370 for (i = 0; i < fep->num_tx_queues; i++) 3371 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { 3372 txq = fep->tx_queue[i]; 3373 fec_dma_free(&fep->pdev->dev, 3374 txq->bd.ring_size * TSO_HEADER_SIZE, 3375 txq->tso_hdrs, txq->tso_hdrs_dma); 3376 } 3377 3378 for (i = 0; i < fep->num_rx_queues; i++) 3379 kfree(fep->rx_queue[i]); 3380 for (i = 0; i < fep->num_tx_queues; i++) 3381 kfree(fep->tx_queue[i]); 3382 } 3383 3384 static int fec_enet_alloc_queue(struct net_device *ndev) 3385 { 3386 struct fec_enet_private *fep = netdev_priv(ndev); 3387 int i; 3388 int ret = 0; 3389 struct fec_enet_priv_tx_q *txq; 3390 3391 for (i = 0; i < fep->num_tx_queues; i++) { 3392 txq = kzalloc(sizeof(*txq), GFP_KERNEL); 3393 if (!txq) { 3394 ret = -ENOMEM; 3395 goto alloc_failed; 3396 } 3397 3398 fep->tx_queue[i] = txq; 3399 txq->bd.ring_size = TX_RING_SIZE; 3400 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; 3401 3402 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; 3403 txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS; 3404 3405 txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev, 3406 txq->bd.ring_size * TSO_HEADER_SIZE, 3407 &txq->tso_hdrs_dma, GFP_KERNEL); 3408 if (!txq->tso_hdrs) { 3409 ret = -ENOMEM; 3410 goto alloc_failed; 3411 } 3412 } 3413 3414 for (i = 0; i < fep->num_rx_queues; i++) { 3415 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), 3416 GFP_KERNEL); 3417 if (!fep->rx_queue[i]) { 3418 ret = -ENOMEM; 3419 goto alloc_failed; 3420 } 3421 3422 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; 3423 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; 3424 } 3425 return ret; 3426 3427 alloc_failed: 3428 fec_enet_free_queue(ndev); 3429 return ret; 3430 } 3431 3432 static int 3433 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) 3434 { 3435 struct fec_enet_private *fep = netdev_priv(ndev); 3436 struct fec_enet_priv_rx_q *rxq; 3437 dma_addr_t phys_addr; 3438 struct bufdesc *bdp; 3439 struct page *page; 3440 int i, err; 3441 3442 rxq = fep->rx_queue[queue]; 3443 bdp = rxq->bd.base; 3444 3445 err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size); 3446 if (err < 0) { 3447 netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err); 3448 return err; 3449 } 3450 3451 for (i = 0; i < rxq->bd.ring_size; i++) { 3452 page = page_pool_dev_alloc_pages(rxq->page_pool); 3453 if (!page) 3454 goto err_alloc; 3455 3456 phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM; 3457 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); 3458 3459 rxq->rx_skb_info[i].page = page; 3460 rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM; 3461 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); 3462 3463 if (fep->bufdesc_ex) { 3464 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 3465 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 3466 } 3467 3468 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 3469 } 3470 3471 /* Set the last buffer to wrap. */ 3472 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); 3473 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 3474 return 0; 3475 3476 err_alloc: 3477 fec_enet_free_buffers(ndev); 3478 return -ENOMEM; 3479 } 3480 3481 static int 3482 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) 3483 { 3484 struct fec_enet_private *fep = netdev_priv(ndev); 3485 unsigned int i; 3486 struct bufdesc *bdp; 3487 struct fec_enet_priv_tx_q *txq; 3488 3489 txq = fep->tx_queue[queue]; 3490 bdp = txq->bd.base; 3491 for (i = 0; i < txq->bd.ring_size; i++) { 3492 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); 3493 if (!txq->tx_bounce[i]) 3494 goto err_alloc; 3495 3496 bdp->cbd_sc = cpu_to_fec16(0); 3497 bdp->cbd_bufaddr = cpu_to_fec32(0); 3498 3499 if (fep->bufdesc_ex) { 3500 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 3501 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); 3502 } 3503 3504 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 3505 } 3506 3507 /* Set the last buffer to wrap. */ 3508 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); 3509 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 3510 3511 return 0; 3512 3513 err_alloc: 3514 fec_enet_free_buffers(ndev); 3515 return -ENOMEM; 3516 } 3517 3518 static int fec_enet_alloc_buffers(struct net_device *ndev) 3519 { 3520 struct fec_enet_private *fep = netdev_priv(ndev); 3521 unsigned int i; 3522 3523 for (i = 0; i < fep->num_rx_queues; i++) 3524 if (fec_enet_alloc_rxq_buffers(ndev, i)) 3525 return -ENOMEM; 3526 3527 for (i = 0; i < fep->num_tx_queues; i++) 3528 if (fec_enet_alloc_txq_buffers(ndev, i)) 3529 return -ENOMEM; 3530 return 0; 3531 } 3532 3533 static int 3534 fec_enet_open(struct net_device *ndev) 3535 { 3536 struct fec_enet_private *fep = netdev_priv(ndev); 3537 int ret; 3538 bool reset_again; 3539 3540 ret = pm_runtime_resume_and_get(&fep->pdev->dev); 3541 if (ret < 0) 3542 return ret; 3543 3544 pinctrl_pm_select_default_state(&fep->pdev->dev); 3545 ret = fec_enet_clk_enable(ndev, true); 3546 if (ret) 3547 goto clk_enable; 3548 3549 /* During the first fec_enet_open call the PHY isn't probed at this 3550 * point. Therefore the phy_reset_after_clk_enable() call within 3551 * fec_enet_clk_enable() fails. As we need this reset in order to be 3552 * sure the PHY is working correctly we check if we need to reset again 3553 * later when the PHY is probed 3554 */ 3555 if (ndev->phydev && ndev->phydev->drv) 3556 reset_again = false; 3557 else 3558 reset_again = true; 3559 3560 /* I should reset the ring buffers here, but I don't yet know 3561 * a simple way to do that. 3562 */ 3563 3564 ret = fec_enet_alloc_buffers(ndev); 3565 if (ret) 3566 goto err_enet_alloc; 3567 3568 /* Init MAC prior to mii bus probe */ 3569 fec_restart(ndev); 3570 3571 /* Call phy_reset_after_clk_enable() again if it failed during 3572 * phy_reset_after_clk_enable() before because the PHY wasn't probed. 3573 */ 3574 if (reset_again) 3575 fec_enet_phy_reset_after_clk_enable(ndev); 3576 3577 /* Probe and connect to PHY when open the interface */ 3578 ret = fec_enet_mii_probe(ndev); 3579 if (ret) 3580 goto err_enet_mii_probe; 3581 3582 if (fep->quirks & FEC_QUIRK_ERR006687) 3583 imx6q_cpuidle_fec_irqs_used(); 3584 3585 if (fep->quirks & FEC_QUIRK_HAS_PMQOS) 3586 cpu_latency_qos_add_request(&fep->pm_qos_req, 0); 3587 3588 napi_enable(&fep->napi); 3589 phy_start(ndev->phydev); 3590 netif_tx_start_all_queues(ndev); 3591 3592 device_set_wakeup_enable(&ndev->dev, fep->wol_flag & 3593 FEC_WOL_FLAG_ENABLE); 3594 3595 return 0; 3596 3597 err_enet_mii_probe: 3598 fec_enet_free_buffers(ndev); 3599 err_enet_alloc: 3600 fec_enet_clk_enable(ndev, false); 3601 clk_enable: 3602 pm_runtime_mark_last_busy(&fep->pdev->dev); 3603 pm_runtime_put_autosuspend(&fep->pdev->dev); 3604 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3605 return ret; 3606 } 3607 3608 static int 3609 fec_enet_close(struct net_device *ndev) 3610 { 3611 struct fec_enet_private *fep = netdev_priv(ndev); 3612 3613 phy_stop(ndev->phydev); 3614 3615 if (netif_device_present(ndev)) { 3616 napi_disable(&fep->napi); 3617 netif_tx_disable(ndev); 3618 fec_stop(ndev); 3619 } 3620 3621 phy_disconnect(ndev->phydev); 3622 3623 if (fep->quirks & FEC_QUIRK_ERR006687) 3624 imx6q_cpuidle_fec_irqs_unused(); 3625 3626 fec_enet_update_ethtool_stats(ndev); 3627 3628 fec_enet_clk_enable(ndev, false); 3629 if (fep->quirks & FEC_QUIRK_HAS_PMQOS) 3630 cpu_latency_qos_remove_request(&fep->pm_qos_req); 3631 3632 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3633 pm_runtime_mark_last_busy(&fep->pdev->dev); 3634 pm_runtime_put_autosuspend(&fep->pdev->dev); 3635 3636 fec_enet_free_buffers(ndev); 3637 3638 return 0; 3639 } 3640 3641 /* Set or clear the multicast filter for this adaptor. 3642 * Skeleton taken from sunlance driver. 3643 * The CPM Ethernet implementation allows Multicast as well as individual 3644 * MAC address filtering. Some of the drivers check to make sure it is 3645 * a group multicast address, and discard those that are not. I guess I 3646 * will do the same for now, but just remove the test if you want 3647 * individual filtering as well (do the upper net layers want or support 3648 * this kind of feature?). 3649 */ 3650 3651 #define FEC_HASH_BITS 6 /* #bits in hash */ 3652 3653 static void set_multicast_list(struct net_device *ndev) 3654 { 3655 struct fec_enet_private *fep = netdev_priv(ndev); 3656 struct netdev_hw_addr *ha; 3657 unsigned int crc, tmp; 3658 unsigned char hash; 3659 unsigned int hash_high = 0, hash_low = 0; 3660 3661 if (ndev->flags & IFF_PROMISC) { 3662 tmp = readl(fep->hwp + FEC_R_CNTRL); 3663 tmp |= 0x8; 3664 writel(tmp, fep->hwp + FEC_R_CNTRL); 3665 return; 3666 } 3667 3668 tmp = readl(fep->hwp + FEC_R_CNTRL); 3669 tmp &= ~0x8; 3670 writel(tmp, fep->hwp + FEC_R_CNTRL); 3671 3672 if (ndev->flags & IFF_ALLMULTI) { 3673 /* Catch all multicast addresses, so set the 3674 * filter to all 1's 3675 */ 3676 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 3677 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 3678 3679 return; 3680 } 3681 3682 /* Add the addresses in hash register */ 3683 netdev_for_each_mc_addr(ha, ndev) { 3684 /* calculate crc32 value of mac address */ 3685 crc = ether_crc_le(ndev->addr_len, ha->addr); 3686 3687 /* only upper 6 bits (FEC_HASH_BITS) are used 3688 * which point to specific bit in the hash registers 3689 */ 3690 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; 3691 3692 if (hash > 31) 3693 hash_high |= 1 << (hash - 32); 3694 else 3695 hash_low |= 1 << hash; 3696 } 3697 3698 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 3699 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 3700 } 3701 3702 /* Set a MAC change in hardware. */ 3703 static int 3704 fec_set_mac_address(struct net_device *ndev, void *p) 3705 { 3706 struct sockaddr *addr = p; 3707 3708 if (addr) { 3709 if (!is_valid_ether_addr(addr->sa_data)) 3710 return -EADDRNOTAVAIL; 3711 eth_hw_addr_set(ndev, addr->sa_data); 3712 } 3713 3714 /* Add netif status check here to avoid system hang in below case: 3715 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; 3716 * After ethx down, fec all clocks are gated off and then register 3717 * access causes system hang. 3718 */ 3719 if (!netif_running(ndev)) 3720 return 0; 3721 3722 fec_set_hw_mac_addr(ndev); 3723 3724 return 0; 3725 } 3726 3727 static inline void fec_enet_set_netdev_features(struct net_device *netdev, 3728 netdev_features_t features) 3729 { 3730 struct fec_enet_private *fep = netdev_priv(netdev); 3731 netdev_features_t changed = features ^ netdev->features; 3732 3733 netdev->features = features; 3734 3735 /* Receive checksum has been changed */ 3736 if (changed & NETIF_F_RXCSUM) { 3737 if (features & NETIF_F_RXCSUM) 3738 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 3739 else 3740 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; 3741 } 3742 } 3743 3744 static int fec_set_features(struct net_device *netdev, 3745 netdev_features_t features) 3746 { 3747 struct fec_enet_private *fep = netdev_priv(netdev); 3748 netdev_features_t changed = features ^ netdev->features; 3749 3750 if (netif_running(netdev) && changed & NETIF_F_RXCSUM) { 3751 napi_disable(&fep->napi); 3752 netif_tx_lock_bh(netdev); 3753 fec_stop(netdev); 3754 fec_enet_set_netdev_features(netdev, features); 3755 fec_restart(netdev); 3756 netif_tx_wake_all_queues(netdev); 3757 netif_tx_unlock_bh(netdev); 3758 napi_enable(&fep->napi); 3759 } else { 3760 fec_enet_set_netdev_features(netdev, features); 3761 } 3762 3763 return 0; 3764 } 3765 3766 static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, 3767 struct net_device *sb_dev) 3768 { 3769 struct fec_enet_private *fep = netdev_priv(ndev); 3770 u16 vlan_tag = 0; 3771 3772 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 3773 return netdev_pick_tx(ndev, skb, NULL); 3774 3775 /* VLAN is present in the payload.*/ 3776 if (eth_type_vlan(skb->protocol)) { 3777 struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb); 3778 3779 vlan_tag = ntohs(vhdr->h_vlan_TCI); 3780 /* VLAN is present in the skb but not yet pushed in the payload.*/ 3781 } else if (skb_vlan_tag_present(skb)) { 3782 vlan_tag = skb->vlan_tci; 3783 } else { 3784 return vlan_tag; 3785 } 3786 3787 return fec_enet_vlan_pri_to_queue[vlan_tag >> 13]; 3788 } 3789 3790 static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf) 3791 { 3792 struct fec_enet_private *fep = netdev_priv(dev); 3793 bool is_run = netif_running(dev); 3794 struct bpf_prog *old_prog; 3795 3796 switch (bpf->command) { 3797 case XDP_SETUP_PROG: 3798 /* No need to support the SoCs that require to 3799 * do the frame swap because the performance wouldn't be 3800 * better than the skb mode. 3801 */ 3802 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 3803 return -EOPNOTSUPP; 3804 3805 if (!bpf->prog) 3806 xdp_features_clear_redirect_target(dev); 3807 3808 if (is_run) { 3809 napi_disable(&fep->napi); 3810 netif_tx_disable(dev); 3811 } 3812 3813 old_prog = xchg(&fep->xdp_prog, bpf->prog); 3814 if (old_prog) 3815 bpf_prog_put(old_prog); 3816 3817 fec_restart(dev); 3818 3819 if (is_run) { 3820 napi_enable(&fep->napi); 3821 netif_tx_start_all_queues(dev); 3822 } 3823 3824 if (bpf->prog) 3825 xdp_features_set_redirect_target(dev, false); 3826 3827 return 0; 3828 3829 case XDP_SETUP_XSK_POOL: 3830 return -EOPNOTSUPP; 3831 3832 default: 3833 return -EOPNOTSUPP; 3834 } 3835 } 3836 3837 static int 3838 fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index) 3839 { 3840 if (unlikely(index < 0)) 3841 return 0; 3842 3843 return (index % fep->num_tx_queues); 3844 } 3845 3846 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, 3847 struct fec_enet_priv_tx_q *txq, 3848 void *frame, u32 dma_sync_len, 3849 bool ndo_xmit) 3850 { 3851 unsigned int index, status, estatus; 3852 struct bufdesc *bdp; 3853 dma_addr_t dma_addr; 3854 int entries_free; 3855 u16 frame_len; 3856 3857 entries_free = fec_enet_get_free_txdesc_num(txq); 3858 if (entries_free < MAX_SKB_FRAGS + 1) { 3859 netdev_err_once(fep->netdev, "NOT enough BD for SG!\n"); 3860 return -EBUSY; 3861 } 3862 3863 /* Fill in a Tx ring entry */ 3864 bdp = txq->bd.cur; 3865 status = fec16_to_cpu(bdp->cbd_sc); 3866 status &= ~BD_ENET_TX_STATS; 3867 3868 index = fec_enet_get_bd_index(bdp, &txq->bd); 3869 3870 if (ndo_xmit) { 3871 struct xdp_frame *xdpf = frame; 3872 3873 dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data, 3874 xdpf->len, DMA_TO_DEVICE); 3875 if (dma_mapping_error(&fep->pdev->dev, dma_addr)) 3876 return -ENOMEM; 3877 3878 frame_len = xdpf->len; 3879 txq->tx_buf[index].buf_p = xdpf; 3880 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO; 3881 } else { 3882 struct xdp_buff *xdpb = frame; 3883 struct page *page; 3884 3885 page = virt_to_page(xdpb->data); 3886 dma_addr = page_pool_get_dma_addr(page) + 3887 (xdpb->data - xdpb->data_hard_start); 3888 dma_sync_single_for_device(&fep->pdev->dev, dma_addr, 3889 dma_sync_len, DMA_BIDIRECTIONAL); 3890 frame_len = xdpb->data_end - xdpb->data; 3891 txq->tx_buf[index].buf_p = page; 3892 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX; 3893 } 3894 3895 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 3896 if (fep->bufdesc_ex) 3897 estatus = BD_ENET_TX_INT; 3898 3899 bdp->cbd_bufaddr = cpu_to_fec32(dma_addr); 3900 bdp->cbd_datlen = cpu_to_fec16(frame_len); 3901 3902 if (fep->bufdesc_ex) { 3903 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 3904 3905 if (fep->quirks & FEC_QUIRK_HAS_AVB) 3906 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 3907 3908 ebdp->cbd_bdu = 0; 3909 ebdp->cbd_esc = cpu_to_fec32(estatus); 3910 } 3911 3912 /* Make sure the updates to rest of the descriptor are performed before 3913 * transferring ownership. 3914 */ 3915 dma_wmb(); 3916 3917 /* Send it on its way. Tell FEC it's ready, interrupt when done, 3918 * it's the last BD of the frame, and to put the CRC on the end. 3919 */ 3920 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); 3921 bdp->cbd_sc = cpu_to_fec16(status); 3922 3923 /* If this was the last BD in the ring, start at the beginning again. */ 3924 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 3925 3926 /* Make sure the update to bdp are performed before txq->bd.cur. */ 3927 dma_wmb(); 3928 3929 txq->bd.cur = bdp; 3930 3931 /* Trigger transmission start */ 3932 writel(0, txq->bd.reg_desc_active); 3933 3934 return 0; 3935 } 3936 3937 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, 3938 int cpu, struct xdp_buff *xdp, 3939 u32 dma_sync_len) 3940 { 3941 struct fec_enet_priv_tx_q *txq; 3942 struct netdev_queue *nq; 3943 int queue, ret; 3944 3945 queue = fec_enet_xdp_get_tx_queue(fep, cpu); 3946 txq = fep->tx_queue[queue]; 3947 nq = netdev_get_tx_queue(fep->netdev, queue); 3948 3949 __netif_tx_lock(nq, cpu); 3950 3951 /* Avoid tx timeout as XDP shares the queue with kernel stack */ 3952 txq_trans_cond_update(nq); 3953 ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false); 3954 3955 __netif_tx_unlock(nq); 3956 3957 return ret; 3958 } 3959 3960 static int fec_enet_xdp_xmit(struct net_device *dev, 3961 int num_frames, 3962 struct xdp_frame **frames, 3963 u32 flags) 3964 { 3965 struct fec_enet_private *fep = netdev_priv(dev); 3966 struct fec_enet_priv_tx_q *txq; 3967 int cpu = smp_processor_id(); 3968 unsigned int sent_frames = 0; 3969 struct netdev_queue *nq; 3970 unsigned int queue; 3971 int i; 3972 3973 queue = fec_enet_xdp_get_tx_queue(fep, cpu); 3974 txq = fep->tx_queue[queue]; 3975 nq = netdev_get_tx_queue(fep->netdev, queue); 3976 3977 __netif_tx_lock(nq, cpu); 3978 3979 /* Avoid tx timeout as XDP shares the queue with kernel stack */ 3980 txq_trans_cond_update(nq); 3981 for (i = 0; i < num_frames; i++) { 3982 if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0) 3983 break; 3984 sent_frames++; 3985 } 3986 3987 __netif_tx_unlock(nq); 3988 3989 return sent_frames; 3990 } 3991 3992 static int fec_hwtstamp_get(struct net_device *ndev, 3993 struct kernel_hwtstamp_config *config) 3994 { 3995 struct fec_enet_private *fep = netdev_priv(ndev); 3996 3997 if (!netif_running(ndev)) 3998 return -EINVAL; 3999 4000 if (!fep->bufdesc_ex) 4001 return -EOPNOTSUPP; 4002 4003 fec_ptp_get(ndev, config); 4004 4005 return 0; 4006 } 4007 4008 static int fec_hwtstamp_set(struct net_device *ndev, 4009 struct kernel_hwtstamp_config *config, 4010 struct netlink_ext_ack *extack) 4011 { 4012 struct fec_enet_private *fep = netdev_priv(ndev); 4013 4014 if (!netif_running(ndev)) 4015 return -EINVAL; 4016 4017 if (!fep->bufdesc_ex) 4018 return -EOPNOTSUPP; 4019 4020 return fec_ptp_set(ndev, config, extack); 4021 } 4022 4023 static const struct net_device_ops fec_netdev_ops = { 4024 .ndo_open = fec_enet_open, 4025 .ndo_stop = fec_enet_close, 4026 .ndo_start_xmit = fec_enet_start_xmit, 4027 .ndo_select_queue = fec_enet_select_queue, 4028 .ndo_set_rx_mode = set_multicast_list, 4029 .ndo_validate_addr = eth_validate_addr, 4030 .ndo_tx_timeout = fec_timeout, 4031 .ndo_set_mac_address = fec_set_mac_address, 4032 .ndo_eth_ioctl = phy_do_ioctl_running, 4033 .ndo_set_features = fec_set_features, 4034 .ndo_bpf = fec_enet_bpf, 4035 .ndo_xdp_xmit = fec_enet_xdp_xmit, 4036 .ndo_hwtstamp_get = fec_hwtstamp_get, 4037 .ndo_hwtstamp_set = fec_hwtstamp_set, 4038 }; 4039 4040 static const unsigned short offset_des_active_rxq[] = { 4041 FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2 4042 }; 4043 4044 static const unsigned short offset_des_active_txq[] = { 4045 FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2 4046 }; 4047 4048 /* 4049 * XXX: We need to clean up on failure exits here. 4050 * 4051 */ 4052 static int fec_enet_init(struct net_device *ndev) 4053 { 4054 struct fec_enet_private *fep = netdev_priv(ndev); 4055 struct bufdesc *cbd_base; 4056 dma_addr_t bd_dma; 4057 int bd_size; 4058 unsigned int i; 4059 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : 4060 sizeof(struct bufdesc); 4061 unsigned dsize_log2 = __fls(dsize); 4062 int ret; 4063 4064 WARN_ON(dsize != (1 << dsize_log2)); 4065 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) 4066 fep->rx_align = 0xf; 4067 fep->tx_align = 0xf; 4068 #else 4069 fep->rx_align = 0x3; 4070 fep->tx_align = 0x3; 4071 #endif 4072 fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT; 4073 fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT; 4074 fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT; 4075 fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT; 4076 4077 /* Check mask of the streaming and coherent API */ 4078 ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32)); 4079 if (ret < 0) { 4080 dev_warn(&fep->pdev->dev, "No suitable DMA available\n"); 4081 return ret; 4082 } 4083 4084 ret = fec_enet_alloc_queue(ndev); 4085 if (ret) 4086 return ret; 4087 4088 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; 4089 4090 /* Allocate memory for buffer descriptors. */ 4091 cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma, 4092 GFP_KERNEL); 4093 if (!cbd_base) { 4094 ret = -ENOMEM; 4095 goto free_queue_mem; 4096 } 4097 4098 /* Get the Ethernet address */ 4099 ret = fec_get_mac(ndev); 4100 if (ret) 4101 goto free_queue_mem; 4102 4103 /* Set receive and transmit descriptor base. */ 4104 for (i = 0; i < fep->num_rx_queues; i++) { 4105 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; 4106 unsigned size = dsize * rxq->bd.ring_size; 4107 4108 rxq->bd.qid = i; 4109 rxq->bd.base = cbd_base; 4110 rxq->bd.cur = cbd_base; 4111 rxq->bd.dma = bd_dma; 4112 rxq->bd.dsize = dsize; 4113 rxq->bd.dsize_log2 = dsize_log2; 4114 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; 4115 bd_dma += size; 4116 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 4117 rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 4118 } 4119 4120 for (i = 0; i < fep->num_tx_queues; i++) { 4121 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; 4122 unsigned size = dsize * txq->bd.ring_size; 4123 4124 txq->bd.qid = i; 4125 txq->bd.base = cbd_base; 4126 txq->bd.cur = cbd_base; 4127 txq->bd.dma = bd_dma; 4128 txq->bd.dsize = dsize; 4129 txq->bd.dsize_log2 = dsize_log2; 4130 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; 4131 bd_dma += size; 4132 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 4133 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 4134 } 4135 4136 4137 /* The FEC Ethernet specific entries in the device structure */ 4138 ndev->watchdog_timeo = TX_TIMEOUT; 4139 ndev->netdev_ops = &fec_netdev_ops; 4140 ndev->ethtool_ops = &fec_enet_ethtool_ops; 4141 4142 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 4143 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi); 4144 4145 if (fep->quirks & FEC_QUIRK_HAS_VLAN) 4146 /* enable hw VLAN support */ 4147 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 4148 4149 if (fep->quirks & FEC_QUIRK_HAS_CSUM) { 4150 netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS); 4151 4152 /* enable hw accelerator */ 4153 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 4154 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); 4155 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 4156 } 4157 4158 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { 4159 fep->tx_align = 0; 4160 fep->rx_align = 0x3f; 4161 } 4162 4163 ndev->hw_features = ndev->features; 4164 4165 if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME)) 4166 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | 4167 NETDEV_XDP_ACT_REDIRECT; 4168 4169 fec_restart(ndev); 4170 4171 if (fep->quirks & FEC_QUIRK_MIB_CLEAR) 4172 fec_enet_clear_ethtool_stats(ndev); 4173 else 4174 fec_enet_update_ethtool_stats(ndev); 4175 4176 return 0; 4177 4178 free_queue_mem: 4179 fec_enet_free_queue(ndev); 4180 return ret; 4181 } 4182 4183 static void fec_enet_deinit(struct net_device *ndev) 4184 { 4185 struct fec_enet_private *fep = netdev_priv(ndev); 4186 4187 netif_napi_del(&fep->napi); 4188 fec_enet_free_queue(ndev); 4189 } 4190 4191 #ifdef CONFIG_OF 4192 static int fec_reset_phy(struct platform_device *pdev) 4193 { 4194 struct gpio_desc *phy_reset; 4195 int msec = 1, phy_post_delay = 0; 4196 struct device_node *np = pdev->dev.of_node; 4197 int err; 4198 4199 if (!np) 4200 return 0; 4201 4202 err = of_property_read_u32(np, "phy-reset-duration", &msec); 4203 /* A sane reset duration should not be longer than 1s */ 4204 if (!err && msec > 1000) 4205 msec = 1; 4206 4207 err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay); 4208 /* valid reset duration should be less than 1s */ 4209 if (!err && phy_post_delay > 1000) 4210 return -EINVAL; 4211 4212 phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset", 4213 GPIOD_OUT_HIGH); 4214 if (IS_ERR(phy_reset)) 4215 return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset), 4216 "failed to get phy-reset-gpios\n"); 4217 4218 if (!phy_reset) 4219 return 0; 4220 4221 if (msec > 20) 4222 msleep(msec); 4223 else 4224 usleep_range(msec * 1000, msec * 1000 + 1000); 4225 4226 gpiod_set_value_cansleep(phy_reset, 0); 4227 4228 if (!phy_post_delay) 4229 return 0; 4230 4231 if (phy_post_delay > 20) 4232 msleep(phy_post_delay); 4233 else 4234 usleep_range(phy_post_delay * 1000, 4235 phy_post_delay * 1000 + 1000); 4236 4237 return 0; 4238 } 4239 #else /* CONFIG_OF */ 4240 static int fec_reset_phy(struct platform_device *pdev) 4241 { 4242 /* 4243 * In case of platform probe, the reset has been done 4244 * by machine code. 4245 */ 4246 return 0; 4247 } 4248 #endif /* CONFIG_OF */ 4249 4250 static void 4251 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) 4252 { 4253 struct device_node *np = pdev->dev.of_node; 4254 4255 *num_tx = *num_rx = 1; 4256 4257 if (!np || !of_device_is_available(np)) 4258 return; 4259 4260 /* parse the num of tx and rx queues */ 4261 of_property_read_u32(np, "fsl,num-tx-queues", num_tx); 4262 4263 of_property_read_u32(np, "fsl,num-rx-queues", num_rx); 4264 4265 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { 4266 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", 4267 *num_tx); 4268 *num_tx = 1; 4269 return; 4270 } 4271 4272 if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { 4273 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", 4274 *num_rx); 4275 *num_rx = 1; 4276 return; 4277 } 4278 4279 } 4280 4281 static int fec_enet_get_irq_cnt(struct platform_device *pdev) 4282 { 4283 int irq_cnt = platform_irq_count(pdev); 4284 4285 if (irq_cnt > FEC_IRQ_NUM) 4286 irq_cnt = FEC_IRQ_NUM; /* last for pps */ 4287 else if (irq_cnt == 2) 4288 irq_cnt = 1; /* last for pps */ 4289 else if (irq_cnt <= 0) 4290 irq_cnt = 1; /* At least 1 irq is needed */ 4291 return irq_cnt; 4292 } 4293 4294 static void fec_enet_get_wakeup_irq(struct platform_device *pdev) 4295 { 4296 struct net_device *ndev = platform_get_drvdata(pdev); 4297 struct fec_enet_private *fep = netdev_priv(ndev); 4298 4299 if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2) 4300 fep->wake_irq = fep->irq[2]; 4301 else 4302 fep->wake_irq = fep->irq[0]; 4303 } 4304 4305 static int fec_enet_init_stop_mode(struct fec_enet_private *fep, 4306 struct device_node *np) 4307 { 4308 struct device_node *gpr_np; 4309 u32 out_val[3]; 4310 int ret = 0; 4311 4312 gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0); 4313 if (!gpr_np) 4314 return 0; 4315 4316 ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val, 4317 ARRAY_SIZE(out_val)); 4318 if (ret) { 4319 dev_dbg(&fep->pdev->dev, "no stop mode property\n"); 4320 goto out; 4321 } 4322 4323 fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); 4324 if (IS_ERR(fep->stop_gpr.gpr)) { 4325 dev_err(&fep->pdev->dev, "could not find gpr regmap\n"); 4326 ret = PTR_ERR(fep->stop_gpr.gpr); 4327 fep->stop_gpr.gpr = NULL; 4328 goto out; 4329 } 4330 4331 fep->stop_gpr.reg = out_val[1]; 4332 fep->stop_gpr.bit = out_val[2]; 4333 4334 out: 4335 of_node_put(gpr_np); 4336 4337 return ret; 4338 } 4339 4340 static int 4341 fec_probe(struct platform_device *pdev) 4342 { 4343 struct fec_enet_private *fep; 4344 struct fec_platform_data *pdata; 4345 phy_interface_t interface; 4346 struct net_device *ndev; 4347 int i, irq, ret = 0; 4348 static int dev_id; 4349 struct device_node *np = pdev->dev.of_node, *phy_node; 4350 int num_tx_qs; 4351 int num_rx_qs; 4352 char irq_name[8]; 4353 int irq_cnt; 4354 const struct fec_devinfo *dev_info; 4355 4356 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); 4357 4358 /* Init network device */ 4359 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) + 4360 FEC_STATS_SIZE, num_tx_qs, num_rx_qs); 4361 if (!ndev) 4362 return -ENOMEM; 4363 4364 SET_NETDEV_DEV(ndev, &pdev->dev); 4365 4366 /* setup board info structure */ 4367 fep = netdev_priv(ndev); 4368 4369 dev_info = device_get_match_data(&pdev->dev); 4370 if (!dev_info) 4371 dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data; 4372 if (dev_info) 4373 fep->quirks = dev_info->quirks; 4374 4375 fep->netdev = ndev; 4376 fep->num_rx_queues = num_rx_qs; 4377 fep->num_tx_queues = num_tx_qs; 4378 4379 #if !defined(CONFIG_M5272) 4380 /* default enable pause frame auto negotiation */ 4381 if (fep->quirks & FEC_QUIRK_HAS_GBIT) 4382 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 4383 #endif 4384 4385 /* Select default pin state */ 4386 pinctrl_pm_select_default_state(&pdev->dev); 4387 4388 fep->hwp = devm_platform_ioremap_resource(pdev, 0); 4389 if (IS_ERR(fep->hwp)) { 4390 ret = PTR_ERR(fep->hwp); 4391 goto failed_ioremap; 4392 } 4393 4394 fep->pdev = pdev; 4395 fep->dev_id = dev_id++; 4396 4397 platform_set_drvdata(pdev, ndev); 4398 4399 if ((of_machine_is_compatible("fsl,imx6q") || 4400 of_machine_is_compatible("fsl,imx6dl")) && 4401 !of_property_read_bool(np, "fsl,err006687-workaround-present")) 4402 fep->quirks |= FEC_QUIRK_ERR006687; 4403 4404 ret = fec_enet_ipc_handle_init(fep); 4405 if (ret) 4406 goto failed_ipc_init; 4407 4408 if (of_property_read_bool(np, "fsl,magic-packet")) 4409 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; 4410 4411 ret = fec_enet_init_stop_mode(fep, np); 4412 if (ret) 4413 goto failed_stop_mode; 4414 4415 phy_node = of_parse_phandle(np, "phy-handle", 0); 4416 if (!phy_node && of_phy_is_fixed_link(np)) { 4417 ret = of_phy_register_fixed_link(np); 4418 if (ret < 0) { 4419 dev_err(&pdev->dev, 4420 "broken fixed-link specification\n"); 4421 goto failed_phy; 4422 } 4423 phy_node = of_node_get(np); 4424 } 4425 fep->phy_node = phy_node; 4426 4427 ret = of_get_phy_mode(pdev->dev.of_node, &interface); 4428 if (ret) { 4429 pdata = dev_get_platdata(&pdev->dev); 4430 if (pdata) 4431 fep->phy_interface = pdata->phy; 4432 else 4433 fep->phy_interface = PHY_INTERFACE_MODE_MII; 4434 } else { 4435 fep->phy_interface = interface; 4436 } 4437 4438 ret = fec_enet_parse_rgmii_delay(fep, np); 4439 if (ret) 4440 goto failed_rgmii_delay; 4441 4442 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 4443 if (IS_ERR(fep->clk_ipg)) { 4444 ret = PTR_ERR(fep->clk_ipg); 4445 goto failed_clk; 4446 } 4447 4448 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 4449 if (IS_ERR(fep->clk_ahb)) { 4450 ret = PTR_ERR(fep->clk_ahb); 4451 goto failed_clk; 4452 } 4453 4454 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); 4455 4456 /* enet_out is optional, depends on board */ 4457 fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out"); 4458 if (IS_ERR(fep->clk_enet_out)) { 4459 ret = PTR_ERR(fep->clk_enet_out); 4460 goto failed_clk; 4461 } 4462 4463 fep->ptp_clk_on = false; 4464 mutex_init(&fep->ptp_clk_mutex); 4465 4466 /* clk_ref is optional, depends on board */ 4467 fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref"); 4468 if (IS_ERR(fep->clk_ref)) { 4469 ret = PTR_ERR(fep->clk_ref); 4470 goto failed_clk; 4471 } 4472 fep->clk_ref_rate = clk_get_rate(fep->clk_ref); 4473 4474 /* clk_2x_txclk is optional, depends on board */ 4475 if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) { 4476 fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk"); 4477 if (IS_ERR(fep->clk_2x_txclk)) 4478 fep->clk_2x_txclk = NULL; 4479 } 4480 4481 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; 4482 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); 4483 if (IS_ERR(fep->clk_ptp)) { 4484 fep->clk_ptp = NULL; 4485 fep->bufdesc_ex = false; 4486 } 4487 4488 ret = fec_enet_clk_enable(ndev, true); 4489 if (ret) 4490 goto failed_clk; 4491 4492 ret = clk_prepare_enable(fep->clk_ipg); 4493 if (ret) 4494 goto failed_clk_ipg; 4495 ret = clk_prepare_enable(fep->clk_ahb); 4496 if (ret) 4497 goto failed_clk_ahb; 4498 4499 fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); 4500 if (!IS_ERR(fep->reg_phy)) { 4501 ret = regulator_enable(fep->reg_phy); 4502 if (ret) { 4503 dev_err(&pdev->dev, 4504 "Failed to enable phy regulator: %d\n", ret); 4505 goto failed_regulator; 4506 } 4507 } else { 4508 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { 4509 ret = -EPROBE_DEFER; 4510 goto failed_regulator; 4511 } 4512 fep->reg_phy = NULL; 4513 } 4514 4515 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); 4516 pm_runtime_use_autosuspend(&pdev->dev); 4517 pm_runtime_get_noresume(&pdev->dev); 4518 pm_runtime_set_active(&pdev->dev); 4519 pm_runtime_enable(&pdev->dev); 4520 4521 ret = fec_reset_phy(pdev); 4522 if (ret) 4523 goto failed_reset; 4524 4525 irq_cnt = fec_enet_get_irq_cnt(pdev); 4526 if (fep->bufdesc_ex) 4527 fec_ptp_init(pdev, irq_cnt); 4528 4529 ret = fec_enet_init(ndev); 4530 if (ret) 4531 goto failed_init; 4532 4533 for (i = 0; i < irq_cnt; i++) { 4534 snprintf(irq_name, sizeof(irq_name), "int%d", i); 4535 irq = platform_get_irq_byname_optional(pdev, irq_name); 4536 if (irq < 0) 4537 irq = platform_get_irq(pdev, i); 4538 if (irq < 0) { 4539 ret = irq; 4540 goto failed_irq; 4541 } 4542 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, 4543 0, pdev->name, ndev); 4544 if (ret) 4545 goto failed_irq; 4546 4547 fep->irq[i] = irq; 4548 } 4549 4550 /* Decide which interrupt line is wakeup capable */ 4551 fec_enet_get_wakeup_irq(pdev); 4552 4553 ret = fec_enet_mii_init(pdev); 4554 if (ret) 4555 goto failed_mii_init; 4556 4557 /* Carrier starts down, phylib will bring it up */ 4558 netif_carrier_off(ndev); 4559 fec_enet_clk_enable(ndev, false); 4560 pinctrl_pm_select_sleep_state(&pdev->dev); 4561 4562 ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN; 4563 4564 ret = register_netdev(ndev); 4565 if (ret) 4566 goto failed_register; 4567 4568 device_init_wakeup(&ndev->dev, fep->wol_flag & 4569 FEC_WOL_HAS_MAGIC_PACKET); 4570 4571 if (fep->bufdesc_ex && fep->ptp_clock) 4572 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); 4573 4574 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 4575 4576 pm_runtime_mark_last_busy(&pdev->dev); 4577 pm_runtime_put_autosuspend(&pdev->dev); 4578 4579 return 0; 4580 4581 failed_register: 4582 fec_enet_mii_remove(fep); 4583 failed_mii_init: 4584 failed_irq: 4585 fec_enet_deinit(ndev); 4586 failed_init: 4587 fec_ptp_stop(pdev); 4588 failed_reset: 4589 pm_runtime_put_noidle(&pdev->dev); 4590 pm_runtime_disable(&pdev->dev); 4591 if (fep->reg_phy) 4592 regulator_disable(fep->reg_phy); 4593 failed_regulator: 4594 clk_disable_unprepare(fep->clk_ahb); 4595 failed_clk_ahb: 4596 clk_disable_unprepare(fep->clk_ipg); 4597 failed_clk_ipg: 4598 fec_enet_clk_enable(ndev, false); 4599 failed_clk: 4600 failed_rgmii_delay: 4601 if (of_phy_is_fixed_link(np)) 4602 of_phy_deregister_fixed_link(np); 4603 of_node_put(phy_node); 4604 failed_stop_mode: 4605 failed_ipc_init: 4606 failed_phy: 4607 dev_id--; 4608 failed_ioremap: 4609 free_netdev(ndev); 4610 4611 return ret; 4612 } 4613 4614 static void 4615 fec_drv_remove(struct platform_device *pdev) 4616 { 4617 struct net_device *ndev = platform_get_drvdata(pdev); 4618 struct fec_enet_private *fep = netdev_priv(ndev); 4619 struct device_node *np = pdev->dev.of_node; 4620 int ret; 4621 4622 ret = pm_runtime_get_sync(&pdev->dev); 4623 if (ret < 0) 4624 dev_err(&pdev->dev, 4625 "Failed to resume device in remove callback (%pe)\n", 4626 ERR_PTR(ret)); 4627 4628 cancel_work_sync(&fep->tx_timeout_work); 4629 fec_ptp_stop(pdev); 4630 unregister_netdev(ndev); 4631 fec_enet_mii_remove(fep); 4632 if (fep->reg_phy) 4633 regulator_disable(fep->reg_phy); 4634 4635 if (of_phy_is_fixed_link(np)) 4636 of_phy_deregister_fixed_link(np); 4637 of_node_put(fep->phy_node); 4638 4639 /* After pm_runtime_get_sync() failed, the clks are still off, so skip 4640 * disabling them again. 4641 */ 4642 if (ret >= 0) { 4643 clk_disable_unprepare(fep->clk_ahb); 4644 clk_disable_unprepare(fep->clk_ipg); 4645 } 4646 pm_runtime_put_noidle(&pdev->dev); 4647 pm_runtime_disable(&pdev->dev); 4648 4649 fec_enet_deinit(ndev); 4650 free_netdev(ndev); 4651 } 4652 4653 static int fec_suspend(struct device *dev) 4654 { 4655 struct net_device *ndev = dev_get_drvdata(dev); 4656 struct fec_enet_private *fep = netdev_priv(ndev); 4657 int ret; 4658 4659 rtnl_lock(); 4660 if (netif_running(ndev)) { 4661 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) 4662 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; 4663 phy_stop(ndev->phydev); 4664 napi_disable(&fep->napi); 4665 netif_tx_lock_bh(ndev); 4666 netif_device_detach(ndev); 4667 netif_tx_unlock_bh(ndev); 4668 fec_stop(ndev); 4669 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { 4670 fec_irqs_disable(ndev); 4671 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 4672 } else { 4673 fec_irqs_disable_except_wakeup(ndev); 4674 if (fep->wake_irq > 0) { 4675 disable_irq(fep->wake_irq); 4676 enable_irq_wake(fep->wake_irq); 4677 } 4678 fec_enet_stop_mode(fep, true); 4679 } 4680 /* It's safe to disable clocks since interrupts are masked */ 4681 fec_enet_clk_enable(ndev, false); 4682 4683 fep->rpm_active = !pm_runtime_status_suspended(dev); 4684 if (fep->rpm_active) { 4685 ret = pm_runtime_force_suspend(dev); 4686 if (ret < 0) { 4687 rtnl_unlock(); 4688 return ret; 4689 } 4690 } 4691 } 4692 rtnl_unlock(); 4693 4694 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) 4695 regulator_disable(fep->reg_phy); 4696 4697 /* SOC supply clock to phy, when clock is disabled, phy link down 4698 * SOC control phy regulator, when regulator is disabled, phy link down 4699 */ 4700 if (fep->clk_enet_out || fep->reg_phy) 4701 fep->link = 0; 4702 4703 return 0; 4704 } 4705 4706 static int fec_resume(struct device *dev) 4707 { 4708 struct net_device *ndev = dev_get_drvdata(dev); 4709 struct fec_enet_private *fep = netdev_priv(ndev); 4710 int ret; 4711 int val; 4712 4713 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { 4714 ret = regulator_enable(fep->reg_phy); 4715 if (ret) 4716 return ret; 4717 } 4718 4719 rtnl_lock(); 4720 if (netif_running(ndev)) { 4721 if (fep->rpm_active) 4722 pm_runtime_force_resume(dev); 4723 4724 ret = fec_enet_clk_enable(ndev, true); 4725 if (ret) { 4726 rtnl_unlock(); 4727 goto failed_clk; 4728 } 4729 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { 4730 fec_enet_stop_mode(fep, false); 4731 if (fep->wake_irq) { 4732 disable_irq_wake(fep->wake_irq); 4733 enable_irq(fep->wake_irq); 4734 } 4735 4736 val = readl(fep->hwp + FEC_ECNTRL); 4737 val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 4738 writel(val, fep->hwp + FEC_ECNTRL); 4739 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; 4740 } else { 4741 pinctrl_pm_select_default_state(&fep->pdev->dev); 4742 } 4743 fec_restart(ndev); 4744 netif_tx_lock_bh(ndev); 4745 netif_device_attach(ndev); 4746 netif_tx_unlock_bh(ndev); 4747 napi_enable(&fep->napi); 4748 phy_init_hw(ndev->phydev); 4749 phy_start(ndev->phydev); 4750 } 4751 rtnl_unlock(); 4752 4753 return 0; 4754 4755 failed_clk: 4756 if (fep->reg_phy) 4757 regulator_disable(fep->reg_phy); 4758 return ret; 4759 } 4760 4761 static int fec_runtime_suspend(struct device *dev) 4762 { 4763 struct net_device *ndev = dev_get_drvdata(dev); 4764 struct fec_enet_private *fep = netdev_priv(ndev); 4765 4766 clk_disable_unprepare(fep->clk_ahb); 4767 clk_disable_unprepare(fep->clk_ipg); 4768 4769 return 0; 4770 } 4771 4772 static int fec_runtime_resume(struct device *dev) 4773 { 4774 struct net_device *ndev = dev_get_drvdata(dev); 4775 struct fec_enet_private *fep = netdev_priv(ndev); 4776 int ret; 4777 4778 ret = clk_prepare_enable(fep->clk_ahb); 4779 if (ret) 4780 return ret; 4781 ret = clk_prepare_enable(fep->clk_ipg); 4782 if (ret) 4783 goto failed_clk_ipg; 4784 4785 return 0; 4786 4787 failed_clk_ipg: 4788 clk_disable_unprepare(fep->clk_ahb); 4789 return ret; 4790 } 4791 4792 static const struct dev_pm_ops fec_pm_ops = { 4793 SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) 4794 RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) 4795 }; 4796 4797 static struct platform_driver fec_driver = { 4798 .driver = { 4799 .name = DRIVER_NAME, 4800 .pm = pm_ptr(&fec_pm_ops), 4801 .of_match_table = fec_dt_ids, 4802 .suppress_bind_attrs = true, 4803 }, 4804 .id_table = fec_devtype, 4805 .probe = fec_probe, 4806 .remove = fec_drv_remove, 4807 }; 4808 4809 module_platform_driver(fec_driver); 4810 4811 MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver"); 4812 MODULE_LICENSE("GPL"); 4813