1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. 4 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) 5 * 6 * Right now, I am very wasteful with the buffers. I allocate memory 7 * pages and then divide them into 2K frame buffers. This way I know I 8 * have buffers large enough to hold one frame within one buffer descriptor. 9 * Once I get this working, I will use 64 or 128 byte CPM buffers, which 10 * will be much more memory efficient and will easily handle lots of 11 * small packets. 12 * 13 * Much better multiple PHY support by Magnus Damm. 14 * Copyright (c) 2000 Ericsson Radio Systems AB. 15 * 16 * Support for FEC controller of ColdFire processors. 17 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) 18 * 19 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) 20 * Copyright (c) 2004-2006 Macq Electronique SA. 21 * 22 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. 23 */ 24 25 #include <linux/bitops.h> 26 #include <linux/bpf.h> 27 #include <linux/bpf_trace.h> 28 #include <linux/cacheflush.h> 29 #include <linux/clk.h> 30 #include <linux/crc32.h> 31 #include <linux/delay.h> 32 #include <linux/errno.h> 33 #include <linux/etherdevice.h> 34 #include <linux/fec.h> 35 #include <linux/filter.h> 36 #include <linux/gpio/consumer.h> 37 #include <linux/icmp.h> 38 #include <linux/if_vlan.h> 39 #include <linux/in.h> 40 #include <linux/interrupt.h> 41 #include <linux/io.h> 42 #include <linux/ioport.h> 43 #include <linux/ip.h> 44 #include <linux/irq.h> 45 #include <linux/kernel.h> 46 #include <linux/mdio.h> 47 #include <linux/mfd/syscon.h> 48 #include <linux/module.h> 49 #include <linux/netdevice.h> 50 #include <linux/of.h> 51 #include <linux/of_mdio.h> 52 #include <linux/of_net.h> 53 #include <linux/phy.h> 54 #include <linux/pinctrl/consumer.h> 55 #include <linux/platform_device.h> 56 #include <linux/pm_runtime.h> 57 #include <linux/prefetch.h> 58 #include <linux/property.h> 59 #include <linux/ptrace.h> 60 #include <linux/regmap.h> 61 #include <linux/regulator/consumer.h> 62 #include <linux/skbuff.h> 63 #include <linux/slab.h> 64 #include <linux/spinlock.h> 65 #include <linux/string.h> 66 #include <linux/tcp.h> 67 #include <linux/udp.h> 68 #include <linux/workqueue.h> 69 #include <net/ip.h> 70 #include <net/page_pool/helpers.h> 71 #include <net/selftests.h> 72 #include <net/tso.h> 73 #include <soc/imx/cpuidle.h> 74 75 #include "fec.h" 76 77 static void set_multicast_list(struct net_device *ndev); 78 static void fec_enet_itr_coal_set(struct net_device *ndev); 79 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, 80 int cpu, struct xdp_buff *xdp, 81 u32 dma_sync_len); 82 83 #define DRIVER_NAME "fec" 84 85 static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2}; 86 87 #define FEC_ENET_RSEM_V 0x84 88 #define FEC_ENET_RSFL_V 16 89 #define FEC_ENET_RAEM_V 0x8 90 #define FEC_ENET_RAFL_V 0x8 91 #define FEC_ENET_OPD_V 0xFFF0 92 #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ 93 94 #define FEC_ENET_XDP_PASS 0 95 #define FEC_ENET_XDP_CONSUMED BIT(0) 96 #define FEC_ENET_XDP_TX BIT(1) 97 #define FEC_ENET_XDP_REDIR BIT(2) 98 99 struct fec_devinfo { 100 u32 quirks; 101 }; 102 103 static const struct fec_devinfo fec_imx25_info = { 104 .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR | 105 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45, 106 }; 107 108 static const struct fec_devinfo fec_imx27_info = { 109 .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG | 110 FEC_QUIRK_HAS_MDIO_C45, 111 }; 112 113 static 
const struct fec_devinfo fec_imx28_info = { 114 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | 115 FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC | 116 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII | 117 FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45, 118 }; 119 120 static const struct fec_devinfo fec_imx6q_info = { 121 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 122 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 123 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | 124 FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII | 125 FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45, 126 }; 127 128 static const struct fec_devinfo fec_mvf600_info = { 129 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC | 130 FEC_QUIRK_HAS_MDIO_C45, 131 }; 132 133 static const struct fec_devinfo fec_imx6sx_info = { 134 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 135 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 136 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 137 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 138 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | 139 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | 140 FEC_QUIRK_HAS_MDIO_C45, 141 }; 142 143 static const struct fec_devinfo fec_imx6ul_info = { 144 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 145 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 146 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 | 147 FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC | 148 FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII | 149 FEC_QUIRK_HAS_MDIO_C45, 150 }; 151 152 static const struct fec_devinfo fec_imx8mq_info = { 153 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 154 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 155 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 156 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 157 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | 158 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | 159 FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 | 160 FEC_QUIRK_HAS_MDIO_C45, 161 }; 162 163 static const struct fec_devinfo fec_imx8qm_info = { 164 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 165 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 166 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 167 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 168 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | 169 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | 170 FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45, 171 }; 172 173 static const struct fec_devinfo fec_s32v234_info = { 174 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 175 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 176 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 177 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 178 FEC_QUIRK_HAS_MDIO_C45, 179 }; 180 181 static struct platform_device_id fec_devtype[] = { 182 { 183 /* keep it for coldfire */ 184 .name = DRIVER_NAME, 185 .driver_data = 0, 186 }, { 187 /* sentinel */ 188 } 189 }; 190 MODULE_DEVICE_TABLE(platform, fec_devtype); 191 192 static const struct of_device_id fec_dt_ids[] = { 193 { .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, }, 194 { .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, }, 195 { .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, }, 196 { .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, }, 197 { .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, }, 198 { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, }, 199 { .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, }, 200 { .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, }, 201 { .compatible = 
"fsl,imx8qm-fec", .data = &fec_imx8qm_info, }, 202 { .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, }, 203 { /* sentinel */ } 204 }; 205 MODULE_DEVICE_TABLE(of, fec_dt_ids); 206 207 static unsigned char macaddr[ETH_ALEN]; 208 module_param_array(macaddr, byte, NULL, 0); 209 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); 210 211 #if defined(CONFIG_M5272) 212 /* 213 * Some hardware gets it MAC address out of local flash memory. 214 * if this is non-zero then assume it is the address to get MAC from. 215 */ 216 #if defined(CONFIG_NETtel) 217 #define FEC_FLASHMAC 0xf0006006 218 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) 219 #define FEC_FLASHMAC 0xf0006000 220 #elif defined(CONFIG_CANCam) 221 #define FEC_FLASHMAC 0xf0020000 222 #elif defined (CONFIG_M5272C3) 223 #define FEC_FLASHMAC (0xffe04000 + 4) 224 #elif defined(CONFIG_MOD5272) 225 #define FEC_FLASHMAC 0xffc0406b 226 #else 227 #define FEC_FLASHMAC 0 228 #endif 229 #endif /* CONFIG_M5272 */ 230 231 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. 232 * 233 * 2048 byte skbufs are allocated. However, alignment requirements 234 * varies between FEC variants. Worst case is 64, so round down by 64. 235 */ 236 #define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64)) 237 #define PKT_MINBUF_SIZE 64 238 239 /* FEC receive acceleration */ 240 #define FEC_RACC_IPDIS BIT(1) 241 #define FEC_RACC_PRODIS BIT(2) 242 #define FEC_RACC_SHIFT16 BIT(7) 243 #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) 244 245 /* MIB Control Register */ 246 #define FEC_MIB_CTRLSTAT_DISABLE BIT(31) 247 248 /* 249 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame 250 * size bits. Other FEC hardware does not, so we need to take that into 251 * account when setting it. 
252 */ 253 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 254 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 255 defined(CONFIG_ARM64) 256 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 257 #else 258 #define OPT_FRAME_SIZE 0 259 #endif 260 261 /* FEC MII MMFR bits definition */ 262 #define FEC_MMFR_ST (1 << 30) 263 #define FEC_MMFR_ST_C45 (0) 264 #define FEC_MMFR_OP_READ (2 << 28) 265 #define FEC_MMFR_OP_READ_C45 (3 << 28) 266 #define FEC_MMFR_OP_WRITE (1 << 28) 267 #define FEC_MMFR_OP_ADDR_WRITE (0) 268 #define FEC_MMFR_PA(v) ((v & 0x1f) << 23) 269 #define FEC_MMFR_RA(v) ((v & 0x1f) << 18) 270 #define FEC_MMFR_TA (2 << 16) 271 #define FEC_MMFR_DATA(v) (v & 0xffff) 272 /* FEC ECR bits definition */ 273 #define FEC_ECR_RESET BIT(0) 274 #define FEC_ECR_ETHEREN BIT(1) 275 #define FEC_ECR_MAGICEN BIT(2) 276 #define FEC_ECR_SLEEP BIT(3) 277 #define FEC_ECR_EN1588 BIT(4) 278 #define FEC_ECR_SPEED BIT(5) 279 #define FEC_ECR_BYTESWP BIT(8) 280 /* FEC RCR bits definition */ 281 #define FEC_RCR_LOOP BIT(0) 282 #define FEC_RCR_DRT BIT(1) 283 #define FEC_RCR_MII BIT(2) 284 #define FEC_RCR_PROMISC BIT(3) 285 #define FEC_RCR_BC_REJ BIT(4) 286 #define FEC_RCR_FLOWCTL BIT(5) 287 #define FEC_RCR_RGMII BIT(6) 288 #define FEC_RCR_RMII BIT(8) 289 #define FEC_RCR_10BASET BIT(9) 290 #define FEC_RCR_NLC BIT(30) 291 /* TX WMARK bits */ 292 #define FEC_TXWMRK_STRFWD BIT(8) 293 294 #define FEC_MII_TIMEOUT 30000 /* us */ 295 296 /* Transmitter timeout */ 297 #define TX_TIMEOUT (2 * HZ) 298 299 #define FEC_PAUSE_FLAG_AUTONEG 0x1 300 #define FEC_PAUSE_FLAG_ENABLE 0x2 301 #define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0) 302 #define FEC_WOL_FLAG_ENABLE (0x1 << 1) 303 #define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2) 304 305 /* Max number of allowed TCP segments for software TSO */ 306 #define FEC_MAX_TSO_SEGS 100 307 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) 308 309 #define IS_TSO_HEADER(txq, addr) \ 310 ((addr >= txq->tso_hdrs_dma) && \ 311 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE)) 312 313 static int mii_cnt; 314 315 static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, 316 struct bufdesc_prop *bd) 317 { 318 return (bdp >= bd->last) ? bd->base 319 : (struct bufdesc *)(((void *)bdp) + bd->dsize); 320 } 321 322 static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, 323 struct bufdesc_prop *bd) 324 { 325 return (bdp <= bd->base) ? bd->last 326 : (struct bufdesc *)(((void *)bdp) - bd->dsize); 327 } 328 329 static int fec_enet_get_bd_index(struct bufdesc *bdp, 330 struct bufdesc_prop *bd) 331 { 332 return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2; 333 } 334 335 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) 336 { 337 int entries; 338 339 entries = (((const char *)txq->dirty_tx - 340 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; 341 342 return entries >= 0 ? 
entries : entries + txq->bd.ring_size; 343 } 344 345 static void swap_buffer(void *bufaddr, int len) 346 { 347 int i; 348 unsigned int *buf = bufaddr; 349 350 for (i = 0; i < len; i += 4, buf++) 351 swab32s(buf); 352 } 353 354 static void fec_dump(struct net_device *ndev) 355 { 356 struct fec_enet_private *fep = netdev_priv(ndev); 357 struct bufdesc *bdp; 358 struct fec_enet_priv_tx_q *txq; 359 int index = 0; 360 361 netdev_info(ndev, "TX ring dump\n"); 362 pr_info("Nr SC addr len SKB\n"); 363 364 txq = fep->tx_queue[0]; 365 bdp = txq->bd.base; 366 367 do { 368 pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n", 369 index, 370 bdp == txq->bd.cur ? 'S' : ' ', 371 bdp == txq->dirty_tx ? 'H' : ' ', 372 fec16_to_cpu(bdp->cbd_sc), 373 fec32_to_cpu(bdp->cbd_bufaddr), 374 fec16_to_cpu(bdp->cbd_datlen), 375 txq->tx_buf[index].buf_p); 376 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 377 index++; 378 } while (bdp != txq->bd.base); 379 } 380 381 /* 382 * Coldfire does not support DMA coherent allocations, and has historically used 383 * a band-aid with a manual flush in fec_enet_rx_queue. 384 */ 385 #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) 386 static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 387 gfp_t gfp) 388 { 389 return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp); 390 } 391 392 static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr, 393 dma_addr_t handle) 394 { 395 dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL); 396 } 397 #else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ 398 static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 399 gfp_t gfp) 400 { 401 return dma_alloc_coherent(dev, size, handle, gfp); 402 } 403 404 static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr, 405 dma_addr_t handle) 406 { 407 dma_free_coherent(dev, size, cpu_addr, handle); 408 } 409 #endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ 410 411 struct fec_dma_devres { 412 size_t size; 413 void *vaddr; 414 dma_addr_t dma_handle; 415 }; 416 417 static void fec_dmam_release(struct device *dev, void *res) 418 { 419 struct fec_dma_devres *this = res; 420 421 fec_dma_free(dev, this->size, this->vaddr, this->dma_handle); 422 } 423 424 static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle, 425 gfp_t gfp) 426 { 427 struct fec_dma_devres *dr; 428 void *vaddr; 429 430 dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp); 431 if (!dr) 432 return NULL; 433 vaddr = fec_dma_alloc(dev, size, handle, gfp); 434 if (!vaddr) { 435 devres_free(dr); 436 return NULL; 437 } 438 dr->vaddr = vaddr; 439 dr->dma_handle = *handle; 440 dr->size = size; 441 devres_add(dev, dr); 442 return vaddr; 443 } 444 445 static inline bool is_ipv4_pkt(struct sk_buff *skb) 446 { 447 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; 448 } 449 450 static int 451 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) 452 { 453 /* Only run for packets requiring a checksum. 
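 * The controller inserts the IP header and protocol checksums itself
 * (BD_ENET_TX_IINS/BD_ENET_TX_PINS are set in the transmit descriptor),
 * so the stack's partial checksum field has to be cleared first.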
*/ 454 if (skb->ip_summed != CHECKSUM_PARTIAL) 455 return 0; 456 457 if (unlikely(skb_cow_head(skb, 0))) 458 return -1; 459 460 if (is_ipv4_pkt(skb)) 461 ip_hdr(skb)->check = 0; 462 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; 463 464 return 0; 465 } 466 467 static int 468 fec_enet_create_page_pool(struct fec_enet_private *fep, 469 struct fec_enet_priv_rx_q *rxq, int size) 470 { 471 struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); 472 struct page_pool_params pp_params = { 473 .order = 0, 474 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 475 .pool_size = size, 476 .nid = dev_to_node(&fep->pdev->dev), 477 .dev = &fep->pdev->dev, 478 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, 479 .offset = FEC_ENET_XDP_HEADROOM, 480 .max_len = FEC_ENET_RX_FRSIZE, 481 }; 482 int err; 483 484 rxq->page_pool = page_pool_create(&pp_params); 485 if (IS_ERR(rxq->page_pool)) { 486 err = PTR_ERR(rxq->page_pool); 487 rxq->page_pool = NULL; 488 return err; 489 } 490 491 err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0); 492 if (err < 0) 493 goto err_free_pp; 494 495 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, 496 rxq->page_pool); 497 if (err) 498 goto err_unregister_rxq; 499 500 return 0; 501 502 err_unregister_rxq: 503 xdp_rxq_info_unreg(&rxq->xdp_rxq); 504 err_free_pp: 505 page_pool_destroy(rxq->page_pool); 506 rxq->page_pool = NULL; 507 return err; 508 } 509 510 static struct bufdesc * 511 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, 512 struct sk_buff *skb, 513 struct net_device *ndev) 514 { 515 struct fec_enet_private *fep = netdev_priv(ndev); 516 struct bufdesc *bdp = txq->bd.cur; 517 struct bufdesc_ex *ebdp; 518 int nr_frags = skb_shinfo(skb)->nr_frags; 519 int frag, frag_len; 520 unsigned short status; 521 unsigned int estatus = 0; 522 skb_frag_t *this_frag; 523 unsigned int index; 524 void *bufaddr; 525 dma_addr_t addr; 526 int i; 527 528 for (frag = 0; frag < nr_frags; frag++) { 529 this_frag = &skb_shinfo(skb)->frags[frag]; 530 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 531 ebdp = (struct bufdesc_ex *)bdp; 532 533 status = fec16_to_cpu(bdp->cbd_sc); 534 status &= ~BD_ENET_TX_STATS; 535 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 536 frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]); 537 538 /* Handle the last BD specially */ 539 if (frag == nr_frags - 1) { 540 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 541 if (fep->bufdesc_ex) { 542 estatus |= BD_ENET_TX_INT; 543 if (unlikely(skb_shinfo(skb)->tx_flags & 544 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) 545 estatus |= BD_ENET_TX_TS; 546 } 547 } 548 549 if (fep->bufdesc_ex) { 550 if (fep->quirks & FEC_QUIRK_HAS_AVB) 551 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 552 if (skb->ip_summed == CHECKSUM_PARTIAL) 553 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 554 555 ebdp->cbd_bdu = 0; 556 ebdp->cbd_esc = cpu_to_fec32(estatus); 557 } 558 559 bufaddr = skb_frag_address(this_frag); 560 561 index = fec_enet_get_bd_index(bdp, &txq->bd); 562 if (((unsigned long) bufaddr) & fep->tx_align || 563 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 564 memcpy(txq->tx_bounce[index], bufaddr, frag_len); 565 bufaddr = txq->tx_bounce[index]; 566 567 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 568 swap_buffer(bufaddr, frag_len); 569 } 570 571 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, 572 DMA_TO_DEVICE); 573 if (dma_mapping_error(&fep->pdev->dev, addr)) { 574 if (net_ratelimit()) 575 netdev_err(ndev, "Tx DMA memory map failed\n"); 576 goto dma_mapping_error; 577 } 578 579 
bdp->cbd_bufaddr = cpu_to_fec32(addr); 580 bdp->cbd_datlen = cpu_to_fec16(frag_len); 581 /* Make sure the updates to rest of the descriptor are 582 * performed before transferring ownership. 583 */ 584 wmb(); 585 bdp->cbd_sc = cpu_to_fec16(status); 586 } 587 588 return bdp; 589 dma_mapping_error: 590 bdp = txq->bd.cur; 591 for (i = 0; i < frag; i++) { 592 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 593 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), 594 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); 595 } 596 return ERR_PTR(-ENOMEM); 597 } 598 599 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, 600 struct sk_buff *skb, struct net_device *ndev) 601 { 602 struct fec_enet_private *fep = netdev_priv(ndev); 603 int nr_frags = skb_shinfo(skb)->nr_frags; 604 struct bufdesc *bdp, *last_bdp; 605 void *bufaddr; 606 dma_addr_t addr; 607 unsigned short status; 608 unsigned short buflen; 609 unsigned int estatus = 0; 610 unsigned int index; 611 int entries_free; 612 613 entries_free = fec_enet_get_free_txdesc_num(txq); 614 if (entries_free < MAX_SKB_FRAGS + 1) { 615 dev_kfree_skb_any(skb); 616 if (net_ratelimit()) 617 netdev_err(ndev, "NOT enough BD for SG!\n"); 618 return NETDEV_TX_OK; 619 } 620 621 /* Protocol checksum off-load for TCP and UDP. */ 622 if (fec_enet_clear_csum(skb, ndev)) { 623 dev_kfree_skb_any(skb); 624 return NETDEV_TX_OK; 625 } 626 627 /* Fill in a Tx ring entry */ 628 bdp = txq->bd.cur; 629 last_bdp = bdp; 630 status = fec16_to_cpu(bdp->cbd_sc); 631 status &= ~BD_ENET_TX_STATS; 632 633 /* Set buffer length and buffer pointer */ 634 bufaddr = skb->data; 635 buflen = skb_headlen(skb); 636 637 index = fec_enet_get_bd_index(bdp, &txq->bd); 638 if (((unsigned long) bufaddr) & fep->tx_align || 639 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 640 memcpy(txq->tx_bounce[index], skb->data, buflen); 641 bufaddr = txq->tx_bounce[index]; 642 643 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 644 swap_buffer(bufaddr, buflen); 645 } 646 647 /* Push the data cache so the CPM does not get stale memory data. 
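 * The dma_map_single() call below performs whatever cache maintenance the
 * platform requires; the CPM wording is a leftover from the MPC8xx days.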
*/ 648 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); 649 if (dma_mapping_error(&fep->pdev->dev, addr)) { 650 dev_kfree_skb_any(skb); 651 if (net_ratelimit()) 652 netdev_err(ndev, "Tx DMA memory map failed\n"); 653 return NETDEV_TX_OK; 654 } 655 656 if (nr_frags) { 657 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); 658 if (IS_ERR(last_bdp)) { 659 dma_unmap_single(&fep->pdev->dev, addr, 660 buflen, DMA_TO_DEVICE); 661 dev_kfree_skb_any(skb); 662 return NETDEV_TX_OK; 663 } 664 } else { 665 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 666 if (fep->bufdesc_ex) { 667 estatus = BD_ENET_TX_INT; 668 if (unlikely(skb_shinfo(skb)->tx_flags & 669 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) 670 estatus |= BD_ENET_TX_TS; 671 } 672 } 673 bdp->cbd_bufaddr = cpu_to_fec32(addr); 674 bdp->cbd_datlen = cpu_to_fec16(buflen); 675 676 if (fep->bufdesc_ex) { 677 678 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 679 680 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 681 fep->hwts_tx_en)) 682 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 683 684 if (fep->quirks & FEC_QUIRK_HAS_AVB) 685 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 686 687 if (skb->ip_summed == CHECKSUM_PARTIAL) 688 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 689 690 ebdp->cbd_bdu = 0; 691 ebdp->cbd_esc = cpu_to_fec32(estatus); 692 } 693 694 index = fec_enet_get_bd_index(last_bdp, &txq->bd); 695 /* Save skb pointer */ 696 txq->tx_buf[index].buf_p = skb; 697 698 /* Make sure the updates to rest of the descriptor are performed before 699 * transferring ownership. 700 */ 701 wmb(); 702 703 /* Send it on its way. Tell FEC it's ready, interrupt when done, 704 * it's the last BD of the frame, and to put the CRC on the end. 705 */ 706 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); 707 bdp->cbd_sc = cpu_to_fec16(status); 708 709 /* If this was the last BD in the ring, start at the beginning again. */ 710 bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); 711 712 skb_tx_timestamp(skb); 713 714 /* Make sure the update to bdp is performed before txq->bd.cur. 
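 * The completion path reads bd.cur without the tx lock (READ_ONCE paired
 * with an rmb), so the descriptor contents must be visible first.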
*/ 715 wmb(); 716 txq->bd.cur = bdp; 717 718 /* Trigger transmission start */ 719 if (!(fep->quirks & FEC_QUIRK_ERR007885) || 720 !readl(txq->bd.reg_desc_active) || 721 !readl(txq->bd.reg_desc_active) || 722 !readl(txq->bd.reg_desc_active) || 723 !readl(txq->bd.reg_desc_active)) 724 writel(0, txq->bd.reg_desc_active); 725 726 return 0; 727 } 728 729 static int 730 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, 731 struct net_device *ndev, 732 struct bufdesc *bdp, int index, char *data, 733 int size, bool last_tcp, bool is_last) 734 { 735 struct fec_enet_private *fep = netdev_priv(ndev); 736 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); 737 unsigned short status; 738 unsigned int estatus = 0; 739 dma_addr_t addr; 740 741 status = fec16_to_cpu(bdp->cbd_sc); 742 status &= ~BD_ENET_TX_STATS; 743 744 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 745 746 if (((unsigned long) data) & fep->tx_align || 747 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 748 memcpy(txq->tx_bounce[index], data, size); 749 data = txq->tx_bounce[index]; 750 751 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 752 swap_buffer(data, size); 753 } 754 755 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); 756 if (dma_mapping_error(&fep->pdev->dev, addr)) { 757 dev_kfree_skb_any(skb); 758 if (net_ratelimit()) 759 netdev_err(ndev, "Tx DMA memory map failed\n"); 760 return NETDEV_TX_OK; 761 } 762 763 bdp->cbd_datlen = cpu_to_fec16(size); 764 bdp->cbd_bufaddr = cpu_to_fec32(addr); 765 766 if (fep->bufdesc_ex) { 767 if (fep->quirks & FEC_QUIRK_HAS_AVB) 768 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 769 if (skb->ip_summed == CHECKSUM_PARTIAL) 770 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 771 ebdp->cbd_bdu = 0; 772 ebdp->cbd_esc = cpu_to_fec32(estatus); 773 } 774 775 /* Handle the last BD specially */ 776 if (last_tcp) 777 status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC); 778 if (is_last) { 779 status |= BD_ENET_TX_INTR; 780 if (fep->bufdesc_ex) 781 ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT); 782 } 783 784 bdp->cbd_sc = cpu_to_fec16(status); 785 786 return 0; 787 } 788 789 static int 790 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, 791 struct sk_buff *skb, struct net_device *ndev, 792 struct bufdesc *bdp, int index) 793 { 794 struct fec_enet_private *fep = netdev_priv(ndev); 795 int hdr_len = skb_tcp_all_headers(skb); 796 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); 797 void *bufaddr; 798 unsigned long dmabuf; 799 unsigned short status; 800 unsigned int estatus = 0; 801 802 status = fec16_to_cpu(bdp->cbd_sc); 803 status &= ~BD_ENET_TX_STATS; 804 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 805 806 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; 807 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; 808 if (((unsigned long)bufaddr) & fep->tx_align || 809 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 810 memcpy(txq->tx_bounce[index], skb->data, hdr_len); 811 bufaddr = txq->tx_bounce[index]; 812 813 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 814 swap_buffer(bufaddr, hdr_len); 815 816 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, 817 hdr_len, DMA_TO_DEVICE); 818 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { 819 dev_kfree_skb_any(skb); 820 if (net_ratelimit()) 821 netdev_err(ndev, "Tx DMA memory map failed\n"); 822 return NETDEV_TX_OK; 823 } 824 } 825 826 bdp->cbd_bufaddr = cpu_to_fec32(dmabuf); 827 bdp->cbd_datlen = cpu_to_fec16(hdr_len); 828 829 if (fep->bufdesc_ex) { 830 if (fep->quirks & FEC_QUIRK_HAS_AVB) 831 estatus |= 
FEC_TX_BD_FTYPE(txq->bd.qid); 832 if (skb->ip_summed == CHECKSUM_PARTIAL) 833 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 834 ebdp->cbd_bdu = 0; 835 ebdp->cbd_esc = cpu_to_fec32(estatus); 836 } 837 838 bdp->cbd_sc = cpu_to_fec16(status); 839 840 return 0; 841 } 842 843 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, 844 struct sk_buff *skb, 845 struct net_device *ndev) 846 { 847 struct fec_enet_private *fep = netdev_priv(ndev); 848 int hdr_len, total_len, data_left; 849 struct bufdesc *bdp = txq->bd.cur; 850 struct bufdesc *tmp_bdp; 851 struct bufdesc_ex *ebdp; 852 struct tso_t tso; 853 unsigned int index = 0; 854 int ret; 855 856 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) { 857 dev_kfree_skb_any(skb); 858 if (net_ratelimit()) 859 netdev_err(ndev, "NOT enough BD for TSO!\n"); 860 return NETDEV_TX_OK; 861 } 862 863 /* Protocol checksum off-load for TCP and UDP. */ 864 if (fec_enet_clear_csum(skb, ndev)) { 865 dev_kfree_skb_any(skb); 866 return NETDEV_TX_OK; 867 } 868 869 /* Initialize the TSO handler, and prepare the first payload */ 870 hdr_len = tso_start(skb, &tso); 871 872 total_len = skb->len - hdr_len; 873 while (total_len > 0) { 874 char *hdr; 875 876 index = fec_enet_get_bd_index(bdp, &txq->bd); 877 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 878 total_len -= data_left; 879 880 /* prepare packet headers: MAC + IP + TCP */ 881 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; 882 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 883 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); 884 if (ret) 885 goto err_release; 886 887 while (data_left > 0) { 888 int size; 889 890 size = min_t(int, tso.size, data_left); 891 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 892 index = fec_enet_get_bd_index(bdp, &txq->bd); 893 ret = fec_enet_txq_put_data_tso(txq, skb, ndev, 894 bdp, index, 895 tso.data, size, 896 size == data_left, 897 total_len == 0); 898 if (ret) 899 goto err_release; 900 901 data_left -= size; 902 tso_build_data(skb, &tso, size); 903 } 904 905 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 906 } 907 908 /* Save skb pointer */ 909 txq->tx_buf[index].buf_p = skb; 910 911 skb_tx_timestamp(skb); 912 txq->bd.cur = bdp; 913 914 /* Trigger transmission start */ 915 if (!(fep->quirks & FEC_QUIRK_ERR007885) || 916 !readl(txq->bd.reg_desc_active) || 917 !readl(txq->bd.reg_desc_active) || 918 !readl(txq->bd.reg_desc_active) || 919 !readl(txq->bd.reg_desc_active)) 920 writel(0, txq->bd.reg_desc_active); 921 922 return 0; 923 924 err_release: 925 /* Release all used data descriptors for TSO */ 926 tmp_bdp = txq->bd.cur; 927 928 while (tmp_bdp != bdp) { 929 /* Unmap data buffers */ 930 if (tmp_bdp->cbd_bufaddr && 931 !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr))) 932 dma_unmap_single(&fep->pdev->dev, 933 fec32_to_cpu(tmp_bdp->cbd_bufaddr), 934 fec16_to_cpu(tmp_bdp->cbd_datlen), 935 DMA_TO_DEVICE); 936 937 /* Clear standard buffer descriptor fields */ 938 tmp_bdp->cbd_sc = 0; 939 tmp_bdp->cbd_datlen = 0; 940 tmp_bdp->cbd_bufaddr = 0; 941 942 /* Handle extended descriptor if enabled */ 943 if (fep->bufdesc_ex) { 944 ebdp = (struct bufdesc_ex *)tmp_bdp; 945 ebdp->cbd_esc = 0; 946 } 947 948 tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd); 949 } 950 951 dev_kfree_skb_any(skb); 952 953 return ret; 954 } 955 956 static netdev_tx_t 957 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 958 { 959 struct fec_enet_private *fep = netdev_priv(ndev); 960 int entries_free; 961 unsigned short queue; 962 struct 
fec_enet_priv_tx_q *txq; 963 struct netdev_queue *nq; 964 int ret; 965 966 queue = skb_get_queue_mapping(skb); 967 txq = fep->tx_queue[queue]; 968 nq = netdev_get_tx_queue(ndev, queue); 969 970 if (skb_is_gso(skb)) 971 ret = fec_enet_txq_submit_tso(txq, skb, ndev); 972 else 973 ret = fec_enet_txq_submit_skb(txq, skb, ndev); 974 if (ret) 975 return ret; 976 977 entries_free = fec_enet_get_free_txdesc_num(txq); 978 if (entries_free <= txq->tx_stop_threshold) 979 netif_tx_stop_queue(nq); 980 981 return NETDEV_TX_OK; 982 } 983 984 /* Init RX & TX buffer descriptors 985 */ 986 static void fec_enet_bd_init(struct net_device *dev) 987 { 988 struct fec_enet_private *fep = netdev_priv(dev); 989 struct fec_enet_priv_tx_q *txq; 990 struct fec_enet_priv_rx_q *rxq; 991 struct bufdesc *bdp; 992 unsigned int i; 993 unsigned int q; 994 995 for (q = 0; q < fep->num_rx_queues; q++) { 996 /* Initialize the receive buffer descriptors. */ 997 rxq = fep->rx_queue[q]; 998 bdp = rxq->bd.base; 999 1000 for (i = 0; i < rxq->bd.ring_size; i++) { 1001 1002 /* Initialize the BD for every fragment in the page. */ 1003 if (bdp->cbd_bufaddr) 1004 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); 1005 else 1006 bdp->cbd_sc = cpu_to_fec16(0); 1007 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 1008 } 1009 1010 /* Set the last buffer to wrap */ 1011 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); 1012 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 1013 1014 rxq->bd.cur = rxq->bd.base; 1015 } 1016 1017 for (q = 0; q < fep->num_tx_queues; q++) { 1018 /* ...and the same for transmit */ 1019 txq = fep->tx_queue[q]; 1020 bdp = txq->bd.base; 1021 txq->bd.cur = bdp; 1022 1023 for (i = 0; i < txq->bd.ring_size; i++) { 1024 /* Initialize the BD for every fragment in the page. */ 1025 bdp->cbd_sc = cpu_to_fec16(0); 1026 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { 1027 if (bdp->cbd_bufaddr && 1028 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) 1029 dma_unmap_single(&fep->pdev->dev, 1030 fec32_to_cpu(bdp->cbd_bufaddr), 1031 fec16_to_cpu(bdp->cbd_datlen), 1032 DMA_TO_DEVICE); 1033 if (txq->tx_buf[i].buf_p) 1034 dev_kfree_skb_any(txq->tx_buf[i].buf_p); 1035 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) { 1036 if (bdp->cbd_bufaddr) 1037 dma_unmap_single(&fep->pdev->dev, 1038 fec32_to_cpu(bdp->cbd_bufaddr), 1039 fec16_to_cpu(bdp->cbd_datlen), 1040 DMA_TO_DEVICE); 1041 1042 if (txq->tx_buf[i].buf_p) 1043 xdp_return_frame(txq->tx_buf[i].buf_p); 1044 } else { 1045 struct page *page = txq->tx_buf[i].buf_p; 1046 1047 if (page) 1048 page_pool_put_page(page->pp, page, 0, false); 1049 } 1050 1051 txq->tx_buf[i].buf_p = NULL; 1052 /* restore default tx buffer type: FEC_TXBUF_T_SKB */ 1053 txq->tx_buf[i].type = FEC_TXBUF_T_SKB; 1054 bdp->cbd_bufaddr = cpu_to_fec32(0); 1055 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1056 } 1057 1058 /* Set the last buffer to wrap */ 1059 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); 1060 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 1061 txq->dirty_tx = bdp; 1062 } 1063 } 1064 1065 static void fec_enet_active_rxring(struct net_device *ndev) 1066 { 1067 struct fec_enet_private *fep = netdev_priv(ndev); 1068 int i; 1069 1070 for (i = 0; i < fep->num_rx_queues; i++) 1071 writel(0, fep->rx_queue[i]->bd.reg_desc_active); 1072 } 1073 1074 static void fec_enet_enable_ring(struct net_device *ndev) 1075 { 1076 struct fec_enet_private *fep = netdev_priv(ndev); 1077 struct fec_enet_priv_tx_q *txq; 1078 struct fec_enet_priv_rx_q *rxq; 1079 int i; 1080 1081 for (i = 0; i < fep->num_rx_queues; i++) { 1082 rxq = fep->rx_queue[i]; 1083 
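		/* point the controller at this ring's descriptor base
		 * and program the receive buffer size
		 */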
writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); 1084 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); 1085 1086 /* enable DMA1/2 */ 1087 if (i) 1088 writel(RCMR_MATCHEN | RCMR_CMP(i), 1089 fep->hwp + FEC_RCMR(i)); 1090 } 1091 1092 for (i = 0; i < fep->num_tx_queues; i++) { 1093 txq = fep->tx_queue[i]; 1094 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); 1095 1096 /* enable DMA1/2 */ 1097 if (i) 1098 writel(DMA_CLASS_EN | IDLE_SLOPE(i), 1099 fep->hwp + FEC_DMA_CFG(i)); 1100 } 1101 } 1102 1103 /* Whack a reset. We should wait for this. 1104 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC 1105 * instead of reset MAC itself. 1106 */ 1107 static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol) 1108 { 1109 u32 val; 1110 1111 if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1112 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || 1113 ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { 1114 writel(0, fep->hwp + FEC_ECNTRL); 1115 } else { 1116 writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL); 1117 udelay(10); 1118 } 1119 } else { 1120 val = readl(fep->hwp + FEC_ECNTRL); 1121 val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 1122 writel(val, fep->hwp + FEC_ECNTRL); 1123 } 1124 } 1125 1126 static void fec_set_hw_mac_addr(struct net_device *ndev) 1127 { 1128 struct fec_enet_private *fep = netdev_priv(ndev); 1129 1130 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | 1131 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), 1132 fep->hwp + FEC_ADDR_LOW); 1133 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), 1134 fep->hwp + FEC_ADDR_HIGH); 1135 } 1136 1137 /* 1138 * This function is called to start or restart the FEC during a link 1139 * change, transmit timeout, or to reconfigure the FEC. The network 1140 * packet processing for this device must be stopped before this call. 1141 */ 1142 static void 1143 fec_restart(struct net_device *ndev) 1144 { 1145 struct fec_enet_private *fep = netdev_priv(ndev); 1146 u32 rcntl = OPT_FRAME_SIZE | FEC_RCR_MII; 1147 u32 ecntl = FEC_ECR_ETHEREN; 1148 1149 if (fep->bufdesc_ex) 1150 fec_ptp_save_state(fep); 1151 1152 fec_ctrl_reset(fep, false); 1153 1154 /* 1155 * enet-mac reset will reset mac address registers too, 1156 * so need to reconfigure it. 1157 */ 1158 fec_set_hw_mac_addr(ndev); 1159 1160 /* Clear any outstanding interrupt, except MDIO. */ 1161 writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT); 1162 1163 fec_enet_bd_init(ndev); 1164 1165 fec_enet_enable_ring(ndev); 1166 1167 /* Enable MII mode */ 1168 if (fep->full_duplex == DUPLEX_FULL) { 1169 /* FD enable */ 1170 writel(0x04, fep->hwp + FEC_X_CNTRL); 1171 } else { 1172 /* No Rcv on Xmit */ 1173 rcntl |= FEC_RCR_DRT; 1174 writel(0x0, fep->hwp + FEC_X_CNTRL); 1175 } 1176 1177 /* Set MII speed */ 1178 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1179 1180 #if !defined(CONFIG_M5272) 1181 if (fep->quirks & FEC_QUIRK_HAS_RACC) { 1182 u32 val = readl(fep->hwp + FEC_RACC); 1183 1184 /* align IP header */ 1185 val |= FEC_RACC_SHIFT16; 1186 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) 1187 /* set RX checksum */ 1188 val |= FEC_RACC_OPTIONS; 1189 else 1190 val &= ~FEC_RACC_OPTIONS; 1191 writel(val, fep->hwp + FEC_RACC); 1192 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); 1193 } 1194 #endif 1195 1196 /* 1197 * The phy interface and speed need to get configured 1198 * differently on enet-mac. 
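 * ENET-MAC (i.MX) variants select RGMII/RMII and 10/100/1000 operation
 * via the RCR/ECR bits below; older FEC cores go through the MII gasket.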
1199 */ 1200 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 1201 /* Enable flow control and length check */ 1202 rcntl |= FEC_RCR_NLC | FEC_RCR_FLOWCTL; 1203 1204 /* RGMII, RMII or MII */ 1205 if (phy_interface_mode_is_rgmii(fep->phy_interface)) 1206 rcntl |= FEC_RCR_RGMII; 1207 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 1208 rcntl |= FEC_RCR_RMII; 1209 else 1210 rcntl &= ~FEC_RCR_RMII; 1211 1212 /* 1G, 100M or 10M */ 1213 if (ndev->phydev) { 1214 if (ndev->phydev->speed == SPEED_1000) 1215 ecntl |= FEC_ECR_SPEED; 1216 else if (ndev->phydev->speed == SPEED_100) 1217 rcntl &= ~FEC_RCR_10BASET; 1218 else 1219 rcntl |= FEC_RCR_10BASET; 1220 } 1221 } else { 1222 #ifdef FEC_MIIGSK_ENR 1223 if (fep->quirks & FEC_QUIRK_USE_GASKET) { 1224 u32 cfgr; 1225 /* disable the gasket and wait */ 1226 writel(0, fep->hwp + FEC_MIIGSK_ENR); 1227 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 1228 udelay(1); 1229 1230 /* 1231 * configure the gasket: 1232 * RMII, 50 MHz, no loopback, no echo 1233 * MII, 25 MHz, no loopback, no echo 1234 */ 1235 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 1236 ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; 1237 if (ndev->phydev && ndev->phydev->speed == SPEED_10) 1238 cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; 1239 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); 1240 1241 /* re-enable the gasket */ 1242 writel(2, fep->hwp + FEC_MIIGSK_ENR); 1243 } 1244 #endif 1245 } 1246 1247 #if !defined(CONFIG_M5272) 1248 /* enable pause frame*/ 1249 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || 1250 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && 1251 ndev->phydev && ndev->phydev->pause)) { 1252 rcntl |= FEC_RCR_FLOWCTL; 1253 1254 /* set FIFO threshold parameter to reduce overrun */ 1255 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); 1256 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); 1257 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); 1258 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); 1259 1260 /* OPD */ 1261 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); 1262 } else { 1263 rcntl &= ~FEC_RCR_FLOWCTL; 1264 } 1265 #endif /* !defined(CONFIG_M5272) */ 1266 1267 writel(rcntl, fep->hwp + FEC_R_CNTRL); 1268 1269 /* Setup multicast filter. 
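 * set_multicast_list() programs the group (multicast) hash; the
 * individual-address hash registers are cleared just below.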
*/ 1270 set_multicast_list(ndev); 1271 #ifndef CONFIG_M5272 1272 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); 1273 writel(0, fep->hwp + FEC_HASH_TABLE_LOW); 1274 #endif 1275 1276 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 1277 /* enable ENET endian swap */ 1278 ecntl |= FEC_ECR_BYTESWP; 1279 /* enable ENET store and forward mode */ 1280 writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK); 1281 } 1282 1283 if (fep->bufdesc_ex) 1284 ecntl |= FEC_ECR_EN1588; 1285 1286 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && 1287 fep->rgmii_txc_dly) 1288 ecntl |= FEC_ENET_TXC_DLY; 1289 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && 1290 fep->rgmii_rxc_dly) 1291 ecntl |= FEC_ENET_RXC_DLY; 1292 1293 #ifndef CONFIG_M5272 1294 /* Enable the MIB statistic event counters */ 1295 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); 1296 #endif 1297 1298 /* And last, enable the transmit and receive processing */ 1299 writel(ecntl, fep->hwp + FEC_ECNTRL); 1300 fec_enet_active_rxring(ndev); 1301 1302 if (fep->bufdesc_ex) { 1303 fec_ptp_start_cyclecounter(ndev); 1304 fec_ptp_restore_state(fep); 1305 } 1306 1307 /* Enable interrupts we wish to service */ 1308 if (fep->link) 1309 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1310 else 1311 writel(0, fep->hwp + FEC_IMASK); 1312 1313 /* Init the interrupt coalescing */ 1314 if (fep->quirks & FEC_QUIRK_HAS_COALESCE) 1315 fec_enet_itr_coal_set(ndev); 1316 } 1317 1318 static int fec_enet_ipc_handle_init(struct fec_enet_private *fep) 1319 { 1320 if (!(of_machine_is_compatible("fsl,imx8qm") || 1321 of_machine_is_compatible("fsl,imx8qxp") || 1322 of_machine_is_compatible("fsl,imx8dxl"))) 1323 return 0; 1324 1325 return imx_scu_get_handle(&fep->ipc_handle); 1326 } 1327 1328 static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled) 1329 { 1330 struct device_node *np = fep->pdev->dev.of_node; 1331 u32 rsrc_id, val; 1332 int idx; 1333 1334 if (!np || !fep->ipc_handle) 1335 return; 1336 1337 idx = of_alias_get_id(np, "ethernet"); 1338 if (idx < 0) 1339 idx = 0; 1340 rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0; 1341 1342 val = enabled ? 1 : 0; 1343 imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val); 1344 } 1345 1346 static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) 1347 { 1348 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 1349 struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr; 1350 1351 if (stop_gpr->gpr) { 1352 if (enabled) 1353 regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, 1354 BIT(stop_gpr->bit), 1355 BIT(stop_gpr->bit)); 1356 else 1357 regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, 1358 BIT(stop_gpr->bit), 0); 1359 } else if (pdata && pdata->sleep_mode_enable) { 1360 pdata->sleep_mode_enable(enabled); 1361 } else { 1362 fec_enet_ipg_stop_set(fep, enabled); 1363 } 1364 } 1365 1366 static void fec_irqs_disable(struct net_device *ndev) 1367 { 1368 struct fec_enet_private *fep = netdev_priv(ndev); 1369 1370 writel(0, fep->hwp + FEC_IMASK); 1371 } 1372 1373 static void fec_irqs_disable_except_wakeup(struct net_device *ndev) 1374 { 1375 struct fec_enet_private *fep = netdev_priv(ndev); 1376 1377 writel(0, fep->hwp + FEC_IMASK); 1378 writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); 1379 } 1380 1381 static void 1382 fec_stop(struct net_device *ndev) 1383 { 1384 struct fec_enet_private *fep = netdev_priv(ndev); 1385 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII; 1386 u32 val; 1387 1388 /* We cannot expect a graceful transmit stop without link !!! 
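 * The GTS/GRA handshake polled for below only completes while the
 * transmitter is actually being clocked, hence the link check.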
*/ 1389 if (fep->link) { 1390 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ 1391 udelay(10); 1392 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) 1393 netdev_err(ndev, "Graceful transmit stop did not complete!\n"); 1394 } 1395 1396 if (fep->bufdesc_ex) 1397 fec_ptp_save_state(fep); 1398 1399 fec_ctrl_reset(fep, true); 1400 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1401 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1402 1403 /* We have to keep ENET enabled to have MII interrupt stay working */ 1404 if (fep->quirks & FEC_QUIRK_ENET_MAC && 1405 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1406 writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL); 1407 writel(rmii_mode, fep->hwp + FEC_R_CNTRL); 1408 } 1409 1410 if (fep->bufdesc_ex) { 1411 val = readl(fep->hwp + FEC_ECNTRL); 1412 val |= FEC_ECR_EN1588; 1413 writel(val, fep->hwp + FEC_ECNTRL); 1414 1415 fec_ptp_start_cyclecounter(ndev); 1416 fec_ptp_restore_state(fep); 1417 } 1418 } 1419 1420 static void 1421 fec_timeout(struct net_device *ndev, unsigned int txqueue) 1422 { 1423 struct fec_enet_private *fep = netdev_priv(ndev); 1424 1425 fec_dump(ndev); 1426 1427 ndev->stats.tx_errors++; 1428 1429 schedule_work(&fep->tx_timeout_work); 1430 } 1431 1432 static void fec_enet_timeout_work(struct work_struct *work) 1433 { 1434 struct fec_enet_private *fep = 1435 container_of(work, struct fec_enet_private, tx_timeout_work); 1436 struct net_device *ndev = fep->netdev; 1437 1438 rtnl_lock(); 1439 if (netif_device_present(ndev) || netif_running(ndev)) { 1440 napi_disable(&fep->napi); 1441 netif_tx_lock_bh(ndev); 1442 fec_restart(ndev); 1443 netif_tx_wake_all_queues(ndev); 1444 netif_tx_unlock_bh(ndev); 1445 napi_enable(&fep->napi); 1446 } 1447 rtnl_unlock(); 1448 } 1449 1450 static void 1451 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, 1452 struct skb_shared_hwtstamps *hwtstamps) 1453 { 1454 unsigned long flags; 1455 u64 ns; 1456 1457 spin_lock_irqsave(&fep->tmreg_lock, flags); 1458 ns = timecounter_cyc2time(&fep->tc, ts); 1459 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 1460 1461 memset(hwtstamps, 0, sizeof(*hwtstamps)); 1462 hwtstamps->hwtstamp = ns_to_ktime(ns); 1463 } 1464 1465 static void 1466 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget) 1467 { 1468 struct fec_enet_private *fep; 1469 struct xdp_frame *xdpf; 1470 struct bufdesc *bdp; 1471 unsigned short status; 1472 struct sk_buff *skb; 1473 struct fec_enet_priv_tx_q *txq; 1474 struct netdev_queue *nq; 1475 int index = 0; 1476 int entries_free; 1477 struct page *page; 1478 int frame_len; 1479 1480 fep = netdev_priv(ndev); 1481 1482 txq = fep->tx_queue[queue_id]; 1483 /* get next bdp of dirty_tx */ 1484 nq = netdev_get_tx_queue(ndev, queue_id); 1485 bdp = txq->dirty_tx; 1486 1487 /* get next bdp of dirty_tx */ 1488 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1489 1490 while (bdp != READ_ONCE(txq->bd.cur)) { 1491 /* Order the load of bd.cur and cbd_sc */ 1492 rmb(); 1493 status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); 1494 if (status & BD_ENET_TX_READY) 1495 break; 1496 1497 index = fec_enet_get_bd_index(bdp, &txq->bd); 1498 1499 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { 1500 skb = txq->tx_buf[index].buf_p; 1501 if (bdp->cbd_bufaddr && 1502 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) 1503 dma_unmap_single(&fep->pdev->dev, 1504 fec32_to_cpu(bdp->cbd_bufaddr), 1505 fec16_to_cpu(bdp->cbd_datlen), 1506 DMA_TO_DEVICE); 1507 bdp->cbd_bufaddr = cpu_to_fec32(0); 1508 if (!skb) 1509 goto tx_buf_done; 1510 } else { 1511 /* Tx 
processing cannot call any XDP (or page pool) APIs if 1512 * the "budget" is 0. Because NAPI is called with budget of 1513 * 0 (such as netpoll) indicates we may be in an IRQ context, 1514 * however, we can't use the page pool from IRQ context. 1515 */ 1516 if (unlikely(!budget)) 1517 break; 1518 1519 if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) { 1520 xdpf = txq->tx_buf[index].buf_p; 1521 if (bdp->cbd_bufaddr) 1522 dma_unmap_single(&fep->pdev->dev, 1523 fec32_to_cpu(bdp->cbd_bufaddr), 1524 fec16_to_cpu(bdp->cbd_datlen), 1525 DMA_TO_DEVICE); 1526 } else { 1527 page = txq->tx_buf[index].buf_p; 1528 } 1529 1530 bdp->cbd_bufaddr = cpu_to_fec32(0); 1531 if (unlikely(!txq->tx_buf[index].buf_p)) { 1532 txq->tx_buf[index].type = FEC_TXBUF_T_SKB; 1533 goto tx_buf_done; 1534 } 1535 1536 frame_len = fec16_to_cpu(bdp->cbd_datlen); 1537 } 1538 1539 /* Check for errors. */ 1540 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1541 BD_ENET_TX_RL | BD_ENET_TX_UN | 1542 BD_ENET_TX_CSL)) { 1543 ndev->stats.tx_errors++; 1544 if (status & BD_ENET_TX_HB) /* No heartbeat */ 1545 ndev->stats.tx_heartbeat_errors++; 1546 if (status & BD_ENET_TX_LC) /* Late collision */ 1547 ndev->stats.tx_window_errors++; 1548 if (status & BD_ENET_TX_RL) /* Retrans limit */ 1549 ndev->stats.tx_aborted_errors++; 1550 if (status & BD_ENET_TX_UN) /* Underrun */ 1551 ndev->stats.tx_fifo_errors++; 1552 if (status & BD_ENET_TX_CSL) /* Carrier lost */ 1553 ndev->stats.tx_carrier_errors++; 1554 } else { 1555 ndev->stats.tx_packets++; 1556 1557 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) 1558 ndev->stats.tx_bytes += skb->len; 1559 else 1560 ndev->stats.tx_bytes += frame_len; 1561 } 1562 1563 /* Deferred means some collisions occurred during transmit, 1564 * but we eventually sent the packet OK. 1565 */ 1566 if (status & BD_ENET_TX_DEF) 1567 ndev->stats.collisions++; 1568 1569 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { 1570 /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who 1571 * are to time stamp the packet, so we still need to check time 1572 * stamping enabled flag. 
1573 */ 1574 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS && 1575 fep->hwts_tx_en) && fep->bufdesc_ex) { 1576 struct skb_shared_hwtstamps shhwtstamps; 1577 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1578 1579 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); 1580 skb_tstamp_tx(skb, &shhwtstamps); 1581 } 1582 1583 /* Free the sk buffer associated with this last transmit */ 1584 napi_consume_skb(skb, budget); 1585 } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) { 1586 xdp_return_frame_rx_napi(xdpf); 1587 } else { /* recycle pages of XDP_TX frames */ 1588 /* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */ 1589 page_pool_put_page(page->pp, page, 0, true); 1590 } 1591 1592 txq->tx_buf[index].buf_p = NULL; 1593 /* restore default tx buffer type: FEC_TXBUF_T_SKB */ 1594 txq->tx_buf[index].type = FEC_TXBUF_T_SKB; 1595 1596 tx_buf_done: 1597 /* Make sure the update to bdp and tx_buf are performed 1598 * before dirty_tx 1599 */ 1600 wmb(); 1601 txq->dirty_tx = bdp; 1602 1603 /* Update pointer to next buffer descriptor to be transmitted */ 1604 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1605 1606 /* Since we have freed up a buffer, the ring is no longer full 1607 */ 1608 if (netif_tx_queue_stopped(nq)) { 1609 entries_free = fec_enet_get_free_txdesc_num(txq); 1610 if (entries_free >= txq->tx_wake_threshold) 1611 netif_tx_wake_queue(nq); 1612 } 1613 } 1614 1615 /* ERR006358: Keep the transmitter going */ 1616 if (bdp != txq->bd.cur && 1617 readl(txq->bd.reg_desc_active) == 0) 1618 writel(0, txq->bd.reg_desc_active); 1619 } 1620 1621 static void fec_enet_tx(struct net_device *ndev, int budget) 1622 { 1623 struct fec_enet_private *fep = netdev_priv(ndev); 1624 int i; 1625 1626 /* Make sure that AVB queues are processed first. 
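 * Queue 0 carries best-effort traffic; the higher-numbered AVB class
 * queues are cleaned first by walking the queues in reverse.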
*/ 1627 for (i = fep->num_tx_queues - 1; i >= 0; i--) 1628 fec_enet_tx_queue(ndev, i, budget); 1629 } 1630 1631 static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, 1632 struct bufdesc *bdp, int index) 1633 { 1634 struct page *new_page; 1635 dma_addr_t phys_addr; 1636 1637 new_page = page_pool_dev_alloc_pages(rxq->page_pool); 1638 if (unlikely(!new_page)) 1639 return -ENOMEM; 1640 1641 rxq->rx_skb_info[index].page = new_page; 1642 rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM; 1643 phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM; 1644 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); 1645 1646 return 0; 1647 } 1648 1649 static u32 1650 fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog, 1651 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu) 1652 { 1653 unsigned int sync, len = xdp->data_end - xdp->data; 1654 u32 ret = FEC_ENET_XDP_PASS; 1655 struct page *page; 1656 int err; 1657 u32 act; 1658 1659 act = bpf_prog_run_xdp(prog, xdp); 1660 1661 /* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover 1662 * max len CPU touch 1663 */ 1664 sync = xdp->data_end - xdp->data; 1665 sync = max(sync, len); 1666 1667 switch (act) { 1668 case XDP_PASS: 1669 rxq->stats[RX_XDP_PASS]++; 1670 ret = FEC_ENET_XDP_PASS; 1671 break; 1672 1673 case XDP_REDIRECT: 1674 rxq->stats[RX_XDP_REDIRECT]++; 1675 err = xdp_do_redirect(fep->netdev, xdp, prog); 1676 if (unlikely(err)) 1677 goto xdp_err; 1678 1679 ret = FEC_ENET_XDP_REDIR; 1680 break; 1681 1682 case XDP_TX: 1683 rxq->stats[RX_XDP_TX]++; 1684 err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync); 1685 if (unlikely(err)) { 1686 rxq->stats[RX_XDP_TX_ERRORS]++; 1687 goto xdp_err; 1688 } 1689 1690 ret = FEC_ENET_XDP_TX; 1691 break; 1692 1693 default: 1694 bpf_warn_invalid_xdp_action(fep->netdev, prog, act); 1695 fallthrough; 1696 1697 case XDP_ABORTED: 1698 fallthrough; /* handle aborts by dropping packet */ 1699 1700 case XDP_DROP: 1701 rxq->stats[RX_XDP_DROP]++; 1702 xdp_err: 1703 ret = FEC_ENET_XDP_CONSUMED; 1704 page = virt_to_head_page(xdp->data); 1705 page_pool_put_page(rxq->page_pool, page, sync, true); 1706 if (act != XDP_DROP) 1707 trace_xdp_exception(fep->netdev, prog, act); 1708 break; 1709 } 1710 1711 return ret; 1712 } 1713 1714 static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb) 1715 { 1716 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { 1717 const struct vlan_ethhdr *vlan_header = skb_vlan_eth_hdr(skb); 1718 const u16 vlan_tag = ntohs(vlan_header->h_vlan_TCI); 1719 1720 /* Push and remove the vlan tag */ 1721 1722 memmove(skb->data + VLAN_HLEN, skb->data, ETH_ALEN * 2); 1723 skb_pull(skb, VLAN_HLEN); 1724 __vlan_hwaccel_put_tag(skb, 1725 htons(ETH_P_8021Q), 1726 vlan_tag); 1727 } 1728 } 1729 1730 /* During a receive, the bd_rx.cur points to the current incoming buffer. 1731 * When we update through the ring, if the next incoming buffer has 1732 * not been given to the system, we just set the empty indicator, 1733 * effectively tossing the packet. 
1734 */ 1735 static int 1736 fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget) 1737 { 1738 struct fec_enet_private *fep = netdev_priv(ndev); 1739 struct fec_enet_priv_rx_q *rxq; 1740 struct bufdesc *bdp; 1741 unsigned short status; 1742 struct sk_buff *skb; 1743 ushort pkt_len; 1744 int pkt_received = 0; 1745 struct bufdesc_ex *ebdp = NULL; 1746 int index = 0; 1747 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; 1748 struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); 1749 u32 ret, xdp_result = FEC_ENET_XDP_PASS; 1750 u32 data_start = FEC_ENET_XDP_HEADROOM; 1751 int cpu = smp_processor_id(); 1752 struct xdp_buff xdp; 1753 struct page *page; 1754 __fec32 cbd_bufaddr; 1755 u32 sub_len = 4; 1756 1757 #if !defined(CONFIG_M5272) 1758 /*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of 1759 * FEC_RACC_SHIFT16 is set by default in the probe function. 1760 */ 1761 if (fep->quirks & FEC_QUIRK_HAS_RACC) { 1762 data_start += 2; 1763 sub_len += 2; 1764 } 1765 #endif 1766 1767 #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) 1768 /* 1769 * Hacky flush of all caches instead of using the DMA API for the TSO 1770 * headers. 1771 */ 1772 flush_cache_all(); 1773 #endif 1774 rxq = fep->rx_queue[queue_id]; 1775 1776 /* First, grab all of the stats for the incoming packet. 1777 * These get messed up if we get called due to a busy condition. 1778 */ 1779 bdp = rxq->bd.cur; 1780 xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq); 1781 1782 while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { 1783 1784 if (pkt_received >= budget) 1785 break; 1786 pkt_received++; 1787 1788 writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT); 1789 1790 /* Check for errors. */ 1791 status ^= BD_ENET_RX_LAST; 1792 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | 1793 BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST | 1794 BD_ENET_RX_CL)) { 1795 ndev->stats.rx_errors++; 1796 if (status & BD_ENET_RX_OV) { 1797 /* FIFO overrun */ 1798 ndev->stats.rx_fifo_errors++; 1799 goto rx_processing_done; 1800 } 1801 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH 1802 | BD_ENET_RX_LAST)) { 1803 /* Frame too long or too short. */ 1804 ndev->stats.rx_length_errors++; 1805 if (status & BD_ENET_RX_LAST) 1806 netdev_err(ndev, "rcv is not +last\n"); 1807 } 1808 if (status & BD_ENET_RX_CR) /* CRC Error */ 1809 ndev->stats.rx_crc_errors++; 1810 /* Report late collisions as a frame error. */ 1811 if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL)) 1812 ndev->stats.rx_frame_errors++; 1813 goto rx_processing_done; 1814 } 1815 1816 /* Process the incoming frame. 
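 * Bump the counters, swap a fresh page into the ring, run the buffer
 * through XDP if a program is attached, then build and pass up an skb.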
*/ 1817 ndev->stats.rx_packets++; 1818 pkt_len = fec16_to_cpu(bdp->cbd_datlen); 1819 ndev->stats.rx_bytes += pkt_len; 1820 1821 index = fec_enet_get_bd_index(bdp, &rxq->bd); 1822 page = rxq->rx_skb_info[index].page; 1823 cbd_bufaddr = bdp->cbd_bufaddr; 1824 if (fec_enet_update_cbd(rxq, bdp, index)) { 1825 ndev->stats.rx_dropped++; 1826 goto rx_processing_done; 1827 } 1828 1829 dma_sync_single_for_cpu(&fep->pdev->dev, 1830 fec32_to_cpu(cbd_bufaddr), 1831 pkt_len, 1832 DMA_FROM_DEVICE); 1833 prefetch(page_address(page)); 1834 1835 if (xdp_prog) { 1836 xdp_buff_clear_frags_flag(&xdp); 1837 /* subtract 16bit shift and FCS */ 1838 xdp_prepare_buff(&xdp, page_address(page), 1839 data_start, pkt_len - sub_len, false); 1840 ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu); 1841 xdp_result |= ret; 1842 if (ret != FEC_ENET_XDP_PASS) 1843 goto rx_processing_done; 1844 } 1845 1846 /* The packet length includes FCS, but we don't want to 1847 * include that when passing upstream as it messes up 1848 * bridging applications. 1849 */ 1850 skb = build_skb(page_address(page), PAGE_SIZE); 1851 if (unlikely(!skb)) { 1852 page_pool_recycle_direct(rxq->page_pool, page); 1853 ndev->stats.rx_dropped++; 1854 1855 netdev_err_once(ndev, "build_skb failed!\n"); 1856 goto rx_processing_done; 1857 } 1858 1859 skb_reserve(skb, data_start); 1860 skb_put(skb, pkt_len - sub_len); 1861 skb_mark_for_recycle(skb); 1862 1863 if (unlikely(need_swap)) { 1864 u8 *data; 1865 1866 data = page_address(page) + FEC_ENET_XDP_HEADROOM; 1867 swap_buffer(data, pkt_len); 1868 } 1869 1870 /* Extract the enhanced buffer descriptor */ 1871 ebdp = NULL; 1872 if (fep->bufdesc_ex) 1873 ebdp = (struct bufdesc_ex *)bdp; 1874 1875 /* If this is a VLAN packet remove the VLAN Tag */ 1876 if (fep->bufdesc_ex && 1877 (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) 1878 fec_enet_rx_vlan(ndev, skb); 1879 1880 skb->protocol = eth_type_trans(skb, ndev); 1881 1882 /* Get receive timestamp from the skb */ 1883 if (fep->hwts_rx_en && fep->bufdesc_ex) 1884 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), 1885 skb_hwtstamps(skb)); 1886 1887 if (fep->bufdesc_ex && 1888 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { 1889 if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) { 1890 /* don't check it */ 1891 skb->ip_summed = CHECKSUM_UNNECESSARY; 1892 } else { 1893 skb_checksum_none_assert(skb); 1894 } 1895 } 1896 1897 skb_record_rx_queue(skb, queue_id); 1898 napi_gro_receive(&fep->napi, skb); 1899 1900 rx_processing_done: 1901 /* Clear the status flags for this buffer */ 1902 status &= ~BD_ENET_RX_STATS; 1903 1904 /* Mark the buffer empty */ 1905 status |= BD_ENET_RX_EMPTY; 1906 1907 if (fep->bufdesc_ex) { 1908 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1909 1910 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 1911 ebdp->cbd_prot = 0; 1912 ebdp->cbd_bdu = 0; 1913 } 1914 /* Make sure the updates to rest of the descriptor are 1915 * performed before transferring ownership. 1916 */ 1917 wmb(); 1918 bdp->cbd_sc = cpu_to_fec16(status); 1919 1920 /* Update BD pointer to next entry */ 1921 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 1922 1923 /* Doing this here will keep the FEC running while we process 1924 * incoming frames. On a heavily loaded network, we should be 1925 * able to keep up at the expense of system resources. 
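 * The write below to the RX "descriptor active" register re-arms the
 * ring after each processed frame.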
1926 */ 1927 writel(0, rxq->bd.reg_desc_active); 1928 } 1929 rxq->bd.cur = bdp; 1930 1931 if (xdp_result & FEC_ENET_XDP_REDIR) 1932 xdp_do_flush(); 1933 1934 return pkt_received; 1935 } 1936 1937 static int fec_enet_rx(struct net_device *ndev, int budget) 1938 { 1939 struct fec_enet_private *fep = netdev_priv(ndev); 1940 int i, done = 0; 1941 1942 /* Make sure that AVB queues are processed first. */ 1943 for (i = fep->num_rx_queues - 1; i >= 0; i--) 1944 done += fec_enet_rx_queue(ndev, i, budget - done); 1945 1946 return done; 1947 } 1948 1949 static bool fec_enet_collect_events(struct fec_enet_private *fep) 1950 { 1951 uint int_events; 1952 1953 int_events = readl(fep->hwp + FEC_IEVENT); 1954 1955 /* Don't clear MDIO events, we poll for those */ 1956 int_events &= ~FEC_ENET_MII; 1957 1958 writel(int_events, fep->hwp + FEC_IEVENT); 1959 1960 return int_events != 0; 1961 } 1962 1963 static irqreturn_t 1964 fec_enet_interrupt(int irq, void *dev_id) 1965 { 1966 struct net_device *ndev = dev_id; 1967 struct fec_enet_private *fep = netdev_priv(ndev); 1968 irqreturn_t ret = IRQ_NONE; 1969 1970 if (fec_enet_collect_events(fep) && fep->link) { 1971 ret = IRQ_HANDLED; 1972 1973 if (napi_schedule_prep(&fep->napi)) { 1974 /* Disable interrupts */ 1975 writel(0, fep->hwp + FEC_IMASK); 1976 __napi_schedule(&fep->napi); 1977 } 1978 } 1979 1980 return ret; 1981 } 1982 1983 static int fec_enet_rx_napi(struct napi_struct *napi, int budget) 1984 { 1985 struct net_device *ndev = napi->dev; 1986 struct fec_enet_private *fep = netdev_priv(ndev); 1987 int done = 0; 1988 1989 do { 1990 done += fec_enet_rx(ndev, budget - done); 1991 fec_enet_tx(ndev, budget); 1992 } while ((done < budget) && fec_enet_collect_events(fep)); 1993 1994 if (done < budget) { 1995 napi_complete_done(napi, done); 1996 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1997 } 1998 1999 return done; 2000 } 2001 2002 /* ------------------------------------------------------------------------- */ 2003 static int fec_get_mac(struct net_device *ndev) 2004 { 2005 struct fec_enet_private *fep = netdev_priv(ndev); 2006 unsigned char *iap, tmpaddr[ETH_ALEN]; 2007 int ret; 2008 2009 /* 2010 * try to get mac address in following order: 2011 * 2012 * 1) module parameter via kernel command line in form 2013 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 2014 */ 2015 iap = macaddr; 2016 2017 /* 2018 * 2) from device tree data 2019 */ 2020 if (!is_valid_ether_addr(iap)) { 2021 struct device_node *np = fep->pdev->dev.of_node; 2022 if (np) { 2023 ret = of_get_mac_address(np, tmpaddr); 2024 if (!ret) 2025 iap = tmpaddr; 2026 else if (ret == -EPROBE_DEFER) 2027 return ret; 2028 } 2029 } 2030 2031 /* 2032 * 3) from flash or fuse (via platform data) 2033 */ 2034 if (!is_valid_ether_addr(iap)) { 2035 #ifdef CONFIG_M5272 2036 if (FEC_FLASHMAC) 2037 iap = (unsigned char *)FEC_FLASHMAC; 2038 #else 2039 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); 2040 2041 if (pdata) 2042 iap = (unsigned char *)&pdata->mac; 2043 #endif 2044 } 2045 2046 /* 2047 * 4) FEC mac registers set by bootloader 2048 */ 2049 if (!is_valid_ether_addr(iap)) { 2050 *((__be32 *) &tmpaddr[0]) = 2051 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); 2052 *((__be16 *) &tmpaddr[4]) = 2053 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 2054 iap = &tmpaddr[0]; 2055 } 2056 2057 /* 2058 * 5) random mac address 2059 */ 2060 if (!is_valid_ether_addr(iap)) { 2061 /* Report it and use a random ethernet address instead */ 2062 dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap); 
2063 eth_hw_addr_random(ndev); 2064 dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n", 2065 ndev->dev_addr); 2066 return 0; 2067 } 2068 2069 /* Adjust MAC if using macaddr */ 2070 eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0); 2071 2072 return 0; 2073 } 2074 2075 /* ------------------------------------------------------------------------- */ 2076 2077 /* 2078 * Phy section 2079 */ 2080 2081 /* LPI Sleep Ts count base on tx clk (clk_ref). 2082 * The lpi sleep cnt value = X us / (cycle_ns). 2083 */ 2084 static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us) 2085 { 2086 struct fec_enet_private *fep = netdev_priv(ndev); 2087 2088 return us * (fep->clk_ref_rate / 1000) / 1000; 2089 } 2090 2091 static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer, 2092 bool enable) 2093 { 2094 struct fec_enet_private *fep = netdev_priv(ndev); 2095 unsigned int sleep_cycle, wake_cycle; 2096 2097 if (enable) { 2098 sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer); 2099 wake_cycle = sleep_cycle; 2100 } else { 2101 sleep_cycle = 0; 2102 wake_cycle = 0; 2103 } 2104 2105 writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP); 2106 writel(wake_cycle, fep->hwp + FEC_LPI_WAKE); 2107 2108 return 0; 2109 } 2110 2111 static void fec_enet_adjust_link(struct net_device *ndev) 2112 { 2113 struct fec_enet_private *fep = netdev_priv(ndev); 2114 struct phy_device *phy_dev = ndev->phydev; 2115 int status_change = 0; 2116 2117 /* 2118 * If the netdev is down, or is going down, we're not interested 2119 * in link state events, so just mark our idea of the link as down 2120 * and ignore the event. 2121 */ 2122 if (!netif_running(ndev) || !netif_device_present(ndev)) { 2123 fep->link = 0; 2124 } else if (phy_dev->link) { 2125 if (!fep->link) { 2126 fep->link = phy_dev->link; 2127 status_change = 1; 2128 } 2129 2130 if (fep->full_duplex != phy_dev->duplex) { 2131 fep->full_duplex = phy_dev->duplex; 2132 status_change = 1; 2133 } 2134 2135 if (phy_dev->speed != fep->speed) { 2136 fep->speed = phy_dev->speed; 2137 status_change = 1; 2138 } 2139 2140 /* if any of the above changed restart the FEC */ 2141 if (status_change) { 2142 netif_stop_queue(ndev); 2143 napi_disable(&fep->napi); 2144 netif_tx_lock_bh(ndev); 2145 fec_restart(ndev); 2146 netif_tx_wake_all_queues(ndev); 2147 netif_tx_unlock_bh(ndev); 2148 napi_enable(&fep->napi); 2149 } 2150 if (fep->quirks & FEC_QUIRK_HAS_EEE) 2151 fec_enet_eee_mode_set(ndev, 2152 phy_dev->eee_cfg.tx_lpi_timer, 2153 phy_dev->enable_tx_lpi); 2154 } else { 2155 if (fep->link) { 2156 netif_stop_queue(ndev); 2157 napi_disable(&fep->napi); 2158 netif_tx_lock_bh(ndev); 2159 fec_stop(ndev); 2160 netif_tx_unlock_bh(ndev); 2161 napi_enable(&fep->napi); 2162 fep->link = phy_dev->link; 2163 status_change = 1; 2164 } 2165 } 2166 2167 if (status_change) 2168 phy_print_status(phy_dev); 2169 } 2170 2171 static int fec_enet_mdio_wait(struct fec_enet_private *fep) 2172 { 2173 uint ievent; 2174 int ret; 2175 2176 ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent, 2177 ievent & FEC_ENET_MII, 2, 30000); 2178 2179 if (!ret) 2180 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); 2181 2182 return ret; 2183 } 2184 2185 static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) 2186 { 2187 struct fec_enet_private *fep = bus->priv; 2188 struct device *dev = &fep->pdev->dev; 2189 int ret = 0, frame_start, frame_addr, frame_op; 2190 2191 ret = pm_runtime_resume_and_get(dev); 2192 if (ret < 0) 2193 return ret; 2194 2195 /* C22 read */ 2196 frame_op = 
FEC_MMFR_OP_READ; 2197 frame_start = FEC_MMFR_ST; 2198 frame_addr = regnum; 2199 2200 /* start a read op */ 2201 writel(frame_start | frame_op | 2202 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | 2203 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 2204 2205 /* wait for end of transfer */ 2206 ret = fec_enet_mdio_wait(fep); 2207 if (ret) { 2208 netdev_err(fep->netdev, "MDIO read timeout\n"); 2209 goto out; 2210 } 2211 2212 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 2213 2214 out: 2215 pm_runtime_mark_last_busy(dev); 2216 pm_runtime_put_autosuspend(dev); 2217 2218 return ret; 2219 } 2220 2221 static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id, 2222 int devad, int regnum) 2223 { 2224 struct fec_enet_private *fep = bus->priv; 2225 struct device *dev = &fep->pdev->dev; 2226 int ret = 0, frame_start, frame_op; 2227 2228 ret = pm_runtime_resume_and_get(dev); 2229 if (ret < 0) 2230 return ret; 2231 2232 frame_start = FEC_MMFR_ST_C45; 2233 2234 /* write address */ 2235 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | 2236 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2237 FEC_MMFR_TA | (regnum & 0xFFFF), 2238 fep->hwp + FEC_MII_DATA); 2239 2240 /* wait for end of transfer */ 2241 ret = fec_enet_mdio_wait(fep); 2242 if (ret) { 2243 netdev_err(fep->netdev, "MDIO address write timeout\n"); 2244 goto out; 2245 } 2246 2247 frame_op = FEC_MMFR_OP_READ_C45; 2248 2249 /* start a read op */ 2250 writel(frame_start | frame_op | 2251 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2252 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 2253 2254 /* wait for end of transfer */ 2255 ret = fec_enet_mdio_wait(fep); 2256 if (ret) { 2257 netdev_err(fep->netdev, "MDIO read timeout\n"); 2258 goto out; 2259 } 2260 2261 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 2262 2263 out: 2264 pm_runtime_mark_last_busy(dev); 2265 pm_runtime_put_autosuspend(dev); 2266 2267 return ret; 2268 } 2269 2270 static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, 2271 u16 value) 2272 { 2273 struct fec_enet_private *fep = bus->priv; 2274 struct device *dev = &fep->pdev->dev; 2275 int ret, frame_start, frame_addr; 2276 2277 ret = pm_runtime_resume_and_get(dev); 2278 if (ret < 0) 2279 return ret; 2280 2281 /* C22 write */ 2282 frame_start = FEC_MMFR_ST; 2283 frame_addr = regnum; 2284 2285 /* start a write op */ 2286 writel(frame_start | FEC_MMFR_OP_WRITE | 2287 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | 2288 FEC_MMFR_TA | FEC_MMFR_DATA(value), 2289 fep->hwp + FEC_MII_DATA); 2290 2291 /* wait for end of transfer */ 2292 ret = fec_enet_mdio_wait(fep); 2293 if (ret) 2294 netdev_err(fep->netdev, "MDIO write timeout\n"); 2295 2296 pm_runtime_mark_last_busy(dev); 2297 pm_runtime_put_autosuspend(dev); 2298 2299 return ret; 2300 } 2301 2302 static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id, 2303 int devad, int regnum, u16 value) 2304 { 2305 struct fec_enet_private *fep = bus->priv; 2306 struct device *dev = &fep->pdev->dev; 2307 int ret, frame_start; 2308 2309 ret = pm_runtime_resume_and_get(dev); 2310 if (ret < 0) 2311 return ret; 2312 2313 frame_start = FEC_MMFR_ST_C45; 2314 2315 /* write address */ 2316 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | 2317 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2318 FEC_MMFR_TA | (regnum & 0xFFFF), 2319 fep->hwp + FEC_MII_DATA); 2320 2321 /* wait for end of transfer */ 2322 ret = fec_enet_mdio_wait(fep); 2323 if (ret) { 2324 netdev_err(fep->netdev, "MDIO address write timeout\n"); 2325 goto out; 2326 } 2327 2328 /* start a write op */ 2329 writel(frame_start | 
FEC_MMFR_OP_WRITE | 2330 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2331 FEC_MMFR_TA | FEC_MMFR_DATA(value), 2332 fep->hwp + FEC_MII_DATA); 2333 2334 /* wait for end of transfer */ 2335 ret = fec_enet_mdio_wait(fep); 2336 if (ret) 2337 netdev_err(fep->netdev, "MDIO write timeout\n"); 2338 2339 out: 2340 pm_runtime_mark_last_busy(dev); 2341 pm_runtime_put_autosuspend(dev); 2342 2343 return ret; 2344 } 2345 2346 static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) 2347 { 2348 struct fec_enet_private *fep = netdev_priv(ndev); 2349 struct phy_device *phy_dev = ndev->phydev; 2350 2351 if (phy_dev) { 2352 phy_reset_after_clk_enable(phy_dev); 2353 } else if (fep->phy_node) { 2354 /* 2355 * If the PHY still is not bound to the MAC, but there is 2356 * OF PHY node and a matching PHY device instance already, 2357 * use the OF PHY node to obtain the PHY device instance, 2358 * and then use that PHY device instance when triggering 2359 * the PHY reset. 2360 */ 2361 phy_dev = of_phy_find_device(fep->phy_node); 2362 phy_reset_after_clk_enable(phy_dev); 2363 put_device(&phy_dev->mdio.dev); 2364 } 2365 } 2366 2367 static int fec_enet_clk_enable(struct net_device *ndev, bool enable) 2368 { 2369 struct fec_enet_private *fep = netdev_priv(ndev); 2370 int ret; 2371 2372 if (enable) { 2373 ret = clk_prepare_enable(fep->clk_enet_out); 2374 if (ret) 2375 return ret; 2376 2377 if (fep->clk_ptp) { 2378 mutex_lock(&fep->ptp_clk_mutex); 2379 ret = clk_prepare_enable(fep->clk_ptp); 2380 if (ret) { 2381 mutex_unlock(&fep->ptp_clk_mutex); 2382 goto failed_clk_ptp; 2383 } else { 2384 fep->ptp_clk_on = true; 2385 } 2386 mutex_unlock(&fep->ptp_clk_mutex); 2387 } 2388 2389 ret = clk_prepare_enable(fep->clk_ref); 2390 if (ret) 2391 goto failed_clk_ref; 2392 2393 ret = clk_prepare_enable(fep->clk_2x_txclk); 2394 if (ret) 2395 goto failed_clk_2x_txclk; 2396 2397 fec_enet_phy_reset_after_clk_enable(ndev); 2398 } else { 2399 clk_disable_unprepare(fep->clk_enet_out); 2400 if (fep->clk_ptp) { 2401 mutex_lock(&fep->ptp_clk_mutex); 2402 clk_disable_unprepare(fep->clk_ptp); 2403 fep->ptp_clk_on = false; 2404 mutex_unlock(&fep->ptp_clk_mutex); 2405 } 2406 clk_disable_unprepare(fep->clk_ref); 2407 clk_disable_unprepare(fep->clk_2x_txclk); 2408 } 2409 2410 return 0; 2411 2412 failed_clk_2x_txclk: 2413 if (fep->clk_ref) 2414 clk_disable_unprepare(fep->clk_ref); 2415 failed_clk_ref: 2416 if (fep->clk_ptp) { 2417 mutex_lock(&fep->ptp_clk_mutex); 2418 clk_disable_unprepare(fep->clk_ptp); 2419 fep->ptp_clk_on = false; 2420 mutex_unlock(&fep->ptp_clk_mutex); 2421 } 2422 failed_clk_ptp: 2423 clk_disable_unprepare(fep->clk_enet_out); 2424 2425 return ret; 2426 } 2427 2428 static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep, 2429 struct device_node *np) 2430 { 2431 u32 rgmii_tx_delay, rgmii_rx_delay; 2432 2433 /* For rgmii tx internal delay, valid values are 0ps and 2000ps */ 2434 if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) { 2435 if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) { 2436 dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps"); 2437 return -EINVAL; 2438 } else if (rgmii_tx_delay == 2000) { 2439 fep->rgmii_txc_dly = true; 2440 } 2441 } 2442 2443 /* For rgmii rx internal delay, valid values are 0ps and 2000ps */ 2444 if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) { 2445 if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) { 2446 dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps"); 2447 
return -EINVAL; 2448 } else if (rgmii_rx_delay == 2000) { 2449 fep->rgmii_rxc_dly = true; 2450 } 2451 } 2452 2453 return 0; 2454 } 2455 2456 static int fec_enet_mii_probe(struct net_device *ndev) 2457 { 2458 struct fec_enet_private *fep = netdev_priv(ndev); 2459 struct phy_device *phy_dev = NULL; 2460 char mdio_bus_id[MII_BUS_ID_SIZE]; 2461 char phy_name[MII_BUS_ID_SIZE + 3]; 2462 int phy_id; 2463 int dev_id = fep->dev_id; 2464 2465 if (fep->phy_node) { 2466 phy_dev = of_phy_connect(ndev, fep->phy_node, 2467 &fec_enet_adjust_link, 0, 2468 fep->phy_interface); 2469 if (!phy_dev) { 2470 netdev_err(ndev, "Unable to connect to phy\n"); 2471 return -ENODEV; 2472 } 2473 } else { 2474 /* check for attached phy */ 2475 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { 2476 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) 2477 continue; 2478 if (dev_id--) 2479 continue; 2480 strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); 2481 break; 2482 } 2483 2484 if (phy_id >= PHY_MAX_ADDR) { 2485 netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); 2486 strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); 2487 phy_id = 0; 2488 } 2489 2490 snprintf(phy_name, sizeof(phy_name), 2491 PHY_ID_FMT, mdio_bus_id, phy_id); 2492 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 2493 fep->phy_interface); 2494 } 2495 2496 if (IS_ERR(phy_dev)) { 2497 netdev_err(ndev, "could not attach to PHY\n"); 2498 return PTR_ERR(phy_dev); 2499 } 2500 2501 /* mask with MAC supported features */ 2502 if (fep->quirks & FEC_QUIRK_HAS_GBIT) { 2503 phy_set_max_speed(phy_dev, 1000); 2504 phy_remove_link_mode(phy_dev, 2505 ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 2506 #if !defined(CONFIG_M5272) 2507 phy_support_sym_pause(phy_dev); 2508 #endif 2509 } 2510 else 2511 phy_set_max_speed(phy_dev, 100); 2512 2513 if (fep->quirks & FEC_QUIRK_HAS_EEE) 2514 phy_support_eee(phy_dev); 2515 2516 fep->link = 0; 2517 fep->full_duplex = 0; 2518 2519 phy_attached_info(phy_dev); 2520 2521 return 0; 2522 } 2523 2524 static int fec_enet_mii_init(struct platform_device *pdev) 2525 { 2526 static struct mii_bus *fec0_mii_bus; 2527 struct net_device *ndev = platform_get_drvdata(pdev); 2528 struct fec_enet_private *fep = netdev_priv(ndev); 2529 bool suppress_preamble = false; 2530 struct phy_device *phydev; 2531 struct device_node *node; 2532 int err = -ENXIO; 2533 u32 mii_speed, holdtime; 2534 u32 bus_freq; 2535 int addr; 2536 2537 /* 2538 * The i.MX28 dual fec interfaces are not equal. 2539 * Here are the differences: 2540 * 2541 * - fec0 supports MII & RMII modes while fec1 only supports RMII 2542 * - fec0 acts as the 1588 time master while fec1 is slave 2543 * - external phys can only be configured by fec0 2544 * 2545 * That is to say fec1 can not work independently. It only works 2546 * when fec0 is working. The reason behind this design is that the 2547 * second interface is added primarily for Switch mode. 2548 * 2549 * Because of the last point above, both phys are attached on fec0 2550 * mdio interface in board design, and need to be configured by 2551 * fec0 mii_bus. 
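 * Hence, when FEC_QUIRK_SINGLE_MDIO is set, fec1 reuses the mii_bus that fec0 has already registered instead of creating its own.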
2552 */ 2553 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { 2554 /* fec1 uses fec0 mii_bus */ 2555 if (mii_cnt && fec0_mii_bus) { 2556 fep->mii_bus = fec0_mii_bus; 2557 mii_cnt++; 2558 return 0; 2559 } 2560 return -ENOENT; 2561 } 2562 2563 bus_freq = 2500000; /* 2.5MHz by default */ 2564 node = of_get_child_by_name(pdev->dev.of_node, "mdio"); 2565 if (node) { 2566 of_property_read_u32(node, "clock-frequency", &bus_freq); 2567 suppress_preamble = of_property_read_bool(node, 2568 "suppress-preamble"); 2569 } 2570 2571 /* 2572 * Set MII speed (= clk_get_rate() / (MII_SPEED x 2)) 2573 * 2574 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while 2575 * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 2576 * Reference Manual gets this wrong; the i.MX6Q documentation 2577 * corrects it. 2578 */ 2579 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2); 2580 if (fep->quirks & FEC_QUIRK_ENET_MAC) 2581 mii_speed--; 2582 if (mii_speed > 63) { 2583 dev_err(&pdev->dev, 2584 "fec clock (%lu) too fast to get right mii speed\n", 2585 clk_get_rate(fep->clk_ipg)); 2586 err = -EINVAL; 2587 goto err_out; 2588 } 2589 2590 /* 2591 * The i.MX28 and i.MX6 types have another field in the MSCR (aka 2592 * MII_SPEED) register that defines the MDIO output hold time. Earlier 2593 * versions are RAZ there, so just ignore the difference and write the 2594 * register always. 2595 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns. 2596 * HOLDTIME + 1 is the number of clk cycles the fec is holding the 2597 * output. 2598 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). 2599 * Given that ceil(clkrate / 5000000) <= 64, the calculation for 2600 * holdtime cannot result in a value greater than 3. 2601 */ 2602 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; 2603 2604 fep->phy_speed = mii_speed << 1 | holdtime << 8; 2605 2606 if (suppress_preamble) 2607 fep->phy_speed |= BIT(7); 2608 2609 if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) { 2610 /* Clear MMFR to avoid generating an MII event when writing MSCR.
2611 * MII event generation condition: 2612 * - writing MSCR: 2613 * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero & 2614 * mscr_reg_data_in[7:0] != 0 2615 * - writing MMFR: 2616 * - mscr[7:0]_not_zero 2617 */ 2618 writel(0, fep->hwp + FEC_MII_DATA); 2619 } 2620 2621 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 2622 2623 /* Clear any pending transaction complete indication */ 2624 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); 2625 2626 fep->mii_bus = mdiobus_alloc(); 2627 if (fep->mii_bus == NULL) { 2628 err = -ENOMEM; 2629 goto err_out; 2630 } 2631 2632 fep->mii_bus->name = "fec_enet_mii_bus"; 2633 fep->mii_bus->read = fec_enet_mdio_read_c22; 2634 fep->mii_bus->write = fec_enet_mdio_write_c22; 2635 if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) { 2636 fep->mii_bus->read_c45 = fec_enet_mdio_read_c45; 2637 fep->mii_bus->write_c45 = fec_enet_mdio_write_c45; 2638 } 2639 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2640 pdev->name, fep->dev_id + 1); 2641 fep->mii_bus->priv = fep; 2642 fep->mii_bus->parent = &pdev->dev; 2643 2644 err = of_mdiobus_register(fep->mii_bus, node); 2645 if (err) 2646 goto err_out_free_mdiobus; 2647 of_node_put(node); 2648 2649 /* find all the PHY devices on the bus and set mac_managed_pm to true */ 2650 for (addr = 0; addr < PHY_MAX_ADDR; addr++) { 2651 phydev = mdiobus_get_phy(fep->mii_bus, addr); 2652 if (phydev) 2653 phydev->mac_managed_pm = true; 2654 } 2655 2656 mii_cnt++; 2657 2658 /* save fec0 mii_bus */ 2659 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) 2660 fec0_mii_bus = fep->mii_bus; 2661 2662 return 0; 2663 2664 err_out_free_mdiobus: 2665 mdiobus_free(fep->mii_bus); 2666 err_out: 2667 of_node_put(node); 2668 return err; 2669 } 2670 2671 static void fec_enet_mii_remove(struct fec_enet_private *fep) 2672 { 2673 if (--mii_cnt == 0) { 2674 mdiobus_unregister(fep->mii_bus); 2675 mdiobus_free(fep->mii_bus); 2676 } 2677 } 2678 2679 static void fec_enet_get_drvinfo(struct net_device *ndev, 2680 struct ethtool_drvinfo *info) 2681 { 2682 struct fec_enet_private *fep = netdev_priv(ndev); 2683 2684 strscpy(info->driver, fep->pdev->dev.driver->name, 2685 sizeof(info->driver)); 2686 strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); 2687 } 2688 2689 static int fec_enet_get_regs_len(struct net_device *ndev) 2690 { 2691 struct fec_enet_private *fep = netdev_priv(ndev); 2692 struct resource *r; 2693 int s = 0; 2694 2695 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); 2696 if (r) 2697 s = resource_size(r); 2698 2699 return s; 2700 } 2701 2702 /* List of registers that can be safely read to dump them with ethtool */ 2703 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2704 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 2705 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) 2706 static __u32 fec_enet_register_version = 2; 2707 static u32 fec_enet_register_offset[] = { 2708 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2709 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2710 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1, 2711 FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH, 2712 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, 2713 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1, 2714 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2, 2715 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0, 2716 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL,
FEC_R_FIFO_RSEM, 2717 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2, 2718 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1, 2719 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME, 2720 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 2721 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 2722 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 2723 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 2724 RMON_T_P_GTE2048, RMON_T_OCTETS, 2725 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, 2726 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 2727 IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 2728 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 2729 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 2730 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 2731 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 2732 RMON_R_P_GTE2048, RMON_R_OCTETS, 2733 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 2734 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2735 }; 2736 /* for i.MX6ul */ 2737 static u32 fec_enet_register_offset_6ul[] = { 2738 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2739 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2740 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0, 2741 FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, 2742 FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0, 2743 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, 2744 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, 2745 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 2746 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 2747 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 2748 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 2749 RMON_T_P_GTE2048, RMON_T_OCTETS, 2750 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, 2751 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 2752 IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 2753 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 2754 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 2755 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 2756 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 2757 RMON_R_P_GTE2048, RMON_R_OCTETS, 2758 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 2759 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2760 }; 2761 #else 2762 static __u32 fec_enet_register_version = 1; 2763 static u32 fec_enet_register_offset[] = { 2764 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, 2765 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, 2766 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED, 2767 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL, 2768 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, 2769 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0, 2770 FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0, 2771 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0, 2772 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2 2773 }; 2774 #endif 2775 2776 static void fec_enet_get_regs(struct net_device *ndev, 2777 struct ethtool_regs *regs, void *regbuf) 2778 { 2779 struct fec_enet_private *fep = netdev_priv(ndev); 2780 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; 2781 
struct device *dev = &fep->pdev->dev; 2782 u32 *buf = (u32 *)regbuf; 2783 u32 i, off; 2784 int ret; 2785 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2786 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 2787 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) 2788 u32 *reg_list; 2789 u32 reg_cnt; 2790 2791 if (!of_machine_is_compatible("fsl,imx6ul")) { 2792 reg_list = fec_enet_register_offset; 2793 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); 2794 } else { 2795 reg_list = fec_enet_register_offset_6ul; 2796 reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul); 2797 } 2798 #else 2799 /* coldfire */ 2800 static u32 *reg_list = fec_enet_register_offset; 2801 static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); 2802 #endif 2803 ret = pm_runtime_resume_and_get(dev); 2804 if (ret < 0) 2805 return; 2806 2807 regs->version = fec_enet_register_version; 2808 2809 memset(buf, 0, regs->len); 2810 2811 for (i = 0; i < reg_cnt; i++) { 2812 off = reg_list[i]; 2813 2814 if ((off == FEC_R_BOUND || off == FEC_R_FSTART) && 2815 !(fep->quirks & FEC_QUIRK_HAS_FRREG)) 2816 continue; 2817 2818 off >>= 2; 2819 buf[off] = readl(&theregs[off]); 2820 } 2821 2822 pm_runtime_mark_last_busy(dev); 2823 pm_runtime_put_autosuspend(dev); 2824 } 2825 2826 static int fec_enet_get_ts_info(struct net_device *ndev, 2827 struct kernel_ethtool_ts_info *info) 2828 { 2829 struct fec_enet_private *fep = netdev_priv(ndev); 2830 2831 if (fep->bufdesc_ex) { 2832 2833 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 2834 SOF_TIMESTAMPING_TX_HARDWARE | 2835 SOF_TIMESTAMPING_RX_HARDWARE | 2836 SOF_TIMESTAMPING_RAW_HARDWARE; 2837 if (fep->ptp_clock) 2838 info->phc_index = ptp_clock_index(fep->ptp_clock); 2839 2840 info->tx_types = (1 << HWTSTAMP_TX_OFF) | 2841 (1 << HWTSTAMP_TX_ON); 2842 2843 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2844 (1 << HWTSTAMP_FILTER_ALL); 2845 return 0; 2846 } else { 2847 return ethtool_op_get_ts_info(ndev, info); 2848 } 2849 } 2850 2851 #if !defined(CONFIG_M5272) 2852 2853 static void fec_enet_get_pauseparam(struct net_device *ndev, 2854 struct ethtool_pauseparam *pause) 2855 { 2856 struct fec_enet_private *fep = netdev_priv(ndev); 2857 2858 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; 2859 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; 2860 pause->rx_pause = pause->tx_pause; 2861 } 2862 2863 static int fec_enet_set_pauseparam(struct net_device *ndev, 2864 struct ethtool_pauseparam *pause) 2865 { 2866 struct fec_enet_private *fep = netdev_priv(ndev); 2867 2868 if (!ndev->phydev) 2869 return -ENODEV; 2870 2871 if (pause->tx_pause != pause->rx_pause) { 2872 netdev_info(ndev, 2873 "hardware only support enable/disable both tx and rx"); 2874 return -EINVAL; 2875 } 2876 2877 fep->pause_flag = 0; 2878 2879 /* tx pause must be same as rx pause */ 2880 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; 2881 fep->pause_flag |= pause->autoneg ? 
FEC_PAUSE_FLAG_AUTONEG : 0; 2882 2883 phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause, 2884 pause->autoneg); 2885 2886 if (pause->autoneg) { 2887 if (netif_running(ndev)) 2888 fec_stop(ndev); 2889 phy_start_aneg(ndev->phydev); 2890 } 2891 if (netif_running(ndev)) { 2892 napi_disable(&fep->napi); 2893 netif_tx_lock_bh(ndev); 2894 fec_restart(ndev); 2895 netif_tx_wake_all_queues(ndev); 2896 netif_tx_unlock_bh(ndev); 2897 napi_enable(&fep->napi); 2898 } 2899 2900 return 0; 2901 } 2902 2903 static const struct fec_stat { 2904 char name[ETH_GSTRING_LEN]; 2905 u16 offset; 2906 } fec_stats[] = { 2907 /* RMON TX */ 2908 { "tx_dropped", RMON_T_DROP }, 2909 { "tx_packets", RMON_T_PACKETS }, 2910 { "tx_broadcast", RMON_T_BC_PKT }, 2911 { "tx_multicast", RMON_T_MC_PKT }, 2912 { "tx_crc_errors", RMON_T_CRC_ALIGN }, 2913 { "tx_undersize", RMON_T_UNDERSIZE }, 2914 { "tx_oversize", RMON_T_OVERSIZE }, 2915 { "tx_fragment", RMON_T_FRAG }, 2916 { "tx_jabber", RMON_T_JAB }, 2917 { "tx_collision", RMON_T_COL }, 2918 { "tx_64byte", RMON_T_P64 }, 2919 { "tx_65to127byte", RMON_T_P65TO127 }, 2920 { "tx_128to255byte", RMON_T_P128TO255 }, 2921 { "tx_256to511byte", RMON_T_P256TO511 }, 2922 { "tx_512to1023byte", RMON_T_P512TO1023 }, 2923 { "tx_1024to2047byte", RMON_T_P1024TO2047 }, 2924 { "tx_GTE2048byte", RMON_T_P_GTE2048 }, 2925 { "tx_octets", RMON_T_OCTETS }, 2926 2927 /* IEEE TX */ 2928 { "IEEE_tx_drop", IEEE_T_DROP }, 2929 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, 2930 { "IEEE_tx_1col", IEEE_T_1COL }, 2931 { "IEEE_tx_mcol", IEEE_T_MCOL }, 2932 { "IEEE_tx_def", IEEE_T_DEF }, 2933 { "IEEE_tx_lcol", IEEE_T_LCOL }, 2934 { "IEEE_tx_excol", IEEE_T_EXCOL }, 2935 { "IEEE_tx_macerr", IEEE_T_MACERR }, 2936 { "IEEE_tx_cserr", IEEE_T_CSERR }, 2937 { "IEEE_tx_sqe", IEEE_T_SQE }, 2938 { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, 2939 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, 2940 2941 /* RMON RX */ 2942 { "rx_packets", RMON_R_PACKETS }, 2943 { "rx_broadcast", RMON_R_BC_PKT }, 2944 { "rx_multicast", RMON_R_MC_PKT }, 2945 { "rx_crc_errors", RMON_R_CRC_ALIGN }, 2946 { "rx_undersize", RMON_R_UNDERSIZE }, 2947 { "rx_oversize", RMON_R_OVERSIZE }, 2948 { "rx_fragment", RMON_R_FRAG }, 2949 { "rx_jabber", RMON_R_JAB }, 2950 { "rx_64byte", RMON_R_P64 }, 2951 { "rx_65to127byte", RMON_R_P65TO127 }, 2952 { "rx_128to255byte", RMON_R_P128TO255 }, 2953 { "rx_256to511byte", RMON_R_P256TO511 }, 2954 { "rx_512to1023byte", RMON_R_P512TO1023 }, 2955 { "rx_1024to2047byte", RMON_R_P1024TO2047 }, 2956 { "rx_GTE2048byte", RMON_R_P_GTE2048 }, 2957 { "rx_octets", RMON_R_OCTETS }, 2958 2959 /* IEEE RX */ 2960 { "IEEE_rx_drop", IEEE_R_DROP }, 2961 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, 2962 { "IEEE_rx_crc", IEEE_R_CRC }, 2963 { "IEEE_rx_align", IEEE_R_ALIGN }, 2964 { "IEEE_rx_macerr", IEEE_R_MACERR }, 2965 { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, 2966 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, 2967 }; 2968 2969 #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64)) 2970 2971 static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = { 2972 "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */ 2973 "rx_xdp_pass", /* RX_XDP_PASS, */ 2974 "rx_xdp_drop", /* RX_XDP_DROP, */ 2975 "rx_xdp_tx", /* RX_XDP_TX, */ 2976 "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */ 2977 "tx_xdp_xmit", /* TX_XDP_XMIT, */ 2978 "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */ 2979 }; 2980 2981 static void fec_enet_update_ethtool_stats(struct net_device *dev) 2982 { 2983 struct fec_enet_private *fep = netdev_priv(dev); 2984 int i; 2985 2986 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2987 
fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); 2988 } 2989 2990 static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data) 2991 { 2992 u64 xdp_stats[XDP_STATS_TOTAL] = { 0 }; 2993 struct fec_enet_priv_rx_q *rxq; 2994 int i, j; 2995 2996 for (i = fep->num_rx_queues - 1; i >= 0; i--) { 2997 rxq = fep->rx_queue[i]; 2998 2999 for (j = 0; j < XDP_STATS_TOTAL; j++) 3000 xdp_stats[j] += rxq->stats[j]; 3001 } 3002 3003 memcpy(data, xdp_stats, sizeof(xdp_stats)); 3004 } 3005 3006 static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data) 3007 { 3008 #ifdef CONFIG_PAGE_POOL_STATS 3009 struct page_pool_stats stats = {}; 3010 struct fec_enet_priv_rx_q *rxq; 3011 int i; 3012 3013 for (i = fep->num_rx_queues - 1; i >= 0; i--) { 3014 rxq = fep->rx_queue[i]; 3015 3016 if (!rxq->page_pool) 3017 continue; 3018 3019 page_pool_get_stats(rxq->page_pool, &stats); 3020 } 3021 3022 page_pool_ethtool_stats_get(data, &stats); 3023 #endif 3024 } 3025 3026 static void fec_enet_get_ethtool_stats(struct net_device *dev, 3027 struct ethtool_stats *stats, u64 *data) 3028 { 3029 struct fec_enet_private *fep = netdev_priv(dev); 3030 3031 if (netif_running(dev)) 3032 fec_enet_update_ethtool_stats(dev); 3033 3034 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); 3035 data += FEC_STATS_SIZE / sizeof(u64); 3036 3037 fec_enet_get_xdp_stats(fep, data); 3038 data += XDP_STATS_TOTAL; 3039 3040 fec_enet_page_pool_stats(fep, data); 3041 } 3042 3043 static void fec_enet_get_strings(struct net_device *netdev, 3044 u32 stringset, u8 *data) 3045 { 3046 int i; 3047 switch (stringset) { 3048 case ETH_SS_STATS: 3049 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) { 3050 ethtool_puts(&data, fec_stats[i].name); 3051 } 3052 for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) { 3053 ethtool_puts(&data, fec_xdp_stat_strs[i]); 3054 } 3055 page_pool_ethtool_stats_get_strings(data); 3056 3057 break; 3058 case ETH_SS_TEST: 3059 net_selftest_get_strings(data); 3060 break; 3061 } 3062 } 3063 3064 static int fec_enet_get_sset_count(struct net_device *dev, int sset) 3065 { 3066 int count; 3067 3068 switch (sset) { 3069 case ETH_SS_STATS: 3070 count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL; 3071 count += page_pool_ethtool_stats_get_count(); 3072 return count; 3073 3074 case ETH_SS_TEST: 3075 return net_selftest_get_count(); 3076 default: 3077 return -EOPNOTSUPP; 3078 } 3079 } 3080 3081 static void fec_enet_clear_ethtool_stats(struct net_device *dev) 3082 { 3083 struct fec_enet_private *fep = netdev_priv(dev); 3084 struct fec_enet_priv_rx_q *rxq; 3085 int i, j; 3086 3087 /* Disable MIB statistics counters */ 3088 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT); 3089 3090 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 3091 writel(0, fep->hwp + fec_stats[i].offset); 3092 3093 for (i = fep->num_rx_queues - 1; i >= 0; i--) { 3094 rxq = fep->rx_queue[i]; 3095 for (j = 0; j < XDP_STATS_TOTAL; j++) 3096 rxq->stats[j] = 0; 3097 } 3098 3099 /* Don't disable MIB statistics counters */ 3100 writel(0, fep->hwp + FEC_MIB_CTRLSTAT); 3101 } 3102 3103 #else /* !defined(CONFIG_M5272) */ 3104 #define FEC_STATS_SIZE 0 3105 static inline void fec_enet_update_ethtool_stats(struct net_device *dev) 3106 { 3107 } 3108 3109 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev) 3110 { 3111 } 3112 #endif /* !defined(CONFIG_M5272) */ 3113 3114 /* ITR clock source is enet system clock (clk_ahb). 
3115 * TCTT unit is cycle_ns * 64 cycle 3116 * So, the ICTT value = X us / (cycle_ns * 64) 3117 */ 3118 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) 3119 { 3120 struct fec_enet_private *fep = netdev_priv(ndev); 3121 3122 return us * (fep->itr_clk_rate / 64000) / 1000; 3123 } 3124 3125 /* Set threshold for interrupt coalescing */ 3126 static void fec_enet_itr_coal_set(struct net_device *ndev) 3127 { 3128 struct fec_enet_private *fep = netdev_priv(ndev); 3129 u32 rx_itr = 0, tx_itr = 0; 3130 int rx_ictt, tx_ictt; 3131 3132 rx_ictt = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); 3133 tx_ictt = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); 3134 3135 if (rx_ictt > 0 && fep->rx_pkts_itr > 1) { 3136 /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */ 3137 rx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL; 3138 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); 3139 rx_itr |= FEC_ITR_ICTT(rx_ictt); 3140 } 3141 3142 if (tx_ictt > 0 && fep->tx_pkts_itr > 1) { 3143 /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */ 3144 tx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL; 3145 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); 3146 tx_itr |= FEC_ITR_ICTT(tx_ictt); 3147 } 3148 3149 writel(tx_itr, fep->hwp + FEC_TXIC0); 3150 writel(rx_itr, fep->hwp + FEC_RXIC0); 3151 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { 3152 writel(tx_itr, fep->hwp + FEC_TXIC1); 3153 writel(rx_itr, fep->hwp + FEC_RXIC1); 3154 writel(tx_itr, fep->hwp + FEC_TXIC2); 3155 writel(rx_itr, fep->hwp + FEC_RXIC2); 3156 } 3157 } 3158 3159 static int fec_enet_get_coalesce(struct net_device *ndev, 3160 struct ethtool_coalesce *ec, 3161 struct kernel_ethtool_coalesce *kernel_coal, 3162 struct netlink_ext_ack *extack) 3163 { 3164 struct fec_enet_private *fep = netdev_priv(ndev); 3165 3166 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) 3167 return -EOPNOTSUPP; 3168 3169 ec->rx_coalesce_usecs = fep->rx_time_itr; 3170 ec->rx_max_coalesced_frames = fep->rx_pkts_itr; 3171 3172 ec->tx_coalesce_usecs = fep->tx_time_itr; 3173 ec->tx_max_coalesced_frames = fep->tx_pkts_itr; 3174 3175 return 0; 3176 } 3177 3178 static int fec_enet_set_coalesce(struct net_device *ndev, 3179 struct ethtool_coalesce *ec, 3180 struct kernel_ethtool_coalesce *kernel_coal, 3181 struct netlink_ext_ack *extack) 3182 { 3183 struct fec_enet_private *fep = netdev_priv(ndev); 3184 struct device *dev = &fep->pdev->dev; 3185 unsigned int cycle; 3186 3187 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) 3188 return -EOPNOTSUPP; 3189 3190 if (ec->rx_max_coalesced_frames > 255) { 3191 dev_err(dev, "Rx coalesced frames exceed hardware limitation\n"); 3192 return -EINVAL; 3193 } 3194 3195 if (ec->tx_max_coalesced_frames > 255) { 3196 dev_err(dev, "Tx coalesced frame exceed hardware limitation\n"); 3197 return -EINVAL; 3198 } 3199 3200 cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); 3201 if (cycle > 0xFFFF) { 3202 dev_err(dev, "Rx coalesced usec exceed hardware limitation\n"); 3203 return -EINVAL; 3204 } 3205 3206 cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); 3207 if (cycle > 0xFFFF) { 3208 dev_err(dev, "Tx coalesced usec exceed hardware limitation\n"); 3209 return -EINVAL; 3210 } 3211 3212 fep->rx_time_itr = ec->rx_coalesce_usecs; 3213 fep->rx_pkts_itr = ec->rx_max_coalesced_frames; 3214 3215 fep->tx_time_itr = ec->tx_coalesce_usecs; 3216 fep->tx_pkts_itr = ec->tx_max_coalesced_frames; 3217 3218 fec_enet_itr_coal_set(ndev); 3219 3220 return 0; 3221 } 3222 3223 static int 3224 fec_enet_get_eee(struct net_device 
*ndev, struct ethtool_keee *edata) 3225 { 3226 struct fec_enet_private *fep = netdev_priv(ndev); 3227 3228 if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) 3229 return -EOPNOTSUPP; 3230 3231 if (!netif_running(ndev)) 3232 return -ENETDOWN; 3233 3234 return phy_ethtool_get_eee(ndev->phydev, edata); 3235 } 3236 3237 static int 3238 fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata) 3239 { 3240 struct fec_enet_private *fep = netdev_priv(ndev); 3241 3242 if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) 3243 return -EOPNOTSUPP; 3244 3245 if (!netif_running(ndev)) 3246 return -ENETDOWN; 3247 3248 return phy_ethtool_set_eee(ndev->phydev, edata); 3249 } 3250 3251 static void 3252 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 3253 { 3254 struct fec_enet_private *fep = netdev_priv(ndev); 3255 3256 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { 3257 wol->supported = WAKE_MAGIC; 3258 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0; 3259 } else { 3260 wol->supported = wol->wolopts = 0; 3261 } 3262 } 3263 3264 static int 3265 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 3266 { 3267 struct fec_enet_private *fep = netdev_priv(ndev); 3268 3269 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) 3270 return -EINVAL; 3271 3272 if (wol->wolopts & ~WAKE_MAGIC) 3273 return -EINVAL; 3274 3275 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); 3276 if (device_may_wakeup(&ndev->dev)) 3277 fep->wol_flag |= FEC_WOL_FLAG_ENABLE; 3278 else 3279 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); 3280 3281 return 0; 3282 } 3283 3284 static const struct ethtool_ops fec_enet_ethtool_ops = { 3285 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 3286 ETHTOOL_COALESCE_MAX_FRAMES, 3287 .get_drvinfo = fec_enet_get_drvinfo, 3288 .get_regs_len = fec_enet_get_regs_len, 3289 .get_regs = fec_enet_get_regs, 3290 .nway_reset = phy_ethtool_nway_reset, 3291 .get_link = ethtool_op_get_link, 3292 .get_coalesce = fec_enet_get_coalesce, 3293 .set_coalesce = fec_enet_set_coalesce, 3294 #ifndef CONFIG_M5272 3295 .get_pauseparam = fec_enet_get_pauseparam, 3296 .set_pauseparam = fec_enet_set_pauseparam, 3297 .get_strings = fec_enet_get_strings, 3298 .get_ethtool_stats = fec_enet_get_ethtool_stats, 3299 .get_sset_count = fec_enet_get_sset_count, 3300 #endif 3301 .get_ts_info = fec_enet_get_ts_info, 3302 .get_wol = fec_enet_get_wol, 3303 .set_wol = fec_enet_set_wol, 3304 .get_eee = fec_enet_get_eee, 3305 .set_eee = fec_enet_set_eee, 3306 .get_link_ksettings = phy_ethtool_get_link_ksettings, 3307 .set_link_ksettings = phy_ethtool_set_link_ksettings, 3308 .self_test = net_selftest, 3309 }; 3310 3311 static void fec_enet_free_buffers(struct net_device *ndev) 3312 { 3313 struct fec_enet_private *fep = netdev_priv(ndev); 3314 unsigned int i; 3315 struct fec_enet_priv_tx_q *txq; 3316 struct fec_enet_priv_rx_q *rxq; 3317 unsigned int q; 3318 3319 for (q = 0; q < fep->num_rx_queues; q++) { 3320 rxq = fep->rx_queue[q]; 3321 for (i = 0; i < rxq->bd.ring_size; i++) 3322 page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false); 3323 3324 for (i = 0; i < XDP_STATS_TOTAL; i++) 3325 rxq->stats[i] = 0; 3326 3327 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) 3328 xdp_rxq_info_unreg(&rxq->xdp_rxq); 3329 page_pool_destroy(rxq->page_pool); 3330 rxq->page_pool = NULL; 3331 } 3332 3333 for (q = 0; q < fep->num_tx_queues; q++) { 3334 txq = fep->tx_queue[q]; 3335 for (i = 0; i < txq->bd.ring_size; i++) { 3336 kfree(txq->tx_bounce[i]); 3337 txq->tx_bounce[i] = NULL; 3338 3339 if 
(!txq->tx_buf[i].buf_p) { 3340 txq->tx_buf[i].type = FEC_TXBUF_T_SKB; 3341 continue; 3342 } 3343 3344 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { 3345 dev_kfree_skb(txq->tx_buf[i].buf_p); 3346 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) { 3347 xdp_return_frame(txq->tx_buf[i].buf_p); 3348 } else { 3349 struct page *page = txq->tx_buf[i].buf_p; 3350 3351 page_pool_put_page(page->pp, page, 0, false); 3352 } 3353 3354 txq->tx_buf[i].buf_p = NULL; 3355 txq->tx_buf[i].type = FEC_TXBUF_T_SKB; 3356 } 3357 } 3358 } 3359 3360 static void fec_enet_free_queue(struct net_device *ndev) 3361 { 3362 struct fec_enet_private *fep = netdev_priv(ndev); 3363 int i; 3364 struct fec_enet_priv_tx_q *txq; 3365 3366 for (i = 0; i < fep->num_tx_queues; i++) 3367 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { 3368 txq = fep->tx_queue[i]; 3369 fec_dma_free(&fep->pdev->dev, 3370 txq->bd.ring_size * TSO_HEADER_SIZE, 3371 txq->tso_hdrs, txq->tso_hdrs_dma); 3372 } 3373 3374 for (i = 0; i < fep->num_rx_queues; i++) 3375 kfree(fep->rx_queue[i]); 3376 for (i = 0; i < fep->num_tx_queues; i++) 3377 kfree(fep->tx_queue[i]); 3378 } 3379 3380 static int fec_enet_alloc_queue(struct net_device *ndev) 3381 { 3382 struct fec_enet_private *fep = netdev_priv(ndev); 3383 int i; 3384 int ret = 0; 3385 struct fec_enet_priv_tx_q *txq; 3386 3387 for (i = 0; i < fep->num_tx_queues; i++) { 3388 txq = kzalloc(sizeof(*txq), GFP_KERNEL); 3389 if (!txq) { 3390 ret = -ENOMEM; 3391 goto alloc_failed; 3392 } 3393 3394 fep->tx_queue[i] = txq; 3395 txq->bd.ring_size = TX_RING_SIZE; 3396 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; 3397 3398 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; 3399 txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS; 3400 3401 txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev, 3402 txq->bd.ring_size * TSO_HEADER_SIZE, 3403 &txq->tso_hdrs_dma, GFP_KERNEL); 3404 if (!txq->tso_hdrs) { 3405 ret = -ENOMEM; 3406 goto alloc_failed; 3407 } 3408 } 3409 3410 for (i = 0; i < fep->num_rx_queues; i++) { 3411 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), 3412 GFP_KERNEL); 3413 if (!fep->rx_queue[i]) { 3414 ret = -ENOMEM; 3415 goto alloc_failed; 3416 } 3417 3418 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; 3419 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; 3420 } 3421 return ret; 3422 3423 alloc_failed: 3424 fec_enet_free_queue(ndev); 3425 return ret; 3426 } 3427 3428 static int 3429 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) 3430 { 3431 struct fec_enet_private *fep = netdev_priv(ndev); 3432 struct fec_enet_priv_rx_q *rxq; 3433 dma_addr_t phys_addr; 3434 struct bufdesc *bdp; 3435 struct page *page; 3436 int i, err; 3437 3438 rxq = fep->rx_queue[queue]; 3439 bdp = rxq->bd.base; 3440 3441 err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size); 3442 if (err < 0) { 3443 netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err); 3444 return err; 3445 } 3446 3447 for (i = 0; i < rxq->bd.ring_size; i++) { 3448 page = page_pool_dev_alloc_pages(rxq->page_pool); 3449 if (!page) 3450 goto err_alloc; 3451 3452 phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM; 3453 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); 3454 3455 rxq->rx_skb_info[i].page = page; 3456 rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM; 3457 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); 3458 3459 if (fep->bufdesc_ex) { 3460 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 3461 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 3462 } 3463 3464 bdp = 
fec_enet_get_nextdesc(bdp, &rxq->bd); 3465 } 3466 3467 /* Set the last buffer to wrap. */ 3468 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); 3469 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 3470 return 0; 3471 3472 err_alloc: 3473 fec_enet_free_buffers(ndev); 3474 return -ENOMEM; 3475 } 3476 3477 static int 3478 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) 3479 { 3480 struct fec_enet_private *fep = netdev_priv(ndev); 3481 unsigned int i; 3482 struct bufdesc *bdp; 3483 struct fec_enet_priv_tx_q *txq; 3484 3485 txq = fep->tx_queue[queue]; 3486 bdp = txq->bd.base; 3487 for (i = 0; i < txq->bd.ring_size; i++) { 3488 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); 3489 if (!txq->tx_bounce[i]) 3490 goto err_alloc; 3491 3492 bdp->cbd_sc = cpu_to_fec16(0); 3493 bdp->cbd_bufaddr = cpu_to_fec32(0); 3494 3495 if (fep->bufdesc_ex) { 3496 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 3497 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); 3498 } 3499 3500 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 3501 } 3502 3503 /* Set the last buffer to wrap. */ 3504 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); 3505 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 3506 3507 return 0; 3508 3509 err_alloc: 3510 fec_enet_free_buffers(ndev); 3511 return -ENOMEM; 3512 } 3513 3514 static int fec_enet_alloc_buffers(struct net_device *ndev) 3515 { 3516 struct fec_enet_private *fep = netdev_priv(ndev); 3517 unsigned int i; 3518 3519 for (i = 0; i < fep->num_rx_queues; i++) 3520 if (fec_enet_alloc_rxq_buffers(ndev, i)) 3521 return -ENOMEM; 3522 3523 for (i = 0; i < fep->num_tx_queues; i++) 3524 if (fec_enet_alloc_txq_buffers(ndev, i)) 3525 return -ENOMEM; 3526 return 0; 3527 } 3528 3529 static int 3530 fec_enet_open(struct net_device *ndev) 3531 { 3532 struct fec_enet_private *fep = netdev_priv(ndev); 3533 int ret; 3534 bool reset_again; 3535 3536 ret = pm_runtime_resume_and_get(&fep->pdev->dev); 3537 if (ret < 0) 3538 return ret; 3539 3540 pinctrl_pm_select_default_state(&fep->pdev->dev); 3541 ret = fec_enet_clk_enable(ndev, true); 3542 if (ret) 3543 goto clk_enable; 3544 3545 /* During the first fec_enet_open call the PHY isn't probed at this 3546 * point. Therefore the phy_reset_after_clk_enable() call within 3547 * fec_enet_clk_enable() fails. As we need this reset in order to be 3548 * sure the PHY is working correctly we check if we need to reset again 3549 * later when the PHY is probed 3550 */ 3551 if (ndev->phydev && ndev->phydev->drv) 3552 reset_again = false; 3553 else 3554 reset_again = true; 3555 3556 /* I should reset the ring buffers here, but I don't yet know 3557 * a simple way to do that. 3558 */ 3559 3560 ret = fec_enet_alloc_buffers(ndev); 3561 if (ret) 3562 goto err_enet_alloc; 3563 3564 /* Init MAC prior to mii bus probe */ 3565 fec_restart(ndev); 3566 3567 /* Call phy_reset_after_clk_enable() again if it failed during 3568 * phy_reset_after_clk_enable() before because the PHY wasn't probed. 
3569 */ 3570 if (reset_again) 3571 fec_enet_phy_reset_after_clk_enable(ndev); 3572 3573 /* Probe and connect to PHY when open the interface */ 3574 ret = fec_enet_mii_probe(ndev); 3575 if (ret) 3576 goto err_enet_mii_probe; 3577 3578 if (fep->quirks & FEC_QUIRK_ERR006687) 3579 imx6q_cpuidle_fec_irqs_used(); 3580 3581 if (fep->quirks & FEC_QUIRK_HAS_PMQOS) 3582 cpu_latency_qos_add_request(&fep->pm_qos_req, 0); 3583 3584 napi_enable(&fep->napi); 3585 phy_start(ndev->phydev); 3586 netif_tx_start_all_queues(ndev); 3587 3588 device_set_wakeup_enable(&ndev->dev, fep->wol_flag & 3589 FEC_WOL_FLAG_ENABLE); 3590 3591 return 0; 3592 3593 err_enet_mii_probe: 3594 fec_enet_free_buffers(ndev); 3595 err_enet_alloc: 3596 fec_enet_clk_enable(ndev, false); 3597 clk_enable: 3598 pm_runtime_mark_last_busy(&fep->pdev->dev); 3599 pm_runtime_put_autosuspend(&fep->pdev->dev); 3600 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3601 return ret; 3602 } 3603 3604 static int 3605 fec_enet_close(struct net_device *ndev) 3606 { 3607 struct fec_enet_private *fep = netdev_priv(ndev); 3608 3609 phy_stop(ndev->phydev); 3610 3611 if (netif_device_present(ndev)) { 3612 napi_disable(&fep->napi); 3613 netif_tx_disable(ndev); 3614 fec_stop(ndev); 3615 } 3616 3617 phy_disconnect(ndev->phydev); 3618 3619 if (fep->quirks & FEC_QUIRK_ERR006687) 3620 imx6q_cpuidle_fec_irqs_unused(); 3621 3622 fec_enet_update_ethtool_stats(ndev); 3623 3624 fec_enet_clk_enable(ndev, false); 3625 if (fep->quirks & FEC_QUIRK_HAS_PMQOS) 3626 cpu_latency_qos_remove_request(&fep->pm_qos_req); 3627 3628 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3629 pm_runtime_mark_last_busy(&fep->pdev->dev); 3630 pm_runtime_put_autosuspend(&fep->pdev->dev); 3631 3632 fec_enet_free_buffers(ndev); 3633 3634 return 0; 3635 } 3636 3637 /* Set or clear the multicast filter for this adaptor. 3638 * Skeleton taken from sunlance driver. 3639 * The CPM Ethernet implementation allows Multicast as well as individual 3640 * MAC address filtering. Some of the drivers check to make sure it is 3641 * a group multicast address, and discard those that are not. I guess I 3642 * will do the same for now, but just remove the test if you want 3643 * individual filtering as well (do the upper net layers want or support 3644 * this kind of feature?). 
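 * Each multicast address is hashed with CRC-32 below; the upper FEC_HASH_BITS bits of the CRC select one of the 64 bits in the group hash registers.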
3645 */ 3646 3647 #define FEC_HASH_BITS 6 /* #bits in hash */ 3648 3649 static void set_multicast_list(struct net_device *ndev) 3650 { 3651 struct fec_enet_private *fep = netdev_priv(ndev); 3652 struct netdev_hw_addr *ha; 3653 unsigned int crc, tmp; 3654 unsigned char hash; 3655 unsigned int hash_high = 0, hash_low = 0; 3656 3657 if (ndev->flags & IFF_PROMISC) { 3658 tmp = readl(fep->hwp + FEC_R_CNTRL); 3659 tmp |= 0x8; 3660 writel(tmp, fep->hwp + FEC_R_CNTRL); 3661 return; 3662 } 3663 3664 tmp = readl(fep->hwp + FEC_R_CNTRL); 3665 tmp &= ~0x8; 3666 writel(tmp, fep->hwp + FEC_R_CNTRL); 3667 3668 if (ndev->flags & IFF_ALLMULTI) { 3669 /* Catch all multicast addresses, so set the 3670 * filter to all 1's 3671 */ 3672 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 3673 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 3674 3675 return; 3676 } 3677 3678 /* Add the addresses in hash register */ 3679 netdev_for_each_mc_addr(ha, ndev) { 3680 /* calculate crc32 value of mac address */ 3681 crc = ether_crc_le(ndev->addr_len, ha->addr); 3682 3683 /* only upper 6 bits (FEC_HASH_BITS) are used 3684 * which point to specific bit in the hash registers 3685 */ 3686 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; 3687 3688 if (hash > 31) 3689 hash_high |= 1 << (hash - 32); 3690 else 3691 hash_low |= 1 << hash; 3692 } 3693 3694 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 3695 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 3696 } 3697 3698 /* Set a MAC change in hardware. */ 3699 static int 3700 fec_set_mac_address(struct net_device *ndev, void *p) 3701 { 3702 struct sockaddr *addr = p; 3703 3704 if (addr) { 3705 if (!is_valid_ether_addr(addr->sa_data)) 3706 return -EADDRNOTAVAIL; 3707 eth_hw_addr_set(ndev, addr->sa_data); 3708 } 3709 3710 /* Add netif status check here to avoid system hang in below case: 3711 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; 3712 * After ethx down, fec all clocks are gated off and then register 3713 * access causes system hang. 
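 * When the interface is down we therefore only record the new address here; the hardware address registers are reprogrammed by fec_restart() the next time the device is opened.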
3714 */ 3715 if (!netif_running(ndev)) 3716 return 0; 3717 3718 fec_set_hw_mac_addr(ndev); 3719 3720 return 0; 3721 } 3722 3723 static inline void fec_enet_set_netdev_features(struct net_device *netdev, 3724 netdev_features_t features) 3725 { 3726 struct fec_enet_private *fep = netdev_priv(netdev); 3727 netdev_features_t changed = features ^ netdev->features; 3728 3729 netdev->features = features; 3730 3731 /* Receive checksum has been changed */ 3732 if (changed & NETIF_F_RXCSUM) { 3733 if (features & NETIF_F_RXCSUM) 3734 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 3735 else 3736 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; 3737 } 3738 } 3739 3740 static int fec_set_features(struct net_device *netdev, 3741 netdev_features_t features) 3742 { 3743 struct fec_enet_private *fep = netdev_priv(netdev); 3744 netdev_features_t changed = features ^ netdev->features; 3745 3746 if (netif_running(netdev) && changed & NETIF_F_RXCSUM) { 3747 napi_disable(&fep->napi); 3748 netif_tx_lock_bh(netdev); 3749 fec_stop(netdev); 3750 fec_enet_set_netdev_features(netdev, features); 3751 fec_restart(netdev); 3752 netif_tx_wake_all_queues(netdev); 3753 netif_tx_unlock_bh(netdev); 3754 napi_enable(&fep->napi); 3755 } else { 3756 fec_enet_set_netdev_features(netdev, features); 3757 } 3758 3759 return 0; 3760 } 3761 3762 static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, 3763 struct net_device *sb_dev) 3764 { 3765 struct fec_enet_private *fep = netdev_priv(ndev); 3766 u16 vlan_tag = 0; 3767 3768 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 3769 return netdev_pick_tx(ndev, skb, NULL); 3770 3771 /* VLAN is present in the payload.*/ 3772 if (eth_type_vlan(skb->protocol)) { 3773 struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb); 3774 3775 vlan_tag = ntohs(vhdr->h_vlan_TCI); 3776 /* VLAN is present in the skb but not yet pushed in the payload.*/ 3777 } else if (skb_vlan_tag_present(skb)) { 3778 vlan_tag = skb->vlan_tci; 3779 } else { 3780 return vlan_tag; 3781 } 3782 3783 return fec_enet_vlan_pri_to_queue[vlan_tag >> 13]; 3784 } 3785 3786 static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf) 3787 { 3788 struct fec_enet_private *fep = netdev_priv(dev); 3789 bool is_run = netif_running(dev); 3790 struct bpf_prog *old_prog; 3791 3792 switch (bpf->command) { 3793 case XDP_SETUP_PROG: 3794 /* No need to support the SoCs that require to 3795 * do the frame swap because the performance wouldn't be 3796 * better than the skb mode. 
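 * (on those controllers every received buffer would have to be byte-swapped before an XDP program could parse it).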
3797 */ 3798 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 3799 return -EOPNOTSUPP; 3800 3801 if (!bpf->prog) 3802 xdp_features_clear_redirect_target(dev); 3803 3804 if (is_run) { 3805 napi_disable(&fep->napi); 3806 netif_tx_disable(dev); 3807 } 3808 3809 old_prog = xchg(&fep->xdp_prog, bpf->prog); 3810 if (old_prog) 3811 bpf_prog_put(old_prog); 3812 3813 fec_restart(dev); 3814 3815 if (is_run) { 3816 napi_enable(&fep->napi); 3817 netif_tx_start_all_queues(dev); 3818 } 3819 3820 if (bpf->prog) 3821 xdp_features_set_redirect_target(dev, false); 3822 3823 return 0; 3824 3825 case XDP_SETUP_XSK_POOL: 3826 return -EOPNOTSUPP; 3827 3828 default: 3829 return -EOPNOTSUPP; 3830 } 3831 } 3832 3833 static int 3834 fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index) 3835 { 3836 if (unlikely(index < 0)) 3837 return 0; 3838 3839 return (index % fep->num_tx_queues); 3840 } 3841 3842 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, 3843 struct fec_enet_priv_tx_q *txq, 3844 void *frame, u32 dma_sync_len, 3845 bool ndo_xmit) 3846 { 3847 unsigned int index, status, estatus; 3848 struct bufdesc *bdp; 3849 dma_addr_t dma_addr; 3850 int entries_free; 3851 u16 frame_len; 3852 3853 entries_free = fec_enet_get_free_txdesc_num(txq); 3854 if (entries_free < MAX_SKB_FRAGS + 1) { 3855 netdev_err_once(fep->netdev, "NOT enough BD for SG!\n"); 3856 return -EBUSY; 3857 } 3858 3859 /* Fill in a Tx ring entry */ 3860 bdp = txq->bd.cur; 3861 status = fec16_to_cpu(bdp->cbd_sc); 3862 status &= ~BD_ENET_TX_STATS; 3863 3864 index = fec_enet_get_bd_index(bdp, &txq->bd); 3865 3866 if (ndo_xmit) { 3867 struct xdp_frame *xdpf = frame; 3868 3869 dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data, 3870 xdpf->len, DMA_TO_DEVICE); 3871 if (dma_mapping_error(&fep->pdev->dev, dma_addr)) 3872 return -ENOMEM; 3873 3874 frame_len = xdpf->len; 3875 txq->tx_buf[index].buf_p = xdpf; 3876 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO; 3877 } else { 3878 struct xdp_buff *xdpb = frame; 3879 struct page *page; 3880 3881 page = virt_to_page(xdpb->data); 3882 dma_addr = page_pool_get_dma_addr(page) + 3883 (xdpb->data - xdpb->data_hard_start); 3884 dma_sync_single_for_device(&fep->pdev->dev, dma_addr, 3885 dma_sync_len, DMA_BIDIRECTIONAL); 3886 frame_len = xdpb->data_end - xdpb->data; 3887 txq->tx_buf[index].buf_p = page; 3888 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX; 3889 } 3890 3891 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 3892 if (fep->bufdesc_ex) 3893 estatus = BD_ENET_TX_INT; 3894 3895 bdp->cbd_bufaddr = cpu_to_fec32(dma_addr); 3896 bdp->cbd_datlen = cpu_to_fec16(frame_len); 3897 3898 if (fep->bufdesc_ex) { 3899 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 3900 3901 if (fep->quirks & FEC_QUIRK_HAS_AVB) 3902 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 3903 3904 ebdp->cbd_bdu = 0; 3905 ebdp->cbd_esc = cpu_to_fec32(estatus); 3906 } 3907 3908 /* Make sure the updates to rest of the descriptor are performed before 3909 * transferring ownership. 3910 */ 3911 dma_wmb(); 3912 3913 /* Send it on its way. Tell FEC it's ready, interrupt when done, 3914 * it's the last BD of the frame, and to put the CRC on the end. 3915 */ 3916 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); 3917 bdp->cbd_sc = cpu_to_fec16(status); 3918 3919 /* If this was the last BD in the ring, start at the beginning again. */ 3920 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 3921 3922 /* Make sure the update to bdp are performed before txq->bd.cur. 
*/ 3923 dma_wmb(); 3924 3925 txq->bd.cur = bdp; 3926 3927 /* Trigger transmission start */ 3928 writel(0, txq->bd.reg_desc_active); 3929 3930 return 0; 3931 } 3932 3933 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, 3934 int cpu, struct xdp_buff *xdp, 3935 u32 dma_sync_len) 3936 { 3937 struct fec_enet_priv_tx_q *txq; 3938 struct netdev_queue *nq; 3939 int queue, ret; 3940 3941 queue = fec_enet_xdp_get_tx_queue(fep, cpu); 3942 txq = fep->tx_queue[queue]; 3943 nq = netdev_get_tx_queue(fep->netdev, queue); 3944 3945 __netif_tx_lock(nq, cpu); 3946 3947 /* Avoid tx timeout as XDP shares the queue with kernel stack */ 3948 txq_trans_cond_update(nq); 3949 ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false); 3950 3951 __netif_tx_unlock(nq); 3952 3953 return ret; 3954 } 3955 3956 static int fec_enet_xdp_xmit(struct net_device *dev, 3957 int num_frames, 3958 struct xdp_frame **frames, 3959 u32 flags) 3960 { 3961 struct fec_enet_private *fep = netdev_priv(dev); 3962 struct fec_enet_priv_tx_q *txq; 3963 int cpu = smp_processor_id(); 3964 unsigned int sent_frames = 0; 3965 struct netdev_queue *nq; 3966 unsigned int queue; 3967 int i; 3968 3969 queue = fec_enet_xdp_get_tx_queue(fep, cpu); 3970 txq = fep->tx_queue[queue]; 3971 nq = netdev_get_tx_queue(fep->netdev, queue); 3972 3973 __netif_tx_lock(nq, cpu); 3974 3975 /* Avoid tx timeout as XDP shares the queue with kernel stack */ 3976 txq_trans_cond_update(nq); 3977 for (i = 0; i < num_frames; i++) { 3978 if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0) 3979 break; 3980 sent_frames++; 3981 } 3982 3983 __netif_tx_unlock(nq); 3984 3985 return sent_frames; 3986 } 3987 3988 static int fec_hwtstamp_get(struct net_device *ndev, 3989 struct kernel_hwtstamp_config *config) 3990 { 3991 struct fec_enet_private *fep = netdev_priv(ndev); 3992 3993 if (!netif_running(ndev)) 3994 return -EINVAL; 3995 3996 if (!fep->bufdesc_ex) 3997 return -EOPNOTSUPP; 3998 3999 fec_ptp_get(ndev, config); 4000 4001 return 0; 4002 } 4003 4004 static int fec_hwtstamp_set(struct net_device *ndev, 4005 struct kernel_hwtstamp_config *config, 4006 struct netlink_ext_ack *extack) 4007 { 4008 struct fec_enet_private *fep = netdev_priv(ndev); 4009 4010 if (!netif_running(ndev)) 4011 return -EINVAL; 4012 4013 if (!fep->bufdesc_ex) 4014 return -EOPNOTSUPP; 4015 4016 return fec_ptp_set(ndev, config, extack); 4017 } 4018 4019 static const struct net_device_ops fec_netdev_ops = { 4020 .ndo_open = fec_enet_open, 4021 .ndo_stop = fec_enet_close, 4022 .ndo_start_xmit = fec_enet_start_xmit, 4023 .ndo_select_queue = fec_enet_select_queue, 4024 .ndo_set_rx_mode = set_multicast_list, 4025 .ndo_validate_addr = eth_validate_addr, 4026 .ndo_tx_timeout = fec_timeout, 4027 .ndo_set_mac_address = fec_set_mac_address, 4028 .ndo_eth_ioctl = phy_do_ioctl_running, 4029 .ndo_set_features = fec_set_features, 4030 .ndo_bpf = fec_enet_bpf, 4031 .ndo_xdp_xmit = fec_enet_xdp_xmit, 4032 .ndo_hwtstamp_get = fec_hwtstamp_get, 4033 .ndo_hwtstamp_set = fec_hwtstamp_set, 4034 }; 4035 4036 static const unsigned short offset_des_active_rxq[] = { 4037 FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2 4038 }; 4039 4040 static const unsigned short offset_des_active_txq[] = { 4041 FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2 4042 }; 4043 4044 /* 4045 * XXX: We need to clean up on failure exits here. 
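 * (Note that the buffer descriptor memory from fec_dmam_alloc() is
 * device-managed, so it does not need to be freed explicitly on error.)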
4046 * 4047 */ 4048 static int fec_enet_init(struct net_device *ndev) 4049 { 4050 struct fec_enet_private *fep = netdev_priv(ndev); 4051 struct bufdesc *cbd_base; 4052 dma_addr_t bd_dma; 4053 int bd_size; 4054 unsigned int i; 4055 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : 4056 sizeof(struct bufdesc); 4057 unsigned dsize_log2 = __fls(dsize); 4058 int ret; 4059 4060 WARN_ON(dsize != (1 << dsize_log2)); 4061 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) 4062 fep->rx_align = 0xf; 4063 fep->tx_align = 0xf; 4064 #else 4065 fep->rx_align = 0x3; 4066 fep->tx_align = 0x3; 4067 #endif 4068 fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT; 4069 fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT; 4070 fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT; 4071 fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT; 4072 4073 /* Check mask of the streaming and coherent API */ 4074 ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32)); 4075 if (ret < 0) { 4076 dev_warn(&fep->pdev->dev, "No suitable DMA available\n"); 4077 return ret; 4078 } 4079 4080 ret = fec_enet_alloc_queue(ndev); 4081 if (ret) 4082 return ret; 4083 4084 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; 4085 4086 /* Allocate memory for buffer descriptors. */ 4087 cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma, 4088 GFP_KERNEL); 4089 if (!cbd_base) { 4090 ret = -ENOMEM; 4091 goto free_queue_mem; 4092 } 4093 4094 /* Get the Ethernet address */ 4095 ret = fec_get_mac(ndev); 4096 if (ret) 4097 goto free_queue_mem; 4098 4099 /* Set receive and transmit descriptor base. */ 4100 for (i = 0; i < fep->num_rx_queues; i++) { 4101 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; 4102 unsigned size = dsize * rxq->bd.ring_size; 4103 4104 rxq->bd.qid = i; 4105 rxq->bd.base = cbd_base; 4106 rxq->bd.cur = cbd_base; 4107 rxq->bd.dma = bd_dma; 4108 rxq->bd.dsize = dsize; 4109 rxq->bd.dsize_log2 = dsize_log2; 4110 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; 4111 bd_dma += size; 4112 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 4113 rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 4114 } 4115 4116 for (i = 0; i < fep->num_tx_queues; i++) { 4117 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; 4118 unsigned size = dsize * txq->bd.ring_size; 4119 4120 txq->bd.qid = i; 4121 txq->bd.base = cbd_base; 4122 txq->bd.cur = cbd_base; 4123 txq->bd.dma = bd_dma; 4124 txq->bd.dsize = dsize; 4125 txq->bd.dsize_log2 = dsize_log2; 4126 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; 4127 bd_dma += size; 4128 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 4129 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 4130 } 4131 4132 4133 /* The FEC Ethernet specific entries in the device structure */ 4134 ndev->watchdog_timeo = TX_TIMEOUT; 4135 ndev->netdev_ops = &fec_netdev_ops; 4136 ndev->ethtool_ops = &fec_enet_ethtool_ops; 4137 4138 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 4139 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi); 4140 4141 if (fep->quirks & FEC_QUIRK_HAS_VLAN) 4142 /* enable hw VLAN support */ 4143 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 4144 4145 if (fep->quirks & FEC_QUIRK_HAS_CSUM) { 4146 netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS); 4147 4148 /* enable hw accelerator */ 4149 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 4150 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); 4151 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 4152 } 4153 4154 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { 4155 fep->tx_align = 0; 4156 
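		/* AVB-capable (multi-queue) controllers use 64-byte aligned
		 * RX buffers; TX needs no extra alignment.
		 */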
fep->rx_align = 0x3f; 4157 } 4158 4159 ndev->hw_features = ndev->features; 4160 4161 if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME)) 4162 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | 4163 NETDEV_XDP_ACT_REDIRECT; 4164 4165 fec_restart(ndev); 4166 4167 if (fep->quirks & FEC_QUIRK_MIB_CLEAR) 4168 fec_enet_clear_ethtool_stats(ndev); 4169 else 4170 fec_enet_update_ethtool_stats(ndev); 4171 4172 return 0; 4173 4174 free_queue_mem: 4175 fec_enet_free_queue(ndev); 4176 return ret; 4177 } 4178 4179 static void fec_enet_deinit(struct net_device *ndev) 4180 { 4181 struct fec_enet_private *fep = netdev_priv(ndev); 4182 4183 netif_napi_del(&fep->napi); 4184 fec_enet_free_queue(ndev); 4185 } 4186 4187 #ifdef CONFIG_OF 4188 static int fec_reset_phy(struct platform_device *pdev) 4189 { 4190 struct gpio_desc *phy_reset; 4191 int msec = 1, phy_post_delay = 0; 4192 struct device_node *np = pdev->dev.of_node; 4193 int err; 4194 4195 if (!np) 4196 return 0; 4197 4198 err = of_property_read_u32(np, "phy-reset-duration", &msec); 4199 /* A sane reset duration should not be longer than 1s */ 4200 if (!err && msec > 1000) 4201 msec = 1; 4202 4203 err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay); 4204 /* valid reset duration should be less than 1s */ 4205 if (!err && phy_post_delay > 1000) 4206 return -EINVAL; 4207 4208 phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset", 4209 GPIOD_OUT_HIGH); 4210 if (IS_ERR(phy_reset)) 4211 return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset), 4212 "failed to get phy-reset-gpios\n"); 4213 4214 if (!phy_reset) 4215 return 0; 4216 4217 if (msec > 20) 4218 msleep(msec); 4219 else 4220 usleep_range(msec * 1000, msec * 1000 + 1000); 4221 4222 gpiod_set_value_cansleep(phy_reset, 0); 4223 4224 if (!phy_post_delay) 4225 return 0; 4226 4227 if (phy_post_delay > 20) 4228 msleep(phy_post_delay); 4229 else 4230 usleep_range(phy_post_delay * 1000, 4231 phy_post_delay * 1000 + 1000); 4232 4233 return 0; 4234 } 4235 #else /* CONFIG_OF */ 4236 static int fec_reset_phy(struct platform_device *pdev) 4237 { 4238 /* 4239 * In case of platform probe, the reset has been done 4240 * by machine code. 
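	 * Nothing needs to be done here in that case.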
4241 */ 4242 return 0; 4243 } 4244 #endif /* CONFIG_OF */ 4245 4246 static void 4247 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) 4248 { 4249 struct device_node *np = pdev->dev.of_node; 4250 4251 *num_tx = *num_rx = 1; 4252 4253 if (!np || !of_device_is_available(np)) 4254 return; 4255 4256 /* parse the num of tx and rx queues */ 4257 of_property_read_u32(np, "fsl,num-tx-queues", num_tx); 4258 4259 of_property_read_u32(np, "fsl,num-rx-queues", num_rx); 4260 4261 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { 4262 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", 4263 *num_tx); 4264 *num_tx = 1; 4265 return; 4266 } 4267 4268 if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { 4269 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", 4270 *num_rx); 4271 *num_rx = 1; 4272 return; 4273 } 4274 4275 } 4276 4277 static int fec_enet_get_irq_cnt(struct platform_device *pdev) 4278 { 4279 int irq_cnt = platform_irq_count(pdev); 4280 4281 if (irq_cnt > FEC_IRQ_NUM) 4282 irq_cnt = FEC_IRQ_NUM; /* last for pps */ 4283 else if (irq_cnt == 2) 4284 irq_cnt = 1; /* last for pps */ 4285 else if (irq_cnt <= 0) 4286 irq_cnt = 1; /* At least 1 irq is needed */ 4287 return irq_cnt; 4288 } 4289 4290 static void fec_enet_get_wakeup_irq(struct platform_device *pdev) 4291 { 4292 struct net_device *ndev = platform_get_drvdata(pdev); 4293 struct fec_enet_private *fep = netdev_priv(ndev); 4294 4295 if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2) 4296 fep->wake_irq = fep->irq[2]; 4297 else 4298 fep->wake_irq = fep->irq[0]; 4299 } 4300 4301 static int fec_enet_init_stop_mode(struct fec_enet_private *fep, 4302 struct device_node *np) 4303 { 4304 struct device_node *gpr_np; 4305 u32 out_val[3]; 4306 int ret = 0; 4307 4308 gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0); 4309 if (!gpr_np) 4310 return 0; 4311 4312 ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val, 4313 ARRAY_SIZE(out_val)); 4314 if (ret) { 4315 dev_dbg(&fep->pdev->dev, "no stop mode property\n"); 4316 goto out; 4317 } 4318 4319 fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); 4320 if (IS_ERR(fep->stop_gpr.gpr)) { 4321 dev_err(&fep->pdev->dev, "could not find gpr regmap\n"); 4322 ret = PTR_ERR(fep->stop_gpr.gpr); 4323 fep->stop_gpr.gpr = NULL; 4324 goto out; 4325 } 4326 4327 fep->stop_gpr.reg = out_val[1]; 4328 fep->stop_gpr.bit = out_val[2]; 4329 4330 out: 4331 of_node_put(gpr_np); 4332 4333 return ret; 4334 } 4335 4336 static int 4337 fec_probe(struct platform_device *pdev) 4338 { 4339 struct fec_enet_private *fep; 4340 struct fec_platform_data *pdata; 4341 phy_interface_t interface; 4342 struct net_device *ndev; 4343 int i, irq, ret = 0; 4344 static int dev_id; 4345 struct device_node *np = pdev->dev.of_node, *phy_node; 4346 int num_tx_qs; 4347 int num_rx_qs; 4348 char irq_name[8]; 4349 int irq_cnt; 4350 const struct fec_devinfo *dev_info; 4351 4352 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); 4353 4354 /* Init network device */ 4355 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) + 4356 FEC_STATS_SIZE, num_tx_qs, num_rx_qs); 4357 if (!ndev) 4358 return -ENOMEM; 4359 4360 SET_NETDEV_DEV(ndev, &pdev->dev); 4361 4362 /* setup board info structure */ 4363 fep = netdev_priv(ndev); 4364 4365 dev_info = device_get_match_data(&pdev->dev); 4366 if (!dev_info) 4367 dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data; 4368 if (dev_info) 4369 fep->quirks = dev_info->quirks; 4370 4371 fep->netdev = ndev; 4372 fep->num_rx_queues = num_rx_qs; 4373 fep->num_tx_queues 
= num_tx_qs; 4374 4375 #if !defined(CONFIG_M5272) 4376 /* default enable pause frame auto negotiation */ 4377 if (fep->quirks & FEC_QUIRK_HAS_GBIT) 4378 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 4379 #endif 4380 4381 /* Select default pin state */ 4382 pinctrl_pm_select_default_state(&pdev->dev); 4383 4384 fep->hwp = devm_platform_ioremap_resource(pdev, 0); 4385 if (IS_ERR(fep->hwp)) { 4386 ret = PTR_ERR(fep->hwp); 4387 goto failed_ioremap; 4388 } 4389 4390 fep->pdev = pdev; 4391 fep->dev_id = dev_id++; 4392 4393 platform_set_drvdata(pdev, ndev); 4394 4395 if ((of_machine_is_compatible("fsl,imx6q") || 4396 of_machine_is_compatible("fsl,imx6dl")) && 4397 !of_property_read_bool(np, "fsl,err006687-workaround-present")) 4398 fep->quirks |= FEC_QUIRK_ERR006687; 4399 4400 ret = fec_enet_ipc_handle_init(fep); 4401 if (ret) 4402 goto failed_ipc_init; 4403 4404 if (of_property_read_bool(np, "fsl,magic-packet")) 4405 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; 4406 4407 ret = fec_enet_init_stop_mode(fep, np); 4408 if (ret) 4409 goto failed_stop_mode; 4410 4411 phy_node = of_parse_phandle(np, "phy-handle", 0); 4412 if (!phy_node && of_phy_is_fixed_link(np)) { 4413 ret = of_phy_register_fixed_link(np); 4414 if (ret < 0) { 4415 dev_err(&pdev->dev, 4416 "broken fixed-link specification\n"); 4417 goto failed_phy; 4418 } 4419 phy_node = of_node_get(np); 4420 } 4421 fep->phy_node = phy_node; 4422 4423 ret = of_get_phy_mode(pdev->dev.of_node, &interface); 4424 if (ret) { 4425 pdata = dev_get_platdata(&pdev->dev); 4426 if (pdata) 4427 fep->phy_interface = pdata->phy; 4428 else 4429 fep->phy_interface = PHY_INTERFACE_MODE_MII; 4430 } else { 4431 fep->phy_interface = interface; 4432 } 4433 4434 ret = fec_enet_parse_rgmii_delay(fep, np); 4435 if (ret) 4436 goto failed_rgmii_delay; 4437 4438 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 4439 if (IS_ERR(fep->clk_ipg)) { 4440 ret = PTR_ERR(fep->clk_ipg); 4441 goto failed_clk; 4442 } 4443 4444 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 4445 if (IS_ERR(fep->clk_ahb)) { 4446 ret = PTR_ERR(fep->clk_ahb); 4447 goto failed_clk; 4448 } 4449 4450 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); 4451 4452 /* enet_out is optional, depends on board */ 4453 fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out"); 4454 if (IS_ERR(fep->clk_enet_out)) { 4455 ret = PTR_ERR(fep->clk_enet_out); 4456 goto failed_clk; 4457 } 4458 4459 fep->ptp_clk_on = false; 4460 mutex_init(&fep->ptp_clk_mutex); 4461 4462 /* clk_ref is optional, depends on board */ 4463 fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref"); 4464 if (IS_ERR(fep->clk_ref)) { 4465 ret = PTR_ERR(fep->clk_ref); 4466 goto failed_clk; 4467 } 4468 fep->clk_ref_rate = clk_get_rate(fep->clk_ref); 4469 4470 /* clk_2x_txclk is optional, depends on board */ 4471 if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) { 4472 fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk"); 4473 if (IS_ERR(fep->clk_2x_txclk)) 4474 fep->clk_2x_txclk = NULL; 4475 } 4476 4477 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; 4478 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); 4479 if (IS_ERR(fep->clk_ptp)) { 4480 fep->clk_ptp = NULL; 4481 fep->bufdesc_ex = false; 4482 } 4483 4484 ret = fec_enet_clk_enable(ndev, true); 4485 if (ret) 4486 goto failed_clk; 4487 4488 ret = clk_prepare_enable(fep->clk_ipg); 4489 if (ret) 4490 goto failed_clk_ipg; 4491 ret = clk_prepare_enable(fep->clk_ahb); 4492 if (ret) 4493 goto failed_clk_ahb; 4494 4495 fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); 4496 if 
(!IS_ERR(fep->reg_phy)) { 4497 ret = regulator_enable(fep->reg_phy); 4498 if (ret) { 4499 dev_err(&pdev->dev, 4500 "Failed to enable phy regulator: %d\n", ret); 4501 goto failed_regulator; 4502 } 4503 } else { 4504 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { 4505 ret = -EPROBE_DEFER; 4506 goto failed_regulator; 4507 } 4508 fep->reg_phy = NULL; 4509 } 4510 4511 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); 4512 pm_runtime_use_autosuspend(&pdev->dev); 4513 pm_runtime_get_noresume(&pdev->dev); 4514 pm_runtime_set_active(&pdev->dev); 4515 pm_runtime_enable(&pdev->dev); 4516 4517 ret = fec_reset_phy(pdev); 4518 if (ret) 4519 goto failed_reset; 4520 4521 irq_cnt = fec_enet_get_irq_cnt(pdev); 4522 if (fep->bufdesc_ex) 4523 fec_ptp_init(pdev, irq_cnt); 4524 4525 ret = fec_enet_init(ndev); 4526 if (ret) 4527 goto failed_init; 4528 4529 for (i = 0; i < irq_cnt; i++) { 4530 snprintf(irq_name, sizeof(irq_name), "int%d", i); 4531 irq = platform_get_irq_byname_optional(pdev, irq_name); 4532 if (irq < 0) 4533 irq = platform_get_irq(pdev, i); 4534 if (irq < 0) { 4535 ret = irq; 4536 goto failed_irq; 4537 } 4538 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, 4539 0, pdev->name, ndev); 4540 if (ret) 4541 goto failed_irq; 4542 4543 fep->irq[i] = irq; 4544 } 4545 4546 /* Decide which interrupt line is wakeup capable */ 4547 fec_enet_get_wakeup_irq(pdev); 4548 4549 ret = fec_enet_mii_init(pdev); 4550 if (ret) 4551 goto failed_mii_init; 4552 4553 /* Carrier starts down, phylib will bring it up */ 4554 netif_carrier_off(ndev); 4555 fec_enet_clk_enable(ndev, false); 4556 pinctrl_pm_select_sleep_state(&pdev->dev); 4557 4558 ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN; 4559 4560 ret = register_netdev(ndev); 4561 if (ret) 4562 goto failed_register; 4563 4564 device_init_wakeup(&ndev->dev, fep->wol_flag & 4565 FEC_WOL_HAS_MAGIC_PACKET); 4566 4567 if (fep->bufdesc_ex && fep->ptp_clock) 4568 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); 4569 4570 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 4571 4572 pm_runtime_mark_last_busy(&pdev->dev); 4573 pm_runtime_put_autosuspend(&pdev->dev); 4574 4575 return 0; 4576 4577 failed_register: 4578 fec_enet_mii_remove(fep); 4579 failed_mii_init: 4580 failed_irq: 4581 fec_enet_deinit(ndev); 4582 failed_init: 4583 fec_ptp_stop(pdev); 4584 failed_reset: 4585 pm_runtime_put_noidle(&pdev->dev); 4586 pm_runtime_disable(&pdev->dev); 4587 if (fep->reg_phy) 4588 regulator_disable(fep->reg_phy); 4589 failed_regulator: 4590 clk_disable_unprepare(fep->clk_ahb); 4591 failed_clk_ahb: 4592 clk_disable_unprepare(fep->clk_ipg); 4593 failed_clk_ipg: 4594 fec_enet_clk_enable(ndev, false); 4595 failed_clk: 4596 failed_rgmii_delay: 4597 if (of_phy_is_fixed_link(np)) 4598 of_phy_deregister_fixed_link(np); 4599 of_node_put(phy_node); 4600 failed_stop_mode: 4601 failed_ipc_init: 4602 failed_phy: 4603 dev_id--; 4604 failed_ioremap: 4605 free_netdev(ndev); 4606 4607 return ret; 4608 } 4609 4610 static void 4611 fec_drv_remove(struct platform_device *pdev) 4612 { 4613 struct net_device *ndev = platform_get_drvdata(pdev); 4614 struct fec_enet_private *fep = netdev_priv(ndev); 4615 struct device_node *np = pdev->dev.of_node; 4616 int ret; 4617 4618 ret = pm_runtime_get_sync(&pdev->dev); 4619 if (ret < 0) 4620 dev_err(&pdev->dev, 4621 "Failed to resume device in remove callback (%pe)\n", 4622 ERR_PTR(ret)); 4623 4624 cancel_work_sync(&fep->tx_timeout_work); 4625 fec_ptp_stop(pdev); 4626 unregister_netdev(ndev); 4627 
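	/* The netdev is unregistered at this point, so it is safe to tear
	 * down the MDIO bus and the remaining PHY resources.
	 */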
fec_enet_mii_remove(fep); 4628 if (fep->reg_phy) 4629 regulator_disable(fep->reg_phy); 4630 4631 if (of_phy_is_fixed_link(np)) 4632 of_phy_deregister_fixed_link(np); 4633 of_node_put(fep->phy_node); 4634 4635 /* After pm_runtime_get_sync() failed, the clks are still off, so skip 4636 * disabling them again. 4637 */ 4638 if (ret >= 0) { 4639 clk_disable_unprepare(fep->clk_ahb); 4640 clk_disable_unprepare(fep->clk_ipg); 4641 } 4642 pm_runtime_put_noidle(&pdev->dev); 4643 pm_runtime_disable(&pdev->dev); 4644 4645 fec_enet_deinit(ndev); 4646 free_netdev(ndev); 4647 } 4648 4649 static int fec_suspend(struct device *dev) 4650 { 4651 struct net_device *ndev = dev_get_drvdata(dev); 4652 struct fec_enet_private *fep = netdev_priv(ndev); 4653 int ret; 4654 4655 rtnl_lock(); 4656 if (netif_running(ndev)) { 4657 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) 4658 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; 4659 phy_stop(ndev->phydev); 4660 napi_disable(&fep->napi); 4661 netif_tx_lock_bh(ndev); 4662 netif_device_detach(ndev); 4663 netif_tx_unlock_bh(ndev); 4664 fec_stop(ndev); 4665 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { 4666 fec_irqs_disable(ndev); 4667 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 4668 } else { 4669 fec_irqs_disable_except_wakeup(ndev); 4670 if (fep->wake_irq > 0) { 4671 disable_irq(fep->wake_irq); 4672 enable_irq_wake(fep->wake_irq); 4673 } 4674 fec_enet_stop_mode(fep, true); 4675 } 4676 /* It's safe to disable clocks since interrupts are masked */ 4677 fec_enet_clk_enable(ndev, false); 4678 4679 fep->rpm_active = !pm_runtime_status_suspended(dev); 4680 if (fep->rpm_active) { 4681 ret = pm_runtime_force_suspend(dev); 4682 if (ret < 0) { 4683 rtnl_unlock(); 4684 return ret; 4685 } 4686 } 4687 } 4688 rtnl_unlock(); 4689 4690 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) 4691 regulator_disable(fep->reg_phy); 4692 4693 /* SOC supply clock to phy, when clock is disabled, phy link down 4694 * SOC control phy regulator, when regulator is disabled, phy link down 4695 */ 4696 if (fep->clk_enet_out || fep->reg_phy) 4697 fep->link = 0; 4698 4699 return 0; 4700 } 4701 4702 static int fec_resume(struct device *dev) 4703 { 4704 struct net_device *ndev = dev_get_drvdata(dev); 4705 struct fec_enet_private *fep = netdev_priv(ndev); 4706 int ret; 4707 int val; 4708 4709 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { 4710 ret = regulator_enable(fep->reg_phy); 4711 if (ret) 4712 return ret; 4713 } 4714 4715 rtnl_lock(); 4716 if (netif_running(ndev)) { 4717 if (fep->rpm_active) 4718 pm_runtime_force_resume(dev); 4719 4720 ret = fec_enet_clk_enable(ndev, true); 4721 if (ret) { 4722 rtnl_unlock(); 4723 goto failed_clk; 4724 } 4725 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { 4726 fec_enet_stop_mode(fep, false); 4727 if (fep->wake_irq) { 4728 disable_irq_wake(fep->wake_irq); 4729 enable_irq(fep->wake_irq); 4730 } 4731 4732 val = readl(fep->hwp + FEC_ECNTRL); 4733 val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 4734 writel(val, fep->hwp + FEC_ECNTRL); 4735 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; 4736 } else { 4737 pinctrl_pm_select_default_state(&fep->pdev->dev); 4738 } 4739 fec_restart(ndev); 4740 netif_tx_lock_bh(ndev); 4741 netif_device_attach(ndev); 4742 netif_tx_unlock_bh(ndev); 4743 napi_enable(&fep->napi); 4744 phy_init_hw(ndev->phydev); 4745 phy_start(ndev->phydev); 4746 } 4747 rtnl_unlock(); 4748 4749 return 0; 4750 4751 failed_clk: 4752 if (fep->reg_phy) 4753 regulator_disable(fep->reg_phy); 4754 return ret; 4755 } 4756 4757 static int fec_runtime_suspend(struct device *dev) 
4758 { 4759 struct net_device *ndev = dev_get_drvdata(dev); 4760 struct fec_enet_private *fep = netdev_priv(ndev); 4761 4762 clk_disable_unprepare(fep->clk_ahb); 4763 clk_disable_unprepare(fep->clk_ipg); 4764 4765 return 0; 4766 } 4767 4768 static int fec_runtime_resume(struct device *dev) 4769 { 4770 struct net_device *ndev = dev_get_drvdata(dev); 4771 struct fec_enet_private *fep = netdev_priv(ndev); 4772 int ret; 4773 4774 ret = clk_prepare_enable(fep->clk_ahb); 4775 if (ret) 4776 return ret; 4777 ret = clk_prepare_enable(fep->clk_ipg); 4778 if (ret) 4779 goto failed_clk_ipg; 4780 4781 return 0; 4782 4783 failed_clk_ipg: 4784 clk_disable_unprepare(fep->clk_ahb); 4785 return ret; 4786 } 4787 4788 static const struct dev_pm_ops fec_pm_ops = { 4789 SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) 4790 RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) 4791 }; 4792 4793 static struct platform_driver fec_driver = { 4794 .driver = { 4795 .name = DRIVER_NAME, 4796 .pm = pm_ptr(&fec_pm_ops), 4797 .of_match_table = fec_dt_ids, 4798 .suppress_bind_attrs = true, 4799 }, 4800 .id_table = fec_devtype, 4801 .probe = fec_probe, 4802 .remove = fec_drv_remove, 4803 }; 4804 4805 module_platform_driver(fec_driver); 4806 4807 MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver"); 4808 MODULE_LICENSE("GPL"); 4809