// SPDX-License-Identifier: GPL-2.0+
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/page_pool/helpers.h>
#include <net/selftests.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/prefetch.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <asm/cacheflush.h>

#include "fec.h"

static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_set(struct net_device *ndev);
static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
				int cpu, struct xdp_buff *xdp,
				u32 dma_sync_len);

#define DRIVER_NAME	"fec"

static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};

#define FEC_ENET_RSEM_V		0x84
#define FEC_ENET_RSFL_V		16
#define FEC_ENET_RAEM_V		0x8
#define FEC_ENET_RAFL_V		0x8
#define FEC_ENET_OPD_V		0xFFF0
#define FEC_MDIO_PM_TIMEOUT	100 /* ms */

#define FEC_ENET_XDP_PASS	0
#define FEC_ENET_XDP_CONSUMED	BIT(0)
#define FEC_ENET_XDP_TX		BIT(1)
#define FEC_ENET_XDP_REDIR	BIT(2)

struct fec_devinfo {
	u32 quirks;
};

static const struct fec_devinfo fec_imx25_info = {
	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx27_info = {
	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx28_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6q_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_mvf600_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6x_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6ul_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx8mq_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx8qm_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_s32v234_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);

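/* Device-tree match table (note added for clarity): each entry's .data points
 * at one of the fec_devinfo structures above, and the probe path is expected
 * to copy its quirk bits into the driver's private state so the rest of the
 * driver can key off them.
 */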
static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, },
	{ .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, },
	{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
	{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
	{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
	{ .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. Worst case is 64, so round down by 64.
 */
#define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE		64

/* FEC receive acceleration */
#define FEC_RACC_IPDIS		BIT(1)
#define FEC_RACC_PRODIS		BIT(2)
#define FEC_RACC_SHIFT16	BIT(7)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/* MIB Control Register */
#define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
    defined(CONFIG_ARM64)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_ST_C45		(0)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_READ_C45	(3 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_OP_ADDR_WRITE	(0)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
/* FEC ECR bits definition */
#define FEC_ECR_RESET		BIT(0)
#define FEC_ECR_ETHEREN		BIT(1)
#define FEC_ECR_MAGICEN		BIT(2)
#define FEC_ECR_SLEEP		BIT(3)
#define FEC_ECR_EN1588		BIT(4)
#define FEC_ECR_BYTESWP		BIT(8)
/* FEC RCR bits definition */
#define FEC_RCR_LOOP		BIT(0)
#define FEC_RCR_HALFDPX		BIT(1)
#define FEC_RCR_MII		BIT(2)
#define FEC_RCR_PROMISC		BIT(3)
#define FEC_RCR_BC_REJ		BIT(4)
#define FEC_RCR_FLOWCTL		BIT(5)
#define FEC_RCR_RMII		BIT(8)
#define FEC_RCR_10BASET		BIT(9)
/* TX WMARK bits */
#define FEC_TXWMRK_STRFWD	BIT(8)

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT	(2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG		0x1
#define FEC_PAUSE_FLAG_ENABLE		0x2
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)

/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))

static int mii_cnt;

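/* Buffer descriptor ring helpers (note added for clarity): step forwards or
 * backwards through a Tx/Rx descriptor ring described by struct bufdesc_prop,
 * wrapping between bd->base and bd->last. The descriptor stride (bd->dsize)
 * differs between the normal and extended descriptor layouts.
 */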
static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp >= bd->last) ? bd->base
			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
}

static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp <= bd->base) ? bd->last
			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
}

static int fec_enet_get_bd_index(struct bufdesc *bdp,
				 struct bufdesc_prop *bd)
{
	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}

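/* Free Tx descriptors between bd.cur (next descriptor the driver will use)
 * and dirty_tx (last descriptor already reclaimed). One slot is deliberately
 * left unused as a separator, so e.g. an idle ring of 512 descriptors
 * (dirty_tx == bd.cur) reports 511 free entries (illustrative note).
 */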
static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
	int entries;

	entries = (((const char *)txq->dirty_tx -
			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

	return entries >= 0 ? entries : entries + txq->bd.ring_size;
}

static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}

static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr  SC  addr  len  SKB\n");

	txq = fep->tx_queue[0];
	bdp = txq->bd.base;

	do {
		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
			index,
			bdp == txq->bd.cur ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			fec16_to_cpu(bdp->cbd_sc),
			fec32_to_cpu(bdp->cbd_bufaddr),
			fec16_to_cpu(bdp->cbd_datlen),
			txq->tx_buf[index].buf_p);
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		index++;
	} while (bdp != txq->bd.base);
}

/*
 * Coldfire does not support DMA coherent allocations, and has historically used
 * a band-aid with a manual flush in fec_enet_rx_queue.
 */
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp)
{
	return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
}

static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle)
{
	dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL);
}
#else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp)
{
	return dma_alloc_coherent(dev, size, handle, gfp);
}

static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}
#endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */

struct fec_dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
};

static void fec_dmam_release(struct device *dev, void *res)
{
	struct fec_dma_devres *this = res;

	fec_dma_free(dev, this->size, this->vaddr, this->dma_handle);
}

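/* Device-managed counterpart of fec_dma_alloc() (note added for clarity): the
 * buffer is released through fec_dmam_release() when the owning device is
 * unbound, so callers do not need an explicit free on teardown paths.
 */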
static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			    gfp_t gfp)
{
	struct fec_dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;
	vaddr = fec_dma_alloc(dev, size, handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}
	dr->vaddr = vaddr;
	dr->dma_handle = *handle;
	dr->size = size;
	devres_add(dev, dr);
	return vaddr;
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}

static int
fec_enet_create_page_pool(struct fec_enet_private *fep,
			  struct fec_enet_priv_rx_q *rxq, int size)
{
	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = size,
		.nid = dev_to_node(&fep->pdev->dev),
		.dev = &fep->pdev->dev,
		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset = FEC_ENET_XDP_HEADROOM,
		.max_len = FEC_ENET_RX_FRSIZE,
	};
	int err;

	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool)) {
		err = PTR_ERR(rxq->page_pool);
		rxq->page_pool = NULL;
		return err;
	}

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
	if (err < 0)
		goto err_free_pp;

	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err)
		goto err_unregister_rxq;

	return 0;

err_unregister_rxq:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
err_free_pp:
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;
	return err;
}

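/* Map every paged fragment of the skb and chain it into the Tx ring behind
 * the descriptor for the linear part (note added for clarity). Returns the
 * last descriptor used on success, or an ERR_PTR on DMA mapping failure, in
 * which case the fragments mapped so far are unmapped again.
 */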
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = txq->bd.cur;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
	unsigned int index;
	void *bufaddr;
	dma_addr_t addr;
	int i;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		ebdp = (struct bufdesc_ex *)bdp;

		status = fec16_to_cpu(bdp->cbd_sc);
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

			ebdp->cbd_bdu = 0;
			ebdp->cbd_esc = cpu_to_fec32(estatus);
		}

		bufaddr = skb_frag_address(this_frag);

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		if (((unsigned long) bufaddr) & fep->tx_align ||
		    fep->quirks & FEC_QUIRK_SWAP_FRAME) {
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];

			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);
		}

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

		bdp->cbd_bufaddr = cpu_to_fec32(addr);
		bdp->cbd_datlen = cpu_to_fec16(frag_len);
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);
	}

	return bdp;
dma_mapping_error:
	bdp = txq->bd.cur;
	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
	}
	return ERR_PTR(-ENOMEM);
}

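/* Queue one skb (linear part plus any fragments) on the given Tx queue
 * (note added for clarity): check for enough free descriptors, map the head
 * and fragments for DMA, fill in the (extended) descriptor fields, and
 * finally hand ownership to the hardware by setting BD_ENET_TX_READY and
 * poking the queue's active register.
 */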
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
	dma_addr_t addr;
	unsigned short status;
	unsigned short buflen;
	unsigned int estatus = 0;
	unsigned int index;
	int entries_free;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	last_bdp = bdp;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	buflen = skb_headlen(skb);

	index = fec_enet_get_bd_index(bdp, &txq->bd);
	if (((unsigned long) bufaddr) & fep->tx_align ||
	    fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);
	}

	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (nr_frags) {
		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
		if (IS_ERR(last_bdp)) {
			dma_unmap_single(&fep->pdev->dev, addr,
					 buflen, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
	bdp->cbd_datlen = cpu_to_fec16(buflen);

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
	/* Save skb pointer */
	txq->tx_buf[index].buf_p = skb;

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	wmb();

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);

	skb_tx_timestamp(skb);

	/* Make sure the update to bdp is performed before txq->bd.cur. */
	wmb();
	txq->bd.cur = bdp;

	/* Trigger transmission start */
	writel(0, txq->bd.reg_desc_active);

	return 0;
}

static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short status;
	unsigned int estatus = 0;
	dma_addr_t addr;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	if (((unsigned long) data) & fep->tx_align ||
	    fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);
	}

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	bdp->cbd_datlen = cpu_to_fec16(size);
	bdp->cbd_bufaddr = cpu_to_fec32(addr);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}

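/* Write the descriptor for one TSO segment header (note added for clarity):
 * headers are built in the preallocated txq->tso_hdrs area (see
 * IS_TSO_HEADER()), and are bounced through txq->tx_bounce and re-mapped when
 * alignment or frame-swap quirks require it.
 */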
static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_tcp_all_headers(skb);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long)bufaddr) & fep->tx_align ||
	    fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_OK;
		}
	}

	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
	bdp->cbd_datlen = cpu_to_fec16(hdr_len);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}

static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len, total_len, data_left;
	struct bufdesc *bdp = txq->bd.cur;
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	hdr_len = tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
			index = fec_enet_get_bd_index(bdp, &txq->bd);
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Save skb pointer */
	txq->tx_buf[index].buf_p = skb;

	skb_tx_timestamp(skb);
	txq->bd.cur = bdp;

	/* Trigger transmission start */
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active))
		writel(0, txq->bd.reg_desc_active);

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	return ret;
}

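/* ndo_start_xmit handler (note added for clarity): dispatch to the TSO or
 * plain skb submit path for the skb's Tx queue, then stop the queue once the
 * number of free descriptors drops to the stop threshold.
 */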
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
	if (ret)
		return ret;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}

/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;

		for (i = 0; i < rxq->bd.ring_size; i++) {

			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
			else
				bdp->cbd_sc = cpu_to_fec16(0);
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

		rxq->bd.cur = rxq->bd.base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		txq->bd.cur = bdp;

		for (i = 0; i < txq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = cpu_to_fec16(0);
			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
				if (bdp->cbd_bufaddr &&
				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);
				if (txq->tx_buf[i].buf_p)
					dev_kfree_skb_any(txq->tx_buf[i].buf_p);
			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
				if (bdp->cbd_bufaddr)
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);

				if (txq->tx_buf[i].buf_p)
					xdp_return_frame(txq->tx_buf[i].buf_p);
			} else {
				struct page *page = txq->tx_buf[i].buf_p;

				if (page)
					page_pool_put_page(page->pp, page, 0, false);
			}

			txq->tx_buf[i].buf_p = NULL;
			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
		txq->dirty_tx = bdp;
	}
}

static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
}

static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
	}
}

/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = FEC_ECR_ETHEREN;

	/* Whack a reset. We should wait for this.
	 * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
	    ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}

	/*
	 * An enet-mac reset will reset the MAC address registers too,
	 * so we need to reconfigure them.
	 */
	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
	writel((__force u32)cpu_to_be32(temp_mac[0]),
	       fep->hwp + FEC_ADDR_LOW);
	writel((__force u32)cpu_to_be32(temp_mac[1]),
	       fep->hwp + FEC_ADDR_HIGH);

	/* Clear any outstanding interrupt, except MDIO. */
	writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);

	fec_enet_bd_init(ndev);

	fec_enet_enable_ring(ndev);

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		u32 val = readl(fep->hwp + FEC_RACC);

		/* align IP header */
		val |= FEC_RACC_SHIFT16;
		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
			/* set RX checksum */
			val |= FEC_RACC_OPTIONS;
		else
			val &= ~FEC_RACC_OPTIONS;
		writel(val, fep->hwp + FEC_RACC);
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
	}
#endif

1141793fc096SFrank Li */ 11426b7e4008SLothar Waßmann if (fep->quirks & FEC_QUIRK_ENET_MAC) { 1143793fc096SFrank Li /* Enable flow control and length check */ 1144793fc096SFrank Li rcntl |= 0x40000000 | 0x00000020; 1145793fc096SFrank Li 1146793fc096SFrank Li /* RGMII, RMII or MII */ 1147e813bb2bSMarkus Pargmann if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || 1148e813bb2bSMarkus Pargmann fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || 1149e813bb2bSMarkus Pargmann fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || 1150e813bb2bSMarkus Pargmann fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) 1151793fc096SFrank Li rcntl |= (1 << 6); 1152793fc096SFrank Li else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 1153ff049886SCsókás Bence rcntl |= FEC_RCR_RMII; 1154793fc096SFrank Li else 1155ff049886SCsókás Bence rcntl &= ~FEC_RCR_RMII; 1156793fc096SFrank Li 1157793fc096SFrank Li /* 1G, 100M or 10M */ 115845f5c327SPhilippe Reynes if (ndev->phydev) { 115945f5c327SPhilippe Reynes if (ndev->phydev->speed == SPEED_1000) 1160793fc096SFrank Li ecntl |= (1 << 5); 116145f5c327SPhilippe Reynes else if (ndev->phydev->speed == SPEED_100) 1162ff049886SCsókás Bence rcntl &= ~FEC_RCR_10BASET; 1163793fc096SFrank Li else 1164ff049886SCsókás Bence rcntl |= FEC_RCR_10BASET; 1165793fc096SFrank Li } 1166793fc096SFrank Li } else { 1167793fc096SFrank Li #ifdef FEC_MIIGSK_ENR 11686b7e4008SLothar Waßmann if (fep->quirks & FEC_QUIRK_USE_GASKET) { 1169793fc096SFrank Li u32 cfgr; 1170793fc096SFrank Li /* disable the gasket and wait */ 1171793fc096SFrank Li writel(0, fep->hwp + FEC_MIIGSK_ENR); 1172793fc096SFrank Li while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 1173793fc096SFrank Li udelay(1); 1174793fc096SFrank Li 1175793fc096SFrank Li /* 1176793fc096SFrank Li * configure the gasket: 1177793fc096SFrank Li * RMII, 50 MHz, no loopback, no echo 1178793fc096SFrank Li * MII, 25 MHz, no loopback, no echo 1179793fc096SFrank Li */ 1180793fc096SFrank Li cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 1181793fc096SFrank Li ? 
BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; 118245f5c327SPhilippe Reynes if (ndev->phydev && ndev->phydev->speed == SPEED_10) 1183793fc096SFrank Li cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; 1184793fc096SFrank Li writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); 1185793fc096SFrank Li 1186793fc096SFrank Li /* re-enable the gasket */ 1187793fc096SFrank Li writel(2, fep->hwp + FEC_MIIGSK_ENR); 1188793fc096SFrank Li } 1189793fc096SFrank Li #endif 1190793fc096SFrank Li } 1191793fc096SFrank Li 1192d1391930SGuenter Roeck #if !defined(CONFIG_M5272) 1193793fc096SFrank Li /* enable pause frame*/ 1194793fc096SFrank Li if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || 1195793fc096SFrank Li ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && 119645f5c327SPhilippe Reynes ndev->phydev && ndev->phydev->pause)) { 1197f7859a03SCsókás Bence rcntl |= FEC_RCR_FLOWCTL; 1198793fc096SFrank Li 11994c09eed9SJim Baxter /* set FIFO threshold parameter to reduce overrun */ 1200793fc096SFrank Li writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); 1201793fc096SFrank Li writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); 1202793fc096SFrank Li writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); 1203793fc096SFrank Li writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); 1204793fc096SFrank Li 1205793fc096SFrank Li /* OPD */ 1206793fc096SFrank Li writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); 1207793fc096SFrank Li } else { 1208f7859a03SCsókás Bence rcntl &= ~FEC_RCR_FLOWCTL; 1209793fc096SFrank Li } 1210d1391930SGuenter Roeck #endif /* !defined(CONFIG_M5272) */ 1211793fc096SFrank Li 1212793fc096SFrank Li writel(rcntl, fep->hwp + FEC_R_CNTRL); 1213793fc096SFrank Li 121484fe6182SStefan Wahren /* Setup multicast filter. */ 121584fe6182SStefan Wahren set_multicast_list(ndev); 121684fe6182SStefan Wahren #ifndef CONFIG_M5272 121784fe6182SStefan Wahren writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); 121884fe6182SStefan Wahren writel(0, fep->hwp + FEC_HASH_TABLE_LOW); 121984fe6182SStefan Wahren #endif 122084fe6182SStefan Wahren 12216b7e4008SLothar Waßmann if (fep->quirks & FEC_QUIRK_ENET_MAC) { 1222793fc096SFrank Li /* enable ENET endian swap */ 1223ff049886SCsókás Bence ecntl |= FEC_ECR_BYTESWP; 1224793fc096SFrank Li /* enable ENET store and forward mode */ 1225ff049886SCsókás Bence writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK); 1226793fc096SFrank Li } 1227793fc096SFrank Li 1228793fc096SFrank Li if (fep->bufdesc_ex) 1229ff049886SCsókás Bence ecntl |= FEC_ECR_EN1588; 1230793fc096SFrank Li 1231fc539459SFugang Duan if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && 1232fc539459SFugang Duan fep->rgmii_txc_dly) 1233fc539459SFugang Duan ecntl |= FEC_ENET_TXC_DLY; 1234fc539459SFugang Duan if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && 1235fc539459SFugang Duan fep->rgmii_rxc_dly) 1236fc539459SFugang Duan ecntl |= FEC_ENET_RXC_DLY; 1237fc539459SFugang Duan 123838ae92dcSChris Healy #ifndef CONFIG_M5272 1239b9eef55cSJim Baxter /* Enable the MIB statistic event counters */ 1240b9eef55cSJim Baxter writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); 124138ae92dcSChris Healy #endif 124238ae92dcSChris Healy 1243793fc096SFrank Li /* And last, enable the transmit and receive processing */ 1244793fc096SFrank Li writel(ecntl, fep->hwp + FEC_ECNTRL); 1245ce99d0d3SFrank Li fec_enet_active_rxring(ndev); 1246793fc096SFrank Li 1247793fc096SFrank Li if (fep->bufdesc_ex) 1248793fc096SFrank Li fec_ptp_start_cyclecounter(ndev); 1249793fc096SFrank Li 1250793fc096SFrank Li /* Enable interrupts we wish to service */ 12510c5a3aefSNimrod Andy if (fep->link) 1252793fc096SFrank Li 
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 12530c5a3aefSNimrod Andy else 1254f166f890SAndrew Lunn writel(0, fep->hwp + FEC_IMASK); 1255d851b47bSFugang Duan 1256d851b47bSFugang Duan /* Init the interrupt coalescing */ 12577e630356SRasmus Villemoes if (fep->quirks & FEC_QUIRK_HAS_COALESCE) 1258df727d45SRasmus Villemoes fec_enet_itr_coal_set(ndev); 1259793fc096SFrank Li } 1260793fc096SFrank Li 126140c79ce1SWei Fang static int fec_enet_ipc_handle_init(struct fec_enet_private *fep) 126240c79ce1SWei Fang { 126340c79ce1SWei Fang if (!(of_machine_is_compatible("fsl,imx8qm") || 126440c79ce1SWei Fang of_machine_is_compatible("fsl,imx8qxp") || 126540c79ce1SWei Fang of_machine_is_compatible("fsl,imx8dxl"))) 126640c79ce1SWei Fang return 0; 126740c79ce1SWei Fang 126840c79ce1SWei Fang return imx_scu_get_handle(&fep->ipc_handle); 126940c79ce1SWei Fang } 127040c79ce1SWei Fang 127140c79ce1SWei Fang static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled) 127240c79ce1SWei Fang { 127340c79ce1SWei Fang struct device_node *np = fep->pdev->dev.of_node; 127440c79ce1SWei Fang u32 rsrc_id, val; 127540c79ce1SWei Fang int idx; 127640c79ce1SWei Fang 127740c79ce1SWei Fang if (!np || !fep->ipc_handle) 127840c79ce1SWei Fang return; 127940c79ce1SWei Fang 128040c79ce1SWei Fang idx = of_alias_get_id(np, "ethernet"); 128140c79ce1SWei Fang if (idx < 0) 128240c79ce1SWei Fang idx = 0; 128340c79ce1SWei Fang rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0; 128440c79ce1SWei Fang 128540c79ce1SWei Fang val = enabled ? 1 : 0; 128640c79ce1SWei Fang imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val); 128740c79ce1SWei Fang } 128840c79ce1SWei Fang 1289da722186SMartin Fuzzey static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) 1290da722186SMartin Fuzzey { 1291da722186SMartin Fuzzey struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 1292da722186SMartin Fuzzey struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr; 1293da722186SMartin Fuzzey 1294da722186SMartin Fuzzey if (stop_gpr->gpr) { 1295da722186SMartin Fuzzey if (enabled) 1296da722186SMartin Fuzzey regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, 1297da722186SMartin Fuzzey BIT(stop_gpr->bit), 1298da722186SMartin Fuzzey BIT(stop_gpr->bit)); 1299da722186SMartin Fuzzey else 1300da722186SMartin Fuzzey regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, 1301da722186SMartin Fuzzey BIT(stop_gpr->bit), 0); 1302da722186SMartin Fuzzey } else if (pdata && pdata->sleep_mode_enable) { 1303da722186SMartin Fuzzey pdata->sleep_mode_enable(enabled); 130440c79ce1SWei Fang } else { 130540c79ce1SWei Fang fec_enet_ipg_stop_set(fep, enabled); 1306da722186SMartin Fuzzey } 1307da722186SMartin Fuzzey } 1308da722186SMartin Fuzzey 13090b6f65c7SJoakim Zhang static void fec_irqs_disable(struct net_device *ndev) 13100b6f65c7SJoakim Zhang { 13110b6f65c7SJoakim Zhang struct fec_enet_private *fep = netdev_priv(ndev); 13120b6f65c7SJoakim Zhang 13130b6f65c7SJoakim Zhang writel(0, fep->hwp + FEC_IMASK); 13140b6f65c7SJoakim Zhang } 13150b6f65c7SJoakim Zhang 13160b6f65c7SJoakim Zhang static void fec_irqs_disable_except_wakeup(struct net_device *ndev) 13170b6f65c7SJoakim Zhang { 13180b6f65c7SJoakim Zhang struct fec_enet_private *fep = netdev_priv(ndev); 13190b6f65c7SJoakim Zhang 13200b6f65c7SJoakim Zhang writel(0, fep->hwp + FEC_IMASK); 13210b6f65c7SJoakim Zhang writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); 13220b6f65c7SJoakim Zhang } 13230b6f65c7SJoakim Zhang 1324793fc096SFrank Li static void 1325793fc096SFrank Li fec_stop(struct net_device 
*ndev) 1326793fc096SFrank Li { 1327793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 1328ff049886SCsókás Bence u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII; 1329de40ed31SNimrod Andy u32 val; 1330793fc096SFrank Li 1331793fc096SFrank Li /* We cannot expect a graceful transmit stop without link !!! */ 1332793fc096SFrank Li if (fep->link) { 1333793fc096SFrank Li writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ 1334793fc096SFrank Li udelay(10); 1335793fc096SFrank Li if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) 133631b7720cSJoe Perches netdev_err(ndev, "Graceful transmit stop did not complete!\n"); 1337793fc096SFrank Li } 1338793fc096SFrank Li 1339106c314cSFugang Duan /* Whack a reset. We should wait for this. 1340106c314cSFugang Duan * For i.MX6SX SOC, enet use AXI bus, we use disable MAC 1341106c314cSFugang Duan * instead of reset MAC itself. 1342106c314cSFugang Duan */ 1343de40ed31SNimrod Andy if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1344471ff445SJoakim Zhang if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { 1345106c314cSFugang Duan writel(0, fep->hwp + FEC_ECNTRL); 1346106c314cSFugang Duan } else { 1347ff049886SCsókás Bence writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL); 1348793fc096SFrank Li udelay(10); 1349106c314cSFugang Duan } 1350de40ed31SNimrod Andy } else { 1351de40ed31SNimrod Andy val = readl(fep->hwp + FEC_ECNTRL); 1352de40ed31SNimrod Andy val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 1353de40ed31SNimrod Andy writel(val, fep->hwp + FEC_ECNTRL); 1354de40ed31SNimrod Andy } 1355de40ed31SNimrod Andy writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 13560b6f65c7SJoakim Zhang writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1357793fc096SFrank Li 1358793fc096SFrank Li /* We have to keep ENET enabled to have MII interrupt stay working */ 1359de40ed31SNimrod Andy if (fep->quirks & FEC_QUIRK_ENET_MAC && 1360de40ed31SNimrod Andy !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1361ff049886SCsókás Bence writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL); 1362793fc096SFrank Li writel(rmii_mode, fep->hwp + FEC_R_CNTRL); 1363793fc096SFrank Li } 1364793fc096SFrank Li } 1365793fc096SFrank Li 1366793fc096SFrank Li static void 13670290bd29SMichael S. 
Tsirkin fec_timeout(struct net_device *ndev, unsigned int txqueue) 1368793fc096SFrank Li { 1369793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 1370793fc096SFrank Li 1371344756f6SRussell King fec_dump(ndev); 1372344756f6SRussell King 1373793fc096SFrank Li ndev->stats.tx_errors++; 1374793fc096SFrank Li 137536cdc743SRussell King schedule_work(&fep->tx_timeout_work); 137654309fa6SFrank Li } 137754309fa6SFrank Li 137836cdc743SRussell King static void fec_enet_timeout_work(struct work_struct *work) 137954309fa6SFrank Li { 138054309fa6SFrank Li struct fec_enet_private *fep = 138136cdc743SRussell King container_of(work, struct fec_enet_private, tx_timeout_work); 13828ce5624fSRussell King struct net_device *ndev = fep->netdev; 138354309fa6SFrank Li 1384da1774e5SRussell King rtnl_lock(); 13858ce5624fSRussell King if (netif_device_present(ndev) || netif_running(ndev)) { 1386dbc64a8eSRussell King napi_disable(&fep->napi); 1387dbc64a8eSRussell King netif_tx_lock_bh(ndev); 1388ef83337dSRussell King fec_restart(ndev); 1389657ade07SRickard x Andersson netif_tx_wake_all_queues(ndev); 13906af42d42SRussell King netif_tx_unlock_bh(ndev); 1391dbc64a8eSRussell King napi_enable(&fep->napi); 13928ce5624fSRussell King } 1393da1774e5SRussell King rtnl_unlock(); 139454309fa6SFrank Li } 1395793fc096SFrank Li 1396793fc096SFrank Li static void 1397bfd4ecddSRussell King fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, 1398bfd4ecddSRussell King struct skb_shared_hwtstamps *hwtstamps) 1399bfd4ecddSRussell King { 1400bfd4ecddSRussell King unsigned long flags; 1401bfd4ecddSRussell King u64 ns; 1402bfd4ecddSRussell King 1403bfd4ecddSRussell King spin_lock_irqsave(&fep->tmreg_lock, flags); 1404bfd4ecddSRussell King ns = timecounter_cyc2time(&fep->tc, ts); 1405bfd4ecddSRussell King spin_unlock_irqrestore(&fep->tmreg_lock, flags); 1406bfd4ecddSRussell King 1407bfd4ecddSRussell King memset(hwtstamps, 0, sizeof(*hwtstamps)); 1408bfd4ecddSRussell King hwtstamps->hwtstamp = ns_to_ktime(ns); 1409bfd4ecddSRussell King } 1410bfd4ecddSRussell King 1411bfd4ecddSRussell King static void 141215cec633SWei Fang fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget) 1413793fc096SFrank Li { 1414793fc096SFrank Li struct fec_enet_private *fep; 141520f79739SWei Fang struct xdp_frame *xdpf; 1416a2fe37b6SFabio Estevam struct bufdesc *bdp; 1417793fc096SFrank Li unsigned short status; 1418793fc096SFrank Li struct sk_buff *skb; 14194d494cdcSFugang Duan struct fec_enet_priv_tx_q *txq; 14204d494cdcSFugang Duan struct netdev_queue *nq; 1421793fc096SFrank Li int index = 0; 142279f33912SNimrod Andy int entries_free; 1423af6f4791SWei Fang struct page *page; 1424af6f4791SWei Fang int frame_len; 1425793fc096SFrank Li 1426793fc096SFrank Li fep = netdev_priv(ndev); 14274d494cdcSFugang Duan 14284d494cdcSFugang Duan txq = fep->tx_queue[queue_id]; 14294d494cdcSFugang Duan /* get next bdp of dirty_tx */ 14304d494cdcSFugang Duan nq = netdev_get_tx_queue(ndev, queue_id); 14314d494cdcSFugang Duan bdp = txq->dirty_tx; 1432793fc096SFrank Li 1433793fc096SFrank Li /* get next bdp of dirty_tx */ 14347355f276STroy Kisky bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1435793fc096SFrank Li 14367355f276STroy Kisky while (bdp != READ_ONCE(txq->bd.cur)) { 14377355f276STroy Kisky /* Order the load of bd.cur and cbd_sc */ 1438c4bc44c6SKevin Hao rmb(); 14395cfa3039SJohannes Berg status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); 1440c4bc44c6SKevin Hao if (status & BD_ENET_TX_READY) 1441793fc096SFrank Li break; 1442793fc096SFrank Li 
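		/* A descriptor may carry a regular skb, an xdp_frame queued via
		 * ndo_xdp_xmit, or a page used for XDP_TX; txq->tx_buf[index].type
		 * tells the unmap/free paths below apart.
		 */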
14437355f276STroy Kisky index = fec_enet_get_bd_index(bdp, &txq->bd); 14442b995f63SNimrod Andy 144520f79739SWei Fang if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { 1446af6f4791SWei Fang skb = txq->tx_buf[index].buf_p; 144720f79739SWei Fang if (bdp->cbd_bufaddr && 144820f79739SWei Fang !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) 14495cfa3039SJohannes Berg dma_unmap_single(&fep->pdev->dev, 14505cfa3039SJohannes Berg fec32_to_cpu(bdp->cbd_bufaddr), 14515cfa3039SJohannes Berg fec16_to_cpu(bdp->cbd_datlen), 14525cfa3039SJohannes Berg DMA_TO_DEVICE); 14535cfa3039SJohannes Berg bdp->cbd_bufaddr = cpu_to_fec32(0); 14547fafe803STroy Kisky if (!skb) 145520f79739SWei Fang goto tx_buf_done; 145620f79739SWei Fang } else { 145715cec633SWei Fang /* Tx processing cannot call any XDP (or page pool) APIs if 145815cec633SWei Fang * the "budget" is 0. Because NAPI is called with budget of 145915cec633SWei Fang * 0 (such as netpoll) indicates we may be in an IRQ context, 146015cec633SWei Fang * however, we can't use the page pool from IRQ context. 146115cec633SWei Fang */ 146215cec633SWei Fang if (unlikely(!budget)) 146315cec633SWei Fang break; 146415cec633SWei Fang 1465af6f4791SWei Fang if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) { 1466af6f4791SWei Fang xdpf = txq->tx_buf[index].buf_p; 1467af6f4791SWei Fang if (bdp->cbd_bufaddr) 146820f79739SWei Fang dma_unmap_single(&fep->pdev->dev, 146920f79739SWei Fang fec32_to_cpu(bdp->cbd_bufaddr), 147020f79739SWei Fang fec16_to_cpu(bdp->cbd_datlen), 147120f79739SWei Fang DMA_TO_DEVICE); 1472af6f4791SWei Fang } else { 1473af6f4791SWei Fang page = txq->tx_buf[index].buf_p; 1474af6f4791SWei Fang } 1475af6f4791SWei Fang 147620f79739SWei Fang bdp->cbd_bufaddr = cpu_to_fec32(0); 1477af6f4791SWei Fang if (unlikely(!txq->tx_buf[index].buf_p)) { 147820f79739SWei Fang txq->tx_buf[index].type = FEC_TXBUF_T_SKB; 147920f79739SWei Fang goto tx_buf_done; 148020f79739SWei Fang } 1481af6f4791SWei Fang 1482af6f4791SWei Fang frame_len = fec16_to_cpu(bdp->cbd_datlen); 148320f79739SWei Fang } 1484793fc096SFrank Li 1485793fc096SFrank Li /* Check for errors. */ 1486793fc096SFrank Li if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1487793fc096SFrank Li BD_ENET_TX_RL | BD_ENET_TX_UN | 1488793fc096SFrank Li BD_ENET_TX_CSL)) { 1489793fc096SFrank Li ndev->stats.tx_errors++; 1490793fc096SFrank Li if (status & BD_ENET_TX_HB) /* No heartbeat */ 1491793fc096SFrank Li ndev->stats.tx_heartbeat_errors++; 1492793fc096SFrank Li if (status & BD_ENET_TX_LC) /* Late collision */ 1493793fc096SFrank Li ndev->stats.tx_window_errors++; 1494793fc096SFrank Li if (status & BD_ENET_TX_RL) /* Retrans limit */ 1495793fc096SFrank Li ndev->stats.tx_aborted_errors++; 1496793fc096SFrank Li if (status & BD_ENET_TX_UN) /* Underrun */ 1497793fc096SFrank Li ndev->stats.tx_fifo_errors++; 1498793fc096SFrank Li if (status & BD_ENET_TX_CSL) /* Carrier lost */ 1499793fc096SFrank Li ndev->stats.tx_carrier_errors++; 1500793fc096SFrank Li } else { 1501793fc096SFrank Li ndev->stats.tx_packets++; 150220f79739SWei Fang 150320f79739SWei Fang if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) 15046e909283SNimrod Andy ndev->stats.tx_bytes += skb->len; 150520f79739SWei Fang else 1506af6f4791SWei Fang ndev->stats.tx_bytes += frame_len; 1507793fc096SFrank Li } 1508793fc096SFrank Li 1509793fc096SFrank Li /* Deferred means some collisions occurred during transmit, 1510793fc096SFrank Li * but we eventually sent the packet OK. 
1511793fc096SFrank Li */ 1512793fc096SFrank Li if (status & BD_ENET_TX_DEF) 1513793fc096SFrank Li ndev->stats.collisions++; 1514793fc096SFrank Li 151520f79739SWei Fang if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { 151620f79739SWei Fang /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who 151720f79739SWei Fang * are to time stamp the packet, so we still need to check time 151820f79739SWei Fang * stamping enabled flag. 151920f79739SWei Fang */ 152020f79739SWei Fang if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS && 152120f79739SWei Fang fep->hwts_tx_en) && fep->bufdesc_ex) { 152220f79739SWei Fang struct skb_shared_hwtstamps shhwtstamps; 152320f79739SWei Fang struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 152420f79739SWei Fang 152520f79739SWei Fang fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); 152620f79739SWei Fang skb_tstamp_tx(skb, &shhwtstamps); 152720f79739SWei Fang } 152820f79739SWei Fang 1529793fc096SFrank Li /* Free the sk buffer associated with this last transmit */ 153091a10efcSWei Fang napi_consume_skb(skb, budget); 1531af6f4791SWei Fang } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) { 1532f601899eSWei Fang xdp_return_frame_rx_napi(xdpf); 1533f601899eSWei Fang } else { /* recycle pages of XDP_TX frames */ 1534af6f4791SWei Fang /* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */ 1535f601899eSWei Fang page_pool_put_page(page->pp, page, 0, true); 1536f601899eSWei Fang } 153720f79739SWei Fang 1538af6f4791SWei Fang txq->tx_buf[index].buf_p = NULL; 153920f79739SWei Fang /* restore default tx buffer type: FEC_TXBUF_T_SKB */ 154020f79739SWei Fang txq->tx_buf[index].type = FEC_TXBUF_T_SKB; 154120f79739SWei Fang 154220f79739SWei Fang tx_buf_done: 154320f79739SWei Fang /* Make sure the update to bdp and tx_buf are performed 1544c4bc44c6SKevin Hao * before dirty_tx 1545c4bc44c6SKevin Hao */ 1546c4bc44c6SKevin Hao wmb(); 15474d494cdcSFugang Duan txq->dirty_tx = bdp; 1548793fc096SFrank Li 1549793fc096SFrank Li /* Update pointer to next buffer descriptor to be transmitted */ 15507355f276STroy Kisky bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1551793fc096SFrank Li 1552793fc096SFrank Li /* Since we have freed up a buffer, the ring is no longer full 1553793fc096SFrank Li */ 1554657ade07SRickard x Andersson if (netif_tx_queue_stopped(nq)) { 15557355f276STroy Kisky entries_free = fec_enet_get_free_txdesc_num(txq); 15564d494cdcSFugang Duan if (entries_free >= txq->tx_wake_threshold) 15574d494cdcSFugang Duan netif_tx_wake_queue(nq); 1558793fc096SFrank Li } 155979f33912SNimrod Andy } 1560ccea2968SRussell King 1561c10bc0e7SFugang Duan /* ERR006358: Keep the transmitter going */ 15627355f276STroy Kisky if (bdp != txq->bd.cur && 156353bb20d1STroy Kisky readl(txq->bd.reg_desc_active) == 0) 156453bb20d1STroy Kisky writel(0, txq->bd.reg_desc_active); 15654d494cdcSFugang Duan } 15664d494cdcSFugang Duan 156715cec633SWei Fang static void fec_enet_tx(struct net_device *ndev, int budget) 15684d494cdcSFugang Duan { 15694d494cdcSFugang Duan struct fec_enet_private *fep = netdev_priv(ndev); 15707cdaa4ccSTobias Waldekranz int i; 15717cdaa4ccSTobias Waldekranz 15727cdaa4ccSTobias Waldekranz /* Make sure that AVB queues are processed first. 
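	 * Queues are walked in descending index order so the higher-priority
	 * AVB queues are cleaned before the default queue 0.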
*/ 15737cdaa4ccSTobias Waldekranz for (i = fep->num_tx_queues - 1; i >= 0; i--) 157415cec633SWei Fang fec_enet_tx_queue(ndev, i, budget); 1575793fc096SFrank Li } 1576793fc096SFrank Li 157795698ff6SShenwei Wang static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, 157895698ff6SShenwei Wang struct bufdesc *bdp, int index) 157995698ff6SShenwei Wang { 158095698ff6SShenwei Wang struct page *new_page; 158195698ff6SShenwei Wang dma_addr_t phys_addr; 158295698ff6SShenwei Wang 158395698ff6SShenwei Wang new_page = page_pool_dev_alloc_pages(rxq->page_pool); 158495698ff6SShenwei Wang WARN_ON(!new_page); 158595698ff6SShenwei Wang rxq->rx_skb_info[index].page = new_page; 158695698ff6SShenwei Wang 158795698ff6SShenwei Wang rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM; 158895698ff6SShenwei Wang phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM; 158995698ff6SShenwei Wang bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); 159095698ff6SShenwei Wang } 159195698ff6SShenwei Wang 15926d6b39f1SShenwei Wang static u32 15936d6b39f1SShenwei Wang fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog, 1594f601899eSWei Fang struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu) 15956d6b39f1SShenwei Wang { 15966d6b39f1SShenwei Wang unsigned int sync, len = xdp->data_end - xdp->data; 15976d6b39f1SShenwei Wang u32 ret = FEC_ENET_XDP_PASS; 15986d6b39f1SShenwei Wang struct page *page; 15996d6b39f1SShenwei Wang int err; 16006d6b39f1SShenwei Wang u32 act; 16016d6b39f1SShenwei Wang 16026d6b39f1SShenwei Wang act = bpf_prog_run_xdp(prog, xdp); 16036d6b39f1SShenwei Wang 1604f601899eSWei Fang /* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover 1605f601899eSWei Fang * max len CPU touch 1606f601899eSWei Fang */ 1607f601899eSWei Fang sync = xdp->data_end - xdp->data; 16086d6b39f1SShenwei Wang sync = max(sync, len); 16096d6b39f1SShenwei Wang 16106d6b39f1SShenwei Wang switch (act) { 16116d6b39f1SShenwei Wang case XDP_PASS: 16126970ef27SShenwei Wang rxq->stats[RX_XDP_PASS]++; 16136d6b39f1SShenwei Wang ret = FEC_ENET_XDP_PASS; 16146d6b39f1SShenwei Wang break; 16156d6b39f1SShenwei Wang 16166d6b39f1SShenwei Wang case XDP_REDIRECT: 16176970ef27SShenwei Wang rxq->stats[RX_XDP_REDIRECT]++; 16186d6b39f1SShenwei Wang err = xdp_do_redirect(fep->netdev, xdp, prog); 1619e83fabb7SWei Fang if (unlikely(err)) 1620e83fabb7SWei Fang goto xdp_err; 1621e83fabb7SWei Fang 16226d6b39f1SShenwei Wang ret = FEC_ENET_XDP_REDIR; 16236d6b39f1SShenwei Wang break; 16246d6b39f1SShenwei Wang 16256d6b39f1SShenwei Wang case XDP_TX: 162695403294SWei Fang rxq->stats[RX_XDP_TX]++; 1627f601899eSWei Fang err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync); 162895403294SWei Fang if (unlikely(err)) { 162995403294SWei Fang rxq->stats[RX_XDP_TX_ERRORS]++; 1630e83fabb7SWei Fang goto xdp_err; 163195403294SWei Fang } 1632e83fabb7SWei Fang 1633f601899eSWei Fang ret = FEC_ENET_XDP_TX; 1634f601899eSWei Fang break; 1635f601899eSWei Fang 1636f601899eSWei Fang default: 16376d6b39f1SShenwei Wang bpf_warn_invalid_xdp_action(fep->netdev, prog, act); 16386d6b39f1SShenwei Wang fallthrough; 16396d6b39f1SShenwei Wang 16406d6b39f1SShenwei Wang case XDP_ABORTED: 16416d6b39f1SShenwei Wang fallthrough; /* handle aborts by dropping packet */ 16426d6b39f1SShenwei Wang 16436d6b39f1SShenwei Wang case XDP_DROP: 16446970ef27SShenwei Wang rxq->stats[RX_XDP_DROP]++; 1645e83fabb7SWei Fang xdp_err: 16466d6b39f1SShenwei Wang ret = FEC_ENET_XDP_CONSUMED; 16476d6b39f1SShenwei Wang page = virt_to_head_page(xdp->data); 16486d6b39f1SShenwei Wang 
page_pool_put_page(rxq->page_pool, page, sync, true); 1649e83fabb7SWei Fang if (act != XDP_DROP) 1650e83fabb7SWei Fang trace_xdp_exception(fep->netdev, prog, act); 16516d6b39f1SShenwei Wang break; 16526d6b39f1SShenwei Wang } 16536d6b39f1SShenwei Wang 16546d6b39f1SShenwei Wang return ret; 16556d6b39f1SShenwei Wang } 16566d6b39f1SShenwei Wang 16577355f276STroy Kisky /* During a receive, the bd_rx.cur points to the current incoming buffer. 1658793fc096SFrank Li * When we update through the ring, if the next incoming buffer has 1659793fc096SFrank Li * not been given to the system, we just set the empty indicator, 1660793fc096SFrank Li * effectively tossing the packet. 1661793fc096SFrank Li */ 1662793fc096SFrank Li static int 16634d494cdcSFugang Duan fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) 1664793fc096SFrank Li { 1665793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 16664d494cdcSFugang Duan struct fec_enet_priv_rx_q *rxq; 1667793fc096SFrank Li struct bufdesc *bdp; 1668793fc096SFrank Li unsigned short status; 1669793fc096SFrank Li struct sk_buff *skb; 1670793fc096SFrank Li ushort pkt_len; 1671793fc096SFrank Li __u8 *data; 1672793fc096SFrank Li int pkt_received = 0; 1673cdffcf1bSJim Baxter struct bufdesc_ex *ebdp = NULL; 1674cdffcf1bSJim Baxter bool vlan_packet_rcvd = false; 1675cdffcf1bSJim Baxter u16 vlan_tag; 1676d842a31fSDuan Fugang-B38611 int index = 0; 16776b7e4008SLothar Waßmann bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; 16786d6b39f1SShenwei Wang struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); 16796d6b39f1SShenwei Wang u32 ret, xdp_result = FEC_ENET_XDP_PASS; 16806c8fae0cSShenwei Wang u32 data_start = FEC_ENET_XDP_HEADROOM; 1681f601899eSWei Fang int cpu = smp_processor_id(); 16826d6b39f1SShenwei Wang struct xdp_buff xdp; 168395698ff6SShenwei Wang struct page *page; 16846c8fae0cSShenwei Wang u32 sub_len = 4; 16856c8fae0cSShenwei Wang 16866c8fae0cSShenwei Wang #if !defined(CONFIG_M5272) 16876c8fae0cSShenwei Wang /*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of 16886c8fae0cSShenwei Wang * FEC_RACC_SHIFT16 is set by default in the probe function. 16896c8fae0cSShenwei Wang */ 16906c8fae0cSShenwei Wang if (fep->quirks & FEC_QUIRK_HAS_RACC) { 16916c8fae0cSShenwei Wang data_start += 2; 16926c8fae0cSShenwei Wang sub_len += 2; 16936c8fae0cSShenwei Wang } 16946c8fae0cSShenwei Wang #endif 1695793fc096SFrank Li 1696ffd32a92SChristoph Hellwig #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) 1697ffd32a92SChristoph Hellwig /* 1698ffd32a92SChristoph Hellwig * Hacky flush of all caches instead of using the DMA API for the TSO 1699ffd32a92SChristoph Hellwig * headers. 1700ffd32a92SChristoph Hellwig */ 1701793fc096SFrank Li flush_cache_all(); 1702793fc096SFrank Li #endif 17034d494cdcSFugang Duan rxq = fep->rx_queue[queue_id]; 1704793fc096SFrank Li 1705793fc096SFrank Li /* First, grab all of the stats for the incoming packet. 1706793fc096SFrank Li * These get messed up if we get called due to a busy condition. 
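	 * The loop below keeps reclaiming descriptors until either the NAPI
	 * budget is used up or a descriptor still marked BD_ENET_RX_EMPTY
	 * (i.e. still owned by the hardware) is reached.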
1707793fc096SFrank Li */ 17087355f276STroy Kisky bdp = rxq->bd.cur; 17096d6b39f1SShenwei Wang xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq); 1710793fc096SFrank Li 17115cfa3039SJohannes Berg while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { 1712793fc096SFrank Li 1713793fc096SFrank Li if (pkt_received >= budget) 1714793fc096SFrank Li break; 1715793fc096SFrank Li pkt_received++; 1716793fc096SFrank Li 1717b5bd95d1SJoakim Zhang writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT); 1718db3421c1SRussell King 1719793fc096SFrank Li /* Check for errors. */ 1720095098e1STroy Kisky status ^= BD_ENET_RX_LAST; 1721793fc096SFrank Li if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | 1722095098e1STroy Kisky BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST | 1723095098e1STroy Kisky BD_ENET_RX_CL)) { 1724793fc096SFrank Li ndev->stats.rx_errors++; 1725095098e1STroy Kisky if (status & BD_ENET_RX_OV) { 1726095098e1STroy Kisky /* FIFO overrun */ 1727095098e1STroy Kisky ndev->stats.rx_fifo_errors++; 1728095098e1STroy Kisky goto rx_processing_done; 1729095098e1STroy Kisky } 1730095098e1STroy Kisky if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH 1731095098e1STroy Kisky | BD_ENET_RX_LAST)) { 1732793fc096SFrank Li /* Frame too long or too short. */ 1733793fc096SFrank Li ndev->stats.rx_length_errors++; 1734095098e1STroy Kisky if (status & BD_ENET_RX_LAST) 1735095098e1STroy Kisky netdev_err(ndev, "rcv is not +last\n"); 1736793fc096SFrank Li } 1737793fc096SFrank Li if (status & BD_ENET_RX_CR) /* CRC Error */ 1738793fc096SFrank Li ndev->stats.rx_crc_errors++; 1739095098e1STroy Kisky /* Report late collisions as a frame error. */ 1740095098e1STroy Kisky if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL)) 1741793fc096SFrank Li ndev->stats.rx_frame_errors++; 1742793fc096SFrank Li goto rx_processing_done; 1743793fc096SFrank Li } 1744793fc096SFrank Li 1745793fc096SFrank Li /* Process the incoming frame. */ 1746793fc096SFrank Li ndev->stats.rx_packets++; 17475cfa3039SJohannes Berg pkt_len = fec16_to_cpu(bdp->cbd_datlen); 1748793fc096SFrank Li ndev->stats.rx_bytes += pkt_len; 1749793fc096SFrank Li 17507355f276STroy Kisky index = fec_enet_get_bd_index(bdp, &rxq->bd); 175195698ff6SShenwei Wang page = rxq->rx_skb_info[index].page; 175295698ff6SShenwei Wang dma_sync_single_for_cpu(&fep->pdev->dev, 175395698ff6SShenwei Wang fec32_to_cpu(bdp->cbd_bufaddr), 175495698ff6SShenwei Wang pkt_len, 175595698ff6SShenwei Wang DMA_FROM_DEVICE); 175695698ff6SShenwei Wang prefetch(page_address(page)); 175795698ff6SShenwei Wang fec_enet_update_cbd(rxq, bdp, index); 17581b7bde6dSNimrod Andy 17596d6b39f1SShenwei Wang if (xdp_prog) { 17606d6b39f1SShenwei Wang xdp_buff_clear_frags_flag(&xdp); 17616c8fae0cSShenwei Wang /* subtract 16bit shift and FCS */ 17626d6b39f1SShenwei Wang xdp_prepare_buff(&xdp, page_address(page), 17636c8fae0cSShenwei Wang data_start, pkt_len - sub_len, false); 1764f601899eSWei Fang ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu); 17656d6b39f1SShenwei Wang xdp_result |= ret; 17666d6b39f1SShenwei Wang if (ret != FEC_ENET_XDP_PASS) 17676d6b39f1SShenwei Wang goto rx_processing_done; 17686d6b39f1SShenwei Wang } 17696d6b39f1SShenwei Wang 17701b7bde6dSNimrod Andy /* The packet length includes FCS, but we don't want to 17711b7bde6dSNimrod Andy * include that when passing upstream as it messes up 17721b7bde6dSNimrod Andy * bridging applications. 
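		 * sub_len therefore covers the 4-byte FCS, plus the two padding
		 * bytes inserted by FEC_RACC_SHIFT16 when that option is enabled,
		 * and is trimmed off when the skb is built below.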
17731b7bde6dSNimrod Andy */ 177495698ff6SShenwei Wang skb = build_skb(page_address(page), PAGE_SIZE); 177519e72b06SWei Fang if (unlikely(!skb)) { 177619e72b06SWei Fang page_pool_recycle_direct(rxq->page_pool, page); 177719e72b06SWei Fang ndev->stats.rx_dropped++; 177819e72b06SWei Fang 177919e72b06SWei Fang netdev_err_once(ndev, "build_skb failed!\n"); 178019e72b06SWei Fang goto rx_processing_done; 178119e72b06SWei Fang } 178219e72b06SWei Fang 17836c8fae0cSShenwei Wang skb_reserve(skb, data_start); 17846c8fae0cSShenwei Wang skb_put(skb, pkt_len - sub_len); 178595698ff6SShenwei Wang skb_mark_for_recycle(skb); 17863ac72b7bSEric Nelson 17876c8fae0cSShenwei Wang if (unlikely(need_swap)) { 17886c8fae0cSShenwei Wang data = page_address(page) + FEC_ENET_XDP_HEADROOM; 1789235bde1eSFabio Estevam swap_buffer(data, pkt_len); 17906c8fae0cSShenwei Wang } 17916c8fae0cSShenwei Wang data = skb->data; 17923ac72b7bSEric Nelson 1793cdffcf1bSJim Baxter /* Extract the enhanced buffer descriptor */ 1794cdffcf1bSJim Baxter ebdp = NULL; 1795cdffcf1bSJim Baxter if (fep->bufdesc_ex) 1796cdffcf1bSJim Baxter ebdp = (struct bufdesc_ex *)bdp; 1797cdffcf1bSJim Baxter 1798cdffcf1bSJim Baxter /* If this is a VLAN packet remove the VLAN Tag */ 1799cdffcf1bSJim Baxter vlan_packet_rcvd = false; 1800cdffcf1bSJim Baxter if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && 18015cfa3039SJohannes Berg fep->bufdesc_ex && 18025cfa3039SJohannes Berg (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) { 1803cdffcf1bSJim Baxter /* Push and remove the vlan tag */ 1804cdffcf1bSJim Baxter struct vlan_hdr *vlan_header = 1805cdffcf1bSJim Baxter (struct vlan_hdr *) (data + ETH_HLEN); 1806cdffcf1bSJim Baxter vlan_tag = ntohs(vlan_header->h_vlan_TCI); 1807cdffcf1bSJim Baxter 1808cdffcf1bSJim Baxter vlan_packet_rcvd = true; 18091b7bde6dSNimrod Andy 1810af5cbc98SNimrod Andy memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); 18111b7bde6dSNimrod Andy skb_pull(skb, VLAN_HLEN); 1812cdffcf1bSJim Baxter } 1813cdffcf1bSJim Baxter 1814793fc096SFrank Li skb->protocol = eth_type_trans(skb, ndev); 1815793fc096SFrank Li 1816793fc096SFrank Li /* Get receive timestamp from the skb */ 1817bfd4ecddSRussell King if (fep->hwts_rx_en && fep->bufdesc_ex) 18185cfa3039SJohannes Berg fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), 1819bfd4ecddSRussell King skb_hwtstamps(skb)); 1820793fc096SFrank Li 18214c09eed9SJim Baxter if (fep->bufdesc_ex && 18224c09eed9SJim Baxter (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { 18235cfa3039SJohannes Berg if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) { 18244c09eed9SJim Baxter /* don't check it */ 18254c09eed9SJim Baxter skb->ip_summed = CHECKSUM_UNNECESSARY; 18264c09eed9SJim Baxter } else { 18274c09eed9SJim Baxter skb_checksum_none_assert(skb); 18284c09eed9SJim Baxter } 18294c09eed9SJim Baxter } 18304c09eed9SJim Baxter 1831cdffcf1bSJim Baxter /* Handle received VLAN packets */ 1832cdffcf1bSJim Baxter if (vlan_packet_rcvd) 1833cdffcf1bSJim Baxter __vlan_hwaccel_put_tag(skb, 1834cdffcf1bSJim Baxter htons(ETH_P_8021Q), 1835cdffcf1bSJim Baxter vlan_tag); 1836cdffcf1bSJim Baxter 18377cdaa4ccSTobias Waldekranz skb_record_rx_queue(skb, queue_id); 1838793fc096SFrank Li napi_gro_receive(&fep->napi, skb); 1839793fc096SFrank Li 1840793fc096SFrank Li rx_processing_done: 1841793fc096SFrank Li /* Clear the status flags for this buffer */ 1842793fc096SFrank Li status &= ~BD_ENET_RX_STATS; 1843793fc096SFrank Li 1844793fc096SFrank Li /* Mark the buffer empty */ 1845793fc096SFrank Li status |= BD_ENET_RX_EMPTY; 1846793fc096SFrank Li 
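		/* For extended descriptors the esc/prot/bdu fields are also
		 * re-initialized below so the next receive raises BD_ENET_RX_INT.
		 */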
1847793fc096SFrank Li if (fep->bufdesc_ex) { 1848793fc096SFrank Li struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1849793fc096SFrank Li 18505cfa3039SJohannes Berg ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 1851793fc096SFrank Li ebdp->cbd_prot = 0; 1852793fc096SFrank Li ebdp->cbd_bdu = 0; 1853793fc096SFrank Li } 1854be293467STroy Kisky /* Make sure the updates to rest of the descriptor are 1855be293467STroy Kisky * performed before transferring ownership. 1856be293467STroy Kisky */ 1857be293467STroy Kisky wmb(); 1858be293467STroy Kisky bdp->cbd_sc = cpu_to_fec16(status); 1859793fc096SFrank Li 1860793fc096SFrank Li /* Update BD pointer to next entry */ 18617355f276STroy Kisky bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 186236e24e2eSDuan Fugang-B38611 1863793fc096SFrank Li /* Doing this here will keep the FEC running while we process 1864793fc096SFrank Li * incoming frames. On a heavily loaded network, we should be 1865793fc096SFrank Li * able to keep up at the expense of system resources. 1866793fc096SFrank Li */ 186753bb20d1STroy Kisky writel(0, rxq->bd.reg_desc_active); 1868793fc096SFrank Li } 18697355f276STroy Kisky rxq->bd.cur = bdp; 18706d6b39f1SShenwei Wang 18716d6b39f1SShenwei Wang if (xdp_result & FEC_ENET_XDP_REDIR) 18727f04bd10SSebastian Andrzej Siewior xdp_do_flush(); 18736d6b39f1SShenwei Wang 1874793fc096SFrank Li return pkt_received; 1875793fc096SFrank Li } 1876793fc096SFrank Li 18777cdaa4ccSTobias Waldekranz static int fec_enet_rx(struct net_device *ndev, int budget) 18784d494cdcSFugang Duan { 18794d494cdcSFugang Duan struct fec_enet_private *fep = netdev_priv(ndev); 18807cdaa4ccSTobias Waldekranz int i, done = 0; 18814d494cdcSFugang Duan 18827cdaa4ccSTobias Waldekranz /* Make sure that AVB queues are processed first. */ 18837cdaa4ccSTobias Waldekranz for (i = fep->num_rx_queues - 1; i >= 0; i--) 18847cdaa4ccSTobias Waldekranz done += fec_enet_rx_queue(ndev, budget - done, i); 18851c021bb7SUwe Kleine-König 18867cdaa4ccSTobias Waldekranz return done; 18874d494cdcSFugang Duan } 18884d494cdcSFugang Duan 18897cdaa4ccSTobias Waldekranz static bool fec_enet_collect_events(struct fec_enet_private *fep) 18904d494cdcSFugang Duan { 1891793fc096SFrank Li uint int_events; 1892793fc096SFrank Li 1893793fc096SFrank Li int_events = readl(fep->hwp + FEC_IEVENT); 1894f166f890SAndrew Lunn 1895f166f890SAndrew Lunn /* Don't clear MDIO events, we poll for those */ 1896f166f890SAndrew Lunn int_events &= ~FEC_ENET_MII; 1897f166f890SAndrew Lunn 189894191fd6SNimrod Andy writel(int_events, fep->hwp + FEC_IEVENT); 1899793fc096SFrank Li 19007cdaa4ccSTobias Waldekranz return int_events != 0; 19017cdaa4ccSTobias Waldekranz } 19027cdaa4ccSTobias Waldekranz 19037cdaa4ccSTobias Waldekranz static irqreturn_t 19047cdaa4ccSTobias Waldekranz fec_enet_interrupt(int irq, void *dev_id) 19057cdaa4ccSTobias Waldekranz { 19067cdaa4ccSTobias Waldekranz struct net_device *ndev = dev_id; 19077cdaa4ccSTobias Waldekranz struct fec_enet_private *fep = netdev_priv(ndev); 19087cdaa4ccSTobias Waldekranz irqreturn_t ret = IRQ_NONE; 19097cdaa4ccSTobias Waldekranz 19107cdaa4ccSTobias Waldekranz if (fec_enet_collect_events(fep) && fep->link) { 1911793fc096SFrank Li ret = IRQ_HANDLED; 1912793fc096SFrank Li 191394191fd6SNimrod Andy if (napi_schedule_prep(&fep->napi)) { 1914f166f890SAndrew Lunn /* Disable interrupts */ 1915f166f890SAndrew Lunn writel(0, fep->hwp + FEC_IMASK); 191694191fd6SNimrod Andy __napi_schedule(&fep->napi); 191794191fd6SNimrod Andy } 1918793fc096SFrank Li } 1919793fc096SFrank Li 1920793fc096SFrank Li return 
ret; 1921793fc096SFrank Li } 1922793fc096SFrank Li 1923793fc096SFrank Li static int fec_enet_rx_napi(struct napi_struct *napi, int budget) 1924793fc096SFrank Li { 1925793fc096SFrank Li struct net_device *ndev = napi->dev; 1926793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 19277cdaa4ccSTobias Waldekranz int done = 0; 19287a16807cSRussell King 19297cdaa4ccSTobias Waldekranz do { 19307cdaa4ccSTobias Waldekranz done += fec_enet_rx(ndev, budget - done); 193115cec633SWei Fang fec_enet_tx(ndev, budget); 19327cdaa4ccSTobias Waldekranz } while ((done < budget) && fec_enet_collect_events(fep)); 1933793fc096SFrank Li 19347cdaa4ccSTobias Waldekranz if (done < budget) { 19357cdaa4ccSTobias Waldekranz napi_complete_done(napi, done); 1936793fc096SFrank Li writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1937793fc096SFrank Li } 19387cdaa4ccSTobias Waldekranz 19397cdaa4ccSTobias Waldekranz return done; 1940793fc096SFrank Li } 1941793fc096SFrank Li 1942793fc096SFrank Li /* ------------------------------------------------------------------------- */ 1943052fcc45SFugang Duan static int fec_get_mac(struct net_device *ndev) 1944793fc096SFrank Li { 1945793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 1946793fc096SFrank Li unsigned char *iap, tmpaddr[ETH_ALEN]; 194783216e39SMichael Walle int ret; 1948793fc096SFrank Li 1949793fc096SFrank Li /* 1950793fc096SFrank Li * try to get mac address in following order: 1951793fc096SFrank Li * 1952793fc096SFrank Li * 1) module parameter via kernel command line in form 1953793fc096SFrank Li * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 1954793fc096SFrank Li */ 1955793fc096SFrank Li iap = macaddr; 1956793fc096SFrank Li 1957793fc096SFrank Li /* 1958793fc096SFrank Li * 2) from device tree data 1959793fc096SFrank Li */ 1960793fc096SFrank Li if (!is_valid_ether_addr(iap)) { 1961793fc096SFrank Li struct device_node *np = fep->pdev->dev.of_node; 1962793fc096SFrank Li if (np) { 196383216e39SMichael Walle ret = of_get_mac_address(np, tmpaddr); 196483216e39SMichael Walle if (!ret) 196583216e39SMichael Walle iap = tmpaddr; 1966052fcc45SFugang Duan else if (ret == -EPROBE_DEFER) 1967052fcc45SFugang Duan return ret; 1968793fc096SFrank Li } 1969793fc096SFrank Li } 1970793fc096SFrank Li 1971793fc096SFrank Li /* 1972793fc096SFrank Li * 3) from flash or fuse (via platform data) 1973793fc096SFrank Li */ 1974793fc096SFrank Li if (!is_valid_ether_addr(iap)) { 1975793fc096SFrank Li #ifdef CONFIG_M5272 1976793fc096SFrank Li if (FEC_FLASHMAC) 1977793fc096SFrank Li iap = (unsigned char *)FEC_FLASHMAC; 1978793fc096SFrank Li #else 197932d1bbb1SGeert Uytterhoeven struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); 198032d1bbb1SGeert Uytterhoeven 1981793fc096SFrank Li if (pdata) 1982793fc096SFrank Li iap = (unsigned char *)&pdata->mac; 1983793fc096SFrank Li #endif 1984793fc096SFrank Li } 1985793fc096SFrank Li 1986793fc096SFrank Li /* 1987793fc096SFrank Li * 4) FEC mac registers set by bootloader 1988793fc096SFrank Li */ 1989793fc096SFrank Li if (!is_valid_ether_addr(iap)) { 19907d7628f3SDan Carpenter *((__be32 *) &tmpaddr[0]) = 19917d7628f3SDan Carpenter cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); 19927d7628f3SDan Carpenter *((__be16 *) &tmpaddr[4]) = 19937d7628f3SDan Carpenter cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 1994793fc096SFrank Li iap = &tmpaddr[0]; 1995793fc096SFrank Li } 1996793fc096SFrank Li 1997ff5b2fabSLucas Stach /* 1998ff5b2fabSLucas Stach * 5) random mac address 1999ff5b2fabSLucas Stach */ 2000ff5b2fabSLucas Stach if 
(!is_valid_ether_addr(iap)) { 2001ff5b2fabSLucas Stach /* Report it and use a random ethernet address instead */ 2002a19a0582SFabio Estevam dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap); 2003ff5b2fabSLucas Stach eth_hw_addr_random(ndev); 2004a19a0582SFabio Estevam dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n", 2005ff5b2fabSLucas Stach ndev->dev_addr); 2006052fcc45SFugang Duan return 0; 2007ff5b2fabSLucas Stach } 2008ff5b2fabSLucas Stach 2009793fc096SFrank Li /* Adjust MAC if using macaddr */ 2010ba3fdfe3SJakub Kicinski eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0); 2011052fcc45SFugang Duan 2012052fcc45SFugang Duan return 0; 2013793fc096SFrank Li } 2014793fc096SFrank Li 2015793fc096SFrank Li /* ------------------------------------------------------------------------- */ 2016793fc096SFrank Li 2017793fc096SFrank Li /* 2018793fc096SFrank Li * Phy section 2019793fc096SFrank Li */ 2020aff1b8c8SAndrew Lunn 2021aff1b8c8SAndrew Lunn /* LPI Sleep Ts count base on tx clk (clk_ref). 2022aff1b8c8SAndrew Lunn * The lpi sleep cnt value = X us / (cycle_ns). 2023aff1b8c8SAndrew Lunn */ 2024aff1b8c8SAndrew Lunn static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us) 2025aff1b8c8SAndrew Lunn { 2026aff1b8c8SAndrew Lunn struct fec_enet_private *fep = netdev_priv(ndev); 2027aff1b8c8SAndrew Lunn 2028aff1b8c8SAndrew Lunn return us * (fep->clk_ref_rate / 1000) / 1000; 2029aff1b8c8SAndrew Lunn } 2030aff1b8c8SAndrew Lunn 2031aff1b8c8SAndrew Lunn static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable) 2032aff1b8c8SAndrew Lunn { 2033aff1b8c8SAndrew Lunn struct fec_enet_private *fep = netdev_priv(ndev); 2034aff1b8c8SAndrew Lunn struct ethtool_keee *p = &fep->eee; 2035aff1b8c8SAndrew Lunn unsigned int sleep_cycle, wake_cycle; 2036aff1b8c8SAndrew Lunn 2037aff1b8c8SAndrew Lunn if (enable) { 2038aff1b8c8SAndrew Lunn sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer); 2039aff1b8c8SAndrew Lunn wake_cycle = sleep_cycle; 2040aff1b8c8SAndrew Lunn } else { 2041aff1b8c8SAndrew Lunn sleep_cycle = 0; 2042aff1b8c8SAndrew Lunn wake_cycle = 0; 2043aff1b8c8SAndrew Lunn } 2044aff1b8c8SAndrew Lunn 2045aff1b8c8SAndrew Lunn writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP); 2046aff1b8c8SAndrew Lunn writel(wake_cycle, fep->hwp + FEC_LPI_WAKE); 2047aff1b8c8SAndrew Lunn 2048aff1b8c8SAndrew Lunn return 0; 2049aff1b8c8SAndrew Lunn } 2050aff1b8c8SAndrew Lunn 2051793fc096SFrank Li static void fec_enet_adjust_link(struct net_device *ndev) 2052793fc096SFrank Li { 2053793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 205445f5c327SPhilippe Reynes struct phy_device *phy_dev = ndev->phydev; 2055793fc096SFrank Li int status_change = 0; 2056793fc096SFrank Li 20578ce5624fSRussell King /* 20588ce5624fSRussell King * If the netdev is down, or is going down, we're not interested 20598ce5624fSRussell King * in link state events, so just mark our idea of the link as down 20608ce5624fSRussell King * and ignore the event. 
20618ce5624fSRussell King */ 20628ce5624fSRussell King if (!netif_running(ndev) || !netif_device_present(ndev)) { 20638ce5624fSRussell King fep->link = 0; 20648ce5624fSRussell King } else if (phy_dev->link) { 2065793fc096SFrank Li if (!fep->link) { 2066793fc096SFrank Li fep->link = phy_dev->link; 2067793fc096SFrank Li status_change = 1; 2068793fc096SFrank Li } 2069793fc096SFrank Li 2070ef83337dSRussell King if (fep->full_duplex != phy_dev->duplex) { 2071ef83337dSRussell King fep->full_duplex = phy_dev->duplex; 2072793fc096SFrank Li status_change = 1; 2073ef83337dSRussell King } 2074793fc096SFrank Li 2075793fc096SFrank Li if (phy_dev->speed != fep->speed) { 2076793fc096SFrank Li fep->speed = phy_dev->speed; 2077793fc096SFrank Li status_change = 1; 2078793fc096SFrank Li } 2079793fc096SFrank Li 2080793fc096SFrank Li /* if any of the above changed restart the FEC */ 2081dbc64a8eSRussell King if (status_change) { 20825e344807SShenwei Wang netif_stop_queue(ndev); 2083dbc64a8eSRussell King napi_disable(&fep->napi); 2084dbc64a8eSRussell King netif_tx_lock_bh(ndev); 2085ef83337dSRussell King fec_restart(ndev); 2086657ade07SRickard x Andersson netif_tx_wake_all_queues(ndev); 20876af42d42SRussell King netif_tx_unlock_bh(ndev); 2088dbc64a8eSRussell King napi_enable(&fep->napi); 2089dbc64a8eSRussell King } 20906a2495adSAndrew Lunn if (fep->quirks & FEC_QUIRK_HAS_EEE) 20916a2495adSAndrew Lunn fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi); 2092793fc096SFrank Li } else { 2093793fc096SFrank Li if (fep->link) { 20945e344807SShenwei Wang netif_stop_queue(ndev); 2095f208ce10SRussell King napi_disable(&fep->napi); 2096f208ce10SRussell King netif_tx_lock_bh(ndev); 2097793fc096SFrank Li fec_stop(ndev); 2098f208ce10SRussell King netif_tx_unlock_bh(ndev); 2099f208ce10SRussell King napi_enable(&fep->napi); 21006e0895c2SDavid S. 
Miller fep->link = phy_dev->link; 2101793fc096SFrank Li status_change = 1; 2102793fc096SFrank Li } 2103793fc096SFrank Li } 2104793fc096SFrank Li 2105793fc096SFrank Li if (status_change) 2106793fc096SFrank Li phy_print_status(phy_dev); 2107793fc096SFrank Li } 2108793fc096SFrank Li 2109f166f890SAndrew Lunn static int fec_enet_mdio_wait(struct fec_enet_private *fep) 2110f166f890SAndrew Lunn { 2111f166f890SAndrew Lunn uint ievent; 2112f166f890SAndrew Lunn int ret; 2113f166f890SAndrew Lunn 2114f166f890SAndrew Lunn ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent, 2115f166f890SAndrew Lunn ievent & FEC_ENET_MII, 2, 30000); 2116f166f890SAndrew Lunn 2117f166f890SAndrew Lunn if (!ret) 2118f166f890SAndrew Lunn writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); 2119f166f890SAndrew Lunn 2120f166f890SAndrew Lunn return ret; 2121f166f890SAndrew Lunn } 2122f166f890SAndrew Lunn 21238d03ad1aSAndrew Lunn static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) 2124793fc096SFrank Li { 2125793fc096SFrank Li struct fec_enet_private *fep = bus->priv; 21268fff755eSAndrew Lunn struct device *dev = &fep->pdev->dev; 2127d3ee8ec7SMarco Hartmann int ret = 0, frame_start, frame_addr, frame_op; 21288fff755eSAndrew Lunn 2129da875fa5SZhang Qilong ret = pm_runtime_resume_and_get(dev); 2130b0c6ce24SFabio Estevam if (ret < 0) 21318fff755eSAndrew Lunn return ret; 2132793fc096SFrank Li 2133d3ee8ec7SMarco Hartmann /* C22 read */ 2134d3ee8ec7SMarco Hartmann frame_op = FEC_MMFR_OP_READ; 2135d3ee8ec7SMarco Hartmann frame_start = FEC_MMFR_ST; 2136d3ee8ec7SMarco Hartmann frame_addr = regnum; 2137d3ee8ec7SMarco Hartmann 2138793fc096SFrank Li /* start a read op */ 2139d3ee8ec7SMarco Hartmann writel(frame_start | frame_op | 2140d3ee8ec7SMarco Hartmann FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | 2141793fc096SFrank Li FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 2142793fc096SFrank Li 2143793fc096SFrank Li /* wait for end of transfer */ 2144f166f890SAndrew Lunn ret = fec_enet_mdio_wait(fep); 2145f166f890SAndrew Lunn if (ret) { 214631b7720cSJoe Perches netdev_err(fep->netdev, "MDIO read timeout\n"); 21478fff755eSAndrew Lunn goto out; 2148793fc096SFrank Li } 2149793fc096SFrank Li 21508fff755eSAndrew Lunn ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 21518fff755eSAndrew Lunn 21528fff755eSAndrew Lunn out: 21538fff755eSAndrew Lunn pm_runtime_mark_last_busy(dev); 21548fff755eSAndrew Lunn pm_runtime_put_autosuspend(dev); 21558fff755eSAndrew Lunn 21568fff755eSAndrew Lunn return ret; 2157793fc096SFrank Li } 2158793fc096SFrank Li 21598d03ad1aSAndrew Lunn static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id, 21608d03ad1aSAndrew Lunn int devad, int regnum) 2161793fc096SFrank Li { 2162793fc096SFrank Li struct fec_enet_private *fep = bus->priv; 21638fff755eSAndrew Lunn struct device *dev = &fep->pdev->dev; 21648d03ad1aSAndrew Lunn int ret = 0, frame_start, frame_op; 21658fff755eSAndrew Lunn 2166da875fa5SZhang Qilong ret = pm_runtime_resume_and_get(dev); 2167b0c6ce24SFabio Estevam if (ret < 0) 21688fff755eSAndrew Lunn return ret; 2169793fc096SFrank Li 2170d3ee8ec7SMarco Hartmann frame_start = FEC_MMFR_ST_C45; 2171d3ee8ec7SMarco Hartmann 2172d3ee8ec7SMarco Hartmann /* write address */ 2173d3ee8ec7SMarco Hartmann writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | 21748d03ad1aSAndrew Lunn FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2175d3ee8ec7SMarco Hartmann FEC_MMFR_TA | (regnum & 0xFFFF), 2176d3ee8ec7SMarco Hartmann fep->hwp + FEC_MII_DATA); 2177d3ee8ec7SMarco Hartmann 2178d3ee8ec7SMarco Hartmann /* wait for end 
of transfer */ 2179f166f890SAndrew Lunn ret = fec_enet_mdio_wait(fep); 2180f166f890SAndrew Lunn if (ret) { 2181d3ee8ec7SMarco Hartmann netdev_err(fep->netdev, "MDIO address write timeout\n"); 2182d3ee8ec7SMarco Hartmann goto out; 2183d3ee8ec7SMarco Hartmann } 21848d03ad1aSAndrew Lunn 21858d03ad1aSAndrew Lunn frame_op = FEC_MMFR_OP_READ_C45; 21868d03ad1aSAndrew Lunn 21878d03ad1aSAndrew Lunn /* start a read op */ 21888d03ad1aSAndrew Lunn writel(frame_start | frame_op | 21898d03ad1aSAndrew Lunn FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 21908d03ad1aSAndrew Lunn FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 21918d03ad1aSAndrew Lunn 21928d03ad1aSAndrew Lunn /* wait for end of transfer */ 21938d03ad1aSAndrew Lunn ret = fec_enet_mdio_wait(fep); 21948d03ad1aSAndrew Lunn if (ret) { 21958d03ad1aSAndrew Lunn netdev_err(fep->netdev, "MDIO read timeout\n"); 21968d03ad1aSAndrew Lunn goto out; 21978d03ad1aSAndrew Lunn } 21988d03ad1aSAndrew Lunn 21998d03ad1aSAndrew Lunn ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 22008d03ad1aSAndrew Lunn 22018d03ad1aSAndrew Lunn out: 22028d03ad1aSAndrew Lunn pm_runtime_mark_last_busy(dev); 22038d03ad1aSAndrew Lunn pm_runtime_put_autosuspend(dev); 22048d03ad1aSAndrew Lunn 22058d03ad1aSAndrew Lunn return ret; 22068d03ad1aSAndrew Lunn } 22078d03ad1aSAndrew Lunn 22088d03ad1aSAndrew Lunn static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, 22098d03ad1aSAndrew Lunn u16 value) 22108d03ad1aSAndrew Lunn { 22118d03ad1aSAndrew Lunn struct fec_enet_private *fep = bus->priv; 22128d03ad1aSAndrew Lunn struct device *dev = &fep->pdev->dev; 22138d03ad1aSAndrew Lunn int ret, frame_start, frame_addr; 22148d03ad1aSAndrew Lunn 22158d03ad1aSAndrew Lunn ret = pm_runtime_resume_and_get(dev); 22168d03ad1aSAndrew Lunn if (ret < 0) 22178d03ad1aSAndrew Lunn return ret; 22188d03ad1aSAndrew Lunn 2219d3ee8ec7SMarco Hartmann /* C22 write */ 2220d3ee8ec7SMarco Hartmann frame_start = FEC_MMFR_ST; 2221d3ee8ec7SMarco Hartmann frame_addr = regnum; 2222d3ee8ec7SMarco Hartmann 2223793fc096SFrank Li /* start a write op */ 2224d3ee8ec7SMarco Hartmann writel(frame_start | FEC_MMFR_OP_WRITE | 2225d3ee8ec7SMarco Hartmann FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | 2226793fc096SFrank Li FEC_MMFR_TA | FEC_MMFR_DATA(value), 2227793fc096SFrank Li fep->hwp + FEC_MII_DATA); 2228793fc096SFrank Li 2229793fc096SFrank Li /* wait for end of transfer */ 2230f166f890SAndrew Lunn ret = fec_enet_mdio_wait(fep); 2231f166f890SAndrew Lunn if (ret) 223231b7720cSJoe Perches netdev_err(fep->netdev, "MDIO write timeout\n"); 2233793fc096SFrank Li 22348d03ad1aSAndrew Lunn pm_runtime_mark_last_busy(dev); 22358d03ad1aSAndrew Lunn pm_runtime_put_autosuspend(dev); 22368d03ad1aSAndrew Lunn 22378d03ad1aSAndrew Lunn return ret; 22388d03ad1aSAndrew Lunn } 22398d03ad1aSAndrew Lunn 22408d03ad1aSAndrew Lunn static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id, 22418d03ad1aSAndrew Lunn int devad, int regnum, u16 value) 22428d03ad1aSAndrew Lunn { 22438d03ad1aSAndrew Lunn struct fec_enet_private *fep = bus->priv; 22448d03ad1aSAndrew Lunn struct device *dev = &fep->pdev->dev; 22458d03ad1aSAndrew Lunn int ret, frame_start; 22468d03ad1aSAndrew Lunn 22478d03ad1aSAndrew Lunn ret = pm_runtime_resume_and_get(dev); 22488d03ad1aSAndrew Lunn if (ret < 0) 22498d03ad1aSAndrew Lunn return ret; 22508d03ad1aSAndrew Lunn 22518d03ad1aSAndrew Lunn frame_start = FEC_MMFR_ST_C45; 22528d03ad1aSAndrew Lunn 22538d03ad1aSAndrew Lunn /* write address */ 22548d03ad1aSAndrew Lunn writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | 
22558d03ad1aSAndrew Lunn FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 22568d03ad1aSAndrew Lunn FEC_MMFR_TA | (regnum & 0xFFFF), 22578d03ad1aSAndrew Lunn fep->hwp + FEC_MII_DATA); 22588d03ad1aSAndrew Lunn 22598d03ad1aSAndrew Lunn /* wait for end of transfer */ 22608d03ad1aSAndrew Lunn ret = fec_enet_mdio_wait(fep); 22618d03ad1aSAndrew Lunn if (ret) { 22628d03ad1aSAndrew Lunn netdev_err(fep->netdev, "MDIO address write timeout\n"); 22638d03ad1aSAndrew Lunn goto out; 22648d03ad1aSAndrew Lunn } 22658d03ad1aSAndrew Lunn 22668d03ad1aSAndrew Lunn /* start a write op */ 22678d03ad1aSAndrew Lunn writel(frame_start | FEC_MMFR_OP_WRITE | 22688d03ad1aSAndrew Lunn FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 22698d03ad1aSAndrew Lunn FEC_MMFR_TA | FEC_MMFR_DATA(value), 22708d03ad1aSAndrew Lunn fep->hwp + FEC_MII_DATA); 22718d03ad1aSAndrew Lunn 22728d03ad1aSAndrew Lunn /* wait for end of transfer */ 22738d03ad1aSAndrew Lunn ret = fec_enet_mdio_wait(fep); 22748d03ad1aSAndrew Lunn if (ret) 22758d03ad1aSAndrew Lunn netdev_err(fep->netdev, "MDIO write timeout\n"); 22768d03ad1aSAndrew Lunn 2277d3ee8ec7SMarco Hartmann out: 22788fff755eSAndrew Lunn pm_runtime_mark_last_busy(dev); 22798fff755eSAndrew Lunn pm_runtime_put_autosuspend(dev); 22808fff755eSAndrew Lunn 22818fff755eSAndrew Lunn return ret; 2282793fc096SFrank Li } 2283793fc096SFrank Li 228464a632daSMarek Vasut static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) 228564a632daSMarek Vasut { 228664a632daSMarek Vasut struct fec_enet_private *fep = netdev_priv(ndev); 228764a632daSMarek Vasut struct phy_device *phy_dev = ndev->phydev; 228864a632daSMarek Vasut 228964a632daSMarek Vasut if (phy_dev) { 229064a632daSMarek Vasut phy_reset_after_clk_enable(phy_dev); 229164a632daSMarek Vasut } else if (fep->phy_node) { 229264a632daSMarek Vasut /* 229364a632daSMarek Vasut * If the PHY still is not bound to the MAC, but there is 229464a632daSMarek Vasut * OF PHY node and a matching PHY device instance already, 229564a632daSMarek Vasut * use the OF PHY node to obtain the PHY device instance, 229664a632daSMarek Vasut * and then use that PHY device instance when triggering 229764a632daSMarek Vasut * the PHY reset. 
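		 * of_phy_find_device() takes a reference on the PHY device,
		 * which is dropped again with put_device() once the reset has
		 * been triggered.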
229864a632daSMarek Vasut */ 229964a632daSMarek Vasut phy_dev = of_phy_find_device(fep->phy_node); 230064a632daSMarek Vasut phy_reset_after_clk_enable(phy_dev); 230164a632daSMarek Vasut put_device(&phy_dev->mdio.dev); 230264a632daSMarek Vasut } 230364a632daSMarek Vasut } 230464a632daSMarek Vasut 2305e8fcfcd5SNimrod Andy static int fec_enet_clk_enable(struct net_device *ndev, bool enable) 2306e8fcfcd5SNimrod Andy { 2307e8fcfcd5SNimrod Andy struct fec_enet_private *fep = netdev_priv(ndev); 2308e8fcfcd5SNimrod Andy int ret; 2309e8fcfcd5SNimrod Andy 2310e8fcfcd5SNimrod Andy if (enable) { 2311e8fcfcd5SNimrod Andy ret = clk_prepare_enable(fep->clk_enet_out); 2312e8fcfcd5SNimrod Andy if (ret) 2313d7c3a206SAndy Duan return ret; 231401e5943aSUwe Kleine-König 2315e8fcfcd5SNimrod Andy if (fep->clk_ptp) { 231601b825f9SFrancesco Dolcini mutex_lock(&fep->ptp_clk_mutex); 2317e8fcfcd5SNimrod Andy ret = clk_prepare_enable(fep->clk_ptp); 231891c0d987SNimrod Andy if (ret) { 231901b825f9SFrancesco Dolcini mutex_unlock(&fep->ptp_clk_mutex); 2320e8fcfcd5SNimrod Andy goto failed_clk_ptp; 232191c0d987SNimrod Andy } else { 232291c0d987SNimrod Andy fep->ptp_clk_on = true; 232391c0d987SNimrod Andy } 232401b825f9SFrancesco Dolcini mutex_unlock(&fep->ptp_clk_mutex); 2325e8fcfcd5SNimrod Andy } 232601e5943aSUwe Kleine-König 23279b5330edSFugang Duan ret = clk_prepare_enable(fep->clk_ref); 23289b5330edSFugang Duan if (ret) 23299b5330edSFugang Duan goto failed_clk_ref; 23301b0a83acSRichard Leitner 2331fc539459SFugang Duan ret = clk_prepare_enable(fep->clk_2x_txclk); 2332fc539459SFugang Duan if (ret) 2333fc539459SFugang Duan goto failed_clk_2x_txclk; 2334fc539459SFugang Duan 233564a632daSMarek Vasut fec_enet_phy_reset_after_clk_enable(ndev); 2336e8fcfcd5SNimrod Andy } else { 2337e8fcfcd5SNimrod Andy clk_disable_unprepare(fep->clk_enet_out); 233891c0d987SNimrod Andy if (fep->clk_ptp) { 233901b825f9SFrancesco Dolcini mutex_lock(&fep->ptp_clk_mutex); 2340e8fcfcd5SNimrod Andy clk_disable_unprepare(fep->clk_ptp); 234191c0d987SNimrod Andy fep->ptp_clk_on = false; 234201b825f9SFrancesco Dolcini mutex_unlock(&fep->ptp_clk_mutex); 234391c0d987SNimrod Andy } 23449b5330edSFugang Duan clk_disable_unprepare(fep->clk_ref); 2345fc539459SFugang Duan clk_disable_unprepare(fep->clk_2x_txclk); 2346e8fcfcd5SNimrod Andy } 2347e8fcfcd5SNimrod Andy 2348e8fcfcd5SNimrod Andy return 0; 23499b5330edSFugang Duan 2350fc539459SFugang Duan failed_clk_2x_txclk: 2351fc539459SFugang Duan if (fep->clk_ref) 2352fc539459SFugang Duan clk_disable_unprepare(fep->clk_ref); 23539b5330edSFugang Duan failed_clk_ref: 2354a74d19baSLiu Xiang if (fep->clk_ptp) { 235501b825f9SFrancesco Dolcini mutex_lock(&fep->ptp_clk_mutex); 2356a74d19baSLiu Xiang clk_disable_unprepare(fep->clk_ptp); 2357a74d19baSLiu Xiang fep->ptp_clk_on = false; 235801b825f9SFrancesco Dolcini mutex_unlock(&fep->ptp_clk_mutex); 2359a74d19baSLiu Xiang } 2360e8fcfcd5SNimrod Andy failed_clk_ptp: 2361e8fcfcd5SNimrod Andy clk_disable_unprepare(fep->clk_enet_out); 2362e8fcfcd5SNimrod Andy 2363e8fcfcd5SNimrod Andy return ret; 2364e8fcfcd5SNimrod Andy } 2365e8fcfcd5SNimrod Andy 2366b820c114SJoakim Zhang static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep, 2367b820c114SJoakim Zhang struct device_node *np) 2368b820c114SJoakim Zhang { 2369b820c114SJoakim Zhang u32 rgmii_tx_delay, rgmii_rx_delay; 2370b820c114SJoakim Zhang 2371b820c114SJoakim Zhang /* For rgmii tx internal delay, valid values are 0ps and 2000ps */ 2372b820c114SJoakim Zhang if (!of_property_read_u32(np, "tx-internal-delay-ps", 
&rgmii_tx_delay)) { 2373b820c114SJoakim Zhang if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) { 2374b820c114SJoakim Zhang dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps"); 2375b820c114SJoakim Zhang return -EINVAL; 2376b820c114SJoakim Zhang } else if (rgmii_tx_delay == 2000) { 2377b820c114SJoakim Zhang fep->rgmii_txc_dly = true; 2378b820c114SJoakim Zhang } 2379b820c114SJoakim Zhang } 2380b820c114SJoakim Zhang 2381b820c114SJoakim Zhang /* For rgmii rx internal delay, valid values are 0ps and 2000ps */ 2382b820c114SJoakim Zhang if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) { 2383b820c114SJoakim Zhang if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) { 2384b820c114SJoakim Zhang dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps"); 2385b820c114SJoakim Zhang return -EINVAL; 2386b820c114SJoakim Zhang } else if (rgmii_rx_delay == 2000) { 2387b820c114SJoakim Zhang fep->rgmii_rxc_dly = true; 2388b820c114SJoakim Zhang } 2389b820c114SJoakim Zhang } 2390b820c114SJoakim Zhang 2391b820c114SJoakim Zhang return 0; 2392b820c114SJoakim Zhang } 2393b820c114SJoakim Zhang 2394793fc096SFrank Li static int fec_enet_mii_probe(struct net_device *ndev) 2395793fc096SFrank Li { 2396793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 2397793fc096SFrank Li struct phy_device *phy_dev = NULL; 2398793fc096SFrank Li char mdio_bus_id[MII_BUS_ID_SIZE]; 2399793fc096SFrank Li char phy_name[MII_BUS_ID_SIZE + 3]; 2400793fc096SFrank Li int phy_id; 2401793fc096SFrank Li int dev_id = fep->dev_id; 2402793fc096SFrank Li 2403407066f8SUwe Kleine-König if (fep->phy_node) { 2404407066f8SUwe Kleine-König phy_dev = of_phy_connect(ndev, fep->phy_node, 2405407066f8SUwe Kleine-König &fec_enet_adjust_link, 0, 2406407066f8SUwe Kleine-König fep->phy_interface); 24079558df3aSAndrew Lunn if (!phy_dev) { 24089558df3aSAndrew Lunn netdev_err(ndev, "Unable to connect to phy\n"); 2409213a9922SNimrod Andy return -ENODEV; 24109558df3aSAndrew Lunn } 2411407066f8SUwe Kleine-König } else { 2412793fc096SFrank Li /* check for attached phy */ 2413793fc096SFrank Li for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { 24147f854420SAndrew Lunn if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) 2415793fc096SFrank Li continue; 2416793fc096SFrank Li if (dev_id--) 2417793fc096SFrank Li continue; 2418f029c781SWolfram Sang strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); 2419793fc096SFrank Li break; 2420793fc096SFrank Li } 2421793fc096SFrank Li 2422793fc096SFrank Li if (phy_id >= PHY_MAX_ADDR) { 242331b7720cSJoe Perches netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); 2424f029c781SWolfram Sang strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); 2425793fc096SFrank Li phy_id = 0; 2426793fc096SFrank Li } 2427793fc096SFrank Li 2428407066f8SUwe Kleine-König snprintf(phy_name, sizeof(phy_name), 2429407066f8SUwe Kleine-König PHY_ID_FMT, mdio_bus_id, phy_id); 2430793fc096SFrank Li phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 2431793fc096SFrank Li fep->phy_interface); 2432407066f8SUwe Kleine-König } 2433407066f8SUwe Kleine-König 2434793fc096SFrank Li if (IS_ERR(phy_dev)) { 243531b7720cSJoe Perches netdev_err(ndev, "could not attach to PHY\n"); 2436793fc096SFrank Li return PTR_ERR(phy_dev); 2437793fc096SFrank Li } 2438793fc096SFrank Li 2439793fc096SFrank Li /* mask with MAC supported features */ 24406b7e4008SLothar Waßmann if (fep->quirks & FEC_QUIRK_HAS_GBIT) { 244158056c1eSAndrew Lunn phy_set_max_speed(phy_dev, 1000); 
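/* Note on the lines that follow: with FEC_QUIRK_HAS_GBIT the PHY may advertise
 * up to 1000 Mbit/s, and the 1000baseT half-duplex mode is then removed below,
 * presumably because the gigabit-capable FEC variants only support full duplex
 * at that speed (editorial reading of the surrounding quirk handling, not a
 * statement from the original source).
 */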
244241124fa6SAndrew Lunn phy_remove_link_mode(phy_dev, 244341124fa6SAndrew Lunn ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 2444d1391930SGuenter Roeck #if !defined(CONFIG_M5272) 2445c306ad36SAndrew Lunn phy_support_sym_pause(phy_dev); 2446d1391930SGuenter Roeck #endif 2447793fc096SFrank Li } 2448793fc096SFrank Li else 244958056c1eSAndrew Lunn phy_set_max_speed(phy_dev, 100); 2450793fc096SFrank Li 24516a2495adSAndrew Lunn if (fep->quirks & FEC_QUIRK_HAS_EEE) 24526a2495adSAndrew Lunn phy_support_eee(phy_dev); 24536a2495adSAndrew Lunn 2454793fc096SFrank Li fep->link = 0; 2455793fc096SFrank Li fep->full_duplex = 0; 2456793fc096SFrank Li 24572220943aSAndrew Lunn phy_attached_info(phy_dev); 2458793fc096SFrank Li 2459793fc096SFrank Li return 0; 2460793fc096SFrank Li } 2461793fc096SFrank Li 2462793fc096SFrank Li static int fec_enet_mii_init(struct platform_device *pdev) 2463793fc096SFrank Li { 2464793fc096SFrank Li static struct mii_bus *fec0_mii_bus; 2465793fc096SFrank Li struct net_device *ndev = platform_get_drvdata(pdev); 2466793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 24673c01eb62SAndrew Lunn bool suppress_preamble = false; 2468cbc17e78SWei Fang struct phy_device *phydev; 2469407066f8SUwe Kleine-König struct device_node *node; 2470e7f4dc35SAndrew Lunn int err = -ENXIO; 247163c60732SUwe Kleine-König u32 mii_speed, holdtime; 24723e782985SAndrew Lunn u32 bus_freq; 2473cbc17e78SWei Fang int addr; 2474793fc096SFrank Li 2475793fc096SFrank Li /* 24763d125f9cSStefan Agner * The i.MX28 dual fec interfaces are not equal. 2477793fc096SFrank Li * Here are the differences: 2478793fc096SFrank Li * 2479793fc096SFrank Li * - fec0 supports MII & RMII modes while fec1 only supports RMII 2480793fc096SFrank Li * - fec0 acts as the 1588 time master while fec1 is slave 2481793fc096SFrank Li * - external phys can only be configured by fec0 2482793fc096SFrank Li * 2483793fc096SFrank Li * That is to say fec1 can not work independently. It only works 2484793fc096SFrank Li * when fec0 is working. The reason behind this design is that the 2485793fc096SFrank Li * second interface is added primarily for Switch mode. 2486793fc096SFrank Li * 2487793fc096SFrank Li * Because of the last point above, both phys are attached on fec0 2488793fc096SFrank Li * mdio interface in board design, and need to be configured by 2489793fc096SFrank Li * fec0 mii_bus. 2490793fc096SFrank Li */ 24913d125f9cSStefan Agner if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { 2492793fc096SFrank Li /* fec1 uses fec0 mii_bus */ 2493793fc096SFrank Li if (mii_cnt && fec0_mii_bus) { 2494793fc096SFrank Li fep->mii_bus = fec0_mii_bus; 2495793fc096SFrank Li mii_cnt++; 2496793fc096SFrank Li return 0; 2497793fc096SFrank Li } 2498793fc096SFrank Li return -ENOENT; 2499793fc096SFrank Li } 2500793fc096SFrank Li 25013e782985SAndrew Lunn bus_freq = 2500000; /* 2.5MHz by default */ 25023e782985SAndrew Lunn node = of_get_child_by_name(pdev->dev.of_node, "mdio"); 25033c01eb62SAndrew Lunn if (node) { 25043e782985SAndrew Lunn of_property_read_u32(node, "clock-frequency", &bus_freq); 25053c01eb62SAndrew Lunn suppress_preamble = of_property_read_bool(node, 25063c01eb62SAndrew Lunn "suppress-preamble"); 25073c01eb62SAndrew Lunn } 25083e782985SAndrew Lunn 2509793fc096SFrank Li /* 25103e782985SAndrew Lunn * Set MII speed (= clk_get_rate() / 2 * phy_speed) 2511793fc096SFrank Li * 2512793fc096SFrank Li * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while 2513793fc096SFrank Li * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. 
The i.MX28 2514793fc096SFrank Li * Reference Manual has an error on this, which is fixed in the i.MX6Q 2515793fc096SFrank Li * document. 2516793fc096SFrank Li */ 25173e782985SAndrew Lunn mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2); 25186b7e4008SLothar Waßmann if (fep->quirks & FEC_QUIRK_ENET_MAC) 251963c60732SUwe Kleine-König mii_speed--; 252063c60732SUwe Kleine-König if (mii_speed > 63) { 252163c60732SUwe Kleine-König dev_err(&pdev->dev, 2522981a0547SPeter Meerwald-Stadler "fec clock (%lu) too fast to get right mii speed\n", 252363c60732SUwe Kleine-König clk_get_rate(fep->clk_ipg)); 252463c60732SUwe Kleine-König err = -EINVAL; 252563c60732SUwe Kleine-König goto err_out; 252663c60732SUwe Kleine-König } 252763c60732SUwe Kleine-König 252863c60732SUwe Kleine-König /* 252963c60732SUwe Kleine-König * The i.MX28 and i.MX6 types have another field in the MSCR (aka 253063c60732SUwe Kleine-König * MII_SPEED) register that defines the MDIO output hold time. Earlier 253163c60732SUwe Kleine-König * versions are RAZ there, so just ignore the difference and write the 253263c60732SUwe Kleine-König * register always. 253363c60732SUwe Kleine-König * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns. 253463c60732SUwe Kleine-König * HOLDTIME + 1 is the number of clk cycles the fec is holding the 253563c60732SUwe Kleine-König * output. 253663c60732SUwe Kleine-König * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). 253763c60732SUwe Kleine-König * Given that ceil(clkrate / 5000000) <= 64, the calculation for 253863c60732SUwe Kleine-König * holdtime cannot result in a value greater than 3. 253963c60732SUwe Kleine-König */ 254063c60732SUwe Kleine-König holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; 254163c60732SUwe Kleine-König 254263c60732SUwe Kleine-König fep->phy_speed = mii_speed << 1 | holdtime << 8; 254363c60732SUwe Kleine-König 25443c01eb62SAndrew Lunn if (suppress_preamble) 25453c01eb62SAndrew Lunn fep->phy_speed |= BIT(7); 25463c01eb62SAndrew Lunn 25471e6114f5SGreg Ungerer if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) { 2548f166f890SAndrew Lunn /* Clear MMFR to avoid generating an MII event when writing MSCR.
2549f166f890SAndrew Lunn * MII event generation condition: 2550f166f890SAndrew Lunn * - writing MSCR: 2551f166f890SAndrew Lunn * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero & 2552f166f890SAndrew Lunn * mscr_reg_data_in[7:0] != 0 2553f166f890SAndrew Lunn * - writing MMFR: 2554f166f890SAndrew Lunn * - mscr[7:0]_not_zero 2555f166f890SAndrew Lunn */ 2556f166f890SAndrew Lunn writel(0, fep->hwp + FEC_MII_DATA); 25571e6114f5SGreg Ungerer } 2558f166f890SAndrew Lunn 2559793fc096SFrank Li writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 2560793fc096SFrank Li 2561f166f890SAndrew Lunn /* Clear any pending transaction complete indication */ 2562f166f890SAndrew Lunn writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); 2563f166f890SAndrew Lunn 2564793fc096SFrank Li fep->mii_bus = mdiobus_alloc(); 2565793fc096SFrank Li if (fep->mii_bus == NULL) { 2566793fc096SFrank Li err = -ENOMEM; 2567793fc096SFrank Li goto err_out; 2568793fc096SFrank Li } 2569793fc096SFrank Li 2570793fc096SFrank Li fep->mii_bus->name = "fec_enet_mii_bus"; 25718d03ad1aSAndrew Lunn fep->mii_bus->read = fec_enet_mdio_read_c22; 25728d03ad1aSAndrew Lunn fep->mii_bus->write = fec_enet_mdio_write_c22; 2573abc33494SGreg Ungerer if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) { 25748d03ad1aSAndrew Lunn fep->mii_bus->read_c45 = fec_enet_mdio_read_c45; 25758d03ad1aSAndrew Lunn fep->mii_bus->write_c45 = fec_enet_mdio_write_c45; 2576abc33494SGreg Ungerer } 2577793fc096SFrank Li snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2578793fc096SFrank Li pdev->name, fep->dev_id + 1); 2579793fc096SFrank Li fep->mii_bus->priv = fep; 2580793fc096SFrank Li fep->mii_bus->parent = &pdev->dev; 2581793fc096SFrank Li 2582407066f8SUwe Kleine-König err = of_mdiobus_register(fep->mii_bus, node); 2583407066f8SUwe Kleine-König if (err) 2584e7f4dc35SAndrew Lunn goto err_out_free_mdiobus; 25850607a2cdSPan Bian of_node_put(node); 2586793fc096SFrank Li 2587cbc17e78SWei Fang /* find all the PHY devices on the bus and set mac_managed_pm to true */ 2588cbc17e78SWei Fang for (addr = 0; addr < PHY_MAX_ADDR; addr++) { 2589cbc17e78SWei Fang phydev = mdiobus_get_phy(fep->mii_bus, addr); 2590cbc17e78SWei Fang if (phydev) 2591cbc17e78SWei Fang phydev->mac_managed_pm = true; 2592cbc17e78SWei Fang } 2593cbc17e78SWei Fang 2594793fc096SFrank Li mii_cnt++; 2595793fc096SFrank Li 2596793fc096SFrank Li /* save fec0 mii_bus */ 25973d125f9cSStefan Agner if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) 2598793fc096SFrank Li fec0_mii_bus = fep->mii_bus; 2599793fc096SFrank Li 2600793fc096SFrank Li return 0; 2601793fc096SFrank Li 2602793fc096SFrank Li err_out_free_mdiobus: 2603793fc096SFrank Li mdiobus_free(fep->mii_bus); 2604793fc096SFrank Li err_out: 26050607a2cdSPan Bian of_node_put(node); 2606793fc096SFrank Li return err; 2607793fc096SFrank Li } 2608793fc096SFrank Li 2609793fc096SFrank Li static void fec_enet_mii_remove(struct fec_enet_private *fep) 2610793fc096SFrank Li { 2611793fc096SFrank Li if (--mii_cnt == 0) { 2612793fc096SFrank Li mdiobus_unregister(fep->mii_bus); 2613793fc096SFrank Li mdiobus_free(fep->mii_bus); 2614793fc096SFrank Li } 2615793fc096SFrank Li } 2616793fc096SFrank Li 2617793fc096SFrank Li static void fec_enet_get_drvinfo(struct net_device *ndev, 2618793fc096SFrank Li struct ethtool_drvinfo *info) 2619793fc096SFrank Li { 2620793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 2621793fc096SFrank Li 2622f029c781SWolfram Sang strscpy(info->driver, fep->pdev->dev.driver->name, 2623793fc096SFrank Li sizeof(info->driver)); 2624f029c781SWolfram Sang strscpy(info->bus_info, 
dev_name(&ndev->dev), sizeof(info->bus_info)); 2625793fc096SFrank Li } 2626793fc096SFrank Li 2627db65f35fSPhilippe Reynes static int fec_enet_get_regs_len(struct net_device *ndev) 2628db65f35fSPhilippe Reynes { 2629db65f35fSPhilippe Reynes struct fec_enet_private *fep = netdev_priv(ndev); 2630db65f35fSPhilippe Reynes struct resource *r; 2631db65f35fSPhilippe Reynes int s = 0; 2632db65f35fSPhilippe Reynes 2633db65f35fSPhilippe Reynes r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); 2634db65f35fSPhilippe Reynes if (r) 2635db65f35fSPhilippe Reynes s = resource_size(r); 2636db65f35fSPhilippe Reynes 2637db65f35fSPhilippe Reynes return s; 2638db65f35fSPhilippe Reynes } 2639db65f35fSPhilippe Reynes 2640db65f35fSPhilippe Reynes /* List of registers that can be safety be read to dump them with ethtool */ 2641db65f35fSPhilippe Reynes #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 26423f1dcc6aSLucas Stach defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 264378cc6e7eSFlorian Fainelli defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) 2644f9bcc9f3SVivien Didelot static __u32 fec_enet_register_version = 2; 2645db65f35fSPhilippe Reynes static u32 fec_enet_register_offset[] = { 2646db65f35fSPhilippe Reynes FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2647db65f35fSPhilippe Reynes FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2648db65f35fSPhilippe Reynes FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1, 2649db65f35fSPhilippe Reynes FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH, 2650db65f35fSPhilippe Reynes FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, 2651db65f35fSPhilippe Reynes FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1, 2652db65f35fSPhilippe Reynes FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2, 2653db65f35fSPhilippe Reynes FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0, 2654db65f35fSPhilippe Reynes FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, 2655db65f35fSPhilippe Reynes FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2, 2656db65f35fSPhilippe Reynes FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1, 2657db65f35fSPhilippe Reynes FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME, 2658db65f35fSPhilippe Reynes RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 2659db65f35fSPhilippe Reynes RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 2660db65f35fSPhilippe Reynes RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 2661db65f35fSPhilippe Reynes RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 2662db65f35fSPhilippe Reynes RMON_T_P_GTE2048, RMON_T_OCTETS, 2663db65f35fSPhilippe Reynes IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, 2664db65f35fSPhilippe Reynes IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 2665db65f35fSPhilippe Reynes IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 2666db65f35fSPhilippe Reynes RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 2667db65f35fSPhilippe Reynes RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 2668db65f35fSPhilippe Reynes RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 2669db65f35fSPhilippe Reynes RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 2670db65f35fSPhilippe Reynes RMON_R_P_GTE2048, RMON_R_OCTETS, 2671db65f35fSPhilippe Reynes IEEE_R_DROP, IEEE_R_FRAME_OK, 
IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 2672db65f35fSPhilippe Reynes IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2673db65f35fSPhilippe Reynes }; 26740a8b43b1SJuergen Borleis /* for i.MX6ul */ 26750a8b43b1SJuergen Borleis static u32 fec_enet_register_offset_6ul[] = { 26760a8b43b1SJuergen Borleis FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 26770a8b43b1SJuergen Borleis FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 26780a8b43b1SJuergen Borleis FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0, 26790a8b43b1SJuergen Borleis FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, 26800a8b43b1SJuergen Borleis FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0, 26810a8b43b1SJuergen Borleis FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, 26820a8b43b1SJuergen Borleis FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, 26830a8b43b1SJuergen Borleis RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 26840a8b43b1SJuergen Borleis RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 26850a8b43b1SJuergen Borleis RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 26860a8b43b1SJuergen Borleis RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 26870a8b43b1SJuergen Borleis RMON_T_P_GTE2048, RMON_T_OCTETS, 26880a8b43b1SJuergen Borleis IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, 26890a8b43b1SJuergen Borleis IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 26900a8b43b1SJuergen Borleis IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 26910a8b43b1SJuergen Borleis RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 26920a8b43b1SJuergen Borleis RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 26930a8b43b1SJuergen Borleis RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 26940a8b43b1SJuergen Borleis RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 26950a8b43b1SJuergen Borleis RMON_R_P_GTE2048, RMON_R_OCTETS, 26960a8b43b1SJuergen Borleis IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 26970a8b43b1SJuergen Borleis IEEE_R_FDXFC, IEEE_R_OCTETS_OK 26980a8b43b1SJuergen Borleis }; 2699db65f35fSPhilippe Reynes #else 2700f9bcc9f3SVivien Didelot static __u32 fec_enet_register_version = 1; 2701db65f35fSPhilippe Reynes static u32 fec_enet_register_offset[] = { 2702db65f35fSPhilippe Reynes FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, 2703db65f35fSPhilippe Reynes FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, 2704db65f35fSPhilippe Reynes FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED, 2705db65f35fSPhilippe Reynes FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL, 2706db65f35fSPhilippe Reynes FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, 2707db65f35fSPhilippe Reynes FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0, 2708db65f35fSPhilippe Reynes FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0, 2709db65f35fSPhilippe Reynes FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0, 2710db65f35fSPhilippe Reynes FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2 2711db65f35fSPhilippe Reynes }; 2712db65f35fSPhilippe Reynes #endif 2713db65f35fSPhilippe Reynes 2714db65f35fSPhilippe Reynes static void fec_enet_get_regs(struct net_device *ndev, 2715db65f35fSPhilippe Reynes struct ethtool_regs *regs, void *regbuf) 2716db65f35fSPhilippe Reynes { 2717db65f35fSPhilippe Reynes struct fec_enet_private *fep = netdev_priv(ndev); 
2718db65f35fSPhilippe Reynes u32 __iomem *theregs = (u32 __iomem *)fep->hwp; 2719c72a0bc0SAndrew Lunn struct device *dev = &fep->pdev->dev; 2720db65f35fSPhilippe Reynes u32 *buf = (u32 *)regbuf; 2721db65f35fSPhilippe Reynes u32 i, off; 2722c72a0bc0SAndrew Lunn int ret; 27230a8b43b1SJuergen Borleis #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 27240a8b43b1SJuergen Borleis defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 27250a8b43b1SJuergen Borleis defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) 27260a8b43b1SJuergen Borleis u32 *reg_list; 27270a8b43b1SJuergen Borleis u32 reg_cnt; 2728c72a0bc0SAndrew Lunn 27290a8b43b1SJuergen Borleis if (!of_machine_is_compatible("fsl,imx6ul")) { 27300a8b43b1SJuergen Borleis reg_list = fec_enet_register_offset; 27310a8b43b1SJuergen Borleis reg_cnt = ARRAY_SIZE(fec_enet_register_offset); 27320a8b43b1SJuergen Borleis } else { 27330a8b43b1SJuergen Borleis reg_list = fec_enet_register_offset_6ul; 27340a8b43b1SJuergen Borleis reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul); 27350a8b43b1SJuergen Borleis } 27360a8b43b1SJuergen Borleis #else 27370a8b43b1SJuergen Borleis /* coldfire */ 27380a8b43b1SJuergen Borleis static u32 *reg_list = fec_enet_register_offset; 27390a8b43b1SJuergen Borleis static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); 27400a8b43b1SJuergen Borleis #endif 2741da875fa5SZhang Qilong ret = pm_runtime_resume_and_get(dev); 2742c72a0bc0SAndrew Lunn if (ret < 0) 2743c72a0bc0SAndrew Lunn return; 2744db65f35fSPhilippe Reynes 2745f9bcc9f3SVivien Didelot regs->version = fec_enet_register_version; 2746f9bcc9f3SVivien Didelot 2747db65f35fSPhilippe Reynes memset(buf, 0, regs->len); 2748db65f35fSPhilippe Reynes 27490a8b43b1SJuergen Borleis for (i = 0; i < reg_cnt; i++) { 27500a8b43b1SJuergen Borleis off = reg_list[i]; 2751ec20a63aSFugang Duan 2752ec20a63aSFugang Duan if ((off == FEC_R_BOUND || off == FEC_R_FSTART) && 2753ec20a63aSFugang Duan !(fep->quirks & FEC_QUIRK_HAS_FRREG)) 2754ec20a63aSFugang Duan continue; 2755ec20a63aSFugang Duan 2756ec20a63aSFugang Duan off >>= 2; 2757db65f35fSPhilippe Reynes buf[off] = readl(&theregs[off]); 2758db65f35fSPhilippe Reynes } 2759c72a0bc0SAndrew Lunn 2760c72a0bc0SAndrew Lunn pm_runtime_mark_last_busy(dev); 2761c72a0bc0SAndrew Lunn pm_runtime_put_autosuspend(dev); 2762db65f35fSPhilippe Reynes } 2763db65f35fSPhilippe Reynes 2764793fc096SFrank Li static int fec_enet_get_ts_info(struct net_device *ndev, 2765793fc096SFrank Li struct ethtool_ts_info *info) 2766793fc096SFrank Li { 2767793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 2768793fc096SFrank Li 2769793fc096SFrank Li if (fep->bufdesc_ex) { 2770793fc096SFrank Li 2771793fc096SFrank Li info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 2772793fc096SFrank Li SOF_TIMESTAMPING_RX_SOFTWARE | 2773793fc096SFrank Li SOF_TIMESTAMPING_SOFTWARE | 2774793fc096SFrank Li SOF_TIMESTAMPING_TX_HARDWARE | 2775793fc096SFrank Li SOF_TIMESTAMPING_RX_HARDWARE | 2776793fc096SFrank Li SOF_TIMESTAMPING_RAW_HARDWARE; 2777793fc096SFrank Li if (fep->ptp_clock) 2778793fc096SFrank Li info->phc_index = ptp_clock_index(fep->ptp_clock); 2779793fc096SFrank Li else 2780793fc096SFrank Li info->phc_index = -1; 2781793fc096SFrank Li 2782793fc096SFrank Li info->tx_types = (1 << HWTSTAMP_TX_OFF) | 2783793fc096SFrank Li (1 << HWTSTAMP_TX_ON); 2784793fc096SFrank Li 2785793fc096SFrank Li info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2786793fc096SFrank Li (1 << HWTSTAMP_FILTER_ALL); 2787793fc096SFrank Li 
return 0; 2788793fc096SFrank Li } else { 2789793fc096SFrank Li return ethtool_op_get_ts_info(ndev, info); 2790793fc096SFrank Li } 2791793fc096SFrank Li } 2792793fc096SFrank Li 2793d1391930SGuenter Roeck #if !defined(CONFIG_M5272) 2794d1391930SGuenter Roeck 2795793fc096SFrank Li static void fec_enet_get_pauseparam(struct net_device *ndev, 2796793fc096SFrank Li struct ethtool_pauseparam *pause) 2797793fc096SFrank Li { 2798793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 2799793fc096SFrank Li 2800793fc096SFrank Li pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; 2801793fc096SFrank Li pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; 2802793fc096SFrank Li pause->rx_pause = pause->tx_pause; 2803793fc096SFrank Li } 2804793fc096SFrank Li 2805793fc096SFrank Li static int fec_enet_set_pauseparam(struct net_device *ndev, 2806793fc096SFrank Li struct ethtool_pauseparam *pause) 2807793fc096SFrank Li { 2808793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 2809793fc096SFrank Li 281045f5c327SPhilippe Reynes if (!ndev->phydev) 28110b146ca8SRussell King return -ENODEV; 28120b146ca8SRussell King 2813793fc096SFrank Li if (pause->tx_pause != pause->rx_pause) { 2814793fc096SFrank Li netdev_info(ndev, 2815793fc096SFrank Li "hardware only support enable/disable both tx and rx"); 2816793fc096SFrank Li return -EINVAL; 2817793fc096SFrank Li } 2818793fc096SFrank Li 2819793fc096SFrank Li fep->pause_flag = 0; 2820793fc096SFrank Li 2821793fc096SFrank Li /* tx pause must be same as rx pause */ 2822793fc096SFrank Li fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; 2823793fc096SFrank Li fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; 2824793fc096SFrank Li 28250c122405SAndrew Lunn phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause, 28260c122405SAndrew Lunn pause->autoneg); 2827793fc096SFrank Li 2828793fc096SFrank Li if (pause->autoneg) { 2829793fc096SFrank Li if (netif_running(ndev)) 2830793fc096SFrank Li fec_stop(ndev); 283145f5c327SPhilippe Reynes phy_start_aneg(ndev->phydev); 2832793fc096SFrank Li } 2833dbc64a8eSRussell King if (netif_running(ndev)) { 2834dbc64a8eSRussell King napi_disable(&fep->napi); 2835dbc64a8eSRussell King netif_tx_lock_bh(ndev); 2836ef83337dSRussell King fec_restart(ndev); 2837657ade07SRickard x Andersson netif_tx_wake_all_queues(ndev); 28386af42d42SRussell King netif_tx_unlock_bh(ndev); 2839dbc64a8eSRussell King napi_enable(&fep->napi); 2840dbc64a8eSRussell King } 2841793fc096SFrank Li 2842793fc096SFrank Li return 0; 2843793fc096SFrank Li } 2844793fc096SFrank Li 284538ae92dcSChris Healy static const struct fec_stat { 284638ae92dcSChris Healy char name[ETH_GSTRING_LEN]; 284738ae92dcSChris Healy u16 offset; 284838ae92dcSChris Healy } fec_stats[] = { 284938ae92dcSChris Healy /* RMON TX */ 285038ae92dcSChris Healy { "tx_dropped", RMON_T_DROP }, 285138ae92dcSChris Healy { "tx_packets", RMON_T_PACKETS }, 285238ae92dcSChris Healy { "tx_broadcast", RMON_T_BC_PKT }, 285338ae92dcSChris Healy { "tx_multicast", RMON_T_MC_PKT }, 285438ae92dcSChris Healy { "tx_crc_errors", RMON_T_CRC_ALIGN }, 285538ae92dcSChris Healy { "tx_undersize", RMON_T_UNDERSIZE }, 285638ae92dcSChris Healy { "tx_oversize", RMON_T_OVERSIZE }, 285738ae92dcSChris Healy { "tx_fragment", RMON_T_FRAG }, 285838ae92dcSChris Healy { "tx_jabber", RMON_T_JAB }, 285938ae92dcSChris Healy { "tx_collision", RMON_T_COL }, 286038ae92dcSChris Healy { "tx_64byte", RMON_T_P64 }, 286138ae92dcSChris Healy { "tx_65to127byte", RMON_T_P65TO127 }, 
286238ae92dcSChris Healy { "tx_128to255byte", RMON_T_P128TO255 }, 286338ae92dcSChris Healy { "tx_256to511byte", RMON_T_P256TO511 }, 286438ae92dcSChris Healy { "tx_512to1023byte", RMON_T_P512TO1023 }, 286538ae92dcSChris Healy { "tx_1024to2047byte", RMON_T_P1024TO2047 }, 286638ae92dcSChris Healy { "tx_GTE2048byte", RMON_T_P_GTE2048 }, 286738ae92dcSChris Healy { "tx_octets", RMON_T_OCTETS }, 286838ae92dcSChris Healy 286938ae92dcSChris Healy /* IEEE TX */ 287038ae92dcSChris Healy { "IEEE_tx_drop", IEEE_T_DROP }, 287138ae92dcSChris Healy { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, 287238ae92dcSChris Healy { "IEEE_tx_1col", IEEE_T_1COL }, 287338ae92dcSChris Healy { "IEEE_tx_mcol", IEEE_T_MCOL }, 287438ae92dcSChris Healy { "IEEE_tx_def", IEEE_T_DEF }, 287538ae92dcSChris Healy { "IEEE_tx_lcol", IEEE_T_LCOL }, 287638ae92dcSChris Healy { "IEEE_tx_excol", IEEE_T_EXCOL }, 287738ae92dcSChris Healy { "IEEE_tx_macerr", IEEE_T_MACERR }, 287838ae92dcSChris Healy { "IEEE_tx_cserr", IEEE_T_CSERR }, 287938ae92dcSChris Healy { "IEEE_tx_sqe", IEEE_T_SQE }, 288038ae92dcSChris Healy { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, 288138ae92dcSChris Healy { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, 288238ae92dcSChris Healy 288338ae92dcSChris Healy /* RMON RX */ 288438ae92dcSChris Healy { "rx_packets", RMON_R_PACKETS }, 288538ae92dcSChris Healy { "rx_broadcast", RMON_R_BC_PKT }, 288638ae92dcSChris Healy { "rx_multicast", RMON_R_MC_PKT }, 288738ae92dcSChris Healy { "rx_crc_errors", RMON_R_CRC_ALIGN }, 288838ae92dcSChris Healy { "rx_undersize", RMON_R_UNDERSIZE }, 288938ae92dcSChris Healy { "rx_oversize", RMON_R_OVERSIZE }, 289038ae92dcSChris Healy { "rx_fragment", RMON_R_FRAG }, 289138ae92dcSChris Healy { "rx_jabber", RMON_R_JAB }, 289238ae92dcSChris Healy { "rx_64byte", RMON_R_P64 }, 289338ae92dcSChris Healy { "rx_65to127byte", RMON_R_P65TO127 }, 289438ae92dcSChris Healy { "rx_128to255byte", RMON_R_P128TO255 }, 289538ae92dcSChris Healy { "rx_256to511byte", RMON_R_P256TO511 }, 289638ae92dcSChris Healy { "rx_512to1023byte", RMON_R_P512TO1023 }, 289738ae92dcSChris Healy { "rx_1024to2047byte", RMON_R_P1024TO2047 }, 289838ae92dcSChris Healy { "rx_GTE2048byte", RMON_R_P_GTE2048 }, 289938ae92dcSChris Healy { "rx_octets", RMON_R_OCTETS }, 290038ae92dcSChris Healy 290138ae92dcSChris Healy /* IEEE RX */ 290238ae92dcSChris Healy { "IEEE_rx_drop", IEEE_R_DROP }, 290338ae92dcSChris Healy { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, 290438ae92dcSChris Healy { "IEEE_rx_crc", IEEE_R_CRC }, 290538ae92dcSChris Healy { "IEEE_rx_align", IEEE_R_ALIGN }, 290638ae92dcSChris Healy { "IEEE_rx_macerr", IEEE_R_MACERR }, 290738ae92dcSChris Healy { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, 290838ae92dcSChris Healy { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, 290938ae92dcSChris Healy }; 291038ae92dcSChris Healy 2911f85de666SNikita Yushchenko #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64)) 2912f85de666SNikita Yushchenko 29136970ef27SShenwei Wang static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = { 29146970ef27SShenwei Wang "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */ 29156970ef27SShenwei Wang "rx_xdp_pass", /* RX_XDP_PASS, */ 29166970ef27SShenwei Wang "rx_xdp_drop", /* RX_XDP_DROP, */ 29176970ef27SShenwei Wang "rx_xdp_tx", /* RX_XDP_TX, */ 29186970ef27SShenwei Wang "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */ 29196970ef27SShenwei Wang "tx_xdp_xmit", /* TX_XDP_XMIT, */ 29206970ef27SShenwei Wang "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */ 29216970ef27SShenwei Wang }; 29226970ef27SShenwei Wang 292380cca775SNikita Yushchenko static void 
fec_enet_update_ethtool_stats(struct net_device *dev) 292438ae92dcSChris Healy { 292538ae92dcSChris Healy struct fec_enet_private *fep = netdev_priv(dev); 292638ae92dcSChris Healy int i; 292738ae92dcSChris Healy 292838ae92dcSChris Healy for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 292980cca775SNikita Yushchenko fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); 293080cca775SNikita Yushchenko } 293180cca775SNikita Yushchenko 29326970ef27SShenwei Wang static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data) 29336970ef27SShenwei Wang { 29346970ef27SShenwei Wang u64 xdp_stats[XDP_STATS_TOTAL] = { 0 }; 29356970ef27SShenwei Wang struct fec_enet_priv_rx_q *rxq; 29366970ef27SShenwei Wang int i, j; 29376970ef27SShenwei Wang 29386970ef27SShenwei Wang for (i = fep->num_rx_queues - 1; i >= 0; i--) { 29396970ef27SShenwei Wang rxq = fep->rx_queue[i]; 29406970ef27SShenwei Wang 29416970ef27SShenwei Wang for (j = 0; j < XDP_STATS_TOTAL; j++) 29426970ef27SShenwei Wang xdp_stats[j] += rxq->stats[j]; 29436970ef27SShenwei Wang } 29446970ef27SShenwei Wang 29456970ef27SShenwei Wang memcpy(data, xdp_stats, sizeof(xdp_stats)); 29466970ef27SShenwei Wang } 29476970ef27SShenwei Wang 29486970ef27SShenwei Wang static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data) 29496970ef27SShenwei Wang { 2950857922b1SLucas Stach #ifdef CONFIG_PAGE_POOL_STATS 29516970ef27SShenwei Wang struct page_pool_stats stats = {}; 29526970ef27SShenwei Wang struct fec_enet_priv_rx_q *rxq; 29536970ef27SShenwei Wang int i; 29546970ef27SShenwei Wang 29556970ef27SShenwei Wang for (i = fep->num_rx_queues - 1; i >= 0; i--) { 29566970ef27SShenwei Wang rxq = fep->rx_queue[i]; 29576970ef27SShenwei Wang 29586970ef27SShenwei Wang if (!rxq->page_pool) 29596970ef27SShenwei Wang continue; 29606970ef27SShenwei Wang 29616970ef27SShenwei Wang page_pool_get_stats(rxq->page_pool, &stats); 29626970ef27SShenwei Wang } 29636970ef27SShenwei Wang 29646970ef27SShenwei Wang page_pool_ethtool_stats_get(data, &stats); 2965857922b1SLucas Stach #endif 29666970ef27SShenwei Wang } 29676970ef27SShenwei Wang 296880cca775SNikita Yushchenko static void fec_enet_get_ethtool_stats(struct net_device *dev, 296980cca775SNikita Yushchenko struct ethtool_stats *stats, u64 *data) 297080cca775SNikita Yushchenko { 297180cca775SNikita Yushchenko struct fec_enet_private *fep = netdev_priv(dev); 297280cca775SNikita Yushchenko 297380cca775SNikita Yushchenko if (netif_running(dev)) 297480cca775SNikita Yushchenko fec_enet_update_ethtool_stats(dev); 297580cca775SNikita Yushchenko 2976f85de666SNikita Yushchenko memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); 29776970ef27SShenwei Wang data += FEC_STATS_SIZE / sizeof(u64); 29786970ef27SShenwei Wang 29796970ef27SShenwei Wang fec_enet_get_xdp_stats(fep, data); 29806970ef27SShenwei Wang data += XDP_STATS_TOTAL; 29816970ef27SShenwei Wang 29826970ef27SShenwei Wang fec_enet_page_pool_stats(fep, data); 298338ae92dcSChris Healy } 298438ae92dcSChris Healy 298538ae92dcSChris Healy static void fec_enet_get_strings(struct net_device *netdev, 298638ae92dcSChris Healy u32 stringset, u8 *data) 298738ae92dcSChris Healy { 298838ae92dcSChris Healy int i; 298938ae92dcSChris Healy switch (stringset) { 299038ae92dcSChris Healy case ETH_SS_STATS: 29916970ef27SShenwei Wang for (i = 0; i < ARRAY_SIZE(fec_stats); i++) { 2992e403cfffSjustinstitt@google.com ethtool_puts(&data, fec_stats[i].name); 29936970ef27SShenwei Wang } 29946970ef27SShenwei Wang for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) { 
2995e403cfffSjustinstitt@google.com ethtool_puts(&data, fec_xdp_stat_strs[i]); 29966970ef27SShenwei Wang } 29976970ef27SShenwei Wang page_pool_ethtool_stats_get_strings(data); 29986970ef27SShenwei Wang 299938ae92dcSChris Healy break; 30006016ba34SOleksij Rempel case ETH_SS_TEST: 30016016ba34SOleksij Rempel net_selftest_get_strings(data); 30026016ba34SOleksij Rempel break; 300338ae92dcSChris Healy } 300438ae92dcSChris Healy } 300538ae92dcSChris Healy 300638ae92dcSChris Healy static int fec_enet_get_sset_count(struct net_device *dev, int sset) 300738ae92dcSChris Healy { 30086970ef27SShenwei Wang int count; 30096970ef27SShenwei Wang 301038ae92dcSChris Healy switch (sset) { 301138ae92dcSChris Healy case ETH_SS_STATS: 30126970ef27SShenwei Wang count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL; 30136970ef27SShenwei Wang count += page_pool_ethtool_stats_get_count(); 30146970ef27SShenwei Wang return count; 30156970ef27SShenwei Wang 30166016ba34SOleksij Rempel case ETH_SS_TEST: 30176016ba34SOleksij Rempel return net_selftest_get_count(); 301838ae92dcSChris Healy default: 301938ae92dcSChris Healy return -EOPNOTSUPP; 302038ae92dcSChris Healy } 302138ae92dcSChris Healy } 3022f85de666SNikita Yushchenko 30232b30842bSAndrew Lunn static void fec_enet_clear_ethtool_stats(struct net_device *dev) 30242b30842bSAndrew Lunn { 30252b30842bSAndrew Lunn struct fec_enet_private *fep = netdev_priv(dev); 30266970ef27SShenwei Wang struct fec_enet_priv_rx_q *rxq; 30276970ef27SShenwei Wang int i, j; 30282b30842bSAndrew Lunn 30292b30842bSAndrew Lunn /* Disable MIB statistics counters */ 30302b30842bSAndrew Lunn writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT); 30312b30842bSAndrew Lunn 30322b30842bSAndrew Lunn for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 30332b30842bSAndrew Lunn writel(0, fep->hwp + fec_stats[i].offset); 30342b30842bSAndrew Lunn 30356970ef27SShenwei Wang for (i = fep->num_rx_queues - 1; i >= 0; i--) { 30366970ef27SShenwei Wang rxq = fep->rx_queue[i]; 30376970ef27SShenwei Wang for (j = 0; j < XDP_STATS_TOTAL; j++) 30386970ef27SShenwei Wang rxq->stats[j] = 0; 30396970ef27SShenwei Wang } 30406970ef27SShenwei Wang 30412b30842bSAndrew Lunn /* Don't disable MIB statistics counters */ 30422b30842bSAndrew Lunn writel(0, fep->hwp + FEC_MIB_CTRLSTAT); 30432b30842bSAndrew Lunn } 30442b30842bSAndrew Lunn 3045f85de666SNikita Yushchenko #else /* !defined(CONFIG_M5272) */ 3046f85de666SNikita Yushchenko #define FEC_STATS_SIZE 0 3047f85de666SNikita Yushchenko static inline void fec_enet_update_ethtool_stats(struct net_device *dev) 3048f85de666SNikita Yushchenko { 3049f85de666SNikita Yushchenko } 305041e8e404SFabio Estevam 305141e8e404SFabio Estevam static inline void fec_enet_clear_ethtool_stats(struct net_device *dev) 305241e8e404SFabio Estevam { 305341e8e404SFabio Estevam } 3054d1391930SGuenter Roeck #endif /* !defined(CONFIG_M5272) */ 305538ae92dcSChris Healy 3056d851b47bSFugang Duan /* ITR clock source is enet system clock (clk_ahb). 
3057d851b47bSFugang Duan * TCTT unit is cycle_ns * 64 cycle 3058d851b47bSFugang Duan * So, the ICTT value = X us / (cycle_ns * 64) 3059d851b47bSFugang Duan */ 3060d851b47bSFugang Duan static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) 3061d851b47bSFugang Duan { 3062d851b47bSFugang Duan struct fec_enet_private *fep = netdev_priv(ndev); 3063d851b47bSFugang Duan 3064d851b47bSFugang Duan return us * (fep->itr_clk_rate / 64000) / 1000; 3065d851b47bSFugang Duan } 3066d851b47bSFugang Duan 3067d851b47bSFugang Duan /* Set threshold for interrupt coalescing */ 3068d851b47bSFugang Duan static void fec_enet_itr_coal_set(struct net_device *ndev) 3069d851b47bSFugang Duan { 3070d851b47bSFugang Duan struct fec_enet_private *fep = netdev_priv(ndev); 3071d851b47bSFugang Duan int rx_itr, tx_itr; 3072d851b47bSFugang Duan 3073d851b47bSFugang Duan /* Must be greater than zero to avoid unpredictable behavior */ 3074d851b47bSFugang Duan if (!fep->rx_time_itr || !fep->rx_pkts_itr || 3075d851b47bSFugang Duan !fep->tx_time_itr || !fep->tx_pkts_itr) 3076d851b47bSFugang Duan return; 3077d851b47bSFugang Duan 3078d851b47bSFugang Duan /* Select enet system clock as Interrupt Coalescing 3079d851b47bSFugang Duan * timer Clock Source 3080d851b47bSFugang Duan */ 3081d851b47bSFugang Duan rx_itr = FEC_ITR_CLK_SEL; 3082d851b47bSFugang Duan tx_itr = FEC_ITR_CLK_SEL; 3083d851b47bSFugang Duan 3084d851b47bSFugang Duan /* set ICFT and ICTT */ 3085d851b47bSFugang Duan rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); 3086d851b47bSFugang Duan rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); 3087d851b47bSFugang Duan tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); 3088d851b47bSFugang Duan tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); 3089d851b47bSFugang Duan 3090d851b47bSFugang Duan rx_itr |= FEC_ITR_EN; 3091d851b47bSFugang Duan tx_itr |= FEC_ITR_EN; 3092d851b47bSFugang Duan 3093d851b47bSFugang Duan writel(tx_itr, fep->hwp + FEC_TXIC0); 3094d851b47bSFugang Duan writel(rx_itr, fep->hwp + FEC_RXIC0); 3095471ff445SJoakim Zhang if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { 3096d851b47bSFugang Duan writel(tx_itr, fep->hwp + FEC_TXIC1); 3097d851b47bSFugang Duan writel(rx_itr, fep->hwp + FEC_RXIC1); 3098d851b47bSFugang Duan writel(tx_itr, fep->hwp + FEC_TXIC2); 3099d851b47bSFugang Duan writel(rx_itr, fep->hwp + FEC_RXIC2); 3100d851b47bSFugang Duan } 3101ff7566b8SFugang Duan } 3102d851b47bSFugang Duan 3103f3ccfda1SYufeng Mo static int fec_enet_get_coalesce(struct net_device *ndev, 3104f3ccfda1SYufeng Mo struct ethtool_coalesce *ec, 3105f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal, 3106f3ccfda1SYufeng Mo struct netlink_ext_ack *extack) 3107d851b47bSFugang Duan { 3108d851b47bSFugang Duan struct fec_enet_private *fep = netdev_priv(ndev); 3109d851b47bSFugang Duan 3110ff7566b8SFugang Duan if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) 3111d851b47bSFugang Duan return -EOPNOTSUPP; 3112d851b47bSFugang Duan 3113d851b47bSFugang Duan ec->rx_coalesce_usecs = fep->rx_time_itr; 3114d851b47bSFugang Duan ec->rx_max_coalesced_frames = fep->rx_pkts_itr; 3115d851b47bSFugang Duan 3116d851b47bSFugang Duan ec->tx_coalesce_usecs = fep->tx_time_itr; 3117d851b47bSFugang Duan ec->tx_max_coalesced_frames = fep->tx_pkts_itr; 3118d851b47bSFugang Duan 3119d851b47bSFugang Duan return 0; 3120d851b47bSFugang Duan } 3121d851b47bSFugang Duan 3122f3ccfda1SYufeng Mo static int fec_enet_set_coalesce(struct net_device *ndev, 3123f3ccfda1SYufeng Mo struct ethtool_coalesce *ec, 3124f3ccfda1SYufeng Mo struct 
kernel_ethtool_coalesce *kernel_coal, 3125f3ccfda1SYufeng Mo struct netlink_ext_ack *extack) 3126d851b47bSFugang Duan { 3127d851b47bSFugang Duan struct fec_enet_private *fep = netdev_priv(ndev); 3128517a772cSFabio Estevam struct device *dev = &fep->pdev->dev; 3129d851b47bSFugang Duan unsigned int cycle; 3130d851b47bSFugang Duan 3131ff7566b8SFugang Duan if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) 3132d851b47bSFugang Duan return -EOPNOTSUPP; 3133d851b47bSFugang Duan 3134d851b47bSFugang Duan if (ec->rx_max_coalesced_frames > 255) { 3135517a772cSFabio Estevam dev_err(dev, "Rx coalesced frames exceed hardware limitation\n"); 3136d851b47bSFugang Duan return -EINVAL; 3137d851b47bSFugang Duan } 3138d851b47bSFugang Duan 3139d851b47bSFugang Duan if (ec->tx_max_coalesced_frames > 255) { 3140517a772cSFabio Estevam dev_err(dev, "Tx coalesced frame exceed hardware limitation\n"); 3141d851b47bSFugang Duan return -EINVAL; 3142d851b47bSFugang Duan } 3143d851b47bSFugang Duan 3144ab14961dSJakub Kicinski cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); 3145d851b47bSFugang Duan if (cycle > 0xFFFF) { 3146517a772cSFabio Estevam dev_err(dev, "Rx coalesced usec exceed hardware limitation\n"); 3147d851b47bSFugang Duan return -EINVAL; 3148d851b47bSFugang Duan } 3149d851b47bSFugang Duan 3150ab14961dSJakub Kicinski cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); 3151d851b47bSFugang Duan if (cycle > 0xFFFF) { 3152ab14961dSJakub Kicinski dev_err(dev, "Tx coalesced usec exceed hardware limitation\n"); 3153d851b47bSFugang Duan return -EINVAL; 3154d851b47bSFugang Duan } 3155d851b47bSFugang Duan 3156d851b47bSFugang Duan fep->rx_time_itr = ec->rx_coalesce_usecs; 3157d851b47bSFugang Duan fep->rx_pkts_itr = ec->rx_max_coalesced_frames; 3158d851b47bSFugang Duan 3159d851b47bSFugang Duan fep->tx_time_itr = ec->tx_coalesce_usecs; 3160d851b47bSFugang Duan fep->tx_pkts_itr = ec->tx_max_coalesced_frames; 3161d851b47bSFugang Duan 3162d851b47bSFugang Duan fec_enet_itr_coal_set(ndev); 3163d851b47bSFugang Duan 3164d851b47bSFugang Duan return 0; 3165d851b47bSFugang Duan } 3166d851b47bSFugang Duan 3167b82f8c3fSFugang Duan static int 3168d80a5233SHeiner Kallweit fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata) 3169b82f8c3fSFugang Duan { 3170b82f8c3fSFugang Duan struct fec_enet_private *fep = netdev_priv(ndev); 3171d80a5233SHeiner Kallweit struct ethtool_keee *p = &fep->eee; 3172b82f8c3fSFugang Duan 3173b82f8c3fSFugang Duan if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) 3174b82f8c3fSFugang Duan return -EOPNOTSUPP; 3175b82f8c3fSFugang Duan 3176b82f8c3fSFugang Duan if (!netif_running(ndev)) 3177b82f8c3fSFugang Duan return -ENETDOWN; 3178b82f8c3fSFugang Duan 3179b82f8c3fSFugang Duan edata->tx_lpi_timer = p->tx_lpi_timer; 3180b82f8c3fSFugang Duan 3181b82f8c3fSFugang Duan return phy_ethtool_get_eee(ndev->phydev, edata); 3182b82f8c3fSFugang Duan } 3183b82f8c3fSFugang Duan 3184b82f8c3fSFugang Duan static int 3185d80a5233SHeiner Kallweit fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata) 3186b82f8c3fSFugang Duan { 3187b82f8c3fSFugang Duan struct fec_enet_private *fep = netdev_priv(ndev); 3188d80a5233SHeiner Kallweit struct ethtool_keee *p = &fep->eee; 3189b82f8c3fSFugang Duan 3190b82f8c3fSFugang Duan if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) 3191b82f8c3fSFugang Duan return -EOPNOTSUPP; 3192b82f8c3fSFugang Duan 3193b82f8c3fSFugang Duan if (!netif_running(ndev)) 3194b82f8c3fSFugang Duan return -ENETDOWN; 3195b82f8c3fSFugang Duan 3196b82f8c3fSFugang Duan p->tx_lpi_timer = edata->tx_lpi_timer; 
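/* Only the requested LPI timer is cached by the driver at this point; the
 * remaining EEE configuration (advertisement, enable/disable) appears to be
 * delegated to phylib through phy_ethtool_set_eee() below (editorial comment,
 * not part of the original source).
 */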
3197b82f8c3fSFugang Duan 3198b82f8c3fSFugang Duan return phy_ethtool_set_eee(ndev->phydev, edata); 3199b82f8c3fSFugang Duan } 3200b82f8c3fSFugang Duan 3201de40ed31SNimrod Andy static void 3202de40ed31SNimrod Andy fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 3203de40ed31SNimrod Andy { 3204de40ed31SNimrod Andy struct fec_enet_private *fep = netdev_priv(ndev); 3205de40ed31SNimrod Andy 3206de40ed31SNimrod Andy if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { 3207de40ed31SNimrod Andy wol->supported = WAKE_MAGIC; 3208de40ed31SNimrod Andy wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0; 3209de40ed31SNimrod Andy } else { 3210de40ed31SNimrod Andy wol->supported = wol->wolopts = 0; 3211de40ed31SNimrod Andy } 3212de40ed31SNimrod Andy } 3213de40ed31SNimrod Andy 3214de40ed31SNimrod Andy static int 3215de40ed31SNimrod Andy fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 3216de40ed31SNimrod Andy { 3217de40ed31SNimrod Andy struct fec_enet_private *fep = netdev_priv(ndev); 3218de40ed31SNimrod Andy 3219de40ed31SNimrod Andy if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) 3220de40ed31SNimrod Andy return -EINVAL; 3221de40ed31SNimrod Andy 3222de40ed31SNimrod Andy if (wol->wolopts & ~WAKE_MAGIC) 3223de40ed31SNimrod Andy return -EINVAL; 3224de40ed31SNimrod Andy 3225de40ed31SNimrod Andy device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); 32260b6f65c7SJoakim Zhang if (device_may_wakeup(&ndev->dev)) 3227de40ed31SNimrod Andy fep->wol_flag |= FEC_WOL_FLAG_ENABLE; 32280b6f65c7SJoakim Zhang else 3229de40ed31SNimrod Andy fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); 3230de40ed31SNimrod Andy 3231de40ed31SNimrod Andy return 0; 3232de40ed31SNimrod Andy } 3233de40ed31SNimrod Andy 3234793fc096SFrank Li static const struct ethtool_ops fec_enet_ethtool_ops = { 3235d5e3c87dSJakub Kicinski .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 3236d5e3c87dSJakub Kicinski ETHTOOL_COALESCE_MAX_FRAMES, 3237793fc096SFrank Li .get_drvinfo = fec_enet_get_drvinfo, 3238db65f35fSPhilippe Reynes .get_regs_len = fec_enet_get_regs_len, 3239db65f35fSPhilippe Reynes .get_regs = fec_enet_get_regs, 324011d59289SFlorian Fainelli .nway_reset = phy_ethtool_nway_reset, 3241c1d7c48fSRussell King .get_link = ethtool_op_get_link, 3242d851b47bSFugang Duan .get_coalesce = fec_enet_get_coalesce, 3243d851b47bSFugang Duan .set_coalesce = fec_enet_set_coalesce, 324438ae92dcSChris Healy #ifndef CONFIG_M5272 3245c1d7c48fSRussell King .get_pauseparam = fec_enet_get_pauseparam, 3246c1d7c48fSRussell King .set_pauseparam = fec_enet_set_pauseparam, 324738ae92dcSChris Healy .get_strings = fec_enet_get_strings, 3248c1d7c48fSRussell King .get_ethtool_stats = fec_enet_get_ethtool_stats, 324938ae92dcSChris Healy .get_sset_count = fec_enet_get_sset_count, 325038ae92dcSChris Healy #endif 3251c1d7c48fSRussell King .get_ts_info = fec_enet_get_ts_info, 3252de40ed31SNimrod Andy .get_wol = fec_enet_get_wol, 3253de40ed31SNimrod Andy .set_wol = fec_enet_set_wol, 3254b82f8c3fSFugang Duan .get_eee = fec_enet_get_eee, 3255b82f8c3fSFugang Duan .set_eee = fec_enet_set_eee, 32569365fbf5SPhilippe Reynes .get_link_ksettings = phy_ethtool_get_link_ksettings, 32579365fbf5SPhilippe Reynes .set_link_ksettings = phy_ethtool_set_link_ksettings, 32586016ba34SOleksij Rempel .self_test = net_selftest, 3259793fc096SFrank Li }; 3260793fc096SFrank Li 3261793fc096SFrank Li static void fec_enet_free_buffers(struct net_device *ndev) 3262793fc096SFrank Li { 3263793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 
3264793fc096SFrank Li unsigned int i; 32654d494cdcSFugang Duan struct fec_enet_priv_tx_q *txq; 32664d494cdcSFugang Duan struct fec_enet_priv_rx_q *rxq; 326759d0f746SFrank Li unsigned int q; 3268793fc096SFrank Li 326959d0f746SFrank Li for (q = 0; q < fep->num_rx_queues; q++) { 327059d0f746SFrank Li rxq = fep->rx_queue[q]; 327195698ff6SShenwei Wang for (i = 0; i < rxq->bd.ring_size; i++) 3272e38553bdSWei Fang page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false); 327395698ff6SShenwei Wang 32746970ef27SShenwei Wang for (i = 0; i < XDP_STATS_TOTAL; i++) 32756970ef27SShenwei Wang rxq->stats[i] = 0; 32766970ef27SShenwei Wang 327795698ff6SShenwei Wang if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) 327895698ff6SShenwei Wang xdp_rxq_info_unreg(&rxq->xdp_rxq); 327995698ff6SShenwei Wang page_pool_destroy(rxq->page_pool); 328095698ff6SShenwei Wang rxq->page_pool = NULL; 3281793fc096SFrank Li } 3282793fc096SFrank Li 328359d0f746SFrank Li for (q = 0; q < fep->num_tx_queues; q++) { 328459d0f746SFrank Li txq = fep->tx_queue[q]; 32857355f276STroy Kisky for (i = 0; i < txq->bd.ring_size; i++) { 32864d494cdcSFugang Duan kfree(txq->tx_bounce[i]); 32874d494cdcSFugang Duan txq->tx_bounce[i] = NULL; 328820f79739SWei Fang 3289af6f4791SWei Fang if (!txq->tx_buf[i].buf_p) { 3290af6f4791SWei Fang txq->tx_buf[i].type = FEC_TXBUF_T_SKB; 3291af6f4791SWei Fang continue; 329220f79739SWei Fang } 329320f79739SWei Fang 3294af6f4791SWei Fang if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { 3295af6f4791SWei Fang dev_kfree_skb(txq->tx_buf[i].buf_p); 3296af6f4791SWei Fang } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) { 3297af6f4791SWei Fang xdp_return_frame(txq->tx_buf[i].buf_p); 3298af6f4791SWei Fang } else { 3299af6f4791SWei Fang struct page *page = txq->tx_buf[i].buf_p; 3300af6f4791SWei Fang 3301af6f4791SWei Fang page_pool_put_page(page->pp, page, 0, false); 330220f79739SWei Fang } 3303af6f4791SWei Fang 3304af6f4791SWei Fang txq->tx_buf[i].buf_p = NULL; 3305af6f4791SWei Fang txq->tx_buf[i].type = FEC_TXBUF_T_SKB; 33068b7c9efaSRussell King } 3307793fc096SFrank Li } 330859d0f746SFrank Li } 3309793fc096SFrank Li 331059d0f746SFrank Li static void fec_enet_free_queue(struct net_device *ndev) 331159d0f746SFrank Li { 331259d0f746SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 331359d0f746SFrank Li int i; 331459d0f746SFrank Li struct fec_enet_priv_tx_q *txq; 331559d0f746SFrank Li 331659d0f746SFrank Li for (i = 0; i < fep->num_tx_queues; i++) 331759d0f746SFrank Li if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { 331859d0f746SFrank Li txq = fep->tx_queue[i]; 3319ffd32a92SChristoph Hellwig fec_dma_free(&fep->pdev->dev, 33207355f276STroy Kisky txq->bd.ring_size * TSO_HEADER_SIZE, 3321ffd32a92SChristoph Hellwig txq->tso_hdrs, txq->tso_hdrs_dma); 332259d0f746SFrank Li } 332359d0f746SFrank Li 332459d0f746SFrank Li for (i = 0; i < fep->num_rx_queues; i++) 332559d0f746SFrank Li kfree(fep->rx_queue[i]); 332659d0f746SFrank Li for (i = 0; i < fep->num_tx_queues; i++) 332759d0f746SFrank Li kfree(fep->tx_queue[i]); 332859d0f746SFrank Li } 332959d0f746SFrank Li 333059d0f746SFrank Li static int fec_enet_alloc_queue(struct net_device *ndev) 333159d0f746SFrank Li { 333259d0f746SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 333359d0f746SFrank Li int i; 333459d0f746SFrank Li int ret = 0; 333559d0f746SFrank Li struct fec_enet_priv_tx_q *txq; 333659d0f746SFrank Li 333759d0f746SFrank Li for (i = 0; i < fep->num_tx_queues; i++) { 333859d0f746SFrank Li txq = kzalloc(sizeof(*txq), GFP_KERNEL); 
333959d0f746SFrank Li if (!txq) { 334059d0f746SFrank Li ret = -ENOMEM; 334159d0f746SFrank Li goto alloc_failed; 334259d0f746SFrank Li } 334359d0f746SFrank Li 334459d0f746SFrank Li fep->tx_queue[i] = txq; 33457355f276STroy Kisky txq->bd.ring_size = TX_RING_SIZE; 33467355f276STroy Kisky fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; 334759d0f746SFrank Li 334859d0f746SFrank Li txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; 334956b3c6baSWei Fang txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS; 335059d0f746SFrank Li 3351ffd32a92SChristoph Hellwig txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev, 33527355f276STroy Kisky txq->bd.ring_size * TSO_HEADER_SIZE, 3353ffd32a92SChristoph Hellwig &txq->tso_hdrs_dma, GFP_KERNEL); 335459d0f746SFrank Li if (!txq->tso_hdrs) { 335559d0f746SFrank Li ret = -ENOMEM; 335659d0f746SFrank Li goto alloc_failed; 335759d0f746SFrank Li } 335859d0f746SFrank Li } 335959d0f746SFrank Li 336059d0f746SFrank Li for (i = 0; i < fep->num_rx_queues; i++) { 336159d0f746SFrank Li fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), 336259d0f746SFrank Li GFP_KERNEL); 336359d0f746SFrank Li if (!fep->rx_queue[i]) { 336459d0f746SFrank Li ret = -ENOMEM; 336559d0f746SFrank Li goto alloc_failed; 336659d0f746SFrank Li } 336759d0f746SFrank Li 33687355f276STroy Kisky fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; 33697355f276STroy Kisky fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; 337059d0f746SFrank Li } 337159d0f746SFrank Li return ret; 337259d0f746SFrank Li 337359d0f746SFrank Li alloc_failed: 337459d0f746SFrank Li fec_enet_free_queue(ndev); 337559d0f746SFrank Li return ret; 337659d0f746SFrank Li } 337759d0f746SFrank Li 337859d0f746SFrank Li static int 337959d0f746SFrank Li fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) 3380793fc096SFrank Li { 3381793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 33824d494cdcSFugang Duan struct fec_enet_priv_rx_q *rxq; 338395698ff6SShenwei Wang dma_addr_t phys_addr; 338495698ff6SShenwei Wang struct bufdesc *bdp; 338595698ff6SShenwei Wang struct page *page; 338695698ff6SShenwei Wang int i, err; 3387793fc096SFrank Li 338859d0f746SFrank Li rxq = fep->rx_queue[queue]; 33897355f276STroy Kisky bdp = rxq->bd.base; 3390793fc096SFrank Li 339195698ff6SShenwei Wang err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size); 339295698ff6SShenwei Wang if (err < 0) { 339395698ff6SShenwei Wang netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err); 339495698ff6SShenwei Wang return err; 3395d842a31fSDuan Fugang-B38611 } 3396730ee360SRussell King 339795698ff6SShenwei Wang for (i = 0; i < rxq->bd.ring_size; i++) { 339895698ff6SShenwei Wang page = page_pool_dev_alloc_pages(rxq->page_pool); 339995698ff6SShenwei Wang if (!page) 340095698ff6SShenwei Wang goto err_alloc; 340195698ff6SShenwei Wang 340295698ff6SShenwei Wang phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM; 340395698ff6SShenwei Wang bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); 340495698ff6SShenwei Wang 340595698ff6SShenwei Wang rxq->rx_skb_info[i].page = page; 340695698ff6SShenwei Wang rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM; 34075cfa3039SJohannes Berg bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); 3408793fc096SFrank Li 3409793fc096SFrank Li if (fep->bufdesc_ex) { 3410793fc096SFrank Li struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 34115cfa3039SJohannes Berg ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 3412793fc096SFrank Li } 3413793fc096SFrank Li 34147355f276STroy Kisky bdp = 
fec_enet_get_nextdesc(bdp, &rxq->bd); 3415793fc096SFrank Li } 3416793fc096SFrank Li 3417793fc096SFrank Li /* Set the last buffer to wrap. */ 34187355f276STroy Kisky bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); 34195cfa3039SJohannes Berg bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 342059d0f746SFrank Li return 0; 3421793fc096SFrank Li 342259d0f746SFrank Li err_alloc: 342359d0f746SFrank Li fec_enet_free_buffers(ndev); 342459d0f746SFrank Li return -ENOMEM; 342559d0f746SFrank Li } 342659d0f746SFrank Li 342759d0f746SFrank Li static int 342859d0f746SFrank Li fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) 342959d0f746SFrank Li { 343059d0f746SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 343159d0f746SFrank Li unsigned int i; 343259d0f746SFrank Li struct bufdesc *bdp; 343359d0f746SFrank Li struct fec_enet_priv_tx_q *txq; 343459d0f746SFrank Li 343559d0f746SFrank Li txq = fep->tx_queue[queue]; 34367355f276STroy Kisky bdp = txq->bd.base; 34377355f276STroy Kisky for (i = 0; i < txq->bd.ring_size; i++) { 34384d494cdcSFugang Duan txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); 34394d494cdcSFugang Duan if (!txq->tx_bounce[i]) 3440ffdce2ccSRussell King goto err_alloc; 3441793fc096SFrank Li 34425cfa3039SJohannes Berg bdp->cbd_sc = cpu_to_fec16(0); 34435cfa3039SJohannes Berg bdp->cbd_bufaddr = cpu_to_fec32(0); 3444793fc096SFrank Li 3445793fc096SFrank Li if (fep->bufdesc_ex) { 3446793fc096SFrank Li struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 34475cfa3039SJohannes Berg ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); 3448793fc096SFrank Li } 3449793fc096SFrank Li 34507355f276STroy Kisky bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 3451793fc096SFrank Li } 3452793fc096SFrank Li 3453793fc096SFrank Li /* Set the last buffer to wrap. 
*/ 34547355f276STroy Kisky bdp = fec_enet_get_prevdesc(bdp, &txq->bd); 34555cfa3039SJohannes Berg bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 3456793fc096SFrank Li 3457793fc096SFrank Li return 0; 3458ffdce2ccSRussell King 3459ffdce2ccSRussell King err_alloc: 3460ffdce2ccSRussell King fec_enet_free_buffers(ndev); 3461ffdce2ccSRussell King return -ENOMEM; 3462793fc096SFrank Li } 3463793fc096SFrank Li 346459d0f746SFrank Li static int fec_enet_alloc_buffers(struct net_device *ndev) 346559d0f746SFrank Li { 346659d0f746SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 346759d0f746SFrank Li unsigned int i; 346859d0f746SFrank Li 346959d0f746SFrank Li for (i = 0; i < fep->num_rx_queues; i++) 347059d0f746SFrank Li if (fec_enet_alloc_rxq_buffers(ndev, i)) 347159d0f746SFrank Li return -ENOMEM; 347259d0f746SFrank Li 347359d0f746SFrank Li for (i = 0; i < fep->num_tx_queues; i++) 347459d0f746SFrank Li if (fec_enet_alloc_txq_buffers(ndev, i)) 347559d0f746SFrank Li return -ENOMEM; 347659d0f746SFrank Li return 0; 347759d0f746SFrank Li } 347859d0f746SFrank Li 3479793fc096SFrank Li static int 3480793fc096SFrank Li fec_enet_open(struct net_device *ndev) 3481793fc096SFrank Li { 3482793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 3483793fc096SFrank Li int ret; 34841b0a83acSRichard Leitner bool reset_again; 3485793fc096SFrank Li 3486da875fa5SZhang Qilong ret = pm_runtime_resume_and_get(&fep->pdev->dev); 3487b0c6ce24SFabio Estevam if (ret < 0) 34888fff755eSAndrew Lunn return ret; 34898fff755eSAndrew Lunn 34905bbde4d2SNimrod Andy pinctrl_pm_select_default_state(&fep->pdev->dev); 3491e8fcfcd5SNimrod Andy ret = fec_enet_clk_enable(ndev, true); 3492e8fcfcd5SNimrod Andy if (ret) 34938fff755eSAndrew Lunn goto clk_enable; 3494e8fcfcd5SNimrod Andy 34951b0a83acSRichard Leitner /* During the first fec_enet_open call the PHY isn't probed at this 34961b0a83acSRichard Leitner * point. Therefore the phy_reset_after_clk_enable() call within 34971b0a83acSRichard Leitner * fec_enet_clk_enable() fails. As we need this reset in order to be 34981b0a83acSRichard Leitner * sure the PHY is working correctly we check if we need to reset again 34991b0a83acSRichard Leitner * later when the PHY is probed 35001b0a83acSRichard Leitner */ 35011b0a83acSRichard Leitner if (ndev->phydev && ndev->phydev->drv) 35021b0a83acSRichard Leitner reset_again = false; 35031b0a83acSRichard Leitner else 35041b0a83acSRichard Leitner reset_again = true; 35051b0a83acSRichard Leitner 3506793fc096SFrank Li /* I should reset the ring buffers here, but I don't yet know 3507793fc096SFrank Li * a simple way to do that. 3508793fc096SFrank Li */ 3509793fc096SFrank Li 3510793fc096SFrank Li ret = fec_enet_alloc_buffers(ndev); 3511793fc096SFrank Li if (ret) 3512681d2421SFabio Estevam goto err_enet_alloc; 3513793fc096SFrank Li 351455dd2753SNimrod Andy /* Init MAC prior to mii bus probe */ 351555dd2753SNimrod Andy fec_restart(ndev); 351655dd2753SNimrod Andy 35171b0a83acSRichard Leitner /* Call phy_reset_after_clk_enable() again if it failed during 35181b0a83acSRichard Leitner * phy_reset_after_clk_enable() before because the PHY wasn't probed. 
35191b0a83acSRichard Leitner */ 35201b0a83acSRichard Leitner if (reset_again) 352164a632daSMarek Vasut fec_enet_phy_reset_after_clk_enable(ndev); 35221b0a83acSRichard Leitner 35230da1ccbbSMarek Vasut /* Probe and connect to PHY when open the interface */ 35240da1ccbbSMarek Vasut ret = fec_enet_mii_probe(ndev); 35250da1ccbbSMarek Vasut if (ret) 35260da1ccbbSMarek Vasut goto err_enet_mii_probe; 3527681d2421SFabio Estevam 3528681d2421SFabio Estevam if (fep->quirks & FEC_QUIRK_ERR006687) 3529793fc096SFrank Li imx6q_cpuidle_fec_irqs_used(); 3530793fc096SFrank Li 35317d650df9SWei Fang if (fep->quirks & FEC_QUIRK_HAS_PMQOS) 35327d650df9SWei Fang cpu_latency_qos_add_request(&fep->pm_qos_req, 0); 35337d650df9SWei Fang 3534793fc096SFrank Li napi_enable(&fep->napi); 3535793fc096SFrank Li phy_start(ndev->phydev); 3536793fc096SFrank Li netif_tx_start_all_queues(ndev); 3537793fc096SFrank Li 3538793fc096SFrank Li device_set_wakeup_enable(&ndev->dev, fep->wol_flag & 3539793fc096SFrank Li FEC_WOL_FLAG_ENABLE); 3540793fc096SFrank Li 3541793fc096SFrank Li return 0; 3542793fc096SFrank Li 3543793fc096SFrank Li err_enet_mii_probe: 3544793fc096SFrank Li fec_enet_free_buffers(ndev); 3545793fc096SFrank Li err_enet_alloc: 3546793fc096SFrank Li fec_enet_clk_enable(ndev, false); 3547793fc096SFrank Li clk_enable: 3548793fc096SFrank Li pm_runtime_mark_last_busy(&fep->pdev->dev); 3549793fc096SFrank Li pm_runtime_put_autosuspend(&fep->pdev->dev); 3550793fc096SFrank Li pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3551793fc096SFrank Li return ret; 3552d76cfae9SRussell King } 3553d76cfae9SRussell King 355431a6de34SRussell King static int 3555793fc096SFrank Li fec_enet_close(struct net_device *ndev) 3556b49cd504SRussell King { 3557793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 355831a6de34SRussell King 355945f5c327SPhilippe Reynes phy_stop(ndev->phydev); 3560793fc096SFrank Li 3561793fc096SFrank Li if (netif_device_present(ndev)) { 3562793fc096SFrank Li napi_disable(&fep->napi); 3563793fc096SFrank Li netif_tx_disable(ndev); 3564793fc096SFrank Li fec_stop(ndev); 3565793fc096SFrank Li } 3566793fc096SFrank Li 356745f5c327SPhilippe Reynes phy_disconnect(ndev->phydev); 3568793fc096SFrank Li 356929380905SLucas Stach if (fep->quirks & FEC_QUIRK_ERR006687) 357029380905SLucas Stach imx6q_cpuidle_fec_irqs_unused(); 357129380905SLucas Stach 357280cca775SNikita Yushchenko fec_enet_update_ethtool_stats(ndev); 357380cca775SNikita Yushchenko 3574e8fcfcd5SNimrod Andy fec_enet_clk_enable(ndev, false); 35757d650df9SWei Fang if (fep->quirks & FEC_QUIRK_HAS_PMQOS) 35767d650df9SWei Fang cpu_latency_qos_remove_request(&fep->pm_qos_req); 35777d650df9SWei Fang 35785bbde4d2SNimrod Andy pinctrl_pm_select_sleep_state(&fep->pdev->dev); 35798fff755eSAndrew Lunn pm_runtime_mark_last_busy(&fep->pdev->dev); 35808fff755eSAndrew Lunn pm_runtime_put_autosuspend(&fep->pdev->dev); 35818fff755eSAndrew Lunn 3582793fc096SFrank Li fec_enet_free_buffers(ndev); 3583793fc096SFrank Li 3584793fc096SFrank Li return 0; 3585793fc096SFrank Li } 3586793fc096SFrank Li 3587793fc096SFrank Li /* Set or clear the multicast filter for this adaptor. 3588793fc096SFrank Li * Skeleton taken from sunlance driver. 3589793fc096SFrank Li * The CPM Ethernet implementation allows Multicast as well as individual 3590793fc096SFrank Li * MAC address filtering. Some of the drivers check to make sure it is 3591793fc096SFrank Li * a group multicast address, and discard those that are not. 
I guess I 3592793fc096SFrank Li * will do the same for now, but just remove the test if you want 3593793fc096SFrank Li * individual filtering as well (do the upper net layers want or support 3594793fc096SFrank Li * this kind of feature?). 3595793fc096SFrank Li */ 3596793fc096SFrank Li 35976176e89cSJiri Kosina #define FEC_HASH_BITS 6 /* #bits in hash */ 3598793fc096SFrank Li 3599793fc096SFrank Li static void set_multicast_list(struct net_device *ndev) 3600793fc096SFrank Li { 3601793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 3602793fc096SFrank Li struct netdev_hw_addr *ha; 360316f6e983SKrzysztof Kozlowski unsigned int crc, tmp; 3604793fc096SFrank Li unsigned char hash; 360501f8902bSRui Sousa unsigned int hash_high = 0, hash_low = 0; 3606793fc096SFrank Li 3607793fc096SFrank Li if (ndev->flags & IFF_PROMISC) { 3608793fc096SFrank Li tmp = readl(fep->hwp + FEC_R_CNTRL); 3609793fc096SFrank Li tmp |= 0x8; 3610793fc096SFrank Li writel(tmp, fep->hwp + FEC_R_CNTRL); 3611793fc096SFrank Li return; 3612793fc096SFrank Li } 3613793fc096SFrank Li 3614793fc096SFrank Li tmp = readl(fep->hwp + FEC_R_CNTRL); 3615793fc096SFrank Li tmp &= ~0x8; 3616793fc096SFrank Li writel(tmp, fep->hwp + FEC_R_CNTRL); 3617793fc096SFrank Li 3618793fc096SFrank Li if (ndev->flags & IFF_ALLMULTI) { 3619793fc096SFrank Li /* Catch all multicast addresses, so set the 3620793fc096SFrank Li * filter to all 1's 3621793fc096SFrank Li */ 3622793fc096SFrank Li writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 3623793fc096SFrank Li writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 3624793fc096SFrank Li 3625793fc096SFrank Li return; 3626793fc096SFrank Li } 3627793fc096SFrank Li 362801f8902bSRui Sousa /* Add the addresses in hash register */ 3629793fc096SFrank Li netdev_for_each_mc_addr(ha, ndev) { 3630793fc096SFrank Li /* calculate crc32 value of mac address */ 363116f6e983SKrzysztof Kozlowski crc = ether_crc_le(ndev->addr_len, ha->addr); 3632793fc096SFrank Li 36336176e89cSJiri Kosina /* only upper 6 bits (FEC_HASH_BITS) are used 3634981a0547SPeter Meerwald-Stadler * which point to specific bit in the hash registers 3635793fc096SFrank Li */ 36366176e89cSJiri Kosina hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; 3637793fc096SFrank Li 363801f8902bSRui Sousa if (hash > 31) 363901f8902bSRui Sousa hash_high |= 1 << (hash - 32); 364001f8902bSRui Sousa else 364101f8902bSRui Sousa hash_low |= 1 << hash; 3642793fc096SFrank Li } 364301f8902bSRui Sousa 364401f8902bSRui Sousa writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 364501f8902bSRui Sousa writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 3646793fc096SFrank Li } 3647793fc096SFrank Li 3648793fc096SFrank Li /* Set a MAC change in hardware. 
*/ 3649793fc096SFrank Li static int 3650793fc096SFrank Li fec_set_mac_address(struct net_device *ndev, void *p) 3651793fc096SFrank Li { 3652793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 3653793fc096SFrank Li struct sockaddr *addr = p; 3654793fc096SFrank Li 365544934facSLucas Stach if (addr) { 3656793fc096SFrank Li if (!is_valid_ether_addr(addr->sa_data)) 3657793fc096SFrank Li return -EADDRNOTAVAIL; 3658a05e4c0aSJakub Kicinski eth_hw_addr_set(ndev, addr->sa_data); 365944934facSLucas Stach } 3660793fc096SFrank Li 36619638d19eSNimrod Andy /* Add netif status check here to avoid system hang in below case: 36629638d19eSNimrod Andy * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; 36639638d19eSNimrod Andy * After ethx down, fec all clocks are gated off and then register 36649638d19eSNimrod Andy * access causes system hang. 36659638d19eSNimrod Andy */ 36669638d19eSNimrod Andy if (!netif_running(ndev)) 36679638d19eSNimrod Andy return 0; 36689638d19eSNimrod Andy 3669793fc096SFrank Li writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | 3670793fc096SFrank Li (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), 3671793fc096SFrank Li fep->hwp + FEC_ADDR_LOW); 3672793fc096SFrank Li writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), 3673793fc096SFrank Li fep->hwp + FEC_ADDR_HIGH); 3674793fc096SFrank Li return 0; 3675793fc096SFrank Li } 3676793fc096SFrank Li 36775bc26726SNimrod Andy static inline void fec_enet_set_netdev_features(struct net_device *netdev, 36784c09eed9SJim Baxter netdev_features_t features) 36794c09eed9SJim Baxter { 36804c09eed9SJim Baxter struct fec_enet_private *fep = netdev_priv(netdev); 36814c09eed9SJim Baxter netdev_features_t changed = features ^ netdev->features; 36824c09eed9SJim Baxter 36834c09eed9SJim Baxter netdev->features = features; 36844c09eed9SJim Baxter 36854c09eed9SJim Baxter /* Receive checksum has been changed */ 36864c09eed9SJim Baxter if (changed & NETIF_F_RXCSUM) { 36874c09eed9SJim Baxter if (features & NETIF_F_RXCSUM) 36884c09eed9SJim Baxter fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 36894c09eed9SJim Baxter else 36904c09eed9SJim Baxter fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; 36918506fa1dSRussell King } 36925bc26726SNimrod Andy } 36934c09eed9SJim Baxter 36945bc26726SNimrod Andy static int fec_set_features(struct net_device *netdev, 36955bc26726SNimrod Andy netdev_features_t features) 36965bc26726SNimrod Andy { 36975bc26726SNimrod Andy struct fec_enet_private *fep = netdev_priv(netdev); 36985bc26726SNimrod Andy netdev_features_t changed = features ^ netdev->features; 36995bc26726SNimrod Andy 37005b40f709SFabio Estevam if (netif_running(netdev) && changed & NETIF_F_RXCSUM) { 37015bc26726SNimrod Andy napi_disable(&fep->napi); 37025bc26726SNimrod Andy netif_tx_lock_bh(netdev); 37035bc26726SNimrod Andy fec_stop(netdev); 37045bc26726SNimrod Andy fec_enet_set_netdev_features(netdev, features); 3705ef83337dSRussell King fec_restart(netdev); 37064d494cdcSFugang Duan netif_tx_wake_all_queues(netdev); 37076af42d42SRussell King netif_tx_unlock_bh(netdev); 3708dbc64a8eSRussell King napi_enable(&fep->napi); 37095bc26726SNimrod Andy } else { 37105bc26726SNimrod Andy fec_enet_set_netdev_features(netdev, features); 37114c09eed9SJim Baxter } 37124c09eed9SJim Baxter 37134c09eed9SJim Baxter return 0; 37144c09eed9SJim Baxter } 37154c09eed9SJim Baxter 371652c4a1a8SFugang Duan static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, 371752c4a1a8SFugang Duan struct net_device *sb_dev) 371852c4a1a8SFugang Duan { 
371952c4a1a8SFugang Duan struct fec_enet_private *fep = netdev_priv(ndev); 37209fc95fe9SRadu Bulie u16 vlan_tag = 0; 372152c4a1a8SFugang Duan 372252c4a1a8SFugang Duan if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 372352c4a1a8SFugang Duan return netdev_pick_tx(ndev, skb, NULL); 372452c4a1a8SFugang Duan 37259fc95fe9SRadu Bulie /* VLAN is present in the payload.*/ 37269fc95fe9SRadu Bulie if (eth_type_vlan(skb->protocol)) { 37279fc95fe9SRadu Bulie struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb); 37289fc95fe9SRadu Bulie 37299fc95fe9SRadu Bulie vlan_tag = ntohs(vhdr->h_vlan_TCI); 37309fc95fe9SRadu Bulie /* VLAN is present in the skb but not yet pushed in the payload.*/ 37319fc95fe9SRadu Bulie } else if (skb_vlan_tag_present(skb)) { 37329fc95fe9SRadu Bulie vlan_tag = skb->vlan_tci; 37339fc95fe9SRadu Bulie } else { 373452c4a1a8SFugang Duan return vlan_tag; 37359fc95fe9SRadu Bulie } 373652c4a1a8SFugang Duan 373752c4a1a8SFugang Duan return fec_enet_vlan_pri_to_queue[vlan_tag >> 13]; 373852c4a1a8SFugang Duan } 373952c4a1a8SFugang Duan 37406d6b39f1SShenwei Wang static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf) 37416d6b39f1SShenwei Wang { 37426d6b39f1SShenwei Wang struct fec_enet_private *fep = netdev_priv(dev); 37436d6b39f1SShenwei Wang bool is_run = netif_running(dev); 37446d6b39f1SShenwei Wang struct bpf_prog *old_prog; 37456d6b39f1SShenwei Wang 37466d6b39f1SShenwei Wang switch (bpf->command) { 37476d6b39f1SShenwei Wang case XDP_SETUP_PROG: 37486c8fae0cSShenwei Wang /* No need to support the SoCs that require to 37496c8fae0cSShenwei Wang * do the frame swap because the performance wouldn't be 37506c8fae0cSShenwei Wang * better than the skb mode. 37516c8fae0cSShenwei Wang */ 37526c8fae0cSShenwei Wang if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 37536c8fae0cSShenwei Wang return -EOPNOTSUPP; 37546c8fae0cSShenwei Wang 3755be7ecbe7SWei Fang if (!bpf->prog) 3756be7ecbe7SWei Fang xdp_features_clear_redirect_target(dev); 3757be7ecbe7SWei Fang 37586d6b39f1SShenwei Wang if (is_run) { 37596d6b39f1SShenwei Wang napi_disable(&fep->napi); 37606d6b39f1SShenwei Wang netif_tx_disable(dev); 37616d6b39f1SShenwei Wang } 37626d6b39f1SShenwei Wang 37636d6b39f1SShenwei Wang old_prog = xchg(&fep->xdp_prog, bpf->prog); 3764be7ecbe7SWei Fang if (old_prog) 3765be7ecbe7SWei Fang bpf_prog_put(old_prog); 3766be7ecbe7SWei Fang 37676d6b39f1SShenwei Wang fec_restart(dev); 37686d6b39f1SShenwei Wang 37696d6b39f1SShenwei Wang if (is_run) { 37706d6b39f1SShenwei Wang napi_enable(&fep->napi); 37716d6b39f1SShenwei Wang netif_tx_start_all_queues(dev); 37726d6b39f1SShenwei Wang } 37736d6b39f1SShenwei Wang 3774be7ecbe7SWei Fang if (bpf->prog) 3775be7ecbe7SWei Fang xdp_features_set_redirect_target(dev, false); 37766d6b39f1SShenwei Wang 37776d6b39f1SShenwei Wang return 0; 37786d6b39f1SShenwei Wang 37796d6b39f1SShenwei Wang case XDP_SETUP_XSK_POOL: 37806d6b39f1SShenwei Wang return -EOPNOTSUPP; 37816d6b39f1SShenwei Wang 37826d6b39f1SShenwei Wang default: 37836d6b39f1SShenwei Wang return -EOPNOTSUPP; 37846d6b39f1SShenwei Wang } 37856d6b39f1SShenwei Wang } 37866d6b39f1SShenwei Wang 37876d6b39f1SShenwei Wang static int 37886c8fae0cSShenwei Wang fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index) 37896d6b39f1SShenwei Wang { 37906d6b39f1SShenwei Wang if (unlikely(index < 0)) 37916c8fae0cSShenwei Wang return 0; 37926d6b39f1SShenwei Wang 37936c8fae0cSShenwei Wang return (index % fep->num_tx_queues); 37946d6b39f1SShenwei Wang } 37956d6b39f1SShenwei Wang 37966d6b39f1SShenwei Wang static int fec_enet_txq_xmit_frame(struct 
fec_enet_private *fep, 37976d6b39f1SShenwei Wang struct fec_enet_priv_tx_q *txq, 3798af6f4791SWei Fang void *frame, u32 dma_sync_len, 3799af6f4791SWei Fang bool ndo_xmit) 38006d6b39f1SShenwei Wang { 38016d6b39f1SShenwei Wang unsigned int index, status, estatus; 3802bc638eabSWei Fang struct bufdesc *bdp; 38036d6b39f1SShenwei Wang dma_addr_t dma_addr; 38046d6b39f1SShenwei Wang int entries_free; 3805af6f4791SWei Fang u16 frame_len; 38066d6b39f1SShenwei Wang 38076d6b39f1SShenwei Wang entries_free = fec_enet_get_free_txdesc_num(txq); 38086d6b39f1SShenwei Wang if (entries_free < MAX_SKB_FRAGS + 1) { 380984a10947SWei Fang netdev_err_once(fep->netdev, "NOT enough BD for SG!\n"); 3810144470c8SShenwei Wang return -EBUSY; 38116d6b39f1SShenwei Wang } 38126d6b39f1SShenwei Wang 38136d6b39f1SShenwei Wang /* Fill in a Tx ring entry */ 38146d6b39f1SShenwei Wang bdp = txq->bd.cur; 38156d6b39f1SShenwei Wang status = fec16_to_cpu(bdp->cbd_sc); 38166d6b39f1SShenwei Wang status &= ~BD_ENET_TX_STATS; 38176d6b39f1SShenwei Wang 38186d6b39f1SShenwei Wang index = fec_enet_get_bd_index(bdp, &txq->bd); 38196d6b39f1SShenwei Wang 3820f601899eSWei Fang if (ndo_xmit) { 3821af6f4791SWei Fang struct xdp_frame *xdpf = frame; 3822af6f4791SWei Fang 3823af6f4791SWei Fang dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data, 3824af6f4791SWei Fang xdpf->len, DMA_TO_DEVICE); 38256d6b39f1SShenwei Wang if (dma_mapping_error(&fep->pdev->dev, dma_addr)) 3826144470c8SShenwei Wang return -ENOMEM; 38276d6b39f1SShenwei Wang 3828af6f4791SWei Fang frame_len = xdpf->len; 3829af6f4791SWei Fang txq->tx_buf[index].buf_p = xdpf; 3830f601899eSWei Fang txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO; 3831f601899eSWei Fang } else { 3832af6f4791SWei Fang struct xdp_buff *xdpb = frame; 3833af6f4791SWei Fang struct page *page; 3834f601899eSWei Fang 3835af6f4791SWei Fang page = virt_to_page(xdpb->data); 3836af6f4791SWei Fang dma_addr = page_pool_get_dma_addr(page) + 3837af6f4791SWei Fang (xdpb->data - xdpb->data_hard_start); 3838f601899eSWei Fang dma_sync_single_for_device(&fep->pdev->dev, dma_addr, 3839f601899eSWei Fang dma_sync_len, DMA_BIDIRECTIONAL); 3840af6f4791SWei Fang frame_len = xdpb->data_end - xdpb->data; 3841af6f4791SWei Fang txq->tx_buf[index].buf_p = page; 3842f601899eSWei Fang txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX; 3843f601899eSWei Fang } 3844f601899eSWei Fang 38456d6b39f1SShenwei Wang status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 38466d6b39f1SShenwei Wang if (fep->bufdesc_ex) 38476d6b39f1SShenwei Wang estatus = BD_ENET_TX_INT; 38486d6b39f1SShenwei Wang 38496d6b39f1SShenwei Wang bdp->cbd_bufaddr = cpu_to_fec32(dma_addr); 3850af6f4791SWei Fang bdp->cbd_datlen = cpu_to_fec16(frame_len); 38516d6b39f1SShenwei Wang 38526d6b39f1SShenwei Wang if (fep->bufdesc_ex) { 38536d6b39f1SShenwei Wang struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 38546d6b39f1SShenwei Wang 38556d6b39f1SShenwei Wang if (fep->quirks & FEC_QUIRK_HAS_AVB) 38566d6b39f1SShenwei Wang estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 38576d6b39f1SShenwei Wang 38586d6b39f1SShenwei Wang ebdp->cbd_bdu = 0; 38596d6b39f1SShenwei Wang ebdp->cbd_esc = cpu_to_fec32(estatus); 38606d6b39f1SShenwei Wang } 38616d6b39f1SShenwei Wang 38629025944fSShenwei Wang /* Make sure the updates to rest of the descriptor are performed before 38639025944fSShenwei Wang * transferring ownership. 38649025944fSShenwei Wang */ 38659025944fSShenwei Wang dma_wmb(); 38669025944fSShenwei Wang 38676d6b39f1SShenwei Wang /* Send it on its way. 
Tell FEC it's ready, interrupt when done, 38686d6b39f1SShenwei Wang * it's the last BD of the frame, and to put the CRC on the end. 38696d6b39f1SShenwei Wang */ 38706d6b39f1SShenwei Wang status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); 38716d6b39f1SShenwei Wang bdp->cbd_sc = cpu_to_fec16(status); 38726d6b39f1SShenwei Wang 38736d6b39f1SShenwei Wang /* If this was the last BD in the ring, start at the beginning again. */ 3874bc638eabSWei Fang bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 38756d6b39f1SShenwei Wang 38769025944fSShenwei Wang /* Make sure the update to bdp are performed before txq->bd.cur. */ 38779025944fSShenwei Wang dma_wmb(); 38789025944fSShenwei Wang 38796d6b39f1SShenwei Wang txq->bd.cur = bdp; 38806d6b39f1SShenwei Wang 38819025944fSShenwei Wang /* Trigger transmission start */ 38829025944fSShenwei Wang writel(0, txq->bd.reg_desc_active); 38839025944fSShenwei Wang 38846d6b39f1SShenwei Wang return 0; 38856d6b39f1SShenwei Wang } 38866d6b39f1SShenwei Wang 3887f601899eSWei Fang static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, 3888f601899eSWei Fang int cpu, struct xdp_buff *xdp, 3889f601899eSWei Fang u32 dma_sync_len) 3890f601899eSWei Fang { 3891f601899eSWei Fang struct fec_enet_priv_tx_q *txq; 3892f601899eSWei Fang struct netdev_queue *nq; 3893f601899eSWei Fang int queue, ret; 3894f601899eSWei Fang 3895f601899eSWei Fang queue = fec_enet_xdp_get_tx_queue(fep, cpu); 3896f601899eSWei Fang txq = fep->tx_queue[queue]; 3897f601899eSWei Fang nq = netdev_get_tx_queue(fep->netdev, queue); 3898f601899eSWei Fang 3899f601899eSWei Fang __netif_tx_lock(nq, cpu); 3900f601899eSWei Fang 3901f601899eSWei Fang /* Avoid tx timeout as XDP shares the queue with kernel stack */ 3902f601899eSWei Fang txq_trans_cond_update(nq); 3903af6f4791SWei Fang ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false); 3904f601899eSWei Fang 3905f601899eSWei Fang __netif_tx_unlock(nq); 3906f601899eSWei Fang 3907f601899eSWei Fang return ret; 3908f601899eSWei Fang } 3909f601899eSWei Fang 39106d6b39f1SShenwei Wang static int fec_enet_xdp_xmit(struct net_device *dev, 39116d6b39f1SShenwei Wang int num_frames, 39126d6b39f1SShenwei Wang struct xdp_frame **frames, 39136d6b39f1SShenwei Wang u32 flags) 39146d6b39f1SShenwei Wang { 39156d6b39f1SShenwei Wang struct fec_enet_private *fep = netdev_priv(dev); 39166d6b39f1SShenwei Wang struct fec_enet_priv_tx_q *txq; 39176d6b39f1SShenwei Wang int cpu = smp_processor_id(); 391826312c68SShenwei Wang unsigned int sent_frames = 0; 39196d6b39f1SShenwei Wang struct netdev_queue *nq; 39206d6b39f1SShenwei Wang unsigned int queue; 39216d6b39f1SShenwei Wang int i; 39226d6b39f1SShenwei Wang 39236d6b39f1SShenwei Wang queue = fec_enet_xdp_get_tx_queue(fep, cpu); 39246d6b39f1SShenwei Wang txq = fep->tx_queue[queue]; 39256d6b39f1SShenwei Wang nq = netdev_get_tx_queue(fep->netdev, queue); 39266d6b39f1SShenwei Wang 39276d6b39f1SShenwei Wang __netif_tx_lock(nq, cpu); 39286d6b39f1SShenwei Wang 3929bb7a0156SWei Fang /* Avoid tx timeout as XDP shares the queue with kernel stack */ 3930bb7a0156SWei Fang txq_trans_cond_update(nq); 393126312c68SShenwei Wang for (i = 0; i < num_frames; i++) { 3932f601899eSWei Fang if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0) 393326312c68SShenwei Wang break; 393426312c68SShenwei Wang sent_frames++; 393526312c68SShenwei Wang } 39366d6b39f1SShenwei Wang 39376d6b39f1SShenwei Wang __netif_tx_unlock(nq); 39386d6b39f1SShenwei Wang 393926312c68SShenwei Wang return sent_frames; 39406d6b39f1SShenwei Wang } 39416d6b39f1SShenwei Wang 
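/*
 * Recap of the XDP transmit path above: both the XDP_TX action
 * (fec_enet_xdp_tx_xmit) and the ndo_xdp_xmit handler (fec_enet_xdp_xmit)
 * pick a transmit ring with fec_enet_xdp_get_tx_queue(), which simply
 * folds the current CPU id onto the available rings.  As a worked
 * example, assuming fep->num_tx_queues is 3, CPU ids 0..5 select queues
 * 0, 1, 2, 0, 1, 2, and a negative index falls back to queue 0.  Each
 * frame is then placed on that ring by fec_enet_txq_xmit_frame(), which
 * returns -EBUSY when fewer than MAX_SKB_FRAGS + 1 descriptors are free.
 */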
3942ef5eb9c5SVladimir Oltean static int fec_hwtstamp_get(struct net_device *ndev, 3943ef5eb9c5SVladimir Oltean struct kernel_hwtstamp_config *config) 3944ef5eb9c5SVladimir Oltean { 3945ef5eb9c5SVladimir Oltean struct fec_enet_private *fep = netdev_priv(ndev); 3946ef5eb9c5SVladimir Oltean 3947ef5eb9c5SVladimir Oltean if (!netif_running(ndev)) 3948ef5eb9c5SVladimir Oltean return -EINVAL; 3949ef5eb9c5SVladimir Oltean 3950ef5eb9c5SVladimir Oltean if (!fep->bufdesc_ex) 3951ef5eb9c5SVladimir Oltean return -EOPNOTSUPP; 3952ef5eb9c5SVladimir Oltean 3953ef5eb9c5SVladimir Oltean fec_ptp_get(ndev, config); 3954ef5eb9c5SVladimir Oltean 3955ef5eb9c5SVladimir Oltean return 0; 3956ef5eb9c5SVladimir Oltean } 3957ef5eb9c5SVladimir Oltean 3958ef5eb9c5SVladimir Oltean static int fec_hwtstamp_set(struct net_device *ndev, 3959ef5eb9c5SVladimir Oltean struct kernel_hwtstamp_config *config, 3960ef5eb9c5SVladimir Oltean struct netlink_ext_ack *extack) 3961ef5eb9c5SVladimir Oltean { 3962ef5eb9c5SVladimir Oltean struct fec_enet_private *fep = netdev_priv(ndev); 3963ef5eb9c5SVladimir Oltean 3964ef5eb9c5SVladimir Oltean if (!netif_running(ndev)) 3965ef5eb9c5SVladimir Oltean return -EINVAL; 3966ef5eb9c5SVladimir Oltean 3967ef5eb9c5SVladimir Oltean if (!fep->bufdesc_ex) 3968ef5eb9c5SVladimir Oltean return -EOPNOTSUPP; 3969ef5eb9c5SVladimir Oltean 3970ef5eb9c5SVladimir Oltean return fec_ptp_set(ndev, config, extack); 3971ef5eb9c5SVladimir Oltean } 3972ef5eb9c5SVladimir Oltean 3973793fc096SFrank Li static const struct net_device_ops fec_netdev_ops = { 3974793fc096SFrank Li .ndo_open = fec_enet_open, 3975793fc096SFrank Li .ndo_stop = fec_enet_close, 3976793fc096SFrank Li .ndo_start_xmit = fec_enet_start_xmit, 397752c4a1a8SFugang Duan .ndo_select_queue = fec_enet_select_queue, 3978793fc096SFrank Li .ndo_set_rx_mode = set_multicast_list, 3979793fc096SFrank Li .ndo_validate_addr = eth_validate_addr, 3980793fc096SFrank Li .ndo_tx_timeout = fec_timeout, 3981793fc096SFrank Li .ndo_set_mac_address = fec_set_mac_address, 3982ef5eb9c5SVladimir Oltean .ndo_eth_ioctl = phy_do_ioctl_running, 39834c09eed9SJim Baxter .ndo_set_features = fec_set_features, 39846d6b39f1SShenwei Wang .ndo_bpf = fec_enet_bpf, 39856d6b39f1SShenwei Wang .ndo_xdp_xmit = fec_enet_xdp_xmit, 3986ef5eb9c5SVladimir Oltean .ndo_hwtstamp_get = fec_hwtstamp_get, 3987ef5eb9c5SVladimir Oltean .ndo_hwtstamp_set = fec_hwtstamp_set, 3988793fc096SFrank Li }; 3989793fc096SFrank Li 399053bb20d1STroy Kisky static const unsigned short offset_des_active_rxq[] = { 399153bb20d1STroy Kisky FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2 399253bb20d1STroy Kisky }; 399353bb20d1STroy Kisky 399453bb20d1STroy Kisky static const unsigned short offset_des_active_txq[] = { 399553bb20d1STroy Kisky FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2 399653bb20d1STroy Kisky }; 399753bb20d1STroy Kisky 3998793fc096SFrank Li /* 3999793fc096SFrank Li * XXX: We need to clean up on failure exits here. 4000793fc096SFrank Li * 4001793fc096SFrank Li */ 4002793fc096SFrank Li static int fec_enet_init(struct net_device *ndev) 4003793fc096SFrank Li { 4004793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 4005793fc096SFrank Li struct bufdesc *cbd_base; 40064d494cdcSFugang Duan dma_addr_t bd_dma; 400755d0218aSNimrod Andy int bd_size; 400859d0f746SFrank Li unsigned int i; 40097355f276STroy Kisky unsigned dsize = fep->bufdesc_ex ? 
sizeof(struct bufdesc_ex) : 40107355f276STroy Kisky sizeof(struct bufdesc); 40117355f276STroy Kisky unsigned dsize_log2 = __fls(dsize); 4012453e9dc4SStefan Agner int ret; 401355d0218aSNimrod Andy 40147355f276STroy Kisky WARN_ON(dsize != (1 << dsize_log2)); 40153f1dcc6aSLucas Stach #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) 401641ef84ceSFugang Duan fep->rx_align = 0xf; 401741ef84ceSFugang Duan fep->tx_align = 0xf; 401841ef84ceSFugang Duan #else 401941ef84ceSFugang Duan fep->rx_align = 0x3; 402041ef84ceSFugang Duan fep->tx_align = 0x3; 402141ef84ceSFugang Duan #endif 4022df727d45SRasmus Villemoes fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT; 4023df727d45SRasmus Villemoes fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT; 4024df727d45SRasmus Villemoes fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT; 4025df727d45SRasmus Villemoes fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT; 402641ef84ceSFugang Duan 4027453e9dc4SStefan Agner /* Check mask of the streaming and coherent API */ 4028453e9dc4SStefan Agner ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32)); 4029453e9dc4SStefan Agner if (ret < 0) { 4030453e9dc4SStefan Agner dev_warn(&fep->pdev->dev, "No suitable DMA available\n"); 4031453e9dc4SStefan Agner return ret; 4032453e9dc4SStefan Agner } 4033453e9dc4SStefan Agner 4034619fee9eSFugang Duan ret = fec_enet_alloc_queue(ndev); 4035619fee9eSFugang Duan if (ret) 4036619fee9eSFugang Duan return ret; 403779f33912SNimrod Andy 40387355f276STroy Kisky bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; 4039793fc096SFrank Li 4040793fc096SFrank Li /* Allocate memory for buffer descriptors. */ 4041ffd32a92SChristoph Hellwig cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma, 4042793fc096SFrank Li GFP_KERNEL); 40434d494cdcSFugang Duan if (!cbd_base) { 4044619fee9eSFugang Duan ret = -ENOMEM; 4045619fee9eSFugang Duan goto free_queue_mem; 40464d494cdcSFugang Duan } 4047793fc096SFrank Li 4048793fc096SFrank Li /* Get the Ethernet address */ 4049052fcc45SFugang Duan ret = fec_get_mac(ndev); 4050052fcc45SFugang Duan if (ret) 4051052fcc45SFugang Duan goto free_queue_mem; 4052052fcc45SFugang Duan 4053793fc096SFrank Li /* Set receive and transmit descriptor base. 
*/ 405459d0f746SFrank Li for (i = 0; i < fep->num_rx_queues; i++) { 40557355f276STroy Kisky struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; 40567355f276STroy Kisky unsigned size = dsize * rxq->bd.ring_size; 40577355f276STroy Kisky 40587355f276STroy Kisky rxq->bd.qid = i; 40597355f276STroy Kisky rxq->bd.base = cbd_base; 40607355f276STroy Kisky rxq->bd.cur = cbd_base; 40617355f276STroy Kisky rxq->bd.dma = bd_dma; 40627355f276STroy Kisky rxq->bd.dsize = dsize; 40637355f276STroy Kisky rxq->bd.dsize_log2 = dsize_log2; 406453bb20d1STroy Kisky rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; 40657355f276STroy Kisky bd_dma += size; 40667355f276STroy Kisky cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 40677355f276STroy Kisky rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 406859d0f746SFrank Li } 406959d0f746SFrank Li 407059d0f746SFrank Li for (i = 0; i < fep->num_tx_queues; i++) { 40717355f276STroy Kisky struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; 40727355f276STroy Kisky unsigned size = dsize * txq->bd.ring_size; 40737355f276STroy Kisky 40747355f276STroy Kisky txq->bd.qid = i; 40757355f276STroy Kisky txq->bd.base = cbd_base; 40767355f276STroy Kisky txq->bd.cur = cbd_base; 40777355f276STroy Kisky txq->bd.dma = bd_dma; 40787355f276STroy Kisky txq->bd.dsize = dsize; 40797355f276STroy Kisky txq->bd.dsize_log2 = dsize_log2; 408053bb20d1STroy Kisky txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; 40817355f276STroy Kisky bd_dma += size; 40827355f276STroy Kisky cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 40837355f276STroy Kisky txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 408459d0f746SFrank Li } 40854d494cdcSFugang Duan 4086793fc096SFrank Li 4087793fc096SFrank Li /* The FEC Ethernet specific entries in the device structure */ 4088793fc096SFrank Li ndev->watchdog_timeo = TX_TIMEOUT; 4089793fc096SFrank Li ndev->netdev_ops = &fec_netdev_ops; 4090793fc096SFrank Li ndev->ethtool_ops = &fec_enet_ethtool_ops; 4091793fc096SFrank Li 4092793fc096SFrank Li writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 4093b48b89f9SJakub Kicinski netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi); 4094793fc096SFrank Li 40956b7e4008SLothar Waßmann if (fep->quirks & FEC_QUIRK_HAS_VLAN) 4096cdffcf1bSJim Baxter /* enable hw VLAN support */ 4097cdffcf1bSJim Baxter ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 4098cdffcf1bSJim Baxter 40996b7e4008SLothar Waßmann if (fep->quirks & FEC_QUIRK_HAS_CSUM) { 4100ee8b7a11SJakub Kicinski netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS); 410179f33912SNimrod Andy 41024c09eed9SJim Baxter /* enable hw accelerator */ 41034c09eed9SJim Baxter ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 410479f33912SNimrod Andy | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); 41054c09eed9SJim Baxter fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 410648496255SShawn Guo } 41074c09eed9SJim Baxter 4108471ff445SJoakim Zhang if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { 410941ef84ceSFugang Duan fep->tx_align = 0; 411041ef84ceSFugang Duan fep->rx_align = 0x3f; 411141ef84ceSFugang Duan } 411241ef84ceSFugang Duan 411309d1e541SNimrod Andy ndev->hw_features = ndev->features; 411409d1e541SNimrod Andy 4115e4ac7cc6SWei Fang if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME)) 4116e4ac7cc6SWei Fang ndev->xdp_features = NETDEV_XDP_ACT_BASIC | 4117be7ecbe7SWei Fang NETDEV_XDP_ACT_REDIRECT; 4118e4ac7cc6SWei Fang 4119ef83337dSRussell King fec_restart(ndev); 4120793fc096SFrank Li 41212b30842bSAndrew Lunn if (fep->quirks & FEC_QUIRK_MIB_CLEAR) 
41222b30842bSAndrew Lunn fec_enet_clear_ethtool_stats(ndev); 41232b30842bSAndrew Lunn else 412480cca775SNikita Yushchenko fec_enet_update_ethtool_stats(ndev); 412580cca775SNikita Yushchenko 4126793fc096SFrank Li return 0; 4127619fee9eSFugang Duan 4128619fee9eSFugang Duan free_queue_mem: 4129619fee9eSFugang Duan fec_enet_free_queue(ndev); 4130619fee9eSFugang Duan return ret; 4131793fc096SFrank Li } 4132793fc096SFrank Li 4133*bf0497f5SXiaolei Wang static void fec_enet_deinit(struct net_device *ndev) 4134*bf0497f5SXiaolei Wang { 4135*bf0497f5SXiaolei Wang struct fec_enet_private *fep = netdev_priv(ndev); 4136*bf0497f5SXiaolei Wang 4137*bf0497f5SXiaolei Wang netif_napi_del(&fep->napi); 4138*bf0497f5SXiaolei Wang fec_enet_free_queue(ndev); 4139*bf0497f5SXiaolei Wang } 4140*bf0497f5SXiaolei Wang 4141793fc096SFrank Li #ifdef CONFIG_OF 41429269e556SFugang Duan static int fec_reset_phy(struct platform_device *pdev) 4143793fc096SFrank Li { 4144468ba54bSArnd Bergmann struct gpio_desc *phy_reset; 4145159a0760SQuentin Schulz int msec = 1, phy_post_delay = 0; 4146793fc096SFrank Li struct device_node *np = pdev->dev.of_node; 4147468ba54bSArnd Bergmann int err; 4148793fc096SFrank Li 4149793fc096SFrank Li if (!np) 41509269e556SFugang Duan return 0; 4151793fc096SFrank Li 415261e04ccbSFugang Duan err = of_property_read_u32(np, "phy-reset-duration", &msec); 4153793fc096SFrank Li /* A sane reset duration should not be longer than 1s */ 415461e04ccbSFugang Duan if (!err && msec > 1000) 4155793fc096SFrank Li msec = 1; 4156793fc096SFrank Li 4157159a0760SQuentin Schulz err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay); 4158159a0760SQuentin Schulz /* valid reset duration should be less than 1s */ 4159159a0760SQuentin Schulz if (!err && phy_post_delay > 1000) 4160159a0760SQuentin Schulz return -EINVAL; 4161159a0760SQuentin Schulz 4162d7b5e5ddSDmitry Torokhov phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset", 41630719bc3aSDmitry Torokhov GPIOD_OUT_HIGH); 4164468ba54bSArnd Bergmann if (IS_ERR(phy_reset)) 4165468ba54bSArnd Bergmann return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset), 4166468ba54bSArnd Bergmann "failed to get phy-reset-gpios\n"); 4167eb37c563SStefan Wahren 4168d7b5e5ddSDmitry Torokhov if (!phy_reset) 4169d7b5e5ddSDmitry Torokhov return 0; 4170d7b5e5ddSDmitry Torokhov 4171eb37c563SStefan Wahren if (msec > 20) 4172793fc096SFrank Li msleep(msec); 4173eb37c563SStefan Wahren else 4174eb37c563SStefan Wahren usleep_range(msec * 1000, msec * 1000 + 1000); 4175eb37c563SStefan Wahren 41760719bc3aSDmitry Torokhov gpiod_set_value_cansleep(phy_reset, 0); 41779269e556SFugang Duan 4178159a0760SQuentin Schulz if (!phy_post_delay) 4179159a0760SQuentin Schulz return 0; 4180159a0760SQuentin Schulz 4181159a0760SQuentin Schulz if (phy_post_delay > 20) 4182159a0760SQuentin Schulz msleep(phy_post_delay); 4183159a0760SQuentin Schulz else 4184159a0760SQuentin Schulz usleep_range(phy_post_delay * 1000, 4185159a0760SQuentin Schulz phy_post_delay * 1000 + 1000); 4186159a0760SQuentin Schulz 41879269e556SFugang Duan return 0; 4188793fc096SFrank Li } 4189793fc096SFrank Li #else /* CONFIG_OF */ 41909269e556SFugang Duan static int fec_reset_phy(struct platform_device *pdev) 4191793fc096SFrank Li { 4192793fc096SFrank Li /* 4193793fc096SFrank Li * In case of platform probe, the reset has been done 4194793fc096SFrank Li * by machine code. 
4195793fc096SFrank Li */ 41969269e556SFugang Duan return 0; 4197793fc096SFrank Li } 4198793fc096SFrank Li #endif /* CONFIG_OF */ 4199793fc096SFrank Li 42009fc095f1SFugang Duan static void 42019fc095f1SFugang Duan fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) 42029fc095f1SFugang Duan { 42039fc095f1SFugang Duan struct device_node *np = pdev->dev.of_node; 42049fc095f1SFugang Duan 42059fc095f1SFugang Duan *num_tx = *num_rx = 1; 42069fc095f1SFugang Duan 42079fc095f1SFugang Duan if (!np || !of_device_is_available(np)) 42089fc095f1SFugang Duan return; 42099fc095f1SFugang Duan 42109fc095f1SFugang Duan /* parse the num of tx and rx queues */ 421173b1c90dSSaurabh Sengar of_property_read_u32(np, "fsl,num-tx-queues", num_tx); 4212b7bd75cfSFrank Li 421373b1c90dSSaurabh Sengar of_property_read_u32(np, "fsl,num-rx-queues", num_rx); 42149fc095f1SFugang Duan 42159fc095f1SFugang Duan if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { 4216b7bd75cfSFrank Li dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", 42179fc095f1SFugang Duan *num_tx); 42189fc095f1SFugang Duan *num_tx = 1; 42199fc095f1SFugang Duan return; 42209fc095f1SFugang Duan } 42219fc095f1SFugang Duan 42229fc095f1SFugang Duan if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { 4223b7bd75cfSFrank Li dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", 42249fc095f1SFugang Duan *num_rx); 42259fc095f1SFugang Duan *num_rx = 1; 42269fc095f1SFugang Duan return; 42279fc095f1SFugang Duan } 42289fc095f1SFugang Duan 42299fc095f1SFugang Duan } 42309fc095f1SFugang Duan 42314ad1ceecSTroy Kisky static int fec_enet_get_irq_cnt(struct platform_device *pdev) 42324ad1ceecSTroy Kisky { 42334ad1ceecSTroy Kisky int irq_cnt = platform_irq_count(pdev); 42344ad1ceecSTroy Kisky 42354ad1ceecSTroy Kisky if (irq_cnt > FEC_IRQ_NUM) 42364ad1ceecSTroy Kisky irq_cnt = FEC_IRQ_NUM; /* last for pps */ 42374ad1ceecSTroy Kisky else if (irq_cnt == 2) 42384ad1ceecSTroy Kisky irq_cnt = 1; /* last for pps */ 42394ad1ceecSTroy Kisky else if (irq_cnt <= 0) 42404ad1ceecSTroy Kisky irq_cnt = 1; /* At least 1 irq is needed */ 42414ad1ceecSTroy Kisky return irq_cnt; 42424ad1ceecSTroy Kisky } 42434ad1ceecSTroy Kisky 4244b7cdc965SJoakim Zhang static void fec_enet_get_wakeup_irq(struct platform_device *pdev) 4245b7cdc965SJoakim Zhang { 4246b7cdc965SJoakim Zhang struct net_device *ndev = platform_get_drvdata(pdev); 4247b7cdc965SJoakim Zhang struct fec_enet_private *fep = netdev_priv(ndev); 4248b7cdc965SJoakim Zhang 4249b7cdc965SJoakim Zhang if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2) 4250b7cdc965SJoakim Zhang fep->wake_irq = fep->irq[2]; 4251b7cdc965SJoakim Zhang else 4252b7cdc965SJoakim Zhang fep->wake_irq = fep->irq[0]; 4253b7cdc965SJoakim Zhang } 4254b7cdc965SJoakim Zhang 4255da722186SMartin Fuzzey static int fec_enet_init_stop_mode(struct fec_enet_private *fep, 4256da722186SMartin Fuzzey struct device_node *np) 4257da722186SMartin Fuzzey { 4258da722186SMartin Fuzzey struct device_node *gpr_np; 42598a448bf8SFugang Duan u32 out_val[3]; 4260da722186SMartin Fuzzey int ret = 0; 4261da722186SMartin Fuzzey 42628a448bf8SFugang Duan gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0); 4263da722186SMartin Fuzzey if (!gpr_np) 4264da722186SMartin Fuzzey return 0; 4265da722186SMartin Fuzzey 42668a448bf8SFugang Duan ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val, 42678a448bf8SFugang Duan ARRAY_SIZE(out_val)); 42688a448bf8SFugang Duan if (ret) { 42698a448bf8SFugang Duan dev_dbg(&fep->pdev->dev, "no stop mode property\n"); 4270d2b52ec0SYang 
Yingliang goto out; 42718a448bf8SFugang Duan } 42728a448bf8SFugang Duan 4273da722186SMartin Fuzzey fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); 4274da722186SMartin Fuzzey if (IS_ERR(fep->stop_gpr.gpr)) { 4275da722186SMartin Fuzzey dev_err(&fep->pdev->dev, "could not find gpr regmap\n"); 4276da722186SMartin Fuzzey ret = PTR_ERR(fep->stop_gpr.gpr); 4277da722186SMartin Fuzzey fep->stop_gpr.gpr = NULL; 4278da722186SMartin Fuzzey goto out; 4279da722186SMartin Fuzzey } 4280da722186SMartin Fuzzey 42818a448bf8SFugang Duan fep->stop_gpr.reg = out_val[1]; 42828a448bf8SFugang Duan fep->stop_gpr.bit = out_val[2]; 4283da722186SMartin Fuzzey 4284da722186SMartin Fuzzey out: 4285da722186SMartin Fuzzey of_node_put(gpr_np); 4286da722186SMartin Fuzzey 4287da722186SMartin Fuzzey return ret; 4288da722186SMartin Fuzzey } 4289da722186SMartin Fuzzey 4290793fc096SFrank Li static int 4291793fc096SFrank Li fec_probe(struct platform_device *pdev) 4292793fc096SFrank Li { 4293793fc096SFrank Li struct fec_enet_private *fep; 4294793fc096SFrank Li struct fec_platform_data *pdata; 42950c65b2b9SAndrew Lunn phy_interface_t interface; 4296793fc096SFrank Li struct net_device *ndev; 4297793fc096SFrank Li int i, irq, ret = 0; 4298793fc096SFrank Li static int dev_id; 4299407066f8SUwe Kleine-König struct device_node *np = pdev->dev.of_node, *phy_node; 4300b7bd75cfSFrank Li int num_tx_qs; 4301b7bd75cfSFrank Li int num_rx_qs; 43024ad1ceecSTroy Kisky char irq_name[8]; 43034ad1ceecSTroy Kisky int irq_cnt; 4304b0377116SRob Herring const struct fec_devinfo *dev_info; 4305793fc096SFrank Li 43069fc095f1SFugang Duan fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); 43079fc095f1SFugang Duan 4308793fc096SFrank Li /* Init network device */ 430980cca775SNikita Yushchenko ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) + 4310f85de666SNikita Yushchenko FEC_STATS_SIZE, num_tx_qs, num_rx_qs); 4311793fc096SFrank Li if (!ndev) 4312793fc096SFrank Li return -ENOMEM; 4313793fc096SFrank Li 4314793fc096SFrank Li SET_NETDEV_DEV(ndev, &pdev->dev); 4315793fc096SFrank Li 4316793fc096SFrank Li /* setup board info structure */ 4317793fc096SFrank Li fep = netdev_priv(ndev); 4318793fc096SFrank Li 4319b0377116SRob Herring dev_info = device_get_match_data(&pdev->dev); 4320b0377116SRob Herring if (!dev_info) 4321b0377116SRob Herring dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data; 4322da722186SMartin Fuzzey if (dev_info) 4323da722186SMartin Fuzzey fep->quirks = dev_info->quirks; 43246b7e4008SLothar Waßmann 43250c818594SHubert Feurstein fep->netdev = ndev; 43269fc095f1SFugang Duan fep->num_rx_queues = num_rx_qs; 43279fc095f1SFugang Duan fep->num_tx_queues = num_tx_qs; 43289fc095f1SFugang Duan 4329d1391930SGuenter Roeck #if !defined(CONFIG_M5272) 4330793fc096SFrank Li /* default enable pause frame auto negotiation */ 43316b7e4008SLothar Waßmann if (fep->quirks & FEC_QUIRK_HAS_GBIT) 4332793fc096SFrank Li fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 4333d1391930SGuenter Roeck #endif 4334793fc096SFrank Li 43355bbde4d2SNimrod Andy /* Select default pin state */ 43365bbde4d2SNimrod Andy pinctrl_pm_select_default_state(&pdev->dev); 43375bbde4d2SNimrod Andy 43384f830a5aSYueHaibing fep->hwp = devm_platform_ioremap_resource(pdev, 0); 4339941e173aSTushar Behera if (IS_ERR(fep->hwp)) { 4340941e173aSTushar Behera ret = PTR_ERR(fep->hwp); 4341941e173aSTushar Behera goto failed_ioremap; 4342941e173aSTushar Behera } 4343941e173aSTushar Behera 4344793fc096SFrank Li fep->pdev = pdev; 4345793fc096SFrank Li fep->dev_id = dev_id++; 4346793fc096SFrank 
Li 4347793fc096SFrank Li platform_set_drvdata(pdev, ndev); 4348793fc096SFrank Li 434929380905SLucas Stach if ((of_machine_is_compatible("fsl,imx6q") || 435029380905SLucas Stach of_machine_is_compatible("fsl,imx6dl")) && 435129380905SLucas Stach !of_property_read_bool(np, "fsl,err006687-workaround-present")) 435229380905SLucas Stach fep->quirks |= FEC_QUIRK_ERR006687; 435329380905SLucas Stach 435440c79ce1SWei Fang ret = fec_enet_ipc_handle_init(fep); 435540c79ce1SWei Fang if (ret) 435640c79ce1SWei Fang goto failed_ipc_init; 435740c79ce1SWei Fang 43581a87e641SRob Herring if (of_property_read_bool(np, "fsl,magic-packet")) 4359de40ed31SNimrod Andy fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; 4360de40ed31SNimrod Andy 43618a448bf8SFugang Duan ret = fec_enet_init_stop_mode(fep, np); 4362da722186SMartin Fuzzey if (ret) 4363da722186SMartin Fuzzey goto failed_stop_mode; 4364da722186SMartin Fuzzey 4365407066f8SUwe Kleine-König phy_node = of_parse_phandle(np, "phy-handle", 0); 4366407066f8SUwe Kleine-König if (!phy_node && of_phy_is_fixed_link(np)) { 4367407066f8SUwe Kleine-König ret = of_phy_register_fixed_link(np); 4368407066f8SUwe Kleine-König if (ret < 0) { 4369407066f8SUwe Kleine-König dev_err(&pdev->dev, 4370407066f8SUwe Kleine-König "broken fixed-link specification\n"); 4371407066f8SUwe Kleine-König goto failed_phy; 4372407066f8SUwe Kleine-König } 4373407066f8SUwe Kleine-König phy_node = of_node_get(np); 4374407066f8SUwe Kleine-König } 4375407066f8SUwe Kleine-König fep->phy_node = phy_node; 4376407066f8SUwe Kleine-König 43770c65b2b9SAndrew Lunn ret = of_get_phy_mode(pdev->dev.of_node, &interface); 43780c65b2b9SAndrew Lunn if (ret) { 437994660ba0SJingoo Han pdata = dev_get_platdata(&pdev->dev); 4380793fc096SFrank Li if (pdata) 4381793fc096SFrank Li fep->phy_interface = pdata->phy; 4382793fc096SFrank Li else 4383793fc096SFrank Li fep->phy_interface = PHY_INTERFACE_MODE_MII; 4384793fc096SFrank Li } else { 43850c65b2b9SAndrew Lunn fep->phy_interface = interface; 4386793fc096SFrank Li } 4387793fc096SFrank Li 4388b820c114SJoakim Zhang ret = fec_enet_parse_rgmii_delay(fep, np); 4389b820c114SJoakim Zhang if (ret) 4390b820c114SJoakim Zhang goto failed_rgmii_delay; 4391b820c114SJoakim Zhang 4392793fc096SFrank Li fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 4393793fc096SFrank Li if (IS_ERR(fep->clk_ipg)) { 4394793fc096SFrank Li ret = PTR_ERR(fep->clk_ipg); 4395793fc096SFrank Li goto failed_clk; 4396793fc096SFrank Li } 4397793fc096SFrank Li 4398793fc096SFrank Li fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 4399793fc096SFrank Li if (IS_ERR(fep->clk_ahb)) { 4400793fc096SFrank Li ret = PTR_ERR(fep->clk_ahb); 4401793fc096SFrank Li goto failed_clk; 4402793fc096SFrank Li } 4403793fc096SFrank Li 4404d851b47bSFugang Duan fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); 4405d851b47bSFugang Duan 440638f56f33SLinus Torvalds /* enet_out is optional, depends on board */ 44075ff851b7SUwe Kleine-König fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out"); 44085ff851b7SUwe Kleine-König if (IS_ERR(fep->clk_enet_out)) { 44095ff851b7SUwe Kleine-König ret = PTR_ERR(fep->clk_enet_out); 44105ff851b7SUwe Kleine-König goto failed_clk; 44115ff851b7SUwe Kleine-König } 441238f56f33SLinus Torvalds 441391c0d987SNimrod Andy fep->ptp_clk_on = false; 441401b825f9SFrancesco Dolcini mutex_init(&fep->ptp_clk_mutex); 44159b5330edSFugang Duan 44169b5330edSFugang Duan /* clk_ref is optional, depends on board */ 441743252ed1SUwe Kleine-König fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref"); 441843252ed1SUwe 
Kleine-König if (IS_ERR(fep->clk_ref)) { 441943252ed1SUwe Kleine-König ret = PTR_ERR(fep->clk_ref); 442043252ed1SUwe Kleine-König goto failed_clk; 442143252ed1SUwe Kleine-König } 4422b82f8c3fSFugang Duan fep->clk_ref_rate = clk_get_rate(fep->clk_ref); 44239b5330edSFugang Duan 4424fc539459SFugang Duan /* clk_2x_txclk is optional, depends on board */ 4425b820c114SJoakim Zhang if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) { 4426fc539459SFugang Duan fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk"); 4427fc539459SFugang Duan if (IS_ERR(fep->clk_2x_txclk)) 4428fc539459SFugang Duan fep->clk_2x_txclk = NULL; 4429b820c114SJoakim Zhang } 4430fc539459SFugang Duan 44316b7e4008SLothar Waßmann fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; 4432793fc096SFrank Li fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); 4433793fc096SFrank Li if (IS_ERR(fep->clk_ptp)) { 443438f56f33SLinus Torvalds fep->clk_ptp = NULL; 4435217b5844SLothar Waßmann fep->bufdesc_ex = false; 4436793fc096SFrank Li } 4437793fc096SFrank Li 4438e8fcfcd5SNimrod Andy ret = fec_enet_clk_enable(ndev, true); 443913a097bdSFabio Estevam if (ret) 444013a097bdSFabio Estevam goto failed_clk; 444113a097bdSFabio Estevam 44428fff755eSAndrew Lunn ret = clk_prepare_enable(fep->clk_ipg); 44438fff755eSAndrew Lunn if (ret) 44448fff755eSAndrew Lunn goto failed_clk_ipg; 4445d7c3a206SAndy Duan ret = clk_prepare_enable(fep->clk_ahb); 4446d7c3a206SAndy Duan if (ret) 4447d7c3a206SAndy Duan goto failed_clk_ahb; 44488fff755eSAndrew Lunn 444925974d8aSStefan Agner fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); 4450f4e9f3d2SFabio Estevam if (!IS_ERR(fep->reg_phy)) { 4451f4e9f3d2SFabio Estevam ret = regulator_enable(fep->reg_phy); 4452793fc096SFrank Li if (ret) { 4453793fc096SFrank Li dev_err(&pdev->dev, 4454793fc096SFrank Li "Failed to enable phy regulator: %d\n", ret); 4455793fc096SFrank Li goto failed_regulator; 4456793fc096SFrank Li } 4457f6a4d607SFabio Estevam } else { 44583f38c683SFugang Duan if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { 44593f38c683SFugang Duan ret = -EPROBE_DEFER; 44603f38c683SFugang Duan goto failed_regulator; 44613f38c683SFugang Duan } 4462f6a4d607SFabio Estevam fep->reg_phy = NULL; 4463793fc096SFrank Li } 4464793fc096SFrank Li 44658fff755eSAndrew Lunn pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); 44668fff755eSAndrew Lunn pm_runtime_use_autosuspend(&pdev->dev); 446714d2b7c1SLucas Stach pm_runtime_get_noresume(&pdev->dev); 44688fff755eSAndrew Lunn pm_runtime_set_active(&pdev->dev); 44698fff755eSAndrew Lunn pm_runtime_enable(&pdev->dev); 44708fff755eSAndrew Lunn 44719269e556SFugang Duan ret = fec_reset_phy(pdev); 44729269e556SFugang Duan if (ret) 44739269e556SFugang Duan goto failed_reset; 4474793fc096SFrank Li 44754ad1ceecSTroy Kisky irq_cnt = fec_enet_get_irq_cnt(pdev); 4476793fc096SFrank Li if (fep->bufdesc_ex) 44774ad1ceecSTroy Kisky fec_ptp_init(pdev, irq_cnt); 4478793fc096SFrank Li 4479793fc096SFrank Li ret = fec_enet_init(ndev); 4480793fc096SFrank Li if (ret) 4481793fc096SFrank Li goto failed_init; 4482793fc096SFrank Li 44834ad1ceecSTroy Kisky for (i = 0; i < irq_cnt; i++) { 44843ded9f2bSArnd Bergmann snprintf(irq_name, sizeof(irq_name), "int%d", i); 44853b56be21SAnson Huang irq = platform_get_irq_byname_optional(pdev, irq_name); 44864ad1ceecSTroy Kisky if (irq < 0) 4487793fc096SFrank Li irq = platform_get_irq(pdev, i); 4488793fc096SFrank Li if (irq < 0) { 4489793fc096SFrank Li ret = irq; 4490793fc096SFrank Li goto failed_irq; 4491793fc096SFrank Li } 44920d9b2ab1SFabio Estevam ret 
= devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, 449344a272ddSMichael Opdenacker 0, pdev->name, ndev); 44940d9b2ab1SFabio Estevam if (ret) 4495793fc096SFrank Li goto failed_irq; 4496de40ed31SNimrod Andy 4497de40ed31SNimrod Andy fep->irq[i] = irq; 4498793fc096SFrank Li } 4499793fc096SFrank Li 4500b7cdc965SJoakim Zhang /* Decide which interrupt line is wakeup capable */ 4501b7cdc965SJoakim Zhang fec_enet_get_wakeup_irq(pdev); 4502b7cdc965SJoakim Zhang 4503793fc096SFrank Li ret = fec_enet_mii_init(pdev); 4504793fc096SFrank Li if (ret) 4505793fc096SFrank Li goto failed_mii_init; 4506793fc096SFrank Li 4507793fc096SFrank Li /* Carrier starts down, phylib will bring it up */ 4508793fc096SFrank Li netif_carrier_off(ndev); 4509e8fcfcd5SNimrod Andy fec_enet_clk_enable(ndev, false); 45105bbde4d2SNimrod Andy pinctrl_pm_select_sleep_state(&pdev->dev); 4511793fc096SFrank Li 451259193053SAndrew Lunn ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN; 451359193053SAndrew Lunn 4514793fc096SFrank Li ret = register_netdev(ndev); 4515793fc096SFrank Li if (ret) 4516793fc096SFrank Li goto failed_register; 4517793fc096SFrank Li 4518de40ed31SNimrod Andy device_init_wakeup(&ndev->dev, fep->wol_flag & 4519de40ed31SNimrod Andy FEC_WOL_HAS_MAGIC_PACKET); 4520de40ed31SNimrod Andy 4521eb1d0640SFabio Estevam if (fep->bufdesc_ex && fep->ptp_clock) 4522eb1d0640SFabio Estevam netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); 4523eb1d0640SFabio Estevam 452436cdc743SRussell King INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 45258fff755eSAndrew Lunn 45268fff755eSAndrew Lunn pm_runtime_mark_last_busy(&pdev->dev); 45278fff755eSAndrew Lunn pm_runtime_put_autosuspend(&pdev->dev); 45288fff755eSAndrew Lunn 4529793fc096SFrank Li return 0; 4530793fc096SFrank Li 4531793fc096SFrank Li failed_register: 4532793fc096SFrank Li fec_enet_mii_remove(fep); 4533793fc096SFrank Li failed_mii_init: 45347a2bbd8dSFabio Estevam failed_irq: 4535*bf0497f5SXiaolei Wang fec_enet_deinit(ndev); 45367a2bbd8dSFabio Estevam failed_init: 453732cba57bSLucas Stach fec_ptp_stop(pdev); 45389269e556SFugang Duan failed_reset: 4539ce8d24f9SAndy Duan pm_runtime_put_noidle(&pdev->dev); 45409269e556SFugang Duan pm_runtime_disable(&pdev->dev); 4541c6165cf0SFugang Duan if (fep->reg_phy) 4542c6165cf0SFugang Duan regulator_disable(fep->reg_phy); 4543793fc096SFrank Li failed_regulator: 4544d7c3a206SAndy Duan clk_disable_unprepare(fep->clk_ahb); 4545d7c3a206SAndy Duan failed_clk_ahb: 4546d7c3a206SAndy Duan clk_disable_unprepare(fep->clk_ipg); 45478fff755eSAndrew Lunn failed_clk_ipg: 4548e8fcfcd5SNimrod Andy fec_enet_clk_enable(ndev, false); 4549793fc096SFrank Li failed_clk: 4550b820c114SJoakim Zhang failed_rgmii_delay: 455182005b1cSJohan Hovold if (of_phy_is_fixed_link(np)) 455282005b1cSJohan Hovold of_phy_deregister_fixed_link(np); 4553407066f8SUwe Kleine-König of_node_put(phy_node); 4554da722186SMartin Fuzzey failed_stop_mode: 455540c79ce1SWei Fang failed_ipc_init: 4556d1616f07SFugang Duan failed_phy: 4557d1616f07SFugang Duan dev_id--; 4558793fc096SFrank Li failed_ioremap: 4559793fc096SFrank Li free_netdev(ndev); 4560793fc096SFrank Li 4561793fc096SFrank Li return ret; 4562793fc096SFrank Li } 4563793fc096SFrank Li 456412d6cc19SUwe Kleine-König static void 4565793fc096SFrank Li fec_drv_remove(struct platform_device *pdev) 4566793fc096SFrank Li { 4567793fc096SFrank Li struct net_device *ndev = platform_get_drvdata(pdev); 4568793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 456982005b1cSJohan Hovold struct device_node *np = 
pdev->dev.of_node; 4570a31eda65SChuhong Yuan int ret; 4571a31eda65SChuhong Yuan 4572f816b982SUwe Kleine-König ret = pm_runtime_get_sync(&pdev->dev); 4573a31eda65SChuhong Yuan if (ret < 0) 4574f816b982SUwe Kleine-König dev_err(&pdev->dev, 4575f816b982SUwe Kleine-König "Failed to resume device in remove callback (%pe)\n", 4576f816b982SUwe Kleine-König ERR_PTR(ret)); 4577793fc096SFrank Li 457836cdc743SRussell King cancel_work_sync(&fep->tx_timeout_work); 457932cba57bSLucas Stach fec_ptp_stop(pdev); 4580793fc096SFrank Li unregister_netdev(ndev); 4581793fc096SFrank Li fec_enet_mii_remove(fep); 4582f6a4d607SFabio Estevam if (fep->reg_phy) 4583f6a4d607SFabio Estevam regulator_disable(fep->reg_phy); 4584a31eda65SChuhong Yuan 458582005b1cSJohan Hovold if (of_phy_is_fixed_link(np)) 458682005b1cSJohan Hovold of_phy_deregister_fixed_link(np); 4587407066f8SUwe Kleine-König of_node_put(fep->phy_node); 4588793fc096SFrank Li 4589f816b982SUwe Kleine-König /* After pm_runtime_get_sync() failed, the clks are still off, so skip 4590f816b982SUwe Kleine-König * disabling them again. 4591f816b982SUwe Kleine-König */ 4592f816b982SUwe Kleine-König if (ret >= 0) { 4593a31eda65SChuhong Yuan clk_disable_unprepare(fep->clk_ahb); 4594a31eda65SChuhong Yuan clk_disable_unprepare(fep->clk_ipg); 4595f816b982SUwe Kleine-König } 4596a31eda65SChuhong Yuan pm_runtime_put_noidle(&pdev->dev); 4597a31eda65SChuhong Yuan pm_runtime_disable(&pdev->dev); 4598a31eda65SChuhong Yuan 4599*bf0497f5SXiaolei Wang fec_enet_deinit(ndev); 460044712965SPavel Skripkin free_netdev(ndev); 4601793fc096SFrank Li } 4602793fc096SFrank Li 4603dd66d386SFabio Estevam static int __maybe_unused fec_suspend(struct device *dev) 4604793fc096SFrank Li { 4605793fc096SFrank Li struct net_device *ndev = dev_get_drvdata(dev); 4606793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 4607da970726SWei Fang int ret; 4608793fc096SFrank Li 4609da1774e5SRussell King rtnl_lock(); 4610793fc096SFrank Li if (netif_running(ndev)) { 4611de40ed31SNimrod Andy if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) 4612de40ed31SNimrod Andy fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; 461345f5c327SPhilippe Reynes phy_stop(ndev->phydev); 461431a6de34SRussell King napi_disable(&fep->napi); 461531a6de34SRussell King netif_tx_lock_bh(ndev); 4616793fc096SFrank Li netif_device_detach(ndev); 461731a6de34SRussell King netif_tx_unlock_bh(ndev); 461831a6de34SRussell King fec_stop(ndev); 46190b6f65c7SJoakim Zhang if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { 46200b6f65c7SJoakim Zhang fec_irqs_disable(ndev); 46215bbde4d2SNimrod Andy pinctrl_pm_select_sleep_state(&fep->pdev->dev); 46220b6f65c7SJoakim Zhang } else { 46230b6f65c7SJoakim Zhang fec_irqs_disable_except_wakeup(ndev); 46240b6f65c7SJoakim Zhang if (fep->wake_irq > 0) { 46250b6f65c7SJoakim Zhang disable_irq(fep->wake_irq); 46260b6f65c7SJoakim Zhang enable_irq_wake(fep->wake_irq); 46270b6f65c7SJoakim Zhang } 46280b6f65c7SJoakim Zhang fec_enet_stop_mode(fep, true); 46290b6f65c7SJoakim Zhang } 46300b6f65c7SJoakim Zhang /* It's safe to disable clocks since interrupts are masked */ 46310b6f65c7SJoakim Zhang fec_enet_clk_enable(ndev, false); 4632da970726SWei Fang 4633da970726SWei Fang fep->rpm_active = !pm_runtime_status_suspended(dev); 4634da970726SWei Fang if (fep->rpm_active) { 4635da970726SWei Fang ret = pm_runtime_force_suspend(dev); 4636da970726SWei Fang if (ret < 0) { 4637da970726SWei Fang rtnl_unlock(); 4638da970726SWei Fang return ret; 4639da970726SWei Fang } 4640da970726SWei Fang } 4641f4c4a4e0SNimrod Andy } 4642f4c4a4e0SNimrod Andy 
rtnl_unlock(); 4643793fc096SFrank Li 4644de40ed31SNimrod Andy if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) 4645238f7bc7SFabio Estevam regulator_disable(fep->reg_phy); 4646238f7bc7SFabio Estevam 4647858eeb7dSNimrod Andy /* SOC supply clock to phy, when clock is disabled, phy link down 4648858eeb7dSNimrod Andy * SOC control phy regulator, when regulator is disabled, phy link down 4649858eeb7dSNimrod Andy */ 4650858eeb7dSNimrod Andy if (fep->clk_enet_out || fep->reg_phy) 4651858eeb7dSNimrod Andy fep->link = 0; 4652858eeb7dSNimrod Andy 4653793fc096SFrank Li return 0; 4654793fc096SFrank Li } 4655793fc096SFrank Li 4656dd66d386SFabio Estevam static int __maybe_unused fec_resume(struct device *dev) 4657793fc096SFrank Li { 4658793fc096SFrank Li struct net_device *ndev = dev_get_drvdata(dev); 4659793fc096SFrank Li struct fec_enet_private *fep = netdev_priv(ndev); 4660238f7bc7SFabio Estevam int ret; 4661de40ed31SNimrod Andy int val; 4662238f7bc7SFabio Estevam 4663de40ed31SNimrod Andy if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { 4664238f7bc7SFabio Estevam ret = regulator_enable(fep->reg_phy); 4665238f7bc7SFabio Estevam if (ret) 4666238f7bc7SFabio Estevam return ret; 4667238f7bc7SFabio Estevam } 4668793fc096SFrank Li 4669da1774e5SRussell King rtnl_lock(); 4670793fc096SFrank Li if (netif_running(ndev)) { 4671da970726SWei Fang if (fep->rpm_active) 4672da970726SWei Fang pm_runtime_force_resume(dev); 4673da970726SWei Fang 4674f4c4a4e0SNimrod Andy ret = fec_enet_clk_enable(ndev, true); 4675f4c4a4e0SNimrod Andy if (ret) { 4676f4c4a4e0SNimrod Andy rtnl_unlock(); 4677f4c4a4e0SNimrod Andy goto failed_clk; 4678f4c4a4e0SNimrod Andy } 4679de40ed31SNimrod Andy if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { 4680da722186SMartin Fuzzey fec_enet_stop_mode(fep, false); 46810b6f65c7SJoakim Zhang if (fep->wake_irq) { 46820b6f65c7SJoakim Zhang disable_irq_wake(fep->wake_irq); 46830b6f65c7SJoakim Zhang enable_irq(fep->wake_irq); 46840b6f65c7SJoakim Zhang } 4685da722186SMartin Fuzzey 4686de40ed31SNimrod Andy val = readl(fep->hwp + FEC_ECNTRL); 4687de40ed31SNimrod Andy val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 4688de40ed31SNimrod Andy writel(val, fep->hwp + FEC_ECNTRL); 4689de40ed31SNimrod Andy fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; 4690de40ed31SNimrod Andy } else { 4691de40ed31SNimrod Andy pinctrl_pm_select_default_state(&fep->pdev->dev); 4692de40ed31SNimrod Andy } 4693ef83337dSRussell King fec_restart(ndev); 469431a6de34SRussell King netif_tx_lock_bh(ndev); 4695793fc096SFrank Li netif_device_attach(ndev); 46966af42d42SRussell King netif_tx_unlock_bh(ndev); 46976af42d42SRussell King napi_enable(&fep->napi); 4698557d5dc8SHeiner Kallweit phy_init_hw(ndev->phydev); 469945f5c327SPhilippe Reynes phy_start(ndev->phydev); 4700793fc096SFrank Li } 4701da1774e5SRussell King rtnl_unlock(); 4702793fc096SFrank Li 4703793fc096SFrank Li return 0; 470413a097bdSFabio Estevam 4705e8fcfcd5SNimrod Andy failed_clk: 470613a097bdSFabio Estevam if (fep->reg_phy) 470713a097bdSFabio Estevam regulator_disable(fep->reg_phy); 470813a097bdSFabio Estevam return ret; 4709793fc096SFrank Li } 4710793fc096SFrank Li 47118fff755eSAndrew Lunn static int __maybe_unused fec_runtime_suspend(struct device *dev) 47128fff755eSAndrew Lunn { 47138fff755eSAndrew Lunn struct net_device *ndev = dev_get_drvdata(dev); 47148fff755eSAndrew Lunn struct fec_enet_private *fep = netdev_priv(ndev); 47158fff755eSAndrew Lunn 4716d7c3a206SAndy Duan clk_disable_unprepare(fep->clk_ahb); 47178fff755eSAndrew Lunn clk_disable_unprepare(fep->clk_ipg); 
47188fff755eSAndrew Lunn 47198fff755eSAndrew Lunn return 0; 47208fff755eSAndrew Lunn } 47218fff755eSAndrew Lunn 47228fff755eSAndrew Lunn static int __maybe_unused fec_runtime_resume(struct device *dev) 47238fff755eSAndrew Lunn { 47248fff755eSAndrew Lunn struct net_device *ndev = dev_get_drvdata(dev); 47258fff755eSAndrew Lunn struct fec_enet_private *fep = netdev_priv(ndev); 4726d7c3a206SAndy Duan int ret; 47278fff755eSAndrew Lunn 4728d7c3a206SAndy Duan ret = clk_prepare_enable(fep->clk_ahb); 4729d7c3a206SAndy Duan if (ret) 4730d7c3a206SAndy Duan return ret; 4731d7c3a206SAndy Duan ret = clk_prepare_enable(fep->clk_ipg); 4732d7c3a206SAndy Duan if (ret) 4733d7c3a206SAndy Duan goto failed_clk_ipg; 4734d7c3a206SAndy Duan 4735d7c3a206SAndy Duan return 0; 4736d7c3a206SAndy Duan 4737d7c3a206SAndy Duan failed_clk_ipg: 4738d7c3a206SAndy Duan clk_disable_unprepare(fep->clk_ahb); 4739d7c3a206SAndy Duan return ret; 47408fff755eSAndrew Lunn } 47418fff755eSAndrew Lunn 47428fff755eSAndrew Lunn static const struct dev_pm_ops fec_pm_ops = { 47438fff755eSAndrew Lunn SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) 47448fff755eSAndrew Lunn SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) 47458fff755eSAndrew Lunn }; 4746793fc096SFrank Li 4747793fc096SFrank Li static struct platform_driver fec_driver = { 4748793fc096SFrank Li .driver = { 4749793fc096SFrank Li .name = DRIVER_NAME, 4750793fc096SFrank Li .pm = &fec_pm_ops, 4751793fc096SFrank Li .of_match_table = fec_dt_ids, 4752272bb0e9SFabio Estevam .suppress_bind_attrs = true, 4753793fc096SFrank Li }, 4754793fc096SFrank Li .id_table = fec_devtype, 4755793fc096SFrank Li .probe = fec_probe, 475612d6cc19SUwe Kleine-König .remove_new = fec_drv_remove, 4757793fc096SFrank Li }; 4758793fc096SFrank Li 4759793fc096SFrank Li module_platform_driver(fec_driver); 4760793fc096SFrank Li 47612e875764SBreno Leitao MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver"); 4762793fc096SFrank Li MODULE_LICENSE("GPL"); 4763
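/*
 * A minimal, illustrative device tree fragment exercising the optional
 * properties parsed by fec_probe() and fec_reset_phy() above.  Node
 * names, GPIO numbers and delay values here are made-up examples, not
 * taken from any real board file:
 *
 *	&fec {
 *		phy-mode = "rgmii-id";
 *		phy-handle = <&ethphy0>;
 *		phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
 *		phy-reset-duration = <10>;	(ms; values above 1000 fall back to 1)
 *		phy-reset-post-delay = <100>;	(ms; values above 1000 are rejected)
 *		fsl,num-tx-queues = <3>;
 *		fsl,num-rx-queues = <3>;
 *		fsl,magic-packet;
 *	};
 */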