xref: /linux/drivers/net/ethernet/freescale/fec_main.c (revision c94cd9508b1335b949fd13ebd269313c65492df0)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
4  * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
5  *
6  * Right now, I am very wasteful with the buffers.  I allocate memory
7  * pages and then divide them into 2K frame buffers.  This way I know I
8  * have buffers large enough to hold one frame within one buffer descriptor.
9  * Once I get this working, I will use 64 or 128 byte CPM buffers, which
10  * will be much more memory efficient and will easily handle lots of
11  * small packets.
12  *
13  * Much better multiple PHY support by Magnus Damm.
14  * Copyright (c) 2000 Ericsson Radio Systems AB.
15  *
16  * Support for FEC controller of ColdFire processors.
17  * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
18  *
19  * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
20  * Copyright (c) 2004-2006 Macq Electronique SA.
21  *
22  * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
23  */
24 
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/string.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/ptrace.h>
30 #include <linux/errno.h>
31 #include <linux/ioport.h>
32 #include <linux/slab.h>
33 #include <linux/interrupt.h>
34 #include <linux/delay.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/skbuff.h>
38 #include <linux/in.h>
39 #include <linux/ip.h>
40 #include <net/ip.h>
41 #include <net/page_pool/helpers.h>
42 #include <net/selftests.h>
43 #include <net/tso.h>
44 #include <linux/tcp.h>
45 #include <linux/udp.h>
46 #include <linux/icmp.h>
47 #include <linux/spinlock.h>
48 #include <linux/workqueue.h>
49 #include <linux/bitops.h>
50 #include <linux/io.h>
51 #include <linux/irq.h>
52 #include <linux/clk.h>
53 #include <linux/crc32.h>
54 #include <linux/platform_device.h>
55 #include <linux/property.h>
56 #include <linux/mdio.h>
57 #include <linux/phy.h>
58 #include <linux/fec.h>
59 #include <linux/of.h>
60 #include <linux/of_mdio.h>
61 #include <linux/of_net.h>
62 #include <linux/regulator/consumer.h>
63 #include <linux/if_vlan.h>
64 #include <linux/pinctrl/consumer.h>
65 #include <linux/gpio/consumer.h>
66 #include <linux/prefetch.h>
67 #include <linux/mfd/syscon.h>
68 #include <linux/regmap.h>
69 #include <soc/imx/cpuidle.h>
70 #include <linux/filter.h>
71 #include <linux/bpf.h>
72 #include <linux/bpf_trace.h>
73 
74 #include <asm/cacheflush.h>
75 
76 #include "fec.h"
77 
78 static void set_multicast_list(struct net_device *ndev);
79 static void fec_enet_itr_coal_set(struct net_device *ndev);
80 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
81 				int cpu, struct xdp_buff *xdp,
82 				u32 dma_sync_len);
83 
84 #define DRIVER_NAME	"fec"
85 
86 static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
87 
88 #define FEC_ENET_RSEM_V	0x84
89 #define FEC_ENET_RSFL_V	16
90 #define FEC_ENET_RAEM_V	0x8
91 #define FEC_ENET_RAFL_V	0x8
92 #define FEC_ENET_OPD_V	0xFFF0
93 #define FEC_MDIO_PM_TIMEOUT  100 /* ms */
94 
95 #define FEC_ENET_XDP_PASS          0
96 #define FEC_ENET_XDP_CONSUMED      BIT(0)
97 #define FEC_ENET_XDP_TX            BIT(1)
98 #define FEC_ENET_XDP_REDIR         BIT(2)
99 
100 struct fec_devinfo {
101 	u32 quirks;
102 };
103 
104 static const struct fec_devinfo fec_imx25_info = {
105 	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
106 		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
107 };
108 
109 static const struct fec_devinfo fec_imx27_info = {
110 	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
111 		  FEC_QUIRK_HAS_MDIO_C45,
112 };
113 
114 static const struct fec_devinfo fec_imx28_info = {
115 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
116 		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
117 		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
118 		  FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
119 };
120 
121 static const struct fec_devinfo fec_imx6q_info = {
122 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
123 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
124 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
125 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
126 		  FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
127 };
128 
129 static const struct fec_devinfo fec_mvf600_info = {
130 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
131 		  FEC_QUIRK_HAS_MDIO_C45,
132 };
133 
134 static const struct fec_devinfo fec_imx6x_info = {
135 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
136 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
137 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
138 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
139 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
140 		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
141 		  FEC_QUIRK_HAS_MDIO_C45,
142 };
143 
144 static const struct fec_devinfo fec_imx6ul_info = {
145 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
146 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
147 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
148 		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
149 		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
150 		  FEC_QUIRK_HAS_MDIO_C45,
151 };
152 
153 static const struct fec_devinfo fec_imx8mq_info = {
154 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
155 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
156 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
157 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
158 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
159 		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
160 		  FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
161 		  FEC_QUIRK_HAS_MDIO_C45,
162 };
163 
164 static const struct fec_devinfo fec_imx8qm_info = {
165 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
166 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
167 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
168 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
169 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
170 		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
171 		  FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
172 };
173 
174 static const struct fec_devinfo fec_s32v234_info = {
175 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
176 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
177 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
178 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
179 		  FEC_QUIRK_HAS_MDIO_C45,
180 };
181 
182 static struct platform_device_id fec_devtype[] = {
183 	{
184 		/* keep it for coldfire */
185 		.name = DRIVER_NAME,
186 		.driver_data = 0,
187 	}, {
188 		/* sentinel */
189 	}
190 };
191 MODULE_DEVICE_TABLE(platform, fec_devtype);
192 
193 static const struct of_device_id fec_dt_ids[] = {
194 	{ .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, },
195 	{ .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, },
196 	{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
197 	{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
198 	{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
199 	{ .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
200 	{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
201 	{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
202 	{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
203 	{ .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, },
204 	{ /* sentinel */ }
205 };
206 MODULE_DEVICE_TABLE(of, fec_dt_ids);
207 
208 static unsigned char macaddr[ETH_ALEN];
209 module_param_array(macaddr, byte, NULL, 0);
210 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
211 
212 #if defined(CONFIG_M5272)
213 /*
214  * Some hardware gets it MAC address out of local flash memory.
215  * if this is non-zero then assume it is the address to get MAC from.
216  */
217 #if defined(CONFIG_NETtel)
218 #define	FEC_FLASHMAC	0xf0006006
219 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
220 #define	FEC_FLASHMAC	0xf0006000
221 #elif defined(CONFIG_CANCam)
222 #define	FEC_FLASHMAC	0xf0020000
223 #elif defined (CONFIG_M5272C3)
224 #define	FEC_FLASHMAC	(0xffe04000 + 4)
225 #elif defined(CONFIG_MOD5272)
226 #define FEC_FLASHMAC	0xffc0406b
227 #else
228 #define	FEC_FLASHMAC	0
229 #endif
230 #endif /* CONFIG_M5272 */
231 
232 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
233  *
234  * 2048 byte skbufs are allocated. However, alignment requirements
235  * varies between FEC variants. Worst case is 64, so round down by 64.
236  */
237 #define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
238 #define PKT_MINBUF_SIZE		64
239 
240 /* FEC receive acceleration */
241 #define FEC_RACC_IPDIS		BIT(1)
242 #define FEC_RACC_PRODIS		BIT(2)
243 #define FEC_RACC_SHIFT16	BIT(7)
244 #define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
245 
246 /* MIB Control Register */
247 #define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)
248 
249 /*
250  * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
251  * size bits. Other FEC hardware does not, so we need to take that into
252  * account when setting it.
253  */
254 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
255     defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
256     defined(CONFIG_ARM64)
257 #define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
258 #else
259 #define	OPT_FRAME_SIZE	0
260 #endif
261 
262 /* FEC MII MMFR bits definition */
263 #define FEC_MMFR_ST		(1 << 30)
264 #define FEC_MMFR_ST_C45		(0)
265 #define FEC_MMFR_OP_READ	(2 << 28)
266 #define FEC_MMFR_OP_READ_C45	(3 << 28)
267 #define FEC_MMFR_OP_WRITE	(1 << 28)
268 #define FEC_MMFR_OP_ADDR_WRITE	(0)
269 #define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
270 #define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
271 #define FEC_MMFR_TA		(2 << 16)
272 #define FEC_MMFR_DATA(v)	(v & 0xffff)
273 /* FEC ECR bits definition */
274 #define FEC_ECR_RESET           BIT(0)
275 #define FEC_ECR_ETHEREN         BIT(1)
276 #define FEC_ECR_MAGICEN         BIT(2)
277 #define FEC_ECR_SLEEP           BIT(3)
278 #define FEC_ECR_EN1588          BIT(4)
279 #define FEC_ECR_BYTESWP         BIT(8)
280 /* FEC RCR bits definition */
281 #define FEC_RCR_LOOP            BIT(0)
282 #define FEC_RCR_HALFDPX         BIT(1)
283 #define FEC_RCR_MII             BIT(2)
284 #define FEC_RCR_PROMISC         BIT(3)
285 #define FEC_RCR_BC_REJ          BIT(4)
286 #define FEC_RCR_FLOWCTL         BIT(5)
287 #define FEC_RCR_RMII            BIT(8)
288 #define FEC_RCR_10BASET         BIT(9)
289 /* TX WMARK bits */
290 #define FEC_TXWMRK_STRFWD       BIT(8)
291 
292 #define FEC_MII_TIMEOUT		30000 /* us */
293 
294 /* Transmitter timeout */
295 #define TX_TIMEOUT (2 * HZ)
296 
297 #define FEC_PAUSE_FLAG_AUTONEG	0x1
298 #define FEC_PAUSE_FLAG_ENABLE	0x2
299 #define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
300 #define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
301 #define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)
302 
303 /* Max number of allowed TCP segments for software TSO */
304 #define FEC_MAX_TSO_SEGS	100
305 #define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
306 
307 #define IS_TSO_HEADER(txq, addr) \
308 	((addr >= txq->tso_hdrs_dma) && \
309 	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
310 
311 static int mii_cnt;
312 
313 static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
314 					     struct bufdesc_prop *bd)
315 {
316 	return (bdp >= bd->last) ? bd->base
317 			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
318 }
319 
320 static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
321 					     struct bufdesc_prop *bd)
322 {
323 	return (bdp <= bd->base) ? bd->last
324 			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
325 }
326 
327 static int fec_enet_get_bd_index(struct bufdesc *bdp,
328 				 struct bufdesc_prop *bd)
329 {
330 	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
331 }
332 
333 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
334 {
335 	int entries;
336 
337 	entries = (((const char *)txq->dirty_tx -
338 			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
339 
340 	return entries >= 0 ? entries : entries + txq->bd.ring_size;
341 }
342 
343 static void swap_buffer(void *bufaddr, int len)
344 {
345 	int i;
346 	unsigned int *buf = bufaddr;
347 
348 	for (i = 0; i < len; i += 4, buf++)
349 		swab32s(buf);
350 }
351 
352 static void fec_dump(struct net_device *ndev)
353 {
354 	struct fec_enet_private *fep = netdev_priv(ndev);
355 	struct bufdesc *bdp;
356 	struct fec_enet_priv_tx_q *txq;
357 	int index = 0;
358 
359 	netdev_info(ndev, "TX ring dump\n");
360 	pr_info("Nr     SC     addr       len  SKB\n");
361 
362 	txq = fep->tx_queue[0];
363 	bdp = txq->bd.base;
364 
365 	do {
366 		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
367 			index,
368 			bdp == txq->bd.cur ? 'S' : ' ',
369 			bdp == txq->dirty_tx ? 'H' : ' ',
370 			fec16_to_cpu(bdp->cbd_sc),
371 			fec32_to_cpu(bdp->cbd_bufaddr),
372 			fec16_to_cpu(bdp->cbd_datlen),
373 			txq->tx_buf[index].buf_p);
374 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
375 		index++;
376 	} while (bdp != txq->bd.base);
377 }
378 
379 /*
380  * Coldfire does not support DMA coherent allocations, and has historically used
381  * a band-aid with a manual flush in fec_enet_rx_queue.
382  */
383 #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
384 static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
385 		gfp_t gfp)
386 {
387 	return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
388 }
389 
390 static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
391 		dma_addr_t handle)
392 {
393 	dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL);
394 }
395 #else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
396 static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
397 		gfp_t gfp)
398 {
399 	return dma_alloc_coherent(dev, size, handle, gfp);
400 }
401 
402 static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
403 		dma_addr_t handle)
404 {
405 	dma_free_coherent(dev, size, cpu_addr, handle);
406 }
407 #endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
408 
409 struct fec_dma_devres {
410 	size_t		size;
411 	void		*vaddr;
412 	dma_addr_t	dma_handle;
413 };
414 
415 static void fec_dmam_release(struct device *dev, void *res)
416 {
417 	struct fec_dma_devres *this = res;
418 
419 	fec_dma_free(dev, this->size, this->vaddr, this->dma_handle);
420 }
421 
422 static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle,
423 		gfp_t gfp)
424 {
425 	struct fec_dma_devres *dr;
426 	void *vaddr;
427 
428 	dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp);
429 	if (!dr)
430 		return NULL;
431 	vaddr = fec_dma_alloc(dev, size, handle, gfp);
432 	if (!vaddr) {
433 		devres_free(dr);
434 		return NULL;
435 	}
436 	dr->vaddr = vaddr;
437 	dr->dma_handle = *handle;
438 	dr->size = size;
439 	devres_add(dev, dr);
440 	return vaddr;
441 }
442 
443 static inline bool is_ipv4_pkt(struct sk_buff *skb)
444 {
445 	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
446 }
447 
448 static int
449 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
450 {
451 	/* Only run for packets requiring a checksum. */
452 	if (skb->ip_summed != CHECKSUM_PARTIAL)
453 		return 0;
454 
455 	if (unlikely(skb_cow_head(skb, 0)))
456 		return -1;
457 
458 	if (is_ipv4_pkt(skb))
459 		ip_hdr(skb)->check = 0;
460 	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
461 
462 	return 0;
463 }
464 
465 static int
466 fec_enet_create_page_pool(struct fec_enet_private *fep,
467 			  struct fec_enet_priv_rx_q *rxq, int size)
468 {
469 	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
470 	struct page_pool_params pp_params = {
471 		.order = 0,
472 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
473 		.pool_size = size,
474 		.nid = dev_to_node(&fep->pdev->dev),
475 		.dev = &fep->pdev->dev,
476 		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
477 		.offset = FEC_ENET_XDP_HEADROOM,
478 		.max_len = FEC_ENET_RX_FRSIZE,
479 	};
480 	int err;
481 
482 	rxq->page_pool = page_pool_create(&pp_params);
483 	if (IS_ERR(rxq->page_pool)) {
484 		err = PTR_ERR(rxq->page_pool);
485 		rxq->page_pool = NULL;
486 		return err;
487 	}
488 
489 	err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
490 	if (err < 0)
491 		goto err_free_pp;
492 
493 	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
494 					 rxq->page_pool);
495 	if (err)
496 		goto err_unregister_rxq;
497 
498 	return 0;
499 
500 err_unregister_rxq:
501 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
502 err_free_pp:
503 	page_pool_destroy(rxq->page_pool);
504 	rxq->page_pool = NULL;
505 	return err;
506 }
507 
508 static struct bufdesc *
509 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
510 			     struct sk_buff *skb,
511 			     struct net_device *ndev)
512 {
513 	struct fec_enet_private *fep = netdev_priv(ndev);
514 	struct bufdesc *bdp = txq->bd.cur;
515 	struct bufdesc_ex *ebdp;
516 	int nr_frags = skb_shinfo(skb)->nr_frags;
517 	int frag, frag_len;
518 	unsigned short status;
519 	unsigned int estatus = 0;
520 	skb_frag_t *this_frag;
521 	unsigned int index;
522 	void *bufaddr;
523 	dma_addr_t addr;
524 	int i;
525 
526 	for (frag = 0; frag < nr_frags; frag++) {
527 		this_frag = &skb_shinfo(skb)->frags[frag];
528 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
529 		ebdp = (struct bufdesc_ex *)bdp;
530 
531 		status = fec16_to_cpu(bdp->cbd_sc);
532 		status &= ~BD_ENET_TX_STATS;
533 		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
534 		frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
535 
536 		/* Handle the last BD specially */
537 		if (frag == nr_frags - 1) {
538 			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
539 			if (fep->bufdesc_ex) {
540 				estatus |= BD_ENET_TX_INT;
541 				if (unlikely(skb_shinfo(skb)->tx_flags &
542 					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
543 					estatus |= BD_ENET_TX_TS;
544 			}
545 		}
546 
547 		if (fep->bufdesc_ex) {
548 			if (fep->quirks & FEC_QUIRK_HAS_AVB)
549 				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
550 			if (skb->ip_summed == CHECKSUM_PARTIAL)
551 				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
552 
553 			ebdp->cbd_bdu = 0;
554 			ebdp->cbd_esc = cpu_to_fec32(estatus);
555 		}
556 
557 		bufaddr = skb_frag_address(this_frag);
558 
559 		index = fec_enet_get_bd_index(bdp, &txq->bd);
560 		if (((unsigned long) bufaddr) & fep->tx_align ||
561 			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
562 			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
563 			bufaddr = txq->tx_bounce[index];
564 
565 			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
566 				swap_buffer(bufaddr, frag_len);
567 		}
568 
569 		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
570 				      DMA_TO_DEVICE);
571 		if (dma_mapping_error(&fep->pdev->dev, addr)) {
572 			if (net_ratelimit())
573 				netdev_err(ndev, "Tx DMA memory map failed\n");
574 			goto dma_mapping_error;
575 		}
576 
577 		bdp->cbd_bufaddr = cpu_to_fec32(addr);
578 		bdp->cbd_datlen = cpu_to_fec16(frag_len);
579 		/* Make sure the updates to rest of the descriptor are
580 		 * performed before transferring ownership.
581 		 */
582 		wmb();
583 		bdp->cbd_sc = cpu_to_fec16(status);
584 	}
585 
586 	return bdp;
587 dma_mapping_error:
588 	bdp = txq->bd.cur;
589 	for (i = 0; i < frag; i++) {
590 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
591 		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
592 				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
593 	}
594 	return ERR_PTR(-ENOMEM);
595 }
596 
597 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
598 				   struct sk_buff *skb, struct net_device *ndev)
599 {
600 	struct fec_enet_private *fep = netdev_priv(ndev);
601 	int nr_frags = skb_shinfo(skb)->nr_frags;
602 	struct bufdesc *bdp, *last_bdp;
603 	void *bufaddr;
604 	dma_addr_t addr;
605 	unsigned short status;
606 	unsigned short buflen;
607 	unsigned int estatus = 0;
608 	unsigned int index;
609 	int entries_free;
610 
611 	entries_free = fec_enet_get_free_txdesc_num(txq);
612 	if (entries_free < MAX_SKB_FRAGS + 1) {
613 		dev_kfree_skb_any(skb);
614 		if (net_ratelimit())
615 			netdev_err(ndev, "NOT enough BD for SG!\n");
616 		return NETDEV_TX_OK;
617 	}
618 
619 	/* Protocol checksum off-load for TCP and UDP. */
620 	if (fec_enet_clear_csum(skb, ndev)) {
621 		dev_kfree_skb_any(skb);
622 		return NETDEV_TX_OK;
623 	}
624 
625 	/* Fill in a Tx ring entry */
626 	bdp = txq->bd.cur;
627 	last_bdp = bdp;
628 	status = fec16_to_cpu(bdp->cbd_sc);
629 	status &= ~BD_ENET_TX_STATS;
630 
631 	/* Set buffer length and buffer pointer */
632 	bufaddr = skb->data;
633 	buflen = skb_headlen(skb);
634 
635 	index = fec_enet_get_bd_index(bdp, &txq->bd);
636 	if (((unsigned long) bufaddr) & fep->tx_align ||
637 		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
638 		memcpy(txq->tx_bounce[index], skb->data, buflen);
639 		bufaddr = txq->tx_bounce[index];
640 
641 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
642 			swap_buffer(bufaddr, buflen);
643 	}
644 
645 	/* Push the data cache so the CPM does not get stale memory data. */
646 	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
647 	if (dma_mapping_error(&fep->pdev->dev, addr)) {
648 		dev_kfree_skb_any(skb);
649 		if (net_ratelimit())
650 			netdev_err(ndev, "Tx DMA memory map failed\n");
651 		return NETDEV_TX_OK;
652 	}
653 
654 	if (nr_frags) {
655 		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
656 		if (IS_ERR(last_bdp)) {
657 			dma_unmap_single(&fep->pdev->dev, addr,
658 					 buflen, DMA_TO_DEVICE);
659 			dev_kfree_skb_any(skb);
660 			return NETDEV_TX_OK;
661 		}
662 	} else {
663 		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
664 		if (fep->bufdesc_ex) {
665 			estatus = BD_ENET_TX_INT;
666 			if (unlikely(skb_shinfo(skb)->tx_flags &
667 				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
668 				estatus |= BD_ENET_TX_TS;
669 		}
670 	}
671 	bdp->cbd_bufaddr = cpu_to_fec32(addr);
672 	bdp->cbd_datlen = cpu_to_fec16(buflen);
673 
674 	if (fep->bufdesc_ex) {
675 
676 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
677 
678 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
679 			fep->hwts_tx_en))
680 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
681 
682 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
683 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
684 
685 		if (skb->ip_summed == CHECKSUM_PARTIAL)
686 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
687 
688 		ebdp->cbd_bdu = 0;
689 		ebdp->cbd_esc = cpu_to_fec32(estatus);
690 	}
691 
692 	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
693 	/* Save skb pointer */
694 	txq->tx_buf[index].buf_p = skb;
695 
696 	/* Make sure the updates to rest of the descriptor are performed before
697 	 * transferring ownership.
698 	 */
699 	wmb();
700 
701 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
702 	 * it's the last BD of the frame, and to put the CRC on the end.
703 	 */
704 	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
705 	bdp->cbd_sc = cpu_to_fec16(status);
706 
707 	/* If this was the last BD in the ring, start at the beginning again. */
708 	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
709 
710 	skb_tx_timestamp(skb);
711 
712 	/* Make sure the update to bdp is performed before txq->bd.cur. */
713 	wmb();
714 	txq->bd.cur = bdp;
715 
716 	/* Trigger transmission start */
717 	writel(0, txq->bd.reg_desc_active);
718 
719 	return 0;
720 }
721 
722 static int
723 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
724 			  struct net_device *ndev,
725 			  struct bufdesc *bdp, int index, char *data,
726 			  int size, bool last_tcp, bool is_last)
727 {
728 	struct fec_enet_private *fep = netdev_priv(ndev);
729 	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
730 	unsigned short status;
731 	unsigned int estatus = 0;
732 	dma_addr_t addr;
733 
734 	status = fec16_to_cpu(bdp->cbd_sc);
735 	status &= ~BD_ENET_TX_STATS;
736 
737 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
738 
739 	if (((unsigned long) data) & fep->tx_align ||
740 		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
741 		memcpy(txq->tx_bounce[index], data, size);
742 		data = txq->tx_bounce[index];
743 
744 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
745 			swap_buffer(data, size);
746 	}
747 
748 	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
749 	if (dma_mapping_error(&fep->pdev->dev, addr)) {
750 		dev_kfree_skb_any(skb);
751 		if (net_ratelimit())
752 			netdev_err(ndev, "Tx DMA memory map failed\n");
753 		return NETDEV_TX_OK;
754 	}
755 
756 	bdp->cbd_datlen = cpu_to_fec16(size);
757 	bdp->cbd_bufaddr = cpu_to_fec32(addr);
758 
759 	if (fep->bufdesc_ex) {
760 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
761 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
762 		if (skb->ip_summed == CHECKSUM_PARTIAL)
763 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
764 		ebdp->cbd_bdu = 0;
765 		ebdp->cbd_esc = cpu_to_fec32(estatus);
766 	}
767 
768 	/* Handle the last BD specially */
769 	if (last_tcp)
770 		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
771 	if (is_last) {
772 		status |= BD_ENET_TX_INTR;
773 		if (fep->bufdesc_ex)
774 			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
775 	}
776 
777 	bdp->cbd_sc = cpu_to_fec16(status);
778 
779 	return 0;
780 }
781 
782 static int
783 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
784 			 struct sk_buff *skb, struct net_device *ndev,
785 			 struct bufdesc *bdp, int index)
786 {
787 	struct fec_enet_private *fep = netdev_priv(ndev);
788 	int hdr_len = skb_tcp_all_headers(skb);
789 	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
790 	void *bufaddr;
791 	unsigned long dmabuf;
792 	unsigned short status;
793 	unsigned int estatus = 0;
794 
795 	status = fec16_to_cpu(bdp->cbd_sc);
796 	status &= ~BD_ENET_TX_STATS;
797 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
798 
799 	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
800 	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
801 	if (((unsigned long)bufaddr) & fep->tx_align ||
802 		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
803 		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
804 		bufaddr = txq->tx_bounce[index];
805 
806 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
807 			swap_buffer(bufaddr, hdr_len);
808 
809 		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
810 					hdr_len, DMA_TO_DEVICE);
811 		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
812 			dev_kfree_skb_any(skb);
813 			if (net_ratelimit())
814 				netdev_err(ndev, "Tx DMA memory map failed\n");
815 			return NETDEV_TX_OK;
816 		}
817 	}
818 
819 	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
820 	bdp->cbd_datlen = cpu_to_fec16(hdr_len);
821 
822 	if (fep->bufdesc_ex) {
823 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
824 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
825 		if (skb->ip_summed == CHECKSUM_PARTIAL)
826 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
827 		ebdp->cbd_bdu = 0;
828 		ebdp->cbd_esc = cpu_to_fec32(estatus);
829 	}
830 
831 	bdp->cbd_sc = cpu_to_fec16(status);
832 
833 	return 0;
834 }
835 
836 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
837 				   struct sk_buff *skb,
838 				   struct net_device *ndev)
839 {
840 	struct fec_enet_private *fep = netdev_priv(ndev);
841 	int hdr_len, total_len, data_left;
842 	struct bufdesc *bdp = txq->bd.cur;
843 	struct tso_t tso;
844 	unsigned int index = 0;
845 	int ret;
846 
847 	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
848 		dev_kfree_skb_any(skb);
849 		if (net_ratelimit())
850 			netdev_err(ndev, "NOT enough BD for TSO!\n");
851 		return NETDEV_TX_OK;
852 	}
853 
854 	/* Protocol checksum off-load for TCP and UDP. */
855 	if (fec_enet_clear_csum(skb, ndev)) {
856 		dev_kfree_skb_any(skb);
857 		return NETDEV_TX_OK;
858 	}
859 
860 	/* Initialize the TSO handler, and prepare the first payload */
861 	hdr_len = tso_start(skb, &tso);
862 
863 	total_len = skb->len - hdr_len;
864 	while (total_len > 0) {
865 		char *hdr;
866 
867 		index = fec_enet_get_bd_index(bdp, &txq->bd);
868 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
869 		total_len -= data_left;
870 
871 		/* prepare packet headers: MAC + IP + TCP */
872 		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
873 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
874 		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
875 		if (ret)
876 			goto err_release;
877 
878 		while (data_left > 0) {
879 			int size;
880 
881 			size = min_t(int, tso.size, data_left);
882 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
883 			index = fec_enet_get_bd_index(bdp, &txq->bd);
884 			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
885 							bdp, index,
886 							tso.data, size,
887 							size == data_left,
888 							total_len == 0);
889 			if (ret)
890 				goto err_release;
891 
892 			data_left -= size;
893 			tso_build_data(skb, &tso, size);
894 		}
895 
896 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
897 	}
898 
899 	/* Save skb pointer */
900 	txq->tx_buf[index].buf_p = skb;
901 
902 	skb_tx_timestamp(skb);
903 	txq->bd.cur = bdp;
904 
905 	/* Trigger transmission start */
906 	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
907 	    !readl(txq->bd.reg_desc_active) ||
908 	    !readl(txq->bd.reg_desc_active) ||
909 	    !readl(txq->bd.reg_desc_active) ||
910 	    !readl(txq->bd.reg_desc_active))
911 		writel(0, txq->bd.reg_desc_active);
912 
913 	return 0;
914 
915 err_release:
916 	/* TODO: Release all used data descriptors for TSO */
917 	return ret;
918 }
919 
920 static netdev_tx_t
921 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
922 {
923 	struct fec_enet_private *fep = netdev_priv(ndev);
924 	int entries_free;
925 	unsigned short queue;
926 	struct fec_enet_priv_tx_q *txq;
927 	struct netdev_queue *nq;
928 	int ret;
929 
930 	queue = skb_get_queue_mapping(skb);
931 	txq = fep->tx_queue[queue];
932 	nq = netdev_get_tx_queue(ndev, queue);
933 
934 	if (skb_is_gso(skb))
935 		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
936 	else
937 		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
938 	if (ret)
939 		return ret;
940 
941 	entries_free = fec_enet_get_free_txdesc_num(txq);
942 	if (entries_free <= txq->tx_stop_threshold)
943 		netif_tx_stop_queue(nq);
944 
945 	return NETDEV_TX_OK;
946 }
947 
948 /* Init RX & TX buffer descriptors
949  */
950 static void fec_enet_bd_init(struct net_device *dev)
951 {
952 	struct fec_enet_private *fep = netdev_priv(dev);
953 	struct fec_enet_priv_tx_q *txq;
954 	struct fec_enet_priv_rx_q *rxq;
955 	struct bufdesc *bdp;
956 	unsigned int i;
957 	unsigned int q;
958 
959 	for (q = 0; q < fep->num_rx_queues; q++) {
960 		/* Initialize the receive buffer descriptors. */
961 		rxq = fep->rx_queue[q];
962 		bdp = rxq->bd.base;
963 
964 		for (i = 0; i < rxq->bd.ring_size; i++) {
965 
966 			/* Initialize the BD for every fragment in the page. */
967 			if (bdp->cbd_bufaddr)
968 				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
969 			else
970 				bdp->cbd_sc = cpu_to_fec16(0);
971 			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
972 		}
973 
974 		/* Set the last buffer to wrap */
975 		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
976 		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
977 
978 		rxq->bd.cur = rxq->bd.base;
979 	}
980 
981 	for (q = 0; q < fep->num_tx_queues; q++) {
982 		/* ...and the same for transmit */
983 		txq = fep->tx_queue[q];
984 		bdp = txq->bd.base;
985 		txq->bd.cur = bdp;
986 
987 		for (i = 0; i < txq->bd.ring_size; i++) {
988 			/* Initialize the BD for every fragment in the page. */
989 			bdp->cbd_sc = cpu_to_fec16(0);
990 			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
991 				if (bdp->cbd_bufaddr &&
992 				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
993 					dma_unmap_single(&fep->pdev->dev,
994 							 fec32_to_cpu(bdp->cbd_bufaddr),
995 							 fec16_to_cpu(bdp->cbd_datlen),
996 							 DMA_TO_DEVICE);
997 				if (txq->tx_buf[i].buf_p)
998 					dev_kfree_skb_any(txq->tx_buf[i].buf_p);
999 			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
1000 				if (bdp->cbd_bufaddr)
1001 					dma_unmap_single(&fep->pdev->dev,
1002 							 fec32_to_cpu(bdp->cbd_bufaddr),
1003 							 fec16_to_cpu(bdp->cbd_datlen),
1004 							 DMA_TO_DEVICE);
1005 
1006 				if (txq->tx_buf[i].buf_p)
1007 					xdp_return_frame(txq->tx_buf[i].buf_p);
1008 			} else {
1009 				struct page *page = txq->tx_buf[i].buf_p;
1010 
1011 				if (page)
1012 					page_pool_put_page(page->pp, page, 0, false);
1013 			}
1014 
1015 			txq->tx_buf[i].buf_p = NULL;
1016 			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
1017 			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
1018 			bdp->cbd_bufaddr = cpu_to_fec32(0);
1019 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1020 		}
1021 
1022 		/* Set the last buffer to wrap */
1023 		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
1024 		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
1025 		txq->dirty_tx = bdp;
1026 	}
1027 }
1028 
1029 static void fec_enet_active_rxring(struct net_device *ndev)
1030 {
1031 	struct fec_enet_private *fep = netdev_priv(ndev);
1032 	int i;
1033 
1034 	for (i = 0; i < fep->num_rx_queues; i++)
1035 		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
1036 }
1037 
1038 static void fec_enet_enable_ring(struct net_device *ndev)
1039 {
1040 	struct fec_enet_private *fep = netdev_priv(ndev);
1041 	struct fec_enet_priv_tx_q *txq;
1042 	struct fec_enet_priv_rx_q *rxq;
1043 	int i;
1044 
1045 	for (i = 0; i < fep->num_rx_queues; i++) {
1046 		rxq = fep->rx_queue[i];
1047 		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
1048 		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
1049 
1050 		/* enable DMA1/2 */
1051 		if (i)
1052 			writel(RCMR_MATCHEN | RCMR_CMP(i),
1053 			       fep->hwp + FEC_RCMR(i));
1054 	}
1055 
1056 	for (i = 0; i < fep->num_tx_queues; i++) {
1057 		txq = fep->tx_queue[i];
1058 		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
1059 
1060 		/* enable DMA1/2 */
1061 		if (i)
1062 			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
1063 			       fep->hwp + FEC_DMA_CFG(i));
1064 	}
1065 }
1066 
1067 /*
1068  * This function is called to start or restart the FEC during a link
1069  * change, transmit timeout, or to reconfigure the FEC.  The network
1070  * packet processing for this device must be stopped before this call.
1071  */
1072 static void
1073 fec_restart(struct net_device *ndev)
1074 {
1075 	struct fec_enet_private *fep = netdev_priv(ndev);
1076 	u32 temp_mac[2];
1077 	u32 rcntl = OPT_FRAME_SIZE | 0x04;
1078 	u32 ecntl = FEC_ECR_ETHEREN;
1079 
1080 	/* Whack a reset.  We should wait for this.
1081 	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
1082 	 * instead of reset MAC itself.
1083 	 */
1084 	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
1085 	    ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
1086 		writel(0, fep->hwp + FEC_ECNTRL);
1087 	} else {
1088 		writel(1, fep->hwp + FEC_ECNTRL);
1089 		udelay(10);
1090 	}
1091 
1092 	/*
1093 	 * enet-mac reset will reset mac address registers too,
1094 	 * so need to reconfigure it.
1095 	 */
1096 	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
1097 	writel((__force u32)cpu_to_be32(temp_mac[0]),
1098 	       fep->hwp + FEC_ADDR_LOW);
1099 	writel((__force u32)cpu_to_be32(temp_mac[1]),
1100 	       fep->hwp + FEC_ADDR_HIGH);
1101 
1102 	/* Clear any outstanding interrupt, except MDIO. */
1103 	writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
1104 
1105 	fec_enet_bd_init(ndev);
1106 
1107 	fec_enet_enable_ring(ndev);
1108 
1109 	/* Enable MII mode */
1110 	if (fep->full_duplex == DUPLEX_FULL) {
1111 		/* FD enable */
1112 		writel(0x04, fep->hwp + FEC_X_CNTRL);
1113 	} else {
1114 		/* No Rcv on Xmit */
1115 		rcntl |= 0x02;
1116 		writel(0x0, fep->hwp + FEC_X_CNTRL);
1117 	}
1118 
1119 	/* Set MII speed */
1120 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1121 
1122 #if !defined(CONFIG_M5272)
1123 	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1124 		u32 val = readl(fep->hwp + FEC_RACC);
1125 
1126 		/* align IP header */
1127 		val |= FEC_RACC_SHIFT16;
1128 		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
1129 			/* set RX checksum */
1130 			val |= FEC_RACC_OPTIONS;
1131 		else
1132 			val &= ~FEC_RACC_OPTIONS;
1133 		writel(val, fep->hwp + FEC_RACC);
1134 		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
1135 	}
1136 #endif
1137 
1138 	/*
1139 	 * The phy interface and speed need to get configured
1140 	 * differently on enet-mac.
1141 	 */
1142 	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1143 		/* Enable flow control and length check */
1144 		rcntl |= 0x40000000 | 0x00000020;
1145 
1146 		/* RGMII, RMII or MII */
1147 		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
1148 		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1149 		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
1150 		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
1151 			rcntl |= (1 << 6);
1152 		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1153 			rcntl |= FEC_RCR_RMII;
1154 		else
1155 			rcntl &= ~FEC_RCR_RMII;
1156 
1157 		/* 1G, 100M or 10M */
1158 		if (ndev->phydev) {
1159 			if (ndev->phydev->speed == SPEED_1000)
1160 				ecntl |= (1 << 5);
1161 			else if (ndev->phydev->speed == SPEED_100)
1162 				rcntl &= ~FEC_RCR_10BASET;
1163 			else
1164 				rcntl |= FEC_RCR_10BASET;
1165 		}
1166 	} else {
1167 #ifdef FEC_MIIGSK_ENR
1168 		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
1169 			u32 cfgr;
1170 			/* disable the gasket and wait */
1171 			writel(0, fep->hwp + FEC_MIIGSK_ENR);
1172 			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1173 				udelay(1);
1174 
1175 			/*
1176 			 * configure the gasket:
1177 			 *   RMII, 50 MHz, no loopback, no echo
1178 			 *   MII, 25 MHz, no loopback, no echo
1179 			 */
1180 			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1181 				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
1182 			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
1183 				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
1184 			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1185 
1186 			/* re-enable the gasket */
1187 			writel(2, fep->hwp + FEC_MIIGSK_ENR);
1188 		}
1189 #endif
1190 	}
1191 
1192 #if !defined(CONFIG_M5272)
1193 	/* enable pause frame*/
1194 	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
1195 	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1196 	     ndev->phydev && ndev->phydev->pause)) {
1197 		rcntl |= FEC_RCR_FLOWCTL;
1198 
1199 		/* set FIFO threshold parameter to reduce overrun */
1200 		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
1201 		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
1202 		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
1203 		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
1204 
1205 		/* OPD */
1206 		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
1207 	} else {
1208 		rcntl &= ~FEC_RCR_FLOWCTL;
1209 	}
1210 #endif /* !defined(CONFIG_M5272) */
1211 
1212 	writel(rcntl, fep->hwp + FEC_R_CNTRL);
1213 
1214 	/* Setup multicast filter. */
1215 	set_multicast_list(ndev);
1216 #ifndef CONFIG_M5272
1217 	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1218 	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1219 #endif
1220 
1221 	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1222 		/* enable ENET endian swap */
1223 		ecntl |= FEC_ECR_BYTESWP;
1224 		/* enable ENET store and forward mode */
1225 		writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
1226 	}
1227 
1228 	if (fep->bufdesc_ex)
1229 		ecntl |= FEC_ECR_EN1588;
1230 
1231 	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1232 	    fep->rgmii_txc_dly)
1233 		ecntl |= FEC_ENET_TXC_DLY;
1234 	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1235 	    fep->rgmii_rxc_dly)
1236 		ecntl |= FEC_ENET_RXC_DLY;
1237 
1238 #ifndef CONFIG_M5272
1239 	/* Enable the MIB statistic event counters */
1240 	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
1241 #endif
1242 
1243 	/* And last, enable the transmit and receive processing */
1244 	writel(ecntl, fep->hwp + FEC_ECNTRL);
1245 	fec_enet_active_rxring(ndev);
1246 
1247 	if (fep->bufdesc_ex)
1248 		fec_ptp_start_cyclecounter(ndev);
1249 
1250 	/* Enable interrupts we wish to service */
1251 	if (fep->link)
1252 		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1253 	else
1254 		writel(0, fep->hwp + FEC_IMASK);
1255 
1256 	/* Init the interrupt coalescing */
1257 	if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
1258 		fec_enet_itr_coal_set(ndev);
1259 }
1260 
1261 static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
1262 {
1263 	if (!(of_machine_is_compatible("fsl,imx8qm") ||
1264 	      of_machine_is_compatible("fsl,imx8qxp") ||
1265 	      of_machine_is_compatible("fsl,imx8dxl")))
1266 		return 0;
1267 
1268 	return imx_scu_get_handle(&fep->ipc_handle);
1269 }
1270 
1271 static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
1272 {
1273 	struct device_node *np = fep->pdev->dev.of_node;
1274 	u32 rsrc_id, val;
1275 	int idx;
1276 
1277 	if (!np || !fep->ipc_handle)
1278 		return;
1279 
1280 	idx = of_alias_get_id(np, "ethernet");
1281 	if (idx < 0)
1282 		idx = 0;
1283 	rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;
1284 
1285 	val = enabled ? 1 : 0;
1286 	imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
1287 }
1288 
1289 static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
1290 {
1291 	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
1292 	struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
1293 
1294 	if (stop_gpr->gpr) {
1295 		if (enabled)
1296 			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
1297 					   BIT(stop_gpr->bit),
1298 					   BIT(stop_gpr->bit));
1299 		else
1300 			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
1301 					   BIT(stop_gpr->bit), 0);
1302 	} else if (pdata && pdata->sleep_mode_enable) {
1303 		pdata->sleep_mode_enable(enabled);
1304 	} else {
1305 		fec_enet_ipg_stop_set(fep, enabled);
1306 	}
1307 }
1308 
1309 static void fec_irqs_disable(struct net_device *ndev)
1310 {
1311 	struct fec_enet_private *fep = netdev_priv(ndev);
1312 
1313 	writel(0, fep->hwp + FEC_IMASK);
1314 }
1315 
1316 static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
1317 {
1318 	struct fec_enet_private *fep = netdev_priv(ndev);
1319 
1320 	writel(0, fep->hwp + FEC_IMASK);
1321 	writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
1322 }
1323 
1324 static void
1325 fec_stop(struct net_device *ndev)
1326 {
1327 	struct fec_enet_private *fep = netdev_priv(ndev);
1328 	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
1329 	u32 val;
1330 
1331 	/* We cannot expect a graceful transmit stop without link !!! */
1332 	if (fep->link) {
1333 		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1334 		udelay(10);
1335 		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1336 			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
1337 	}
1338 
1339 	/* Whack a reset.  We should wait for this.
1340 	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
1341 	 * instead of reset MAC itself.
1342 	 */
1343 	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1344 		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
1345 			writel(0, fep->hwp + FEC_ECNTRL);
1346 		} else {
1347 			writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
1348 			udelay(10);
1349 		}
1350 	} else {
1351 		val = readl(fep->hwp + FEC_ECNTRL);
1352 		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
1353 		writel(val, fep->hwp + FEC_ECNTRL);
1354 	}
1355 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1356 	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1357 
1358 	/* We have to keep ENET enabled to have MII interrupt stay working */
1359 	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
1360 		!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1361 		writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
1362 		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
1363 	}
1364 
1365 	if (fep->bufdesc_ex) {
1366 		val = readl(fep->hwp + FEC_ECNTRL);
1367 		val |= FEC_ECR_EN1588;
1368 		writel(val, fep->hwp + FEC_ECNTRL);
1369 	}
1370 }
1371 
1372 static void
1373 fec_timeout(struct net_device *ndev, unsigned int txqueue)
1374 {
1375 	struct fec_enet_private *fep = netdev_priv(ndev);
1376 
1377 	fec_dump(ndev);
1378 
1379 	ndev->stats.tx_errors++;
1380 
1381 	schedule_work(&fep->tx_timeout_work);
1382 }
1383 
1384 static void fec_enet_timeout_work(struct work_struct *work)
1385 {
1386 	struct fec_enet_private *fep =
1387 		container_of(work, struct fec_enet_private, tx_timeout_work);
1388 	struct net_device *ndev = fep->netdev;
1389 
1390 	rtnl_lock();
1391 	if (netif_device_present(ndev) || netif_running(ndev)) {
1392 		napi_disable(&fep->napi);
1393 		netif_tx_lock_bh(ndev);
1394 		fec_restart(ndev);
1395 		netif_tx_wake_all_queues(ndev);
1396 		netif_tx_unlock_bh(ndev);
1397 		napi_enable(&fep->napi);
1398 	}
1399 	rtnl_unlock();
1400 }
1401 
1402 static void
1403 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
1404 	struct skb_shared_hwtstamps *hwtstamps)
1405 {
1406 	unsigned long flags;
1407 	u64 ns;
1408 
1409 	spin_lock_irqsave(&fep->tmreg_lock, flags);
1410 	ns = timecounter_cyc2time(&fep->tc, ts);
1411 	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
1412 
1413 	memset(hwtstamps, 0, sizeof(*hwtstamps));
1414 	hwtstamps->hwtstamp = ns_to_ktime(ns);
1415 }
1416 
1417 static void
1418 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
1419 {
1420 	struct	fec_enet_private *fep;
1421 	struct xdp_frame *xdpf;
1422 	struct bufdesc *bdp;
1423 	unsigned short status;
1424 	struct	sk_buff	*skb;
1425 	struct fec_enet_priv_tx_q *txq;
1426 	struct netdev_queue *nq;
1427 	int	index = 0;
1428 	int	entries_free;
1429 	struct page *page;
1430 	int frame_len;
1431 
1432 	fep = netdev_priv(ndev);
1433 
1434 	txq = fep->tx_queue[queue_id];
1435 	/* get next bdp of dirty_tx */
1436 	nq = netdev_get_tx_queue(ndev, queue_id);
1437 	bdp = txq->dirty_tx;
1438 
1439 	/* get next bdp of dirty_tx */
1440 	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1441 
1442 	while (bdp != READ_ONCE(txq->bd.cur)) {
1443 		/* Order the load of bd.cur and cbd_sc */
1444 		rmb();
1445 		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
1446 		if (status & BD_ENET_TX_READY)
1447 			break;
1448 
1449 		index = fec_enet_get_bd_index(bdp, &txq->bd);
1450 
1451 		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
1452 			skb = txq->tx_buf[index].buf_p;
1453 			if (bdp->cbd_bufaddr &&
1454 			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1455 				dma_unmap_single(&fep->pdev->dev,
1456 						 fec32_to_cpu(bdp->cbd_bufaddr),
1457 						 fec16_to_cpu(bdp->cbd_datlen),
1458 						 DMA_TO_DEVICE);
1459 			bdp->cbd_bufaddr = cpu_to_fec32(0);
1460 			if (!skb)
1461 				goto tx_buf_done;
1462 		} else {
1463 			/* Tx processing cannot call any XDP (or page pool) APIs if
1464 			 * the "budget" is 0. Because NAPI is called with budget of
1465 			 * 0 (such as netpoll) indicates we may be in an IRQ context,
1466 			 * however, we can't use the page pool from IRQ context.
1467 			 */
1468 			if (unlikely(!budget))
1469 				break;
1470 
1471 			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
1472 				xdpf = txq->tx_buf[index].buf_p;
1473 				if (bdp->cbd_bufaddr)
1474 					dma_unmap_single(&fep->pdev->dev,
1475 							 fec32_to_cpu(bdp->cbd_bufaddr),
1476 							 fec16_to_cpu(bdp->cbd_datlen),
1477 							 DMA_TO_DEVICE);
1478 			} else {
1479 				page = txq->tx_buf[index].buf_p;
1480 			}
1481 
1482 			bdp->cbd_bufaddr = cpu_to_fec32(0);
1483 			if (unlikely(!txq->tx_buf[index].buf_p)) {
1484 				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1485 				goto tx_buf_done;
1486 			}
1487 
1488 			frame_len = fec16_to_cpu(bdp->cbd_datlen);
1489 		}
1490 
1491 		/* Check for errors. */
1492 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
1493 				   BD_ENET_TX_RL | BD_ENET_TX_UN |
1494 				   BD_ENET_TX_CSL)) {
1495 			ndev->stats.tx_errors++;
1496 			if (status & BD_ENET_TX_HB)  /* No heartbeat */
1497 				ndev->stats.tx_heartbeat_errors++;
1498 			if (status & BD_ENET_TX_LC)  /* Late collision */
1499 				ndev->stats.tx_window_errors++;
1500 			if (status & BD_ENET_TX_RL)  /* Retrans limit */
1501 				ndev->stats.tx_aborted_errors++;
1502 			if (status & BD_ENET_TX_UN)  /* Underrun */
1503 				ndev->stats.tx_fifo_errors++;
1504 			if (status & BD_ENET_TX_CSL) /* Carrier lost */
1505 				ndev->stats.tx_carrier_errors++;
1506 		} else {
1507 			ndev->stats.tx_packets++;
1508 
1509 			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
1510 				ndev->stats.tx_bytes += skb->len;
1511 			else
1512 				ndev->stats.tx_bytes += frame_len;
1513 		}
1514 
1515 		/* Deferred means some collisions occurred during transmit,
1516 		 * but we eventually sent the packet OK.
1517 		 */
1518 		if (status & BD_ENET_TX_DEF)
1519 			ndev->stats.collisions++;
1520 
1521 		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
1522 			/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
1523 			 * are to time stamp the packet, so we still need to check time
1524 			 * stamping enabled flag.
1525 			 */
1526 			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
1527 				     fep->hwts_tx_en) && fep->bufdesc_ex) {
1528 				struct skb_shared_hwtstamps shhwtstamps;
1529 				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1530 
1531 				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1532 				skb_tstamp_tx(skb, &shhwtstamps);
1533 			}
1534 
1535 			/* Free the sk buffer associated with this last transmit */
1536 			napi_consume_skb(skb, budget);
1537 		} else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
1538 			xdp_return_frame_rx_napi(xdpf);
1539 		} else { /* recycle pages of XDP_TX frames */
1540 			/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
1541 			page_pool_put_page(page->pp, page, 0, true);
1542 		}
1543 
1544 		txq->tx_buf[index].buf_p = NULL;
1545 		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
1546 		txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1547 
1548 tx_buf_done:
1549 		/* Make sure the update to bdp and tx_buf are performed
1550 		 * before dirty_tx
1551 		 */
1552 		wmb();
1553 		txq->dirty_tx = bdp;
1554 
1555 		/* Update pointer to next buffer descriptor to be transmitted */
1556 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1557 
1558 		/* Since we have freed up a buffer, the ring is no longer full
1559 		 */
1560 		if (netif_tx_queue_stopped(nq)) {
1561 			entries_free = fec_enet_get_free_txdesc_num(txq);
1562 			if (entries_free >= txq->tx_wake_threshold)
1563 				netif_tx_wake_queue(nq);
1564 		}
1565 	}
1566 
1567 	/* ERR006358: Keep the transmitter going */
1568 	if (bdp != txq->bd.cur &&
1569 	    readl(txq->bd.reg_desc_active) == 0)
1570 		writel(0, txq->bd.reg_desc_active);
1571 }
1572 
1573 static void fec_enet_tx(struct net_device *ndev, int budget)
1574 {
1575 	struct fec_enet_private *fep = netdev_priv(ndev);
1576 	int i;
1577 
1578 	/* Make sure that AVB queues are processed first. */
1579 	for (i = fep->num_tx_queues - 1; i >= 0; i--)
1580 		fec_enet_tx_queue(ndev, i, budget);
1581 }
1582 
1583 static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
1584 				struct bufdesc *bdp, int index)
1585 {
1586 	struct page *new_page;
1587 	dma_addr_t phys_addr;
1588 
1589 	new_page = page_pool_dev_alloc_pages(rxq->page_pool);
1590 	WARN_ON(!new_page);
1591 	rxq->rx_skb_info[index].page = new_page;
1592 
1593 	rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
1594 	phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
1595 	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
1596 }
1597 
1598 static u32
1599 fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
1600 		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
1601 {
1602 	unsigned int sync, len = xdp->data_end - xdp->data;
1603 	u32 ret = FEC_ENET_XDP_PASS;
1604 	struct page *page;
1605 	int err;
1606 	u32 act;
1607 
1608 	act = bpf_prog_run_xdp(prog, xdp);
1609 
1610 	/* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover
1611 	 * max len CPU touch
1612 	 */
1613 	sync = xdp->data_end - xdp->data;
1614 	sync = max(sync, len);
1615 
1616 	switch (act) {
1617 	case XDP_PASS:
1618 		rxq->stats[RX_XDP_PASS]++;
1619 		ret = FEC_ENET_XDP_PASS;
1620 		break;
1621 
1622 	case XDP_REDIRECT:
1623 		rxq->stats[RX_XDP_REDIRECT]++;
1624 		err = xdp_do_redirect(fep->netdev, xdp, prog);
1625 		if (unlikely(err))
1626 			goto xdp_err;
1627 
1628 		ret = FEC_ENET_XDP_REDIR;
1629 		break;
1630 
1631 	case XDP_TX:
1632 		rxq->stats[RX_XDP_TX]++;
1633 		err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
1634 		if (unlikely(err)) {
1635 			rxq->stats[RX_XDP_TX_ERRORS]++;
1636 			goto xdp_err;
1637 		}
1638 
1639 		ret = FEC_ENET_XDP_TX;
1640 		break;
1641 
1642 	default:
1643 		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
1644 		fallthrough;
1645 
1646 	case XDP_ABORTED:
1647 		fallthrough;    /* handle aborts by dropping packet */
1648 
1649 	case XDP_DROP:
1650 		rxq->stats[RX_XDP_DROP]++;
1651 xdp_err:
1652 		ret = FEC_ENET_XDP_CONSUMED;
1653 		page = virt_to_head_page(xdp->data);
1654 		page_pool_put_page(rxq->page_pool, page, sync, true);
1655 		if (act != XDP_DROP)
1656 			trace_xdp_exception(fep->netdev, prog, act);
1657 		break;
1658 	}
1659 
1660 	return ret;
1661 }
1662 
1663 /* During a receive, the bd_rx.cur points to the current incoming buffer.
1664  * When we update through the ring, if the next incoming buffer has
1665  * not been given to the system, we just set the empty indicator,
1666  * effectively tossing the packet.
1667  */
1668 static int
1669 fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1670 {
1671 	struct fec_enet_private *fep = netdev_priv(ndev);
1672 	struct fec_enet_priv_rx_q *rxq;
1673 	struct bufdesc *bdp;
1674 	unsigned short status;
1675 	struct  sk_buff *skb;
1676 	ushort	pkt_len;
1677 	__u8 *data;
1678 	int	pkt_received = 0;
1679 	struct	bufdesc_ex *ebdp = NULL;
1680 	bool	vlan_packet_rcvd = false;
1681 	u16	vlan_tag;
1682 	int	index = 0;
1683 	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
1684 	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
1685 	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
1686 	u32 data_start = FEC_ENET_XDP_HEADROOM;
1687 	int cpu = smp_processor_id();
1688 	struct xdp_buff xdp;
1689 	struct page *page;
1690 	u32 sub_len = 4;
1691 
1692 #if !defined(CONFIG_M5272)
1693 	/*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of
1694 	 * FEC_RACC_SHIFT16 is set by default in the probe function.
1695 	 */
1696 	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1697 		data_start += 2;
1698 		sub_len += 2;
1699 	}
1700 #endif
1701 
1702 #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
1703 	/*
1704 	 * Hacky flush of all caches instead of using the DMA API for the TSO
1705 	 * headers.
1706 	 */
1707 	flush_cache_all();
1708 #endif
1709 	rxq = fep->rx_queue[queue_id];
1710 
1711 	/* First, grab all of the stats for the incoming packet.
1712 	 * These get messed up if we get called due to a busy condition.
1713 	 */
1714 	bdp = rxq->bd.cur;
1715 	xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
1716 
1717 	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
1718 
1719 		if (pkt_received >= budget)
1720 			break;
1721 		pkt_received++;
1722 
1723 		writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
1724 
1725 		/* Check for errors. */
1726 		status ^= BD_ENET_RX_LAST;
1727 		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
1728 			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
1729 			   BD_ENET_RX_CL)) {
1730 			ndev->stats.rx_errors++;
1731 			if (status & BD_ENET_RX_OV) {
1732 				/* FIFO overrun */
1733 				ndev->stats.rx_fifo_errors++;
1734 				goto rx_processing_done;
1735 			}
1736 			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
1737 						| BD_ENET_RX_LAST)) {
1738 				/* Frame too long or too short. */
1739 				ndev->stats.rx_length_errors++;
1740 				if (status & BD_ENET_RX_LAST)
1741 					netdev_err(ndev, "rcv is not +last\n");
1742 			}
1743 			if (status & BD_ENET_RX_CR)	/* CRC Error */
1744 				ndev->stats.rx_crc_errors++;
1745 			/* Report late collisions as a frame error. */
1746 			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
1747 				ndev->stats.rx_frame_errors++;
1748 			goto rx_processing_done;
1749 		}
1750 
1751 		/* Process the incoming frame. */
1752 		ndev->stats.rx_packets++;
1753 		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
1754 		ndev->stats.rx_bytes += pkt_len;
1755 
1756 		index = fec_enet_get_bd_index(bdp, &rxq->bd);
1757 		page = rxq->rx_skb_info[index].page;
1758 		dma_sync_single_for_cpu(&fep->pdev->dev,
1759 					fec32_to_cpu(bdp->cbd_bufaddr),
1760 					pkt_len,
1761 					DMA_FROM_DEVICE);
1762 		prefetch(page_address(page));
1763 		fec_enet_update_cbd(rxq, bdp, index);
1764 
1765 		if (xdp_prog) {
1766 			xdp_buff_clear_frags_flag(&xdp);
1767 			/* subtract 16bit shift and FCS */
1768 			xdp_prepare_buff(&xdp, page_address(page),
1769 					 data_start, pkt_len - sub_len, false);
1770 			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
1771 			xdp_result |= ret;
1772 			if (ret != FEC_ENET_XDP_PASS)
1773 				goto rx_processing_done;
1774 		}
1775 
1776 		/* The packet length includes FCS, but we don't want to
1777 		 * include that when passing upstream as it messes up
1778 		 * bridging applications.
1779 		 */
1780 		skb = build_skb(page_address(page), PAGE_SIZE);
1781 		if (unlikely(!skb)) {
1782 			page_pool_recycle_direct(rxq->page_pool, page);
1783 			ndev->stats.rx_dropped++;
1784 
1785 			netdev_err_once(ndev, "build_skb failed!\n");
1786 			goto rx_processing_done;
1787 		}
1788 
1789 		skb_reserve(skb, data_start);
1790 		skb_put(skb, pkt_len - sub_len);
1791 		skb_mark_for_recycle(skb);
1792 
1793 		if (unlikely(need_swap)) {
1794 			data = page_address(page) + FEC_ENET_XDP_HEADROOM;
1795 			swap_buffer(data, pkt_len);
1796 		}
1797 		data = skb->data;
1798 
1799 		/* Extract the enhanced buffer descriptor */
1800 		ebdp = NULL;
1801 		if (fep->bufdesc_ex)
1802 			ebdp = (struct bufdesc_ex *)bdp;
1803 
1804 		/* If this is a VLAN packet remove the VLAN Tag */
1805 		vlan_packet_rcvd = false;
1806 		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1807 		    fep->bufdesc_ex &&
1808 		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
1809 			/* Push and remove the vlan tag */
1810 			struct vlan_hdr *vlan_header =
1811 					(struct vlan_hdr *) (data + ETH_HLEN);
1812 			vlan_tag = ntohs(vlan_header->h_vlan_TCI);
1813 
1814 			vlan_packet_rcvd = true;
1815 
1816 			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
1817 			skb_pull(skb, VLAN_HLEN);
1818 		}
1819 
1820 		skb->protocol = eth_type_trans(skb, ndev);
1821 
1822 		/* Get receive timestamp from the skb */
1823 		if (fep->hwts_rx_en && fep->bufdesc_ex)
1824 			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1825 					  skb_hwtstamps(skb));
1826 
1827 		if (fep->bufdesc_ex &&
1828 		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1829 			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
1830 				/* don't check it */
1831 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1832 			} else {
1833 				skb_checksum_none_assert(skb);
1834 			}
1835 		}
1836 
1837 		/* Handle received VLAN packets */
1838 		if (vlan_packet_rcvd)
1839 			__vlan_hwaccel_put_tag(skb,
1840 					       htons(ETH_P_8021Q),
1841 					       vlan_tag);
1842 
1843 		skb_record_rx_queue(skb, queue_id);
1844 		napi_gro_receive(&fep->napi, skb);
1845 
1846 rx_processing_done:
1847 		/* Clear the status flags for this buffer */
1848 		status &= ~BD_ENET_RX_STATS;
1849 
1850 		/* Mark the buffer empty */
1851 		status |= BD_ENET_RX_EMPTY;
1852 
1853 		if (fep->bufdesc_ex) {
1854 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1855 
1856 			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1857 			ebdp->cbd_prot = 0;
1858 			ebdp->cbd_bdu = 0;
1859 		}
1860 		/* Make sure the updates to rest of the descriptor are
1861 		 * performed before transferring ownership.
1862 		 */
1863 		wmb();
1864 		bdp->cbd_sc = cpu_to_fec16(status);
1865 
1866 		/* Update BD pointer to next entry */
1867 		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1868 
1869 		/* Doing this here will keep the FEC running while we process
1870 		 * incoming frames.  On a heavily loaded network, we should be
1871 		 * able to keep up at the expense of system resources.
1872 		 */
1873 		writel(0, rxq->bd.reg_desc_active);
1874 	}
1875 	rxq->bd.cur = bdp;
1876 
1877 	if (xdp_result & FEC_ENET_XDP_REDIR)
1878 		xdp_do_flush();
1879 
1880 	return pkt_received;
1881 }
1882 
1883 static int fec_enet_rx(struct net_device *ndev, int budget)
1884 {
1885 	struct fec_enet_private *fep = netdev_priv(ndev);
1886 	int i, done = 0;
1887 
1888 	/* Make sure that AVB queues are processed first. */
1889 	for (i = fep->num_rx_queues - 1; i >= 0; i--)
1890 		done += fec_enet_rx_queue(ndev, budget - done, i);
1891 
1892 	return done;
1893 }
1894 
1895 static bool fec_enet_collect_events(struct fec_enet_private *fep)
1896 {
1897 	uint int_events;
1898 
1899 	int_events = readl(fep->hwp + FEC_IEVENT);
1900 
1901 	/* Don't clear MDIO events, we poll for those */
1902 	int_events &= ~FEC_ENET_MII;
1903 
1904 	writel(int_events, fep->hwp + FEC_IEVENT);
1905 
1906 	return int_events != 0;
1907 }
1908 
1909 static irqreturn_t
1910 fec_enet_interrupt(int irq, void *dev_id)
1911 {
1912 	struct net_device *ndev = dev_id;
1913 	struct fec_enet_private *fep = netdev_priv(ndev);
1914 	irqreturn_t ret = IRQ_NONE;
1915 
1916 	if (fec_enet_collect_events(fep) && fep->link) {
1917 		ret = IRQ_HANDLED;
1918 
1919 		if (napi_schedule_prep(&fep->napi)) {
1920 			/* Disable interrupts */
1921 			writel(0, fep->hwp + FEC_IMASK);
1922 			__napi_schedule(&fep->napi);
1923 		}
1924 	}
1925 
1926 	return ret;
1927 }
1928 
1929 static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
1930 {
1931 	struct net_device *ndev = napi->dev;
1932 	struct fec_enet_private *fep = netdev_priv(ndev);
1933 	int done = 0;
1934 
1935 	do {
1936 		done += fec_enet_rx(ndev, budget - done);
1937 		fec_enet_tx(ndev, budget);
1938 	} while ((done < budget) && fec_enet_collect_events(fep));
1939 
1940 	if (done < budget) {
1941 		napi_complete_done(napi, done);
1942 		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1943 	}
1944 
1945 	return done;
1946 }
1947 
1948 /* ------------------------------------------------------------------------- */
1949 static int fec_get_mac(struct net_device *ndev)
1950 {
1951 	struct fec_enet_private *fep = netdev_priv(ndev);
1952 	unsigned char *iap, tmpaddr[ETH_ALEN];
1953 	int ret;
1954 
1955 	/*
1956 	 * try to get mac address in following order:
1957 	 *
1958 	 * 1) module parameter via kernel command line in form
1959 	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
1960 	 */
1961 	iap = macaddr;
1962 
1963 	/*
1964 	 * 2) from device tree data
1965 	 */
1966 	if (!is_valid_ether_addr(iap)) {
1967 		struct device_node *np = fep->pdev->dev.of_node;
1968 		if (np) {
1969 			ret = of_get_mac_address(np, tmpaddr);
1970 			if (!ret)
1971 				iap = tmpaddr;
1972 			else if (ret == -EPROBE_DEFER)
1973 				return ret;
1974 		}
1975 	}
1976 
1977 	/*
1978 	 * 3) from flash or fuse (via platform data)
1979 	 */
1980 	if (!is_valid_ether_addr(iap)) {
1981 #ifdef CONFIG_M5272
1982 		if (FEC_FLASHMAC)
1983 			iap = (unsigned char *)FEC_FLASHMAC;
1984 #else
1985 		struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
1986 
1987 		if (pdata)
1988 			iap = (unsigned char *)&pdata->mac;
1989 #endif
1990 	}
1991 
1992 	/*
1993 	 * 4) FEC mac registers set by bootloader
1994 	 */
1995 	if (!is_valid_ether_addr(iap)) {
1996 		*((__be32 *) &tmpaddr[0]) =
1997 			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
1998 		*((__be16 *) &tmpaddr[4]) =
1999 			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
2000 		iap = &tmpaddr[0];
2001 	}
2002 
2003 	/*
2004 	 * 5) random mac address
2005 	 */
2006 	if (!is_valid_ether_addr(iap)) {
2007 		/* Report it and use a random ethernet address instead */
2008 		dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
2009 		eth_hw_addr_random(ndev);
2010 		dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
2011 			 ndev->dev_addr);
2012 		return 0;
2013 	}
2014 
2015 	/* Adjust MAC if using macaddr */
2016 	eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
2017 
2018 	return 0;
2019 }
2020 
2021 /* ------------------------------------------------------------------------- */
2022 
2023 /*
2024  * Phy section
2025  */
2026 
2027 /* LPI Sleep Ts count base on tx clk (clk_ref).
2028  * The lpi sleep cnt value = X us / (cycle_ns).
2029  */
2030 static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
2031 {
2032 	struct fec_enet_private *fep = netdev_priv(ndev);
2033 
2034 	return us * (fep->clk_ref_rate / 1000) / 1000;
2035 }
2036 
2037 static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
2038 {
2039 	struct fec_enet_private *fep = netdev_priv(ndev);
2040 	struct ethtool_keee *p = &fep->eee;
2041 	unsigned int sleep_cycle, wake_cycle;
2042 
2043 	if (enable) {
2044 		sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
2045 		wake_cycle = sleep_cycle;
2046 	} else {
2047 		sleep_cycle = 0;
2048 		wake_cycle = 0;
2049 	}
2050 
2051 	writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
2052 	writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
2053 
2054 	return 0;
2055 }
2056 
2057 static void fec_enet_adjust_link(struct net_device *ndev)
2058 {
2059 	struct fec_enet_private *fep = netdev_priv(ndev);
2060 	struct phy_device *phy_dev = ndev->phydev;
2061 	int status_change = 0;
2062 
2063 	/*
2064 	 * If the netdev is down, or is going down, we're not interested
2065 	 * in link state events, so just mark our idea of the link as down
2066 	 * and ignore the event.
2067 	 */
2068 	if (!netif_running(ndev) || !netif_device_present(ndev)) {
2069 		fep->link = 0;
2070 	} else if (phy_dev->link) {
2071 		if (!fep->link) {
2072 			fep->link = phy_dev->link;
2073 			status_change = 1;
2074 		}
2075 
2076 		if (fep->full_duplex != phy_dev->duplex) {
2077 			fep->full_duplex = phy_dev->duplex;
2078 			status_change = 1;
2079 		}
2080 
2081 		if (phy_dev->speed != fep->speed) {
2082 			fep->speed = phy_dev->speed;
2083 			status_change = 1;
2084 		}
2085 
2086 		/* if any of the above changed restart the FEC */
2087 		if (status_change) {
2088 			netif_stop_queue(ndev);
2089 			napi_disable(&fep->napi);
2090 			netif_tx_lock_bh(ndev);
2091 			fec_restart(ndev);
2092 			netif_tx_wake_all_queues(ndev);
2093 			netif_tx_unlock_bh(ndev);
2094 			napi_enable(&fep->napi);
2095 		}
2096 		if (fep->quirks & FEC_QUIRK_HAS_EEE)
2097 			fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi);
2098 	} else {
2099 		if (fep->link) {
2100 			netif_stop_queue(ndev);
2101 			napi_disable(&fep->napi);
2102 			netif_tx_lock_bh(ndev);
2103 			fec_stop(ndev);
2104 			netif_tx_unlock_bh(ndev);
2105 			napi_enable(&fep->napi);
2106 			fep->link = phy_dev->link;
2107 			status_change = 1;
2108 		}
2109 	}
2110 
2111 	if (status_change)
2112 		phy_print_status(phy_dev);
2113 }
2114 
2115 static int fec_enet_mdio_wait(struct fec_enet_private *fep)
2116 {
2117 	uint ievent;
2118 	int ret;
2119 
2120 	ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
2121 					ievent & FEC_ENET_MII, 2, 30000);
2122 
2123 	if (!ret)
2124 		writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2125 
2126 	return ret;
2127 }
2128 
2129 static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
2130 {
2131 	struct fec_enet_private *fep = bus->priv;
2132 	struct device *dev = &fep->pdev->dev;
2133 	int ret = 0, frame_start, frame_addr, frame_op;
2134 
2135 	ret = pm_runtime_resume_and_get(dev);
2136 	if (ret < 0)
2137 		return ret;
2138 
2139 	/* C22 read */
2140 	frame_op = FEC_MMFR_OP_READ;
2141 	frame_start = FEC_MMFR_ST;
2142 	frame_addr = regnum;
2143 
2144 	/* start a read op */
2145 	writel(frame_start | frame_op |
2146 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
2147 	       FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
2148 
2149 	/* wait for end of transfer */
2150 	ret = fec_enet_mdio_wait(fep);
2151 	if (ret) {
2152 		netdev_err(fep->netdev, "MDIO read timeout\n");
2153 		goto out;
2154 	}
2155 
2156 	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
2157 
2158 out:
2159 	pm_runtime_mark_last_busy(dev);
2160 	pm_runtime_put_autosuspend(dev);
2161 
2162 	return ret;
2163 }
2164 
2165 static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id,
2166 				  int devad, int regnum)
2167 {
2168 	struct fec_enet_private *fep = bus->priv;
2169 	struct device *dev = &fep->pdev->dev;
2170 	int ret = 0, frame_start, frame_op;
2171 
2172 	ret = pm_runtime_resume_and_get(dev);
2173 	if (ret < 0)
2174 		return ret;
2175 
2176 	frame_start = FEC_MMFR_ST_C45;
2177 
2178 	/* write address */
2179 	writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
2180 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2181 	       FEC_MMFR_TA | (regnum & 0xFFFF),
2182 	       fep->hwp + FEC_MII_DATA);
2183 
2184 	/* wait for end of transfer */
2185 	ret = fec_enet_mdio_wait(fep);
2186 	if (ret) {
2187 		netdev_err(fep->netdev, "MDIO address write timeout\n");
2188 		goto out;
2189 	}
2190 
2191 	frame_op = FEC_MMFR_OP_READ_C45;
2192 
2193 	/* start a read op */
2194 	writel(frame_start | frame_op |
2195 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2196 	       FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
2197 
2198 	/* wait for end of transfer */
2199 	ret = fec_enet_mdio_wait(fep);
2200 	if (ret) {
2201 		netdev_err(fep->netdev, "MDIO read timeout\n");
2202 		goto out;
2203 	}
2204 
2205 	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
2206 
2207 out:
2208 	pm_runtime_mark_last_busy(dev);
2209 	pm_runtime_put_autosuspend(dev);
2210 
2211 	return ret;
2212 }
2213 
2214 static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
2215 				   u16 value)
2216 {
2217 	struct fec_enet_private *fep = bus->priv;
2218 	struct device *dev = &fep->pdev->dev;
2219 	int ret, frame_start, frame_addr;
2220 
2221 	ret = pm_runtime_resume_and_get(dev);
2222 	if (ret < 0)
2223 		return ret;
2224 
2225 	/* C22 write */
2226 	frame_start = FEC_MMFR_ST;
2227 	frame_addr = regnum;
2228 
2229 	/* start a write op */
2230 	writel(frame_start | FEC_MMFR_OP_WRITE |
2231 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
2232 	       FEC_MMFR_TA | FEC_MMFR_DATA(value),
2233 	       fep->hwp + FEC_MII_DATA);
2234 
2235 	/* wait for end of transfer */
2236 	ret = fec_enet_mdio_wait(fep);
2237 	if (ret)
2238 		netdev_err(fep->netdev, "MDIO write timeout\n");
2239 
2240 	pm_runtime_mark_last_busy(dev);
2241 	pm_runtime_put_autosuspend(dev);
2242 
2243 	return ret;
2244 }
2245 
2246 static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id,
2247 				   int devad, int regnum, u16 value)
2248 {
2249 	struct fec_enet_private *fep = bus->priv;
2250 	struct device *dev = &fep->pdev->dev;
2251 	int ret, frame_start;
2252 
2253 	ret = pm_runtime_resume_and_get(dev);
2254 	if (ret < 0)
2255 		return ret;
2256 
2257 	frame_start = FEC_MMFR_ST_C45;
2258 
2259 	/* write address */
2260 	writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
2261 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2262 	       FEC_MMFR_TA | (regnum & 0xFFFF),
2263 	       fep->hwp + FEC_MII_DATA);
2264 
2265 	/* wait for end of transfer */
2266 	ret = fec_enet_mdio_wait(fep);
2267 	if (ret) {
2268 		netdev_err(fep->netdev, "MDIO address write timeout\n");
2269 		goto out;
2270 	}
2271 
2272 	/* start a write op */
2273 	writel(frame_start | FEC_MMFR_OP_WRITE |
2274 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2275 	       FEC_MMFR_TA | FEC_MMFR_DATA(value),
2276 	       fep->hwp + FEC_MII_DATA);
2277 
2278 	/* wait for end of transfer */
2279 	ret = fec_enet_mdio_wait(fep);
2280 	if (ret)
2281 		netdev_err(fep->netdev, "MDIO write timeout\n");
2282 
2283 out:
2284 	pm_runtime_mark_last_busy(dev);
2285 	pm_runtime_put_autosuspend(dev);
2286 
2287 	return ret;
2288 }
2289 
2290 static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
2291 {
2292 	struct fec_enet_private *fep = netdev_priv(ndev);
2293 	struct phy_device *phy_dev = ndev->phydev;
2294 
2295 	if (phy_dev) {
2296 		phy_reset_after_clk_enable(phy_dev);
2297 	} else if (fep->phy_node) {
2298 		/*
2299 		 * If the PHY still is not bound to the MAC, but there is
2300 		 * OF PHY node and a matching PHY device instance already,
2301 		 * use the OF PHY node to obtain the PHY device instance,
2302 		 * and then use that PHY device instance when triggering
2303 		 * the PHY reset.
2304 		 */
2305 		phy_dev = of_phy_find_device(fep->phy_node);
2306 		phy_reset_after_clk_enable(phy_dev);
2307 		put_device(&phy_dev->mdio.dev);
2308 	}
2309 }
2310 
2311 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
2312 {
2313 	struct fec_enet_private *fep = netdev_priv(ndev);
2314 	int ret;
2315 
2316 	if (enable) {
2317 		ret = clk_prepare_enable(fep->clk_enet_out);
2318 		if (ret)
2319 			return ret;
2320 
2321 		if (fep->clk_ptp) {
2322 			mutex_lock(&fep->ptp_clk_mutex);
2323 			ret = clk_prepare_enable(fep->clk_ptp);
2324 			if (ret) {
2325 				mutex_unlock(&fep->ptp_clk_mutex);
2326 				goto failed_clk_ptp;
2327 			} else {
2328 				fep->ptp_clk_on = true;
2329 			}
2330 			mutex_unlock(&fep->ptp_clk_mutex);
2331 		}
2332 
2333 		ret = clk_prepare_enable(fep->clk_ref);
2334 		if (ret)
2335 			goto failed_clk_ref;
2336 
2337 		ret = clk_prepare_enable(fep->clk_2x_txclk);
2338 		if (ret)
2339 			goto failed_clk_2x_txclk;
2340 
2341 		fec_enet_phy_reset_after_clk_enable(ndev);
2342 	} else {
2343 		clk_disable_unprepare(fep->clk_enet_out);
2344 		if (fep->clk_ptp) {
2345 			mutex_lock(&fep->ptp_clk_mutex);
2346 			clk_disable_unprepare(fep->clk_ptp);
2347 			fep->ptp_clk_on = false;
2348 			mutex_unlock(&fep->ptp_clk_mutex);
2349 		}
2350 		clk_disable_unprepare(fep->clk_ref);
2351 		clk_disable_unprepare(fep->clk_2x_txclk);
2352 	}
2353 
2354 	return 0;
2355 
2356 failed_clk_2x_txclk:
2357 	if (fep->clk_ref)
2358 		clk_disable_unprepare(fep->clk_ref);
2359 failed_clk_ref:
2360 	if (fep->clk_ptp) {
2361 		mutex_lock(&fep->ptp_clk_mutex);
2362 		clk_disable_unprepare(fep->clk_ptp);
2363 		fep->ptp_clk_on = false;
2364 		mutex_unlock(&fep->ptp_clk_mutex);
2365 	}
2366 failed_clk_ptp:
2367 	clk_disable_unprepare(fep->clk_enet_out);
2368 
2369 	return ret;
2370 }
2371 
2372 static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
2373 				      struct device_node *np)
2374 {
2375 	u32 rgmii_tx_delay, rgmii_rx_delay;
2376 
2377 	/* For rgmii tx internal delay, valid values are 0ps and 2000ps */
2378 	if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) {
2379 		if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) {
2380 			dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps");
2381 			return -EINVAL;
2382 		} else if (rgmii_tx_delay == 2000) {
2383 			fep->rgmii_txc_dly = true;
2384 		}
2385 	}
2386 
2387 	/* For rgmii rx internal delay, valid values are 0ps and 2000ps */
2388 	if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) {
2389 		if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) {
2390 			dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps");
2391 			return -EINVAL;
2392 		} else if (rgmii_rx_delay == 2000) {
2393 			fep->rgmii_rxc_dly = true;
2394 		}
2395 	}
2396 
2397 	return 0;
2398 }
2399 
2400 static int fec_enet_mii_probe(struct net_device *ndev)
2401 {
2402 	struct fec_enet_private *fep = netdev_priv(ndev);
2403 	struct phy_device *phy_dev = NULL;
2404 	char mdio_bus_id[MII_BUS_ID_SIZE];
2405 	char phy_name[MII_BUS_ID_SIZE + 3];
2406 	int phy_id;
2407 	int dev_id = fep->dev_id;
2408 
2409 	if (fep->phy_node) {
2410 		phy_dev = of_phy_connect(ndev, fep->phy_node,
2411 					 &fec_enet_adjust_link, 0,
2412 					 fep->phy_interface);
2413 		if (!phy_dev) {
2414 			netdev_err(ndev, "Unable to connect to phy\n");
2415 			return -ENODEV;
2416 		}
2417 	} else {
2418 		/* check for attached phy */
2419 		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
2420 			if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
2421 				continue;
2422 			if (dev_id--)
2423 				continue;
2424 			strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
2425 			break;
2426 		}
2427 
2428 		if (phy_id >= PHY_MAX_ADDR) {
2429 			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
2430 			strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
2431 			phy_id = 0;
2432 		}
2433 
2434 		snprintf(phy_name, sizeof(phy_name),
2435 			 PHY_ID_FMT, mdio_bus_id, phy_id);
2436 		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
2437 				      fep->phy_interface);
2438 	}
2439 
2440 	if (IS_ERR(phy_dev)) {
2441 		netdev_err(ndev, "could not attach to PHY\n");
2442 		return PTR_ERR(phy_dev);
2443 	}
2444 
2445 	/* mask with MAC supported features */
2446 	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
2447 		phy_set_max_speed(phy_dev, 1000);
2448 		phy_remove_link_mode(phy_dev,
2449 				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2450 #if !defined(CONFIG_M5272)
2451 		phy_support_sym_pause(phy_dev);
2452 #endif
2453 	}
2454 	else
2455 		phy_set_max_speed(phy_dev, 100);
2456 
2457 	if (fep->quirks & FEC_QUIRK_HAS_EEE)
2458 		phy_support_eee(phy_dev);
2459 
2460 	fep->link = 0;
2461 	fep->full_duplex = 0;
2462 
2463 	phy_attached_info(phy_dev);
2464 
2465 	return 0;
2466 }
2467 
2468 static int fec_enet_mii_init(struct platform_device *pdev)
2469 {
2470 	static struct mii_bus *fec0_mii_bus;
2471 	struct net_device *ndev = platform_get_drvdata(pdev);
2472 	struct fec_enet_private *fep = netdev_priv(ndev);
2473 	bool suppress_preamble = false;
2474 	struct phy_device *phydev;
2475 	struct device_node *node;
2476 	int err = -ENXIO;
2477 	u32 mii_speed, holdtime;
2478 	u32 bus_freq;
2479 	int addr;
2480 
2481 	/*
2482 	 * The i.MX28 dual fec interfaces are not equal.
2483 	 * Here are the differences:
2484 	 *
2485 	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
2486 	 *  - fec0 acts as the 1588 time master while fec1 is slave
2487 	 *  - external phys can only be configured by fec0
2488 	 *
2489 	 * That is to say fec1 can not work independently. It only works
2490 	 * when fec0 is working. The reason behind this design is that the
2491 	 * second interface is added primarily for Switch mode.
2492 	 *
2493 	 * Because of the last point above, both phys are attached on fec0
2494 	 * mdio interface in board design, and need to be configured by
2495 	 * fec0 mii_bus.
2496 	 */
2497 	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
2498 		/* fec1 uses fec0 mii_bus */
2499 		if (mii_cnt && fec0_mii_bus) {
2500 			fep->mii_bus = fec0_mii_bus;
2501 			mii_cnt++;
2502 			return 0;
2503 		}
2504 		return -ENOENT;
2505 	}
2506 
2507 	bus_freq = 2500000; /* 2.5MHz by default */
2508 	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
2509 	if (node) {
2510 		of_property_read_u32(node, "clock-frequency", &bus_freq);
2511 		suppress_preamble = of_property_read_bool(node,
2512 							  "suppress-preamble");
2513 	}
2514 
2515 	/*
2516 	 * Set MII speed (= clk_get_rate() / 2 * phy_speed)
2517 	 *
2518 	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
2519 	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
2520 	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
2521 	 * document.
2522 	 */
2523 	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
2524 	if (fep->quirks & FEC_QUIRK_ENET_MAC)
2525 		mii_speed--;
2526 	if (mii_speed > 63) {
2527 		dev_err(&pdev->dev,
2528 			"fec clock (%lu) too fast to get right mii speed\n",
2529 			clk_get_rate(fep->clk_ipg));
2530 		err = -EINVAL;
2531 		goto err_out;
2532 	}
2533 
2534 	/*
2535 	 * The i.MX28 and i.MX6 types have another filed in the MSCR (aka
2536 	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
2537 	 * versions are RAZ there, so just ignore the difference and write the
2538 	 * register always.
2539 	 * The minimal hold time according to IEE802.3 (clause 22) is 10 ns.
2540 	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
2541 	 * output.
2542 	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
2543 	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
2544 	 * holdtime cannot result in a value greater than 3.
2545 	 */
2546 	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2547 
2548 	fep->phy_speed = mii_speed << 1 | holdtime << 8;
2549 
2550 	if (suppress_preamble)
2551 		fep->phy_speed |= BIT(7);
2552 
2553 	if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
2554 		/* Clear MMFR to avoid to generate MII event by writing MSCR.
2555 		 * MII event generation condition:
2556 		 * - writing MSCR:
2557 		 *	- mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
2558 		 *	  mscr_reg_data_in[7:0] != 0
2559 		 * - writing MMFR:
2560 		 *	- mscr[7:0]_not_zero
2561 		 */
2562 		writel(0, fep->hwp + FEC_MII_DATA);
2563 	}
2564 
2565 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2566 
2567 	/* Clear any pending transaction complete indication */
2568 	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2569 
2570 	fep->mii_bus = mdiobus_alloc();
2571 	if (fep->mii_bus == NULL) {
2572 		err = -ENOMEM;
2573 		goto err_out;
2574 	}
2575 
2576 	fep->mii_bus->name = "fec_enet_mii_bus";
2577 	fep->mii_bus->read = fec_enet_mdio_read_c22;
2578 	fep->mii_bus->write = fec_enet_mdio_write_c22;
2579 	if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
2580 		fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
2581 		fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
2582 	}
2583 	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2584 		pdev->name, fep->dev_id + 1);
2585 	fep->mii_bus->priv = fep;
2586 	fep->mii_bus->parent = &pdev->dev;
2587 
2588 	err = of_mdiobus_register(fep->mii_bus, node);
2589 	if (err)
2590 		goto err_out_free_mdiobus;
2591 	of_node_put(node);
2592 
2593 	/* find all the PHY devices on the bus and set mac_managed_pm to true */
2594 	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
2595 		phydev = mdiobus_get_phy(fep->mii_bus, addr);
2596 		if (phydev)
2597 			phydev->mac_managed_pm = true;
2598 	}
2599 
2600 	mii_cnt++;
2601 
2602 	/* save fec0 mii_bus */
2603 	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2604 		fec0_mii_bus = fep->mii_bus;
2605 
2606 	return 0;
2607 
2608 err_out_free_mdiobus:
2609 	mdiobus_free(fep->mii_bus);
2610 err_out:
2611 	of_node_put(node);
2612 	return err;
2613 }
2614 
2615 static void fec_enet_mii_remove(struct fec_enet_private *fep)
2616 {
2617 	if (--mii_cnt == 0) {
2618 		mdiobus_unregister(fep->mii_bus);
2619 		mdiobus_free(fep->mii_bus);
2620 	}
2621 }
2622 
2623 static void fec_enet_get_drvinfo(struct net_device *ndev,
2624 				 struct ethtool_drvinfo *info)
2625 {
2626 	struct fec_enet_private *fep = netdev_priv(ndev);
2627 
2628 	strscpy(info->driver, fep->pdev->dev.driver->name,
2629 		sizeof(info->driver));
2630 	strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
2631 }
2632 
2633 static int fec_enet_get_regs_len(struct net_device *ndev)
2634 {
2635 	struct fec_enet_private *fep = netdev_priv(ndev);
2636 	struct resource *r;
2637 	int s = 0;
2638 
2639 	r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
2640 	if (r)
2641 		s = resource_size(r);
2642 
2643 	return s;
2644 }
2645 
2646 /* List of registers that can be safety be read to dump them with ethtool */
2647 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2648 	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2649 	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2650 static __u32 fec_enet_register_version = 2;
2651 static u32 fec_enet_register_offset[] = {
2652 	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2653 	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2654 	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
2655 	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
2656 	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
2657 	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
2658 	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
2659 	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
2660 	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2661 	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
2662 	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
2663 	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
2664 	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2665 	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2666 	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2667 	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2668 	RMON_T_P_GTE2048, RMON_T_OCTETS,
2669 	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2670 	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2671 	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2672 	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2673 	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2674 	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2675 	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2676 	RMON_R_P_GTE2048, RMON_R_OCTETS,
2677 	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2678 	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2679 };
2680 /* for i.MX6ul */
2681 static u32 fec_enet_register_offset_6ul[] = {
2682 	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2683 	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2684 	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
2685 	FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
2686 	FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
2687 	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2688 	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
2689 	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2690 	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2691 	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2692 	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2693 	RMON_T_P_GTE2048, RMON_T_OCTETS,
2694 	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2695 	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2696 	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2697 	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2698 	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2699 	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2700 	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2701 	RMON_R_P_GTE2048, RMON_R_OCTETS,
2702 	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2703 	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2704 };
2705 #else
2706 static __u32 fec_enet_register_version = 1;
2707 static u32 fec_enet_register_offset[] = {
2708 	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
2709 	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
2710 	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
2711 	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
2712 	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
2713 	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
2714 	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
2715 	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
2716 	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
2717 };
2718 #endif
2719 
2720 static void fec_enet_get_regs(struct net_device *ndev,
2721 			      struct ethtool_regs *regs, void *regbuf)
2722 {
2723 	struct fec_enet_private *fep = netdev_priv(ndev);
2724 	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
2725 	struct device *dev = &fep->pdev->dev;
2726 	u32 *buf = (u32 *)regbuf;
2727 	u32 i, off;
2728 	int ret;
2729 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2730 	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2731 	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2732 	u32 *reg_list;
2733 	u32 reg_cnt;
2734 
2735 	if (!of_machine_is_compatible("fsl,imx6ul")) {
2736 		reg_list = fec_enet_register_offset;
2737 		reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
2738 	} else {
2739 		reg_list = fec_enet_register_offset_6ul;
2740 		reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
2741 	}
2742 #else
2743 	/* coldfire */
2744 	static u32 *reg_list = fec_enet_register_offset;
2745 	static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
2746 #endif
2747 	ret = pm_runtime_resume_and_get(dev);
2748 	if (ret < 0)
2749 		return;
2750 
2751 	regs->version = fec_enet_register_version;
2752 
2753 	memset(buf, 0, regs->len);
2754 
2755 	for (i = 0; i < reg_cnt; i++) {
2756 		off = reg_list[i];
2757 
2758 		if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
2759 		    !(fep->quirks & FEC_QUIRK_HAS_FRREG))
2760 			continue;
2761 
2762 		off >>= 2;
2763 		buf[off] = readl(&theregs[off]);
2764 	}
2765 
2766 	pm_runtime_mark_last_busy(dev);
2767 	pm_runtime_put_autosuspend(dev);
2768 }
2769 
2770 static int fec_enet_get_ts_info(struct net_device *ndev,
2771 				struct kernel_ethtool_ts_info *info)
2772 {
2773 	struct fec_enet_private *fep = netdev_priv(ndev);
2774 
2775 	if (fep->bufdesc_ex) {
2776 
2777 		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2778 					SOF_TIMESTAMPING_TX_HARDWARE |
2779 					SOF_TIMESTAMPING_RX_HARDWARE |
2780 					SOF_TIMESTAMPING_RAW_HARDWARE;
2781 		if (fep->ptp_clock)
2782 			info->phc_index = ptp_clock_index(fep->ptp_clock);
2783 
2784 		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
2785 				 (1 << HWTSTAMP_TX_ON);
2786 
2787 		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2788 				   (1 << HWTSTAMP_FILTER_ALL);
2789 		return 0;
2790 	} else {
2791 		return ethtool_op_get_ts_info(ndev, info);
2792 	}
2793 }
2794 
2795 #if !defined(CONFIG_M5272)
2796 
2797 static void fec_enet_get_pauseparam(struct net_device *ndev,
2798 				    struct ethtool_pauseparam *pause)
2799 {
2800 	struct fec_enet_private *fep = netdev_priv(ndev);
2801 
2802 	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2803 	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2804 	pause->rx_pause = pause->tx_pause;
2805 }
2806 
2807 static int fec_enet_set_pauseparam(struct net_device *ndev,
2808 				   struct ethtool_pauseparam *pause)
2809 {
2810 	struct fec_enet_private *fep = netdev_priv(ndev);
2811 
2812 	if (!ndev->phydev)
2813 		return -ENODEV;
2814 
2815 	if (pause->tx_pause != pause->rx_pause) {
2816 		netdev_info(ndev,
2817 			"hardware only support enable/disable both tx and rx");
2818 		return -EINVAL;
2819 	}
2820 
2821 	fep->pause_flag = 0;
2822 
2823 	/* tx pause must be same as rx pause */
2824 	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2825 	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2826 
2827 	phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
2828 			  pause->autoneg);
2829 
2830 	if (pause->autoneg) {
2831 		if (netif_running(ndev))
2832 			fec_stop(ndev);
2833 		phy_start_aneg(ndev->phydev);
2834 	}
2835 	if (netif_running(ndev)) {
2836 		napi_disable(&fep->napi);
2837 		netif_tx_lock_bh(ndev);
2838 		fec_restart(ndev);
2839 		netif_tx_wake_all_queues(ndev);
2840 		netif_tx_unlock_bh(ndev);
2841 		napi_enable(&fep->napi);
2842 	}
2843 
2844 	return 0;
2845 }
2846 
2847 static const struct fec_stat {
2848 	char name[ETH_GSTRING_LEN];
2849 	u16 offset;
2850 } fec_stats[] = {
2851 	/* RMON TX */
2852 	{ "tx_dropped", RMON_T_DROP },
2853 	{ "tx_packets", RMON_T_PACKETS },
2854 	{ "tx_broadcast", RMON_T_BC_PKT },
2855 	{ "tx_multicast", RMON_T_MC_PKT },
2856 	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
2857 	{ "tx_undersize", RMON_T_UNDERSIZE },
2858 	{ "tx_oversize", RMON_T_OVERSIZE },
2859 	{ "tx_fragment", RMON_T_FRAG },
2860 	{ "tx_jabber", RMON_T_JAB },
2861 	{ "tx_collision", RMON_T_COL },
2862 	{ "tx_64byte", RMON_T_P64 },
2863 	{ "tx_65to127byte", RMON_T_P65TO127 },
2864 	{ "tx_128to255byte", RMON_T_P128TO255 },
2865 	{ "tx_256to511byte", RMON_T_P256TO511 },
2866 	{ "tx_512to1023byte", RMON_T_P512TO1023 },
2867 	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
2868 	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
2869 	{ "tx_octets", RMON_T_OCTETS },
2870 
2871 	/* IEEE TX */
2872 	{ "IEEE_tx_drop", IEEE_T_DROP },
2873 	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2874 	{ "IEEE_tx_1col", IEEE_T_1COL },
2875 	{ "IEEE_tx_mcol", IEEE_T_MCOL },
2876 	{ "IEEE_tx_def", IEEE_T_DEF },
2877 	{ "IEEE_tx_lcol", IEEE_T_LCOL },
2878 	{ "IEEE_tx_excol", IEEE_T_EXCOL },
2879 	{ "IEEE_tx_macerr", IEEE_T_MACERR },
2880 	{ "IEEE_tx_cserr", IEEE_T_CSERR },
2881 	{ "IEEE_tx_sqe", IEEE_T_SQE },
2882 	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2883 	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2884 
2885 	/* RMON RX */
2886 	{ "rx_packets", RMON_R_PACKETS },
2887 	{ "rx_broadcast", RMON_R_BC_PKT },
2888 	{ "rx_multicast", RMON_R_MC_PKT },
2889 	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
2890 	{ "rx_undersize", RMON_R_UNDERSIZE },
2891 	{ "rx_oversize", RMON_R_OVERSIZE },
2892 	{ "rx_fragment", RMON_R_FRAG },
2893 	{ "rx_jabber", RMON_R_JAB },
2894 	{ "rx_64byte", RMON_R_P64 },
2895 	{ "rx_65to127byte", RMON_R_P65TO127 },
2896 	{ "rx_128to255byte", RMON_R_P128TO255 },
2897 	{ "rx_256to511byte", RMON_R_P256TO511 },
2898 	{ "rx_512to1023byte", RMON_R_P512TO1023 },
2899 	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
2900 	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
2901 	{ "rx_octets", RMON_R_OCTETS },
2902 
2903 	/* IEEE RX */
2904 	{ "IEEE_rx_drop", IEEE_R_DROP },
2905 	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2906 	{ "IEEE_rx_crc", IEEE_R_CRC },
2907 	{ "IEEE_rx_align", IEEE_R_ALIGN },
2908 	{ "IEEE_rx_macerr", IEEE_R_MACERR },
2909 	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2910 	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2911 };
2912 
2913 #define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))
2914 
2915 static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
2916 	"rx_xdp_redirect",           /* RX_XDP_REDIRECT = 0, */
2917 	"rx_xdp_pass",               /* RX_XDP_PASS, */
2918 	"rx_xdp_drop",               /* RX_XDP_DROP, */
2919 	"rx_xdp_tx",                 /* RX_XDP_TX, */
2920 	"rx_xdp_tx_errors",          /* RX_XDP_TX_ERRORS, */
2921 	"tx_xdp_xmit",               /* TX_XDP_XMIT, */
2922 	"tx_xdp_xmit_errors",        /* TX_XDP_XMIT_ERRORS, */
2923 };
2924 
2925 static void fec_enet_update_ethtool_stats(struct net_device *dev)
2926 {
2927 	struct fec_enet_private *fep = netdev_priv(dev);
2928 	int i;
2929 
2930 	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2931 		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2932 }
2933 
2934 static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
2935 {
2936 	u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
2937 	struct fec_enet_priv_rx_q *rxq;
2938 	int i, j;
2939 
2940 	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2941 		rxq = fep->rx_queue[i];
2942 
2943 		for (j = 0; j < XDP_STATS_TOTAL; j++)
2944 			xdp_stats[j] += rxq->stats[j];
2945 	}
2946 
2947 	memcpy(data, xdp_stats, sizeof(xdp_stats));
2948 }
2949 
2950 static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
2951 {
2952 #ifdef CONFIG_PAGE_POOL_STATS
2953 	struct page_pool_stats stats = {};
2954 	struct fec_enet_priv_rx_q *rxq;
2955 	int i;
2956 
2957 	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2958 		rxq = fep->rx_queue[i];
2959 
2960 		if (!rxq->page_pool)
2961 			continue;
2962 
2963 		page_pool_get_stats(rxq->page_pool, &stats);
2964 	}
2965 
2966 	page_pool_ethtool_stats_get(data, &stats);
2967 #endif
2968 }
2969 
2970 static void fec_enet_get_ethtool_stats(struct net_device *dev,
2971 				       struct ethtool_stats *stats, u64 *data)
2972 {
2973 	struct fec_enet_private *fep = netdev_priv(dev);
2974 
2975 	if (netif_running(dev))
2976 		fec_enet_update_ethtool_stats(dev);
2977 
2978 	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
2979 	data += FEC_STATS_SIZE / sizeof(u64);
2980 
2981 	fec_enet_get_xdp_stats(fep, data);
2982 	data += XDP_STATS_TOTAL;
2983 
2984 	fec_enet_page_pool_stats(fep, data);
2985 }
2986 
2987 static void fec_enet_get_strings(struct net_device *netdev,
2988 	u32 stringset, u8 *data)
2989 {
2990 	int i;
2991 	switch (stringset) {
2992 	case ETH_SS_STATS:
2993 		for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
2994 			ethtool_puts(&data, fec_stats[i].name);
2995 		}
2996 		for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
2997 			ethtool_puts(&data, fec_xdp_stat_strs[i]);
2998 		}
2999 		page_pool_ethtool_stats_get_strings(data);
3000 
3001 		break;
3002 	case ETH_SS_TEST:
3003 		net_selftest_get_strings(data);
3004 		break;
3005 	}
3006 }
3007 
3008 static int fec_enet_get_sset_count(struct net_device *dev, int sset)
3009 {
3010 	int count;
3011 
3012 	switch (sset) {
3013 	case ETH_SS_STATS:
3014 		count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
3015 		count += page_pool_ethtool_stats_get_count();
3016 		return count;
3017 
3018 	case ETH_SS_TEST:
3019 		return net_selftest_get_count();
3020 	default:
3021 		return -EOPNOTSUPP;
3022 	}
3023 }
3024 
3025 static void fec_enet_clear_ethtool_stats(struct net_device *dev)
3026 {
3027 	struct fec_enet_private *fep = netdev_priv(dev);
3028 	struct fec_enet_priv_rx_q *rxq;
3029 	int i, j;
3030 
3031 	/* Disable MIB statistics counters */
3032 	writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
3033 
3034 	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
3035 		writel(0, fep->hwp + fec_stats[i].offset);
3036 
3037 	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
3038 		rxq = fep->rx_queue[i];
3039 		for (j = 0; j < XDP_STATS_TOTAL; j++)
3040 			rxq->stats[j] = 0;
3041 	}
3042 
3043 	/* Don't disable MIB statistics counters */
3044 	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
3045 }
3046 
3047 #else	/* !defined(CONFIG_M5272) */
3048 #define FEC_STATS_SIZE	0
3049 static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
3050 {
3051 }
3052 
3053 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
3054 {
3055 }
3056 #endif /* !defined(CONFIG_M5272) */
3057 
3058 /* ITR clock source is enet system clock (clk_ahb).
3059  * TCTT unit is cycle_ns * 64 cycle
3060  * So, the ICTT value = X us / (cycle_ns * 64)
3061  */
3062 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
3063 {
3064 	struct fec_enet_private *fep = netdev_priv(ndev);
3065 
3066 	return us * (fep->itr_clk_rate / 64000) / 1000;
3067 }
3068 
3069 /* Set threshold for interrupt coalescing */
3070 static void fec_enet_itr_coal_set(struct net_device *ndev)
3071 {
3072 	struct fec_enet_private *fep = netdev_priv(ndev);
3073 	int rx_itr, tx_itr;
3074 
3075 	/* Must be greater than zero to avoid unpredictable behavior */
3076 	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
3077 	    !fep->tx_time_itr || !fep->tx_pkts_itr)
3078 		return;
3079 
3080 	/* Select enet system clock as Interrupt Coalescing
3081 	 * timer Clock Source
3082 	 */
3083 	rx_itr = FEC_ITR_CLK_SEL;
3084 	tx_itr = FEC_ITR_CLK_SEL;
3085 
3086 	/* set ICFT and ICTT */
3087 	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
3088 	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
3089 	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
3090 	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
3091 
3092 	rx_itr |= FEC_ITR_EN;
3093 	tx_itr |= FEC_ITR_EN;
3094 
3095 	writel(tx_itr, fep->hwp + FEC_TXIC0);
3096 	writel(rx_itr, fep->hwp + FEC_RXIC0);
3097 	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
3098 		writel(tx_itr, fep->hwp + FEC_TXIC1);
3099 		writel(rx_itr, fep->hwp + FEC_RXIC1);
3100 		writel(tx_itr, fep->hwp + FEC_TXIC2);
3101 		writel(rx_itr, fep->hwp + FEC_RXIC2);
3102 	}
3103 }
3104 
3105 static int fec_enet_get_coalesce(struct net_device *ndev,
3106 				 struct ethtool_coalesce *ec,
3107 				 struct kernel_ethtool_coalesce *kernel_coal,
3108 				 struct netlink_ext_ack *extack)
3109 {
3110 	struct fec_enet_private *fep = netdev_priv(ndev);
3111 
3112 	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3113 		return -EOPNOTSUPP;
3114 
3115 	ec->rx_coalesce_usecs = fep->rx_time_itr;
3116 	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
3117 
3118 	ec->tx_coalesce_usecs = fep->tx_time_itr;
3119 	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
3120 
3121 	return 0;
3122 }
3123 
3124 static int fec_enet_set_coalesce(struct net_device *ndev,
3125 				 struct ethtool_coalesce *ec,
3126 				 struct kernel_ethtool_coalesce *kernel_coal,
3127 				 struct netlink_ext_ack *extack)
3128 {
3129 	struct fec_enet_private *fep = netdev_priv(ndev);
3130 	struct device *dev = &fep->pdev->dev;
3131 	unsigned int cycle;
3132 
3133 	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3134 		return -EOPNOTSUPP;
3135 
3136 	if (ec->rx_max_coalesced_frames > 255) {
3137 		dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
3138 		return -EINVAL;
3139 	}
3140 
3141 	if (ec->tx_max_coalesced_frames > 255) {
3142 		dev_err(dev, "Tx coalesced frame exceed hardware limitation\n");
3143 		return -EINVAL;
3144 	}
3145 
3146 	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
3147 	if (cycle > 0xFFFF) {
3148 		dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
3149 		return -EINVAL;
3150 	}
3151 
3152 	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
3153 	if (cycle > 0xFFFF) {
3154 		dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
3155 		return -EINVAL;
3156 	}
3157 
3158 	fep->rx_time_itr = ec->rx_coalesce_usecs;
3159 	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
3160 
3161 	fep->tx_time_itr = ec->tx_coalesce_usecs;
3162 	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
3163 
3164 	fec_enet_itr_coal_set(ndev);
3165 
3166 	return 0;
3167 }
3168 
3169 static int
3170 fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
3171 {
3172 	struct fec_enet_private *fep = netdev_priv(ndev);
3173 	struct ethtool_keee *p = &fep->eee;
3174 
3175 	if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3176 		return -EOPNOTSUPP;
3177 
3178 	if (!netif_running(ndev))
3179 		return -ENETDOWN;
3180 
3181 	edata->tx_lpi_timer = p->tx_lpi_timer;
3182 
3183 	return phy_ethtool_get_eee(ndev->phydev, edata);
3184 }
3185 
3186 static int
3187 fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
3188 {
3189 	struct fec_enet_private *fep = netdev_priv(ndev);
3190 	struct ethtool_keee *p = &fep->eee;
3191 
3192 	if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3193 		return -EOPNOTSUPP;
3194 
3195 	if (!netif_running(ndev))
3196 		return -ENETDOWN;
3197 
3198 	p->tx_lpi_timer = edata->tx_lpi_timer;
3199 
3200 	return phy_ethtool_set_eee(ndev->phydev, edata);
3201 }
3202 
3203 static void
3204 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3205 {
3206 	struct fec_enet_private *fep = netdev_priv(ndev);
3207 
3208 	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
3209 		wol->supported = WAKE_MAGIC;
3210 		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
3211 	} else {
3212 		wol->supported = wol->wolopts = 0;
3213 	}
3214 }
3215 
3216 static int
3217 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3218 {
3219 	struct fec_enet_private *fep = netdev_priv(ndev);
3220 
3221 	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
3222 		return -EINVAL;
3223 
3224 	if (wol->wolopts & ~WAKE_MAGIC)
3225 		return -EINVAL;
3226 
3227 	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
3228 	if (device_may_wakeup(&ndev->dev))
3229 		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
3230 	else
3231 		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
3232 
3233 	return 0;
3234 }
3235 
3236 static const struct ethtool_ops fec_enet_ethtool_ops = {
3237 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3238 				     ETHTOOL_COALESCE_MAX_FRAMES,
3239 	.get_drvinfo		= fec_enet_get_drvinfo,
3240 	.get_regs_len		= fec_enet_get_regs_len,
3241 	.get_regs		= fec_enet_get_regs,
3242 	.nway_reset		= phy_ethtool_nway_reset,
3243 	.get_link		= ethtool_op_get_link,
3244 	.get_coalesce		= fec_enet_get_coalesce,
3245 	.set_coalesce		= fec_enet_set_coalesce,
3246 #ifndef CONFIG_M5272
3247 	.get_pauseparam		= fec_enet_get_pauseparam,
3248 	.set_pauseparam		= fec_enet_set_pauseparam,
3249 	.get_strings		= fec_enet_get_strings,
3250 	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
3251 	.get_sset_count		= fec_enet_get_sset_count,
3252 #endif
3253 	.get_ts_info		= fec_enet_get_ts_info,
3254 	.get_wol		= fec_enet_get_wol,
3255 	.set_wol		= fec_enet_set_wol,
3256 	.get_eee		= fec_enet_get_eee,
3257 	.set_eee		= fec_enet_set_eee,
3258 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
3259 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
3260 	.self_test		= net_selftest,
3261 };
3262 
3263 static void fec_enet_free_buffers(struct net_device *ndev)
3264 {
3265 	struct fec_enet_private *fep = netdev_priv(ndev);
3266 	unsigned int i;
3267 	struct fec_enet_priv_tx_q *txq;
3268 	struct fec_enet_priv_rx_q *rxq;
3269 	unsigned int q;
3270 
3271 	for (q = 0; q < fep->num_rx_queues; q++) {
3272 		rxq = fep->rx_queue[q];
3273 		for (i = 0; i < rxq->bd.ring_size; i++)
3274 			page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
3275 
3276 		for (i = 0; i < XDP_STATS_TOTAL; i++)
3277 			rxq->stats[i] = 0;
3278 
3279 		if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
3280 			xdp_rxq_info_unreg(&rxq->xdp_rxq);
3281 		page_pool_destroy(rxq->page_pool);
3282 		rxq->page_pool = NULL;
3283 	}
3284 
3285 	for (q = 0; q < fep->num_tx_queues; q++) {
3286 		txq = fep->tx_queue[q];
3287 		for (i = 0; i < txq->bd.ring_size; i++) {
3288 			kfree(txq->tx_bounce[i]);
3289 			txq->tx_bounce[i] = NULL;
3290 
3291 			if (!txq->tx_buf[i].buf_p) {
3292 				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3293 				continue;
3294 			}
3295 
3296 			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
3297 				dev_kfree_skb(txq->tx_buf[i].buf_p);
3298 			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
3299 				xdp_return_frame(txq->tx_buf[i].buf_p);
3300 			} else {
3301 				struct page *page = txq->tx_buf[i].buf_p;
3302 
3303 				page_pool_put_page(page->pp, page, 0, false);
3304 			}
3305 
3306 			txq->tx_buf[i].buf_p = NULL;
3307 			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3308 		}
3309 	}
3310 }
3311 
3312 static void fec_enet_free_queue(struct net_device *ndev)
3313 {
3314 	struct fec_enet_private *fep = netdev_priv(ndev);
3315 	int i;
3316 	struct fec_enet_priv_tx_q *txq;
3317 
3318 	for (i = 0; i < fep->num_tx_queues; i++)
3319 		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
3320 			txq = fep->tx_queue[i];
3321 			fec_dma_free(&fep->pdev->dev,
3322 				     txq->bd.ring_size * TSO_HEADER_SIZE,
3323 				     txq->tso_hdrs, txq->tso_hdrs_dma);
3324 		}
3325 
3326 	for (i = 0; i < fep->num_rx_queues; i++)
3327 		kfree(fep->rx_queue[i]);
3328 	for (i = 0; i < fep->num_tx_queues; i++)
3329 		kfree(fep->tx_queue[i]);
3330 }
3331 
3332 static int fec_enet_alloc_queue(struct net_device *ndev)
3333 {
3334 	struct fec_enet_private *fep = netdev_priv(ndev);
3335 	int i;
3336 	int ret = 0;
3337 	struct fec_enet_priv_tx_q *txq;
3338 
3339 	for (i = 0; i < fep->num_tx_queues; i++) {
3340 		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
3341 		if (!txq) {
3342 			ret = -ENOMEM;
3343 			goto alloc_failed;
3344 		}
3345 
3346 		fep->tx_queue[i] = txq;
3347 		txq->bd.ring_size = TX_RING_SIZE;
3348 		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
3349 
3350 		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
3351 		txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
3352 
3353 		txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev,
3354 					txq->bd.ring_size * TSO_HEADER_SIZE,
3355 					&txq->tso_hdrs_dma, GFP_KERNEL);
3356 		if (!txq->tso_hdrs) {
3357 			ret = -ENOMEM;
3358 			goto alloc_failed;
3359 		}
3360 	}
3361 
3362 	for (i = 0; i < fep->num_rx_queues; i++) {
3363 		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
3364 					   GFP_KERNEL);
3365 		if (!fep->rx_queue[i]) {
3366 			ret = -ENOMEM;
3367 			goto alloc_failed;
3368 		}
3369 
3370 		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
3371 		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
3372 	}
3373 	return ret;
3374 
3375 alloc_failed:
3376 	fec_enet_free_queue(ndev);
3377 	return ret;
3378 }
3379 
3380 static int
3381 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
3382 {
3383 	struct fec_enet_private *fep = netdev_priv(ndev);
3384 	struct fec_enet_priv_rx_q *rxq;
3385 	dma_addr_t phys_addr;
3386 	struct bufdesc	*bdp;
3387 	struct page *page;
3388 	int i, err;
3389 
3390 	rxq = fep->rx_queue[queue];
3391 	bdp = rxq->bd.base;
3392 
3393 	err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
3394 	if (err < 0) {
3395 		netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
3396 		return err;
3397 	}
3398 
3399 	for (i = 0; i < rxq->bd.ring_size; i++) {
3400 		page = page_pool_dev_alloc_pages(rxq->page_pool);
3401 		if (!page)
3402 			goto err_alloc;
3403 
3404 		phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
3405 		bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
3406 
3407 		rxq->rx_skb_info[i].page = page;
3408 		rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
3409 		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
3410 
3411 		if (fep->bufdesc_ex) {
3412 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3413 			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
3414 		}
3415 
3416 		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
3417 	}
3418 
3419 	/* Set the last buffer to wrap. */
3420 	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
3421 	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
3422 	return 0;
3423 
3424  err_alloc:
3425 	fec_enet_free_buffers(ndev);
3426 	return -ENOMEM;
3427 }
3428 
3429 static int
3430 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
3431 {
3432 	struct fec_enet_private *fep = netdev_priv(ndev);
3433 	unsigned int i;
3434 	struct bufdesc  *bdp;
3435 	struct fec_enet_priv_tx_q *txq;
3436 
3437 	txq = fep->tx_queue[queue];
3438 	bdp = txq->bd.base;
3439 	for (i = 0; i < txq->bd.ring_size; i++) {
3440 		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
3441 		if (!txq->tx_bounce[i])
3442 			goto err_alloc;
3443 
3444 		bdp->cbd_sc = cpu_to_fec16(0);
3445 		bdp->cbd_bufaddr = cpu_to_fec32(0);
3446 
3447 		if (fep->bufdesc_ex) {
3448 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3449 			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
3450 		}
3451 
3452 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3453 	}
3454 
3455 	/* Set the last buffer to wrap. */
3456 	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
3457 	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
3458 
3459 	return 0;
3460 
3461  err_alloc:
3462 	fec_enet_free_buffers(ndev);
3463 	return -ENOMEM;
3464 }
3465 
3466 static int fec_enet_alloc_buffers(struct net_device *ndev)
3467 {
3468 	struct fec_enet_private *fep = netdev_priv(ndev);
3469 	unsigned int i;
3470 
3471 	for (i = 0; i < fep->num_rx_queues; i++)
3472 		if (fec_enet_alloc_rxq_buffers(ndev, i))
3473 			return -ENOMEM;
3474 
3475 	for (i = 0; i < fep->num_tx_queues; i++)
3476 		if (fec_enet_alloc_txq_buffers(ndev, i))
3477 			return -ENOMEM;
3478 	return 0;
3479 }
3480 
3481 static int
3482 fec_enet_open(struct net_device *ndev)
3483 {
3484 	struct fec_enet_private *fep = netdev_priv(ndev);
3485 	int ret;
3486 	bool reset_again;
3487 
3488 	ret = pm_runtime_resume_and_get(&fep->pdev->dev);
3489 	if (ret < 0)
3490 		return ret;
3491 
3492 	pinctrl_pm_select_default_state(&fep->pdev->dev);
3493 	ret = fec_enet_clk_enable(ndev, true);
3494 	if (ret)
3495 		goto clk_enable;
3496 
3497 	/* During the first fec_enet_open call the PHY isn't probed at this
3498 	 * point. Therefore the phy_reset_after_clk_enable() call within
3499 	 * fec_enet_clk_enable() fails. As we need this reset in order to be
3500 	 * sure the PHY is working correctly we check if we need to reset again
3501 	 * later when the PHY is probed
3502 	 */
3503 	if (ndev->phydev && ndev->phydev->drv)
3504 		reset_again = false;
3505 	else
3506 		reset_again = true;
3507 
3508 	/* I should reset the ring buffers here, but I don't yet know
3509 	 * a simple way to do that.
3510 	 */
3511 
3512 	ret = fec_enet_alloc_buffers(ndev);
3513 	if (ret)
3514 		goto err_enet_alloc;
3515 
3516 	/* Init MAC prior to mii bus probe */
3517 	fec_restart(ndev);
3518 
3519 	/* Call phy_reset_after_clk_enable() again if it failed during
3520 	 * phy_reset_after_clk_enable() before because the PHY wasn't probed.
3521 	 */
3522 	if (reset_again)
3523 		fec_enet_phy_reset_after_clk_enable(ndev);
3524 
3525 	/* Probe and connect to PHY when open the interface */
3526 	ret = fec_enet_mii_probe(ndev);
3527 	if (ret)
3528 		goto err_enet_mii_probe;
3529 
3530 	if (fep->quirks & FEC_QUIRK_ERR006687)
3531 		imx6q_cpuidle_fec_irqs_used();
3532 
3533 	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3534 		cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
3535 
3536 	napi_enable(&fep->napi);
3537 	phy_start(ndev->phydev);
3538 	netif_tx_start_all_queues(ndev);
3539 
3540 	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
3541 				 FEC_WOL_FLAG_ENABLE);
3542 
3543 	return 0;
3544 
3545 err_enet_mii_probe:
3546 	fec_enet_free_buffers(ndev);
3547 err_enet_alloc:
3548 	fec_enet_clk_enable(ndev, false);
3549 clk_enable:
3550 	pm_runtime_mark_last_busy(&fep->pdev->dev);
3551 	pm_runtime_put_autosuspend(&fep->pdev->dev);
3552 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3553 	return ret;
3554 }
3555 
3556 static int
3557 fec_enet_close(struct net_device *ndev)
3558 {
3559 	struct fec_enet_private *fep = netdev_priv(ndev);
3560 
3561 	phy_stop(ndev->phydev);
3562 
3563 	if (netif_device_present(ndev)) {
3564 		napi_disable(&fep->napi);
3565 		netif_tx_disable(ndev);
3566 		fec_stop(ndev);
3567 	}
3568 
3569 	phy_disconnect(ndev->phydev);
3570 
3571 	if (fep->quirks & FEC_QUIRK_ERR006687)
3572 		imx6q_cpuidle_fec_irqs_unused();
3573 
3574 	fec_enet_update_ethtool_stats(ndev);
3575 
3576 	fec_enet_clk_enable(ndev, false);
3577 	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3578 		cpu_latency_qos_remove_request(&fep->pm_qos_req);
3579 
3580 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3581 	pm_runtime_mark_last_busy(&fep->pdev->dev);
3582 	pm_runtime_put_autosuspend(&fep->pdev->dev);
3583 
3584 	fec_enet_free_buffers(ndev);
3585 
3586 	return 0;
3587 }
3588 
3589 /* Set or clear the multicast filter for this adaptor.
3590  * Skeleton taken from sunlance driver.
3591  * The CPM Ethernet implementation allows Multicast as well as individual
3592  * MAC address filtering.  Some of the drivers check to make sure it is
3593  * a group multicast address, and discard those that are not.  I guess I
3594  * will do the same for now, but just remove the test if you want
3595  * individual filtering as well (do the upper net layers want or support
3596  * this kind of feature?).
3597  */
3598 
3599 #define FEC_HASH_BITS	6		/* #bits in hash */
3600 
3601 static void set_multicast_list(struct net_device *ndev)
3602 {
3603 	struct fec_enet_private *fep = netdev_priv(ndev);
3604 	struct netdev_hw_addr *ha;
3605 	unsigned int crc, tmp;
3606 	unsigned char hash;
3607 	unsigned int hash_high = 0, hash_low = 0;
3608 
3609 	if (ndev->flags & IFF_PROMISC) {
3610 		tmp = readl(fep->hwp + FEC_R_CNTRL);
3611 		tmp |= 0x8;
3612 		writel(tmp, fep->hwp + FEC_R_CNTRL);
3613 		return;
3614 	}
3615 
3616 	tmp = readl(fep->hwp + FEC_R_CNTRL);
3617 	tmp &= ~0x8;
3618 	writel(tmp, fep->hwp + FEC_R_CNTRL);
3619 
3620 	if (ndev->flags & IFF_ALLMULTI) {
3621 		/* Catch all multicast addresses, so set the
3622 		 * filter to all 1's
3623 		 */
3624 		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3625 		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3626 
3627 		return;
3628 	}
3629 
3630 	/* Add the addresses in hash register */
3631 	netdev_for_each_mc_addr(ha, ndev) {
3632 		/* calculate crc32 value of mac address */
3633 		crc = ether_crc_le(ndev->addr_len, ha->addr);
3634 
3635 		/* only upper 6 bits (FEC_HASH_BITS) are used
3636 		 * which point to specific bit in the hash registers
3637 		 */
3638 		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
3639 
3640 		if (hash > 31)
3641 			hash_high |= 1 << (hash - 32);
3642 		else
3643 			hash_low |= 1 << hash;
3644 	}
3645 
3646 	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3647 	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3648 }
3649 
3650 /* Set a MAC change in hardware. */
3651 static int
3652 fec_set_mac_address(struct net_device *ndev, void *p)
3653 {
3654 	struct fec_enet_private *fep = netdev_priv(ndev);
3655 	struct sockaddr *addr = p;
3656 
3657 	if (addr) {
3658 		if (!is_valid_ether_addr(addr->sa_data))
3659 			return -EADDRNOTAVAIL;
3660 		eth_hw_addr_set(ndev, addr->sa_data);
3661 	}
3662 
3663 	/* Add netif status check here to avoid system hang in below case:
3664 	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
3665 	 * After ethx down, fec all clocks are gated off and then register
3666 	 * access causes system hang.
3667 	 */
3668 	if (!netif_running(ndev))
3669 		return 0;
3670 
3671 	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
3672 		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
3673 		fep->hwp + FEC_ADDR_LOW);
3674 	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
3675 		fep->hwp + FEC_ADDR_HIGH);
3676 	return 0;
3677 }
3678 
3679 static inline void fec_enet_set_netdev_features(struct net_device *netdev,
3680 	netdev_features_t features)
3681 {
3682 	struct fec_enet_private *fep = netdev_priv(netdev);
3683 	netdev_features_t changed = features ^ netdev->features;
3684 
3685 	netdev->features = features;
3686 
3687 	/* Receive checksum has been changed */
3688 	if (changed & NETIF_F_RXCSUM) {
3689 		if (features & NETIF_F_RXCSUM)
3690 			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3691 		else
3692 			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3693 	}
3694 }
3695 
3696 static int fec_set_features(struct net_device *netdev,
3697 	netdev_features_t features)
3698 {
3699 	struct fec_enet_private *fep = netdev_priv(netdev);
3700 	netdev_features_t changed = features ^ netdev->features;
3701 
3702 	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
3703 		napi_disable(&fep->napi);
3704 		netif_tx_lock_bh(netdev);
3705 		fec_stop(netdev);
3706 		fec_enet_set_netdev_features(netdev, features);
3707 		fec_restart(netdev);
3708 		netif_tx_wake_all_queues(netdev);
3709 		netif_tx_unlock_bh(netdev);
3710 		napi_enable(&fep->napi);
3711 	} else {
3712 		fec_enet_set_netdev_features(netdev, features);
3713 	}
3714 
3715 	return 0;
3716 }
3717 
3718 static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
3719 				 struct net_device *sb_dev)
3720 {
3721 	struct fec_enet_private *fep = netdev_priv(ndev);
3722 	u16 vlan_tag = 0;
3723 
3724 	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
3725 		return netdev_pick_tx(ndev, skb, NULL);
3726 
3727 	/* VLAN is present in the payload.*/
3728 	if (eth_type_vlan(skb->protocol)) {
3729 		struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
3730 
3731 		vlan_tag = ntohs(vhdr->h_vlan_TCI);
3732 	/*  VLAN is present in the skb but not yet pushed in the payload.*/
3733 	} else if (skb_vlan_tag_present(skb)) {
3734 		vlan_tag = skb->vlan_tci;
3735 	} else {
3736 		return vlan_tag;
3737 	}
3738 
3739 	return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
3740 }
3741 
3742 static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
3743 {
3744 	struct fec_enet_private *fep = netdev_priv(dev);
3745 	bool is_run = netif_running(dev);
3746 	struct bpf_prog *old_prog;
3747 
3748 	switch (bpf->command) {
3749 	case XDP_SETUP_PROG:
3750 		/* No need to support the SoCs that require to
3751 		 * do the frame swap because the performance wouldn't be
3752 		 * better than the skb mode.
3753 		 */
3754 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
3755 			return -EOPNOTSUPP;
3756 
3757 		if (!bpf->prog)
3758 			xdp_features_clear_redirect_target(dev);
3759 
3760 		if (is_run) {
3761 			napi_disable(&fep->napi);
3762 			netif_tx_disable(dev);
3763 		}
3764 
3765 		old_prog = xchg(&fep->xdp_prog, bpf->prog);
3766 		if (old_prog)
3767 			bpf_prog_put(old_prog);
3768 
3769 		fec_restart(dev);
3770 
3771 		if (is_run) {
3772 			napi_enable(&fep->napi);
3773 			netif_tx_start_all_queues(dev);
3774 		}
3775 
3776 		if (bpf->prog)
3777 			xdp_features_set_redirect_target(dev, false);
3778 
3779 		return 0;
3780 
3781 	case XDP_SETUP_XSK_POOL:
3782 		return -EOPNOTSUPP;
3783 
3784 	default:
3785 		return -EOPNOTSUPP;
3786 	}
3787 }
3788 
3789 static int
3790 fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
3791 {
3792 	if (unlikely(index < 0))
3793 		return 0;
3794 
3795 	return (index % fep->num_tx_queues);
3796 }
3797 
3798 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
3799 				   struct fec_enet_priv_tx_q *txq,
3800 				   void *frame, u32 dma_sync_len,
3801 				   bool ndo_xmit)
3802 {
3803 	unsigned int index, status, estatus;
3804 	struct bufdesc *bdp;
3805 	dma_addr_t dma_addr;
3806 	int entries_free;
3807 	u16 frame_len;
3808 
3809 	entries_free = fec_enet_get_free_txdesc_num(txq);
3810 	if (entries_free < MAX_SKB_FRAGS + 1) {
3811 		netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
3812 		return -EBUSY;
3813 	}
3814 
3815 	/* Fill in a Tx ring entry */
3816 	bdp = txq->bd.cur;
3817 	status = fec16_to_cpu(bdp->cbd_sc);
3818 	status &= ~BD_ENET_TX_STATS;
3819 
3820 	index = fec_enet_get_bd_index(bdp, &txq->bd);
3821 
3822 	if (ndo_xmit) {
3823 		struct xdp_frame *xdpf = frame;
3824 
3825 		dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
3826 					  xdpf->len, DMA_TO_DEVICE);
3827 		if (dma_mapping_error(&fep->pdev->dev, dma_addr))
3828 			return -ENOMEM;
3829 
3830 		frame_len = xdpf->len;
3831 		txq->tx_buf[index].buf_p = xdpf;
3832 		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
3833 	} else {
3834 		struct xdp_buff *xdpb = frame;
3835 		struct page *page;
3836 
3837 		page = virt_to_page(xdpb->data);
3838 		dma_addr = page_pool_get_dma_addr(page) +
3839 			   (xdpb->data - xdpb->data_hard_start);
3840 		dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
3841 					   dma_sync_len, DMA_BIDIRECTIONAL);
3842 		frame_len = xdpb->data_end - xdpb->data;
3843 		txq->tx_buf[index].buf_p = page;
3844 		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
3845 	}
3846 
3847 	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
3848 	if (fep->bufdesc_ex)
3849 		estatus = BD_ENET_TX_INT;
3850 
3851 	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
3852 	bdp->cbd_datlen = cpu_to_fec16(frame_len);
3853 
3854 	if (fep->bufdesc_ex) {
3855 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3856 
3857 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
3858 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
3859 
3860 		ebdp->cbd_bdu = 0;
3861 		ebdp->cbd_esc = cpu_to_fec32(estatus);
3862 	}
3863 
3864 	/* Make sure the updates to rest of the descriptor are performed before
3865 	 * transferring ownership.
3866 	 */
3867 	dma_wmb();
3868 
3869 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
3870 	 * it's the last BD of the frame, and to put the CRC on the end.
3871 	 */
3872 	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
3873 	bdp->cbd_sc = cpu_to_fec16(status);
3874 
3875 	/* If this was the last BD in the ring, start at the beginning again. */
3876 	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3877 
3878 	/* Make sure the update to bdp are performed before txq->bd.cur. */
3879 	dma_wmb();
3880 
3881 	txq->bd.cur = bdp;
3882 
3883 	/* Trigger transmission start */
3884 	writel(0, txq->bd.reg_desc_active);
3885 
3886 	return 0;
3887 }
3888 
3889 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
3890 				int cpu, struct xdp_buff *xdp,
3891 				u32 dma_sync_len)
3892 {
3893 	struct fec_enet_priv_tx_q *txq;
3894 	struct netdev_queue *nq;
3895 	int queue, ret;
3896 
3897 	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3898 	txq = fep->tx_queue[queue];
3899 	nq = netdev_get_tx_queue(fep->netdev, queue);
3900 
3901 	__netif_tx_lock(nq, cpu);
3902 
3903 	/* Avoid tx timeout as XDP shares the queue with kernel stack */
3904 	txq_trans_cond_update(nq);
3905 	ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
3906 
3907 	__netif_tx_unlock(nq);
3908 
3909 	return ret;
3910 }
3911 
3912 static int fec_enet_xdp_xmit(struct net_device *dev,
3913 			     int num_frames,
3914 			     struct xdp_frame **frames,
3915 			     u32 flags)
3916 {
3917 	struct fec_enet_private *fep = netdev_priv(dev);
3918 	struct fec_enet_priv_tx_q *txq;
3919 	int cpu = smp_processor_id();
3920 	unsigned int sent_frames = 0;
3921 	struct netdev_queue *nq;
3922 	unsigned int queue;
3923 	int i;
3924 
3925 	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3926 	txq = fep->tx_queue[queue];
3927 	nq = netdev_get_tx_queue(fep->netdev, queue);
3928 
3929 	__netif_tx_lock(nq, cpu);
3930 
3931 	/* Avoid tx timeout as XDP shares the queue with kernel stack */
3932 	txq_trans_cond_update(nq);
3933 	for (i = 0; i < num_frames; i++) {
3934 		if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
3935 			break;
3936 		sent_frames++;
3937 	}
3938 
3939 	__netif_tx_unlock(nq);
3940 
3941 	return sent_frames;
3942 }
3943 
3944 static int fec_hwtstamp_get(struct net_device *ndev,
3945 			    struct kernel_hwtstamp_config *config)
3946 {
3947 	struct fec_enet_private *fep = netdev_priv(ndev);
3948 
3949 	if (!netif_running(ndev))
3950 		return -EINVAL;
3951 
3952 	if (!fep->bufdesc_ex)
3953 		return -EOPNOTSUPP;
3954 
3955 	fec_ptp_get(ndev, config);
3956 
3957 	return 0;
3958 }
3959 
3960 static int fec_hwtstamp_set(struct net_device *ndev,
3961 			    struct kernel_hwtstamp_config *config,
3962 			    struct netlink_ext_ack *extack)
3963 {
3964 	struct fec_enet_private *fep = netdev_priv(ndev);
3965 
3966 	if (!netif_running(ndev))
3967 		return -EINVAL;
3968 
3969 	if (!fep->bufdesc_ex)
3970 		return -EOPNOTSUPP;
3971 
3972 	return fec_ptp_set(ndev, config, extack);
3973 }
3974 
3975 static const struct net_device_ops fec_netdev_ops = {
3976 	.ndo_open		= fec_enet_open,
3977 	.ndo_stop		= fec_enet_close,
3978 	.ndo_start_xmit		= fec_enet_start_xmit,
3979 	.ndo_select_queue       = fec_enet_select_queue,
3980 	.ndo_set_rx_mode	= set_multicast_list,
3981 	.ndo_validate_addr	= eth_validate_addr,
3982 	.ndo_tx_timeout		= fec_timeout,
3983 	.ndo_set_mac_address	= fec_set_mac_address,
3984 	.ndo_eth_ioctl		= phy_do_ioctl_running,
3985 	.ndo_set_features	= fec_set_features,
3986 	.ndo_bpf		= fec_enet_bpf,
3987 	.ndo_xdp_xmit		= fec_enet_xdp_xmit,
3988 	.ndo_hwtstamp_get	= fec_hwtstamp_get,
3989 	.ndo_hwtstamp_set	= fec_hwtstamp_set,
3990 };
3991 
3992 static const unsigned short offset_des_active_rxq[] = {
3993 	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
3994 };
3995 
3996 static const unsigned short offset_des_active_txq[] = {
3997 	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
3998 };
3999 
4000  /*
4001   * XXX:  We need to clean up on failure exits here.
4002   *
4003   */
4004 static int fec_enet_init(struct net_device *ndev)
4005 {
4006 	struct fec_enet_private *fep = netdev_priv(ndev);
4007 	struct bufdesc *cbd_base;
4008 	dma_addr_t bd_dma;
4009 	int bd_size;
4010 	unsigned int i;
4011 	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
4012 			sizeof(struct bufdesc);
4013 	unsigned dsize_log2 = __fls(dsize);
4014 	int ret;
4015 
4016 	WARN_ON(dsize != (1 << dsize_log2));
4017 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
4018 	fep->rx_align = 0xf;
4019 	fep->tx_align = 0xf;
4020 #else
4021 	fep->rx_align = 0x3;
4022 	fep->tx_align = 0x3;
4023 #endif
4024 	fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4025 	fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4026 	fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
4027 	fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;
4028 
4029 	/* Check mask of the streaming and coherent API */
4030 	ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
4031 	if (ret < 0) {
4032 		dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
4033 		return ret;
4034 	}
4035 
4036 	ret = fec_enet_alloc_queue(ndev);
4037 	if (ret)
4038 		return ret;
4039 
4040 	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
4041 
4042 	/* Allocate memory for buffer descriptors. */
4043 	cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma,
4044 				  GFP_KERNEL);
4045 	if (!cbd_base) {
4046 		ret = -ENOMEM;
4047 		goto free_queue_mem;
4048 	}
4049 
4050 	/* Get the Ethernet address */
4051 	ret = fec_get_mac(ndev);
4052 	if (ret)
4053 		goto free_queue_mem;
4054 
4055 	/* Set receive and transmit descriptor base. */
4056 	for (i = 0; i < fep->num_rx_queues; i++) {
4057 		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
4058 		unsigned size = dsize * rxq->bd.ring_size;
4059 
4060 		rxq->bd.qid = i;
4061 		rxq->bd.base = cbd_base;
4062 		rxq->bd.cur = cbd_base;
4063 		rxq->bd.dma = bd_dma;
4064 		rxq->bd.dsize = dsize;
4065 		rxq->bd.dsize_log2 = dsize_log2;
4066 		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
4067 		bd_dma += size;
4068 		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
4069 		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
4070 	}
4071 
4072 	for (i = 0; i < fep->num_tx_queues; i++) {
4073 		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
4074 		unsigned size = dsize * txq->bd.ring_size;
4075 
4076 		txq->bd.qid = i;
4077 		txq->bd.base = cbd_base;
4078 		txq->bd.cur = cbd_base;
4079 		txq->bd.dma = bd_dma;
4080 		txq->bd.dsize = dsize;
4081 		txq->bd.dsize_log2 = dsize_log2;
4082 		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
4083 		bd_dma += size;
4084 		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
4085 		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
4086 	}
4087 
4088 
4089 	/* The FEC Ethernet specific entries in the device structure */
4090 	ndev->watchdog_timeo = TX_TIMEOUT;
4091 	ndev->netdev_ops = &fec_netdev_ops;
4092 	ndev->ethtool_ops = &fec_enet_ethtool_ops;
4093 
4094 	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
4095 	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
4096 
4097 	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
4098 		/* enable hw VLAN support */
4099 		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4100 
4101 	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
4102 		netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);
4103 
4104 		/* enable hw accelerator */
4105 		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
4106 				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
4107 		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
4108 	}
4109 
4110 	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
4111 		fep->tx_align = 0;
4112 		fep->rx_align = 0x3f;
4113 	}
4114 
4115 	ndev->hw_features = ndev->features;
4116 
4117 	if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
4118 		ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
4119 				     NETDEV_XDP_ACT_REDIRECT;
4120 
4121 	fec_restart(ndev);
4122 
4123 	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
4124 		fec_enet_clear_ethtool_stats(ndev);
4125 	else
4126 		fec_enet_update_ethtool_stats(ndev);
4127 
4128 	return 0;
4129 
4130 free_queue_mem:
4131 	fec_enet_free_queue(ndev);
4132 	return ret;
4133 }
4134 
4135 static void fec_enet_deinit(struct net_device *ndev)
4136 {
4137 	struct fec_enet_private *fep = netdev_priv(ndev);
4138 
4139 	netif_napi_del(&fep->napi);
4140 	fec_enet_free_queue(ndev);
4141 }
4142 
4143 #ifdef CONFIG_OF
4144 static int fec_reset_phy(struct platform_device *pdev)
4145 {
4146 	struct gpio_desc *phy_reset;
4147 	int msec = 1, phy_post_delay = 0;
4148 	struct device_node *np = pdev->dev.of_node;
4149 	int err;
4150 
4151 	if (!np)
4152 		return 0;
4153 
4154 	err = of_property_read_u32(np, "phy-reset-duration", &msec);
4155 	/* A sane reset duration should not be longer than 1s */
4156 	if (!err && msec > 1000)
4157 		msec = 1;
4158 
4159 	err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
4160 	/* valid reset duration should be less than 1s */
4161 	if (!err && phy_post_delay > 1000)
4162 		return -EINVAL;
4163 
4164 	phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset",
4165 					    GPIOD_OUT_HIGH);
4166 	if (IS_ERR(phy_reset))
4167 		return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset),
4168 				     "failed to get phy-reset-gpios\n");
4169 
4170 	if (!phy_reset)
4171 		return 0;
4172 
4173 	if (msec > 20)
4174 		msleep(msec);
4175 	else
4176 		usleep_range(msec * 1000, msec * 1000 + 1000);
4177 
4178 	gpiod_set_value_cansleep(phy_reset, 0);
4179 
4180 	if (!phy_post_delay)
4181 		return 0;
4182 
4183 	if (phy_post_delay > 20)
4184 		msleep(phy_post_delay);
4185 	else
4186 		usleep_range(phy_post_delay * 1000,
4187 			     phy_post_delay * 1000 + 1000);
4188 
4189 	return 0;
4190 }
4191 #else /* CONFIG_OF */
4192 static int fec_reset_phy(struct platform_device *pdev)
4193 {
4194 	/*
4195 	 * In case of platform probe, the reset has been done
4196 	 * by machine code.
4197 	 */
4198 	return 0;
4199 }
4200 #endif /* CONFIG_OF */
4201 
4202 static void
4203 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
4204 {
4205 	struct device_node *np = pdev->dev.of_node;
4206 
4207 	*num_tx = *num_rx = 1;
4208 
4209 	if (!np || !of_device_is_available(np))
4210 		return;
4211 
4212 	/* parse the num of tx and rx queues */
4213 	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
4214 
4215 	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
4216 
4217 	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
4218 		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
4219 			 *num_tx);
4220 		*num_tx = 1;
4221 		return;
4222 	}
4223 
4224 	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
4225 		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
4226 			 *num_rx);
4227 		*num_rx = 1;
4228 		return;
4229 	}
4230 
4231 }
4232 
4233 static int fec_enet_get_irq_cnt(struct platform_device *pdev)
4234 {
4235 	int irq_cnt = platform_irq_count(pdev);
4236 
4237 	if (irq_cnt > FEC_IRQ_NUM)
4238 		irq_cnt = FEC_IRQ_NUM;	/* last for pps */
4239 	else if (irq_cnt == 2)
4240 		irq_cnt = 1;	/* last for pps */
4241 	else if (irq_cnt <= 0)
4242 		irq_cnt = 1;	/* At least 1 irq is needed */
4243 	return irq_cnt;
4244 }
4245 
4246 static void fec_enet_get_wakeup_irq(struct platform_device *pdev)
4247 {
4248 	struct net_device *ndev = platform_get_drvdata(pdev);
4249 	struct fec_enet_private *fep = netdev_priv(ndev);
4250 
4251 	if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2)
4252 		fep->wake_irq = fep->irq[2];
4253 	else
4254 		fep->wake_irq = fep->irq[0];
4255 }
4256 
4257 static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
4258 				   struct device_node *np)
4259 {
4260 	struct device_node *gpr_np;
4261 	u32 out_val[3];
4262 	int ret = 0;
4263 
4264 	gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
4265 	if (!gpr_np)
4266 		return 0;
4267 
4268 	ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
4269 					 ARRAY_SIZE(out_val));
4270 	if (ret) {
4271 		dev_dbg(&fep->pdev->dev, "no stop mode property\n");
4272 		goto out;
4273 	}
4274 
4275 	fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
4276 	if (IS_ERR(fep->stop_gpr.gpr)) {
4277 		dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
4278 		ret = PTR_ERR(fep->stop_gpr.gpr);
4279 		fep->stop_gpr.gpr = NULL;
4280 		goto out;
4281 	}
4282 
4283 	fep->stop_gpr.reg = out_val[1];
4284 	fep->stop_gpr.bit = out_val[2];
4285 
4286 out:
4287 	of_node_put(gpr_np);
4288 
4289 	return ret;
4290 }
4291 
4292 static int
4293 fec_probe(struct platform_device *pdev)
4294 {
4295 	struct fec_enet_private *fep;
4296 	struct fec_platform_data *pdata;
4297 	phy_interface_t interface;
4298 	struct net_device *ndev;
4299 	int i, irq, ret = 0;
4300 	static int dev_id;
4301 	struct device_node *np = pdev->dev.of_node, *phy_node;
4302 	int num_tx_qs;
4303 	int num_rx_qs;
4304 	char irq_name[8];
4305 	int irq_cnt;
4306 	const struct fec_devinfo *dev_info;
4307 
4308 	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
4309 
4310 	/* Init network device */
4311 	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
4312 				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
4313 	if (!ndev)
4314 		return -ENOMEM;
4315 
4316 	SET_NETDEV_DEV(ndev, &pdev->dev);
4317 
4318 	/* setup board info structure */
4319 	fep = netdev_priv(ndev);
4320 
4321 	dev_info = device_get_match_data(&pdev->dev);
4322 	if (!dev_info)
4323 		dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data;
4324 	if (dev_info)
4325 		fep->quirks = dev_info->quirks;
4326 
4327 	fep->netdev = ndev;
4328 	fep->num_rx_queues = num_rx_qs;
4329 	fep->num_tx_queues = num_tx_qs;
4330 
4331 #if !defined(CONFIG_M5272)
4332 	/* default enable pause frame auto negotiation */
4333 	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
4334 		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
4335 #endif
4336 
4337 	/* Select default pin state */
4338 	pinctrl_pm_select_default_state(&pdev->dev);
4339 
4340 	fep->hwp = devm_platform_ioremap_resource(pdev, 0);
4341 	if (IS_ERR(fep->hwp)) {
4342 		ret = PTR_ERR(fep->hwp);
4343 		goto failed_ioremap;
4344 	}
4345 
4346 	fep->pdev = pdev;
4347 	fep->dev_id = dev_id++;
4348 
4349 	platform_set_drvdata(pdev, ndev);
4350 
4351 	if ((of_machine_is_compatible("fsl,imx6q") ||
4352 	     of_machine_is_compatible("fsl,imx6dl")) &&
4353 	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
4354 		fep->quirks |= FEC_QUIRK_ERR006687;
4355 
4356 	ret = fec_enet_ipc_handle_init(fep);
4357 	if (ret)
4358 		goto failed_ipc_init;
4359 
4360 	if (of_property_read_bool(np, "fsl,magic-packet"))
4361 		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
4362 
4363 	ret = fec_enet_init_stop_mode(fep, np);
4364 	if (ret)
4365 		goto failed_stop_mode;
4366 
4367 	phy_node = of_parse_phandle(np, "phy-handle", 0);
4368 	if (!phy_node && of_phy_is_fixed_link(np)) {
4369 		ret = of_phy_register_fixed_link(np);
4370 		if (ret < 0) {
4371 			dev_err(&pdev->dev,
4372 				"broken fixed-link specification\n");
4373 			goto failed_phy;
4374 		}
4375 		phy_node = of_node_get(np);
4376 	}
4377 	fep->phy_node = phy_node;
4378 
4379 	ret = of_get_phy_mode(pdev->dev.of_node, &interface);
4380 	if (ret) {
4381 		pdata = dev_get_platdata(&pdev->dev);
4382 		if (pdata)
4383 			fep->phy_interface = pdata->phy;
4384 		else
4385 			fep->phy_interface = PHY_INTERFACE_MODE_MII;
4386 	} else {
4387 		fep->phy_interface = interface;
4388 	}
4389 
4390 	ret = fec_enet_parse_rgmii_delay(fep, np);
4391 	if (ret)
4392 		goto failed_rgmii_delay;
4393 
4394 	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
4395 	if (IS_ERR(fep->clk_ipg)) {
4396 		ret = PTR_ERR(fep->clk_ipg);
4397 		goto failed_clk;
4398 	}
4399 
4400 	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
4401 	if (IS_ERR(fep->clk_ahb)) {
4402 		ret = PTR_ERR(fep->clk_ahb);
4403 		goto failed_clk;
4404 	}
4405 
4406 	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
4407 
4408 	/* enet_out is optional, depends on board */
4409 	fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
4410 	if (IS_ERR(fep->clk_enet_out)) {
4411 		ret = PTR_ERR(fep->clk_enet_out);
4412 		goto failed_clk;
4413 	}
4414 
4415 	fep->ptp_clk_on = false;
4416 	mutex_init(&fep->ptp_clk_mutex);
4417 
4418 	/* clk_ref is optional, depends on board */
4419 	fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
4420 	if (IS_ERR(fep->clk_ref)) {
4421 		ret = PTR_ERR(fep->clk_ref);
4422 		goto failed_clk;
4423 	}
4424 	fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
4425 
4426 	/* clk_2x_txclk is optional, depends on board */
4427 	if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
4428 		fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
4429 		if (IS_ERR(fep->clk_2x_txclk))
4430 			fep->clk_2x_txclk = NULL;
4431 	}
4432 
4433 	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
4434 	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
4435 	if (IS_ERR(fep->clk_ptp)) {
4436 		fep->clk_ptp = NULL;
4437 		fep->bufdesc_ex = false;
4438 	}
4439 
4440 	ret = fec_enet_clk_enable(ndev, true);
4441 	if (ret)
4442 		goto failed_clk;
4443 
4444 	ret = clk_prepare_enable(fep->clk_ipg);
4445 	if (ret)
4446 		goto failed_clk_ipg;
4447 	ret = clk_prepare_enable(fep->clk_ahb);
4448 	if (ret)
4449 		goto failed_clk_ahb;
4450 
4451 	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
4452 	if (!IS_ERR(fep->reg_phy)) {
4453 		ret = regulator_enable(fep->reg_phy);
4454 		if (ret) {
4455 			dev_err(&pdev->dev,
4456 				"Failed to enable phy regulator: %d\n", ret);
4457 			goto failed_regulator;
4458 		}
4459 	} else {
4460 		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
4461 			ret = -EPROBE_DEFER;
4462 			goto failed_regulator;
4463 		}
4464 		fep->reg_phy = NULL;
4465 	}
4466 
4467 	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
4468 	pm_runtime_use_autosuspend(&pdev->dev);
4469 	pm_runtime_get_noresume(&pdev->dev);
4470 	pm_runtime_set_active(&pdev->dev);
4471 	pm_runtime_enable(&pdev->dev);
4472 
4473 	ret = fec_reset_phy(pdev);
4474 	if (ret)
4475 		goto failed_reset;
4476 
4477 	irq_cnt = fec_enet_get_irq_cnt(pdev);
4478 	if (fep->bufdesc_ex)
4479 		fec_ptp_init(pdev, irq_cnt);
4480 
4481 	ret = fec_enet_init(ndev);
4482 	if (ret)
4483 		goto failed_init;
4484 
4485 	for (i = 0; i < irq_cnt; i++) {
4486 		snprintf(irq_name, sizeof(irq_name), "int%d", i);
4487 		irq = platform_get_irq_byname_optional(pdev, irq_name);
4488 		if (irq < 0)
4489 			irq = platform_get_irq(pdev, i);
4490 		if (irq < 0) {
4491 			ret = irq;
4492 			goto failed_irq;
4493 		}
4494 		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
4495 				       0, pdev->name, ndev);
4496 		if (ret)
4497 			goto failed_irq;
4498 
4499 		fep->irq[i] = irq;
4500 	}
4501 
4502 	/* Decide which interrupt line is wakeup capable */
4503 	fec_enet_get_wakeup_irq(pdev);
4504 
4505 	ret = fec_enet_mii_init(pdev);
4506 	if (ret)
4507 		goto failed_mii_init;
4508 
4509 	/* Carrier starts down, phylib will bring it up */
4510 	netif_carrier_off(ndev);
4511 	fec_enet_clk_enable(ndev, false);
4512 	pinctrl_pm_select_sleep_state(&pdev->dev);
4513 
4514 	ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
4515 
4516 	ret = register_netdev(ndev);
4517 	if (ret)
4518 		goto failed_register;
4519 
4520 	device_init_wakeup(&ndev->dev, fep->wol_flag &
4521 			   FEC_WOL_HAS_MAGIC_PACKET);
4522 
4523 	if (fep->bufdesc_ex && fep->ptp_clock)
4524 		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
4525 
4526 	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
4527 
4528 	pm_runtime_mark_last_busy(&pdev->dev);
4529 	pm_runtime_put_autosuspend(&pdev->dev);
4530 
4531 	return 0;
4532 
4533 failed_register:
4534 	fec_enet_mii_remove(fep);
4535 failed_mii_init:
4536 failed_irq:
4537 	fec_enet_deinit(ndev);
4538 failed_init:
4539 	fec_ptp_stop(pdev);
4540 failed_reset:
4541 	pm_runtime_put_noidle(&pdev->dev);
4542 	pm_runtime_disable(&pdev->dev);
4543 	if (fep->reg_phy)
4544 		regulator_disable(fep->reg_phy);
4545 failed_regulator:
4546 	clk_disable_unprepare(fep->clk_ahb);
4547 failed_clk_ahb:
4548 	clk_disable_unprepare(fep->clk_ipg);
4549 failed_clk_ipg:
4550 	fec_enet_clk_enable(ndev, false);
4551 failed_clk:
4552 failed_rgmii_delay:
4553 	if (of_phy_is_fixed_link(np))
4554 		of_phy_deregister_fixed_link(np);
4555 	of_node_put(phy_node);
4556 failed_stop_mode:
4557 failed_ipc_init:
4558 failed_phy:
4559 	dev_id--;
4560 failed_ioremap:
4561 	free_netdev(ndev);
4562 
4563 	return ret;
4564 }
4565 
4566 static void
4567 fec_drv_remove(struct platform_device *pdev)
4568 {
4569 	struct net_device *ndev = platform_get_drvdata(pdev);
4570 	struct fec_enet_private *fep = netdev_priv(ndev);
4571 	struct device_node *np = pdev->dev.of_node;
4572 	int ret;
4573 
4574 	ret = pm_runtime_get_sync(&pdev->dev);
4575 	if (ret < 0)
4576 		dev_err(&pdev->dev,
4577 			"Failed to resume device in remove callback (%pe)\n",
4578 			ERR_PTR(ret));
4579 
4580 	cancel_work_sync(&fep->tx_timeout_work);
4581 	fec_ptp_stop(pdev);
4582 	unregister_netdev(ndev);
4583 	fec_enet_mii_remove(fep);
4584 	if (fep->reg_phy)
4585 		regulator_disable(fep->reg_phy);
4586 
4587 	if (of_phy_is_fixed_link(np))
4588 		of_phy_deregister_fixed_link(np);
4589 	of_node_put(fep->phy_node);
4590 
4591 	/* After pm_runtime_get_sync() failed, the clks are still off, so skip
4592 	 * disabling them again.
4593 	 */
4594 	if (ret >= 0) {
4595 		clk_disable_unprepare(fep->clk_ahb);
4596 		clk_disable_unprepare(fep->clk_ipg);
4597 	}
4598 	pm_runtime_put_noidle(&pdev->dev);
4599 	pm_runtime_disable(&pdev->dev);
4600 
4601 	fec_enet_deinit(ndev);
4602 	free_netdev(ndev);
4603 }
4604 
4605 static int fec_suspend(struct device *dev)
4606 {
4607 	struct net_device *ndev = dev_get_drvdata(dev);
4608 	struct fec_enet_private *fep = netdev_priv(ndev);
4609 	int ret;
4610 
4611 	rtnl_lock();
4612 	if (netif_running(ndev)) {
4613 		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
4614 			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
4615 		phy_stop(ndev->phydev);
4616 		napi_disable(&fep->napi);
4617 		netif_tx_lock_bh(ndev);
4618 		netif_device_detach(ndev);
4619 		netif_tx_unlock_bh(ndev);
4620 		fec_stop(ndev);
4621 		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
4622 			fec_irqs_disable(ndev);
4623 			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
4624 		} else {
4625 			fec_irqs_disable_except_wakeup(ndev);
4626 			if (fep->wake_irq > 0) {
4627 				disable_irq(fep->wake_irq);
4628 				enable_irq_wake(fep->wake_irq);
4629 			}
4630 			fec_enet_stop_mode(fep, true);
4631 		}
4632 		/* It's safe to disable clocks since interrupts are masked */
4633 		fec_enet_clk_enable(ndev, false);
4634 
4635 		fep->rpm_active = !pm_runtime_status_suspended(dev);
4636 		if (fep->rpm_active) {
4637 			ret = pm_runtime_force_suspend(dev);
4638 			if (ret < 0) {
4639 				rtnl_unlock();
4640 				return ret;
4641 			}
4642 		}
4643 	}
4644 	rtnl_unlock();
4645 
4646 	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
4647 		regulator_disable(fep->reg_phy);
4648 
4649 	/* SOC supply clock to phy, when clock is disabled, phy link down
4650 	 * SOC control phy regulator, when regulator is disabled, phy link down
4651 	 */
4652 	if (fep->clk_enet_out || fep->reg_phy)
4653 		fep->link = 0;
4654 
4655 	return 0;
4656 }
4657 
4658 static int fec_resume(struct device *dev)
4659 {
4660 	struct net_device *ndev = dev_get_drvdata(dev);
4661 	struct fec_enet_private *fep = netdev_priv(ndev);
4662 	int ret;
4663 	int val;
4664 
4665 	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
4666 		ret = regulator_enable(fep->reg_phy);
4667 		if (ret)
4668 			return ret;
4669 	}
4670 
4671 	rtnl_lock();
4672 	if (netif_running(ndev)) {
4673 		if (fep->rpm_active)
4674 			pm_runtime_force_resume(dev);
4675 
4676 		ret = fec_enet_clk_enable(ndev, true);
4677 		if (ret) {
4678 			rtnl_unlock();
4679 			goto failed_clk;
4680 		}
4681 		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
4682 			fec_enet_stop_mode(fep, false);
4683 			if (fep->wake_irq) {
4684 				disable_irq_wake(fep->wake_irq);
4685 				enable_irq(fep->wake_irq);
4686 			}
4687 
4688 			val = readl(fep->hwp + FEC_ECNTRL);
4689 			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
4690 			writel(val, fep->hwp + FEC_ECNTRL);
4691 			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
4692 		} else {
4693 			pinctrl_pm_select_default_state(&fep->pdev->dev);
4694 		}
4695 		fec_restart(ndev);
4696 		netif_tx_lock_bh(ndev);
4697 		netif_device_attach(ndev);
4698 		netif_tx_unlock_bh(ndev);
4699 		napi_enable(&fep->napi);
4700 		phy_init_hw(ndev->phydev);
4701 		phy_start(ndev->phydev);
4702 	}
4703 	rtnl_unlock();
4704 
4705 	return 0;
4706 
4707 failed_clk:
4708 	if (fep->reg_phy)
4709 		regulator_disable(fep->reg_phy);
4710 	return ret;
4711 }
4712 
4713 static int fec_runtime_suspend(struct device *dev)
4714 {
4715 	struct net_device *ndev = dev_get_drvdata(dev);
4716 	struct fec_enet_private *fep = netdev_priv(ndev);
4717 
4718 	clk_disable_unprepare(fep->clk_ahb);
4719 	clk_disable_unprepare(fep->clk_ipg);
4720 
4721 	return 0;
4722 }
4723 
4724 static int fec_runtime_resume(struct device *dev)
4725 {
4726 	struct net_device *ndev = dev_get_drvdata(dev);
4727 	struct fec_enet_private *fep = netdev_priv(ndev);
4728 	int ret;
4729 
4730 	ret = clk_prepare_enable(fep->clk_ahb);
4731 	if (ret)
4732 		return ret;
4733 	ret = clk_prepare_enable(fep->clk_ipg);
4734 	if (ret)
4735 		goto failed_clk_ipg;
4736 
4737 	return 0;
4738 
4739 failed_clk_ipg:
4740 	clk_disable_unprepare(fep->clk_ahb);
4741 	return ret;
4742 }
4743 
4744 static const struct dev_pm_ops fec_pm_ops = {
4745 	SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
4746 	RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
4747 };
4748 
4749 static struct platform_driver fec_driver = {
4750 	.driver	= {
4751 		.name	= DRIVER_NAME,
4752 		.pm	= pm_ptr(&fec_pm_ops),
4753 		.of_match_table = fec_dt_ids,
4754 		.suppress_bind_attrs = true,
4755 	},
4756 	.id_table = fec_devtype,
4757 	.probe	= fec_probe,
4758 	.remove_new = fec_drv_remove,
4759 };
4760 
4761 module_platform_driver(fec_driver);
4762 
4763 MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
4764 MODULE_LICENSE("GPL");
4765