xref: /linux/drivers/net/ethernet/freescale/fec_main.c (revision 799a4912eea74c667da1c8167f93bf2d1508a89e)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
4  * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
5  *
6  * Right now, I am very wasteful with the buffers.  I allocate memory
7  * pages and then divide them into 2K frame buffers.  This way I know I
8  * have buffers large enough to hold one frame within one buffer descriptor.
9  * Once I get this working, I will use 64 or 128 byte CPM buffers, which
10  * will be much more memory efficient and will easily handle lots of
11  * small packets.
12  *
13  * Much better multiple PHY support by Magnus Damm.
14  * Copyright (c) 2000 Ericsson Radio Systems AB.
15  *
16  * Support for FEC controller of ColdFire processors.
17  * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
18  *
19  * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
20  * Copyright (c) 2004-2006 Macq Electronique SA.
21  *
22  * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
23  */
24 
25 #include <linux/bitops.h>
26 #include <linux/bpf.h>
27 #include <linux/bpf_trace.h>
28 #include <linux/cacheflush.h>
29 #include <linux/clk.h>
30 #include <linux/crc32.h>
31 #include <linux/delay.h>
32 #include <linux/errno.h>
33 #include <linux/etherdevice.h>
34 #include <linux/fec.h>
35 #include <linux/filter.h>
36 #include <linux/gpio/consumer.h>
37 #include <linux/icmp.h>
38 #include <linux/if_vlan.h>
39 #include <linux/in.h>
40 #include <linux/interrupt.h>
41 #include <linux/io.h>
42 #include <linux/ioport.h>
43 #include <linux/ip.h>
44 #include <linux/irq.h>
45 #include <linux/kernel.h>
46 #include <linux/mdio.h>
47 #include <linux/mfd/syscon.h>
48 #include <linux/module.h>
49 #include <linux/netdevice.h>
50 #include <linux/of.h>
51 #include <linux/of_mdio.h>
52 #include <linux/of_net.h>
53 #include <linux/phy.h>
54 #include <linux/pinctrl/consumer.h>
55 #include <linux/phy_fixed.h>
56 #include <linux/platform_device.h>
57 #include <linux/pm_runtime.h>
58 #include <linux/prefetch.h>
59 #include <linux/property.h>
60 #include <linux/ptrace.h>
61 #include <linux/regmap.h>
62 #include <linux/regulator/consumer.h>
63 #include <linux/skbuff.h>
64 #include <linux/slab.h>
65 #include <linux/spinlock.h>
66 #include <linux/string.h>
67 #include <linux/tcp.h>
68 #include <linux/udp.h>
69 #include <linux/workqueue.h>
70 #include <net/ip.h>
71 #include <net/page_pool/helpers.h>
72 #include <net/selftests.h>
73 #include <net/tso.h>
74 #include <soc/imx/cpuidle.h>
75 
76 #include "fec.h"
77 
78 static void set_multicast_list(struct net_device *ndev);
79 static void fec_enet_itr_coal_set(struct net_device *ndev);
80 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
81 				int cpu, struct xdp_buff *xdp,
82 				u32 dma_sync_len);
83 
84 #define DRIVER_NAME	"fec"
85 
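/* Map the eight VLAN priority levels onto the hardware queues:
 * priorities 0-1 -> queue 0, 2-4 -> queue 1, 5-7 -> queue 2.
 */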
86 static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
87 
88 #define FEC_ENET_RSEM_V	0x84
89 #define FEC_ENET_RSFL_V	16
90 #define FEC_ENET_RAEM_V	0x8
91 #define FEC_ENET_RAFL_V	0x8
92 #define FEC_ENET_OPD_V	0xFFF0
93 #define FEC_MDIO_PM_TIMEOUT  100 /* ms */
94 
95 #define FEC_ENET_XDP_PASS          0
96 #define FEC_ENET_XDP_CONSUMED      BIT(0)
97 #define FEC_ENET_XDP_TX            BIT(1)
98 #define FEC_ENET_XDP_REDIR         BIT(2)
99 
100 struct fec_devinfo {
101 	u32 quirks;
102 };
103 
104 static const struct fec_devinfo fec_imx25_info = {
105 	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
106 		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
107 };
108 
109 static const struct fec_devinfo fec_imx27_info = {
110 	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
111 		  FEC_QUIRK_HAS_MDIO_C45,
112 };
113 
114 static const struct fec_devinfo fec_imx28_info = {
115 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
116 		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
117 		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
118 		  FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
119 };
120 
121 static const struct fec_devinfo fec_imx6q_info = {
122 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
123 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
124 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
125 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
126 		  FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
127 };
128 
129 static const struct fec_devinfo fec_mvf600_info = {
130 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
131 		  FEC_QUIRK_HAS_MDIO_C45,
132 };
133 
134 static const struct fec_devinfo fec_imx6sx_info = {
135 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
136 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
137 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
138 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
139 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
140 		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
141 		  FEC_QUIRK_HAS_MDIO_C45,
142 };
143 
144 static const struct fec_devinfo fec_imx6ul_info = {
145 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
146 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
147 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
148 		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
149 		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
150 		  FEC_QUIRK_HAS_MDIO_C45,
151 };
152 
153 static const struct fec_devinfo fec_imx8mq_info = {
154 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
155 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
156 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
157 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
158 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
159 		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
160 		  FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
161 		  FEC_QUIRK_HAS_MDIO_C45,
162 };
163 
164 static const struct fec_devinfo fec_imx8qm_info = {
165 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
166 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
167 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
168 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
169 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
170 		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
171 		  FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45 |
172 		  FEC_QUIRK_JUMBO_FRAME,
173 };
174 
175 static const struct fec_devinfo fec_s32v234_info = {
176 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
177 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
178 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
179 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
180 		  FEC_QUIRK_HAS_MDIO_C45,
181 };
182 
183 static struct platform_device_id fec_devtype[] = {
184 	{
185 		/* keep it for coldfire */
186 		.name = DRIVER_NAME,
187 		.driver_data = 0,
188 	}, {
189 		/* sentinel */
190 	}
191 };
192 MODULE_DEVICE_TABLE(platform, fec_devtype);
193 
194 static const struct of_device_id fec_dt_ids[] = {
195 	{ .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, },
196 	{ .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, },
197 	{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
198 	{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
199 	{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
200 	{ .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, },
201 	{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
202 	{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
203 	{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
204 	{ .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, },
205 	{ /* sentinel */ }
206 };
207 MODULE_DEVICE_TABLE(of, fec_dt_ids);
208 
209 static unsigned char macaddr[ETH_ALEN];
210 module_param_array(macaddr, byte, NULL, 0);
211 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
212 
213 #if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory;
 * if this is non-zero then assume it is the address to get the MAC from.
 */
218 #if defined(CONFIG_NETtel)
219 #define	FEC_FLASHMAC	0xf0006006
220 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
221 #define	FEC_FLASHMAC	0xf0006000
222 #elif defined(CONFIG_CANCam)
223 #define	FEC_FLASHMAC	0xf0020000
224 #elif defined (CONFIG_M5272C3)
225 #define	FEC_FLASHMAC	(0xffe04000 + 4)
226 #elif defined(CONFIG_MOD5272)
227 #define FEC_FLASHMAC	0xffc0406b
228 #else
229 #define	FEC_FLASHMAC	0
230 #endif
231 #endif /* CONFIG_M5272 */
232 
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. The worst case is 64, so round down by 64.
 */
238 #define MAX_JUMBO_BUF_SIZE	(round_down(16384 - FEC_DRV_RESERVE_SPACE - 64, 64))
239 #define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
240 #define PKT_MINBUF_SIZE		64
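/* For reference: PKT_MAXBUF_SIZE evaluates to round_down(2048 - 64, 64) =
 * 1984 bytes, comfortably above a standard 1518-byte frame. The jumbo
 * limit additionally subtracts FEC_DRV_RESERVE_SPACE (defined in one of
 * the included headers).
 */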
241 
242 /* FEC receive acceleration */
243 #define FEC_RACC_IPDIS		BIT(1)
244 #define FEC_RACC_PRODIS		BIT(2)
245 #define FEC_RACC_SHIFT16	BIT(7)
246 #define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
247 
248 /* MIB Control Register */
249 #define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)
250 
251 /*
252  * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
253  * size bits. Other FEC hardware does not, so we need to take that into
254  * account when setting it.
255  */
256 #ifndef CONFIG_M5272
257 #define	OPT_ARCH_HAS_MAX_FL	1
258 #else
259 #define	OPT_ARCH_HAS_MAX_FL	0
260 #endif
261 
262 /* FEC MII MMFR bits definition */
263 #define FEC_MMFR_ST		(1 << 30)
264 #define FEC_MMFR_ST_C45		(0)
265 #define FEC_MMFR_OP_READ	(2 << 28)
266 #define FEC_MMFR_OP_READ_C45	(3 << 28)
267 #define FEC_MMFR_OP_WRITE	(1 << 28)
268 #define FEC_MMFR_OP_ADDR_WRITE	(0)
269 #define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
270 #define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
271 #define FEC_MMFR_TA		(2 << 16)
272 #define FEC_MMFR_DATA(v)	(v & 0xffff)
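/* Illustrative example (not driver code): a Clause 22 MDIO read of
 * register 2 on PHY address 1 would compose the management frame as
 *
 *	frame = FEC_MMFR_ST | FEC_MMFR_OP_READ |
 *		FEC_MMFR_PA(1) | FEC_MMFR_RA(2) | FEC_MMFR_TA;
 *
 * with the PHY's reply read back from the FEC_MMFR_DATA field.
 */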
273 /* FEC ECR bits definition */
274 #define FEC_ECR_RESET           BIT(0)
275 #define FEC_ECR_ETHEREN         BIT(1)
276 #define FEC_ECR_MAGICEN         BIT(2)
277 #define FEC_ECR_SLEEP           BIT(3)
278 #define FEC_ECR_EN1588          BIT(4)
279 #define FEC_ECR_SPEED           BIT(5)
280 #define FEC_ECR_BYTESWP         BIT(8)
281 /* FEC RCR bits definition */
282 #define FEC_RCR_LOOP            BIT(0)
283 #define FEC_RCR_DRT		BIT(1)
284 #define FEC_RCR_MII             BIT(2)
285 #define FEC_RCR_PROMISC         BIT(3)
286 #define FEC_RCR_BC_REJ          BIT(4)
287 #define FEC_RCR_FLOWCTL         BIT(5)
288 #define FEC_RCR_RGMII		BIT(6)
289 #define FEC_RCR_RMII            BIT(8)
290 #define FEC_RCR_10BASET         BIT(9)
291 #define FEC_RCR_NLC		BIT(30)
292 /* TX WMARK bits */
293 #define FEC_TXWMRK_STRFWD       BIT(8)
294 
295 #define FEC_MII_TIMEOUT		30000 /* us */
296 
297 /* Transmitter timeout */
298 #define TX_TIMEOUT (2 * HZ)
299 
300 #define FEC_PAUSE_FLAG_AUTONEG	0x1
301 #define FEC_PAUSE_FLAG_ENABLE	0x2
302 #define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
303 #define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
304 #define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)
305 
306 /* Max number of allowed TCP segments for software TSO */
307 #define FEC_MAX_TSO_SEGS	100
308 #define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
309 
310 #define IS_TSO_HEADER(txq, addr) \
311 	((addr >= txq->tso_hdrs_dma) && \
312 	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
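/* IS_TSO_HEADER() lets the cleanup paths detect buffers that live in the
 * pre-mapped TSO header region (txq->tso_hdrs_dma); those must not be
 * passed to dma_unmap_single().
 */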
313 
314 static int mii_cnt;
315 
316 static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
317 					     struct bufdesc_prop *bd)
318 {
319 	return (bdp >= bd->last) ? bd->base
320 			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
321 }
322 
323 static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
324 					     struct bufdesc_prop *bd)
325 {
326 	return (bdp <= bd->base) ? bd->last
327 			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
328 }
329 
330 static int fec_enet_get_bd_index(struct bufdesc *bdp,
331 				 struct bufdesc_prop *bd)
332 {
333 	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
334 }
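/* The index is pure pointer arithmetic: with the legacy 8-byte descriptors
 * dsize_log2 is 3, so a descriptor 64 bytes past bd->base has index
 * 64 >> 3 = 8; extended descriptors simply use a larger shift.
 */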
335 
336 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
337 {
338 	int entries;
339 
340 	entries = (((const char *)txq->dirty_tx -
341 			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
342 
343 	return entries >= 0 ? entries : entries + txq->bd.ring_size;
344 }
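/* Worked example: with ring_size 512, dirty_tx at index 10 and bd.cur at
 * index 500, the raw difference is (10 - 500) - 1 = -491, which wraps to
 * -491 + 512 = 21 free descriptors. The "- 1" keeps cur from catching up
 * with dirty_tx, so a full ring and an empty ring stay distinguishable.
 */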
345 
346 static void swap_buffer(void *bufaddr, int len)
347 {
348 	int i;
349 	unsigned int *buf = bufaddr;
350 
351 	for (i = 0; i < len; i += 4, buf++)
352 		swab32s(buf);
353 }
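/* swab32s() byte-swaps each 32-bit word in place. This is the
 * FEC_QUIRK_SWAP_FRAME path (e.g. i.MX28, per fec_imx28_info above), where
 * the DMA engine and the CPU disagree on frame byte order.
 */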
354 
355 static void fec_dump(struct net_device *ndev)
356 {
357 	struct fec_enet_private *fep = netdev_priv(ndev);
358 	struct bufdesc *bdp;
359 	struct fec_enet_priv_tx_q *txq;
360 	int index = 0;
361 
362 	netdev_info(ndev, "TX ring dump\n");
363 	pr_info("Nr     SC     addr       len  SKB\n");
364 
365 	txq = fep->tx_queue[0];
366 	bdp = txq->bd.base;
367 
368 	do {
369 		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
370 			index,
371 			bdp == txq->bd.cur ? 'S' : ' ',
372 			bdp == txq->dirty_tx ? 'H' : ' ',
373 			fec16_to_cpu(bdp->cbd_sc),
374 			fec32_to_cpu(bdp->cbd_bufaddr),
375 			fec16_to_cpu(bdp->cbd_datlen),
376 			txq->tx_buf[index].buf_p);
377 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
378 		index++;
379 	} while (bdp != txq->bd.base);
380 }
381 
382 /*
383  * Coldfire does not support DMA coherent allocations, and has historically used
384  * a band-aid with a manual flush in fec_enet_rx_queue.
385  */
386 #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
387 static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
388 		gfp_t gfp)
389 {
390 	return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
391 }
392 
393 static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
394 		dma_addr_t handle)
395 {
396 	dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL);
397 }
398 #else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
399 static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
400 		gfp_t gfp)
401 {
402 	return dma_alloc_coherent(dev, size, handle, gfp);
403 }
404 
405 static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
406 		dma_addr_t handle)
407 {
408 	dma_free_coherent(dev, size, cpu_addr, handle);
409 }
410 #endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
411 
412 struct fec_dma_devres {
413 	size_t		size;
414 	void		*vaddr;
415 	dma_addr_t	dma_handle;
416 };
417 
418 static void fec_dmam_release(struct device *dev, void *res)
419 {
420 	struct fec_dma_devres *this = res;
421 
422 	fec_dma_free(dev, this->size, this->vaddr, this->dma_handle);
423 }
424 
425 static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle,
426 		gfp_t gfp)
427 {
428 	struct fec_dma_devres *dr;
429 	void *vaddr;
430 
431 	dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp);
432 	if (!dr)
433 		return NULL;
434 	vaddr = fec_dma_alloc(dev, size, handle, gfp);
435 	if (!vaddr) {
436 		devres_free(dr);
437 		return NULL;
438 	}
439 	dr->vaddr = vaddr;
440 	dr->dma_handle = *handle;
441 	dr->size = size;
442 	devres_add(dev, dr);
443 	return vaddr;
444 }
445 
446 static inline bool is_ipv4_pkt(struct sk_buff *skb)
447 {
448 	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
449 }
450 
451 static int
452 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
453 {
454 	/* Only run for packets requiring a checksum. */
455 	if (skb->ip_summed != CHECKSUM_PARTIAL)
456 		return 0;
457 
458 	if (unlikely(skb_cow_head(skb, 0)))
459 		return -1;
460 
461 	if (is_ipv4_pkt(skb))
462 		ip_hdr(skb)->check = 0;
463 	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
464 
465 	return 0;
466 }
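/* With CHECKSUM_PARTIAL the stack leaves the L4 checksum for hardware
 * insertion (see the BD_ENET_TX_PINS/BD_ENET_TX_IINS flags set later), so
 * the field at csum_start + csum_offset is zeroed here to give the
 * checksum inserter a clean starting value.
 */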
467 
468 static int
469 fec_enet_create_page_pool(struct fec_enet_private *fep,
470 			  struct fec_enet_priv_rx_q *rxq, int size)
471 {
472 	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
473 	struct page_pool_params pp_params = {
474 		.order = fep->pagepool_order,
475 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
476 		.pool_size = size,
477 		.nid = dev_to_node(&fep->pdev->dev),
478 		.dev = &fep->pdev->dev,
479 		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
480 		.offset = FEC_ENET_XDP_HEADROOM,
481 		.max_len = fep->rx_frame_size,
482 	};
483 	int err;
484 
485 	rxq->page_pool = page_pool_create(&pp_params);
486 	if (IS_ERR(rxq->page_pool)) {
487 		err = PTR_ERR(rxq->page_pool);
488 		rxq->page_pool = NULL;
489 		return err;
490 	}
491 
492 	err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
493 	if (err < 0)
494 		goto err_free_pp;
495 
496 	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
497 					 rxq->page_pool);
498 	if (err)
499 		goto err_unregister_rxq;
500 
501 	return 0;
502 
503 err_unregister_rxq:
504 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
505 err_free_pp:
506 	page_pool_destroy(rxq->page_pool);
507 	rxq->page_pool = NULL;
508 	return err;
509 }
510 
511 static struct bufdesc *
512 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
513 			     struct sk_buff *skb,
514 			     struct net_device *ndev)
515 {
516 	struct fec_enet_private *fep = netdev_priv(ndev);
517 	struct bufdesc *bdp = txq->bd.cur;
518 	struct bufdesc_ex *ebdp;
519 	int nr_frags = skb_shinfo(skb)->nr_frags;
520 	int frag, frag_len;
521 	unsigned short status;
522 	unsigned int estatus = 0;
523 	skb_frag_t *this_frag;
524 	unsigned int index;
525 	void *bufaddr;
526 	dma_addr_t addr;
527 	int i;
528 
529 	for (frag = 0; frag < nr_frags; frag++) {
530 		this_frag = &skb_shinfo(skb)->frags[frag];
531 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
532 		ebdp = (struct bufdesc_ex *)bdp;
533 
534 		status = fec16_to_cpu(bdp->cbd_sc);
535 		status &= ~BD_ENET_TX_STATS;
536 		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
537 		frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
538 
539 		/* Handle the last BD specially */
540 		if (frag == nr_frags - 1) {
541 			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
542 			if (fep->bufdesc_ex) {
543 				estatus |= BD_ENET_TX_INT;
544 				if (unlikely(skb_shinfo(skb)->tx_flags &
545 					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
546 					estatus |= BD_ENET_TX_TS;
547 			}
548 		}
549 
550 		if (fep->bufdesc_ex) {
551 			if (fep->quirks & FEC_QUIRK_HAS_AVB)
552 				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
553 			if (skb->ip_summed == CHECKSUM_PARTIAL)
554 				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
555 
556 			ebdp->cbd_bdu = 0;
557 			ebdp->cbd_esc = cpu_to_fec32(estatus);
558 		}
559 
560 		bufaddr = skb_frag_address(this_frag);
561 
562 		index = fec_enet_get_bd_index(bdp, &txq->bd);
563 		if (((unsigned long) bufaddr) & fep->tx_align ||
564 			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
565 			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
566 			bufaddr = txq->tx_bounce[index];
567 
568 			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
569 				swap_buffer(bufaddr, frag_len);
570 		}
571 
572 		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
573 				      DMA_TO_DEVICE);
574 		if (dma_mapping_error(&fep->pdev->dev, addr)) {
575 			if (net_ratelimit())
576 				netdev_err(ndev, "Tx DMA memory map failed\n");
577 			goto dma_mapping_error;
578 		}
579 
580 		bdp->cbd_bufaddr = cpu_to_fec32(addr);
581 		bdp->cbd_datlen = cpu_to_fec16(frag_len);
582 		/* Make sure the updates to rest of the descriptor are
583 		 * performed before transferring ownership.
584 		 */
585 		wmb();
586 		bdp->cbd_sc = cpu_to_fec16(status);
587 	}
588 
589 	return bdp;
590 dma_mapping_error:
591 	bdp = txq->bd.cur;
592 	for (i = 0; i < frag; i++) {
593 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
594 		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
595 				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
596 	}
597 	return ERR_PTR(-ENOMEM);
598 }
599 
600 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
601 				   struct sk_buff *skb, struct net_device *ndev)
602 {
603 	struct fec_enet_private *fep = netdev_priv(ndev);
604 	int nr_frags = skb_shinfo(skb)->nr_frags;
605 	struct bufdesc *bdp, *last_bdp;
606 	void *bufaddr;
607 	dma_addr_t addr;
608 	unsigned short status;
609 	unsigned short buflen;
610 	unsigned int estatus = 0;
611 	unsigned int index;
612 	int entries_free;
613 
614 	entries_free = fec_enet_get_free_txdesc_num(txq);
615 	if (entries_free < MAX_SKB_FRAGS + 1) {
616 		dev_kfree_skb_any(skb);
617 		if (net_ratelimit())
618 			netdev_err(ndev, "NOT enough BD for SG!\n");
619 		return NETDEV_TX_OK;
620 	}
621 
622 	/* Protocol checksum off-load for TCP and UDP. */
623 	if (fec_enet_clear_csum(skb, ndev)) {
624 		dev_kfree_skb_any(skb);
625 		return NETDEV_TX_OK;
626 	}
627 
628 	/* Fill in a Tx ring entry */
629 	bdp = txq->bd.cur;
630 	last_bdp = bdp;
631 	status = fec16_to_cpu(bdp->cbd_sc);
632 	status &= ~BD_ENET_TX_STATS;
633 
634 	/* Set buffer length and buffer pointer */
635 	bufaddr = skb->data;
636 	buflen = skb_headlen(skb);
637 
638 	index = fec_enet_get_bd_index(bdp, &txq->bd);
639 	if (((unsigned long) bufaddr) & fep->tx_align ||
640 		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
641 		memcpy(txq->tx_bounce[index], skb->data, buflen);
642 		bufaddr = txq->tx_bounce[index];
643 
644 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
645 			swap_buffer(bufaddr, buflen);
646 	}
647 
	/* Push the data cache so the DMA engine does not see stale memory data. */
649 	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
650 	if (dma_mapping_error(&fep->pdev->dev, addr)) {
651 		dev_kfree_skb_any(skb);
652 		if (net_ratelimit())
653 			netdev_err(ndev, "Tx DMA memory map failed\n");
654 		return NETDEV_TX_OK;
655 	}
656 
657 	if (nr_frags) {
658 		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
659 		if (IS_ERR(last_bdp)) {
660 			dma_unmap_single(&fep->pdev->dev, addr,
661 					 buflen, DMA_TO_DEVICE);
662 			dev_kfree_skb_any(skb);
663 			return NETDEV_TX_OK;
664 		}
665 	} else {
666 		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
667 		if (fep->bufdesc_ex) {
668 			estatus = BD_ENET_TX_INT;
669 			if (unlikely(skb_shinfo(skb)->tx_flags &
670 				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
671 				estatus |= BD_ENET_TX_TS;
672 		}
673 	}
674 	bdp->cbd_bufaddr = cpu_to_fec32(addr);
675 	bdp->cbd_datlen = cpu_to_fec16(buflen);
676 
677 	if (fep->bufdesc_ex) {
678 
679 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
680 
681 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
682 			fep->hwts_tx_en))
683 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
684 
685 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
686 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
687 
688 		if (skb->ip_summed == CHECKSUM_PARTIAL)
689 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
690 
691 		ebdp->cbd_bdu = 0;
692 		ebdp->cbd_esc = cpu_to_fec32(estatus);
693 	}
694 
695 	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
696 	/* Save skb pointer */
697 	txq->tx_buf[index].buf_p = skb;
698 
699 	/* Make sure the updates to rest of the descriptor are performed before
700 	 * transferring ownership.
701 	 */
702 	wmb();
703 
704 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
705 	 * it's the last BD of the frame, and to put the CRC on the end.
706 	 */
707 	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
708 	bdp->cbd_sc = cpu_to_fec16(status);
709 
710 	/* If this was the last BD in the ring, start at the beginning again. */
711 	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
712 
713 	skb_tx_timestamp(skb);
714 
715 	/* Make sure the update to bdp is performed before txq->bd.cur. */
716 	wmb();
717 	txq->bd.cur = bdp;
718 
719 	/* Trigger transmission start */
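	/* ERR007885: on affected parts a write to the transmit descriptor
	 * active register can apparently be lost if it races with the
	 * hardware clearing that register, so re-read it a few times and
	 * only write when it reads back as zero (or when the quirk is not
	 * set at all).
	 */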
720 	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
721 	    !readl(txq->bd.reg_desc_active) ||
722 	    !readl(txq->bd.reg_desc_active) ||
723 	    !readl(txq->bd.reg_desc_active) ||
724 	    !readl(txq->bd.reg_desc_active))
725 		writel(0, txq->bd.reg_desc_active);
726 
727 	return 0;
728 }
729 
730 static int
731 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
732 			  struct net_device *ndev,
733 			  struct bufdesc *bdp, int index, char *data,
734 			  int size, bool last_tcp, bool is_last)
735 {
736 	struct fec_enet_private *fep = netdev_priv(ndev);
737 	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
738 	unsigned short status;
739 	unsigned int estatus = 0;
740 	dma_addr_t addr;
741 
742 	status = fec16_to_cpu(bdp->cbd_sc);
743 	status &= ~BD_ENET_TX_STATS;
744 
745 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
746 
747 	if (((unsigned long) data) & fep->tx_align ||
748 		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
749 		memcpy(txq->tx_bounce[index], data, size);
750 		data = txq->tx_bounce[index];
751 
752 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
753 			swap_buffer(data, size);
754 	}
755 
756 	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
757 	if (dma_mapping_error(&fep->pdev->dev, addr)) {
758 		dev_kfree_skb_any(skb);
759 		if (net_ratelimit())
760 			netdev_err(ndev, "Tx DMA memory map failed\n");
761 		return NETDEV_TX_OK;
762 	}
763 
764 	bdp->cbd_datlen = cpu_to_fec16(size);
765 	bdp->cbd_bufaddr = cpu_to_fec32(addr);
766 
767 	if (fep->bufdesc_ex) {
768 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
769 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
770 		if (skb->ip_summed == CHECKSUM_PARTIAL)
771 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
772 		ebdp->cbd_bdu = 0;
773 		ebdp->cbd_esc = cpu_to_fec32(estatus);
774 	}
775 
776 	/* Handle the last BD specially */
777 	if (last_tcp)
778 		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
779 	if (is_last) {
780 		status |= BD_ENET_TX_INTR;
781 		if (fep->bufdesc_ex)
782 			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
783 	}
784 
785 	bdp->cbd_sc = cpu_to_fec16(status);
786 
787 	return 0;
788 }
789 
790 static int
791 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
792 			 struct sk_buff *skb, struct net_device *ndev,
793 			 struct bufdesc *bdp, int index)
794 {
795 	struct fec_enet_private *fep = netdev_priv(ndev);
796 	int hdr_len = skb_tcp_all_headers(skb);
797 	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
798 	void *bufaddr;
799 	unsigned long dmabuf;
800 	unsigned short status;
801 	unsigned int estatus = 0;
802 
803 	status = fec16_to_cpu(bdp->cbd_sc);
804 	status &= ~BD_ENET_TX_STATS;
805 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
806 
807 	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
808 	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
809 	if (((unsigned long)bufaddr) & fep->tx_align ||
810 		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
811 		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
812 		bufaddr = txq->tx_bounce[index];
813 
814 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
815 			swap_buffer(bufaddr, hdr_len);
816 
817 		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
818 					hdr_len, DMA_TO_DEVICE);
819 		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
820 			dev_kfree_skb_any(skb);
821 			if (net_ratelimit())
822 				netdev_err(ndev, "Tx DMA memory map failed\n");
823 			return NETDEV_TX_OK;
824 		}
825 	}
826 
827 	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
828 	bdp->cbd_datlen = cpu_to_fec16(hdr_len);
829 
830 	if (fep->bufdesc_ex) {
831 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
832 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
833 		if (skb->ip_summed == CHECKSUM_PARTIAL)
834 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
835 		ebdp->cbd_bdu = 0;
836 		ebdp->cbd_esc = cpu_to_fec32(estatus);
837 	}
838 
839 	bdp->cbd_sc = cpu_to_fec16(status);
840 
841 	return 0;
842 }
843 
844 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
845 				   struct sk_buff *skb,
846 				   struct net_device *ndev)
847 {
848 	struct fec_enet_private *fep = netdev_priv(ndev);
849 	int hdr_len, total_len, data_left;
850 	struct bufdesc *bdp = txq->bd.cur;
851 	struct bufdesc *tmp_bdp;
852 	struct bufdesc_ex *ebdp;
853 	struct tso_t tso;
854 	unsigned int index = 0;
855 	int ret;
856 
857 	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
858 		dev_kfree_skb_any(skb);
859 		if (net_ratelimit())
860 			netdev_err(ndev, "NOT enough BD for TSO!\n");
861 		return NETDEV_TX_OK;
862 	}
863 
864 	/* Protocol checksum off-load for TCP and UDP. */
865 	if (fec_enet_clear_csum(skb, ndev)) {
866 		dev_kfree_skb_any(skb);
867 		return NETDEV_TX_OK;
868 	}
869 
870 	/* Initialize the TSO handler, and prepare the first payload */
871 	hdr_len = tso_start(skb, &tso);
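	/* Software TSO: the payload is carved into gso_size-sized segments.
	 * Each segment gets a freshly built header (placed in the per-queue
	 * tso_hdrs area) in one descriptor, followed by one or more
	 * descriptors pointing at its slice of payload.
	 */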
872 
873 	total_len = skb->len - hdr_len;
874 	while (total_len > 0) {
875 		char *hdr;
876 
877 		index = fec_enet_get_bd_index(bdp, &txq->bd);
878 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
879 		total_len -= data_left;
880 
881 		/* prepare packet headers: MAC + IP + TCP */
882 		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
883 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
884 		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
885 		if (ret)
886 			goto err_release;
887 
888 		while (data_left > 0) {
889 			int size;
890 
891 			size = min_t(int, tso.size, data_left);
892 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
893 			index = fec_enet_get_bd_index(bdp, &txq->bd);
894 			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
895 							bdp, index,
896 							tso.data, size,
897 							size == data_left,
898 							total_len == 0);
899 			if (ret)
900 				goto err_release;
901 
902 			data_left -= size;
903 			tso_build_data(skb, &tso, size);
904 		}
905 
906 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
907 	}
908 
909 	/* Save skb pointer */
910 	txq->tx_buf[index].buf_p = skb;
911 
912 	skb_tx_timestamp(skb);
913 	txq->bd.cur = bdp;
914 
915 	/* Trigger transmission start */
916 	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
917 	    !readl(txq->bd.reg_desc_active) ||
918 	    !readl(txq->bd.reg_desc_active) ||
919 	    !readl(txq->bd.reg_desc_active) ||
920 	    !readl(txq->bd.reg_desc_active))
921 		writel(0, txq->bd.reg_desc_active);
922 
923 	return 0;
924 
925 err_release:
926 	/* Release all used data descriptors for TSO */
927 	tmp_bdp = txq->bd.cur;
928 
929 	while (tmp_bdp != bdp) {
930 		/* Unmap data buffers */
931 		if (tmp_bdp->cbd_bufaddr &&
932 		    !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr)))
933 			dma_unmap_single(&fep->pdev->dev,
934 					 fec32_to_cpu(tmp_bdp->cbd_bufaddr),
935 					 fec16_to_cpu(tmp_bdp->cbd_datlen),
936 					 DMA_TO_DEVICE);
937 
938 		/* Clear standard buffer descriptor fields */
939 		tmp_bdp->cbd_sc = 0;
940 		tmp_bdp->cbd_datlen = 0;
941 		tmp_bdp->cbd_bufaddr = 0;
942 
943 		/* Handle extended descriptor if enabled */
944 		if (fep->bufdesc_ex) {
945 			ebdp = (struct bufdesc_ex *)tmp_bdp;
946 			ebdp->cbd_esc = 0;
947 		}
948 
949 		tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd);
950 	}
951 
952 	dev_kfree_skb_any(skb);
953 
954 	return ret;
955 }
956 
957 static netdev_tx_t
958 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
959 {
960 	struct fec_enet_private *fep = netdev_priv(ndev);
961 	int entries_free;
962 	unsigned short queue;
963 	struct fec_enet_priv_tx_q *txq;
964 	struct netdev_queue *nq;
965 	int ret;
966 
967 	queue = skb_get_queue_mapping(skb);
968 	txq = fep->tx_queue[queue];
969 	nq = netdev_get_tx_queue(ndev, queue);
970 
971 	if (skb_is_gso(skb))
972 		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
973 	else
974 		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
975 	if (ret)
976 		return ret;
977 
978 	entries_free = fec_enet_get_free_txdesc_num(txq);
979 	if (entries_free <= txq->tx_stop_threshold)
980 		netif_tx_stop_queue(nq);
981 
982 	return NETDEV_TX_OK;
983 }
984 
985 /* Init RX & TX buffer descriptors
986  */
987 static void fec_enet_bd_init(struct net_device *dev)
988 {
989 	struct fec_enet_private *fep = netdev_priv(dev);
990 	struct fec_enet_priv_tx_q *txq;
991 	struct fec_enet_priv_rx_q *rxq;
992 	struct bufdesc *bdp;
993 	unsigned int i;
994 	unsigned int q;
995 
996 	for (q = 0; q < fep->num_rx_queues; q++) {
997 		/* Initialize the receive buffer descriptors. */
998 		rxq = fep->rx_queue[q];
999 		bdp = rxq->bd.base;
1000 
1001 		for (i = 0; i < rxq->bd.ring_size; i++) {
1002 
1003 			/* Initialize the BD for every fragment in the page. */
1004 			if (bdp->cbd_bufaddr)
1005 				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
1006 			else
1007 				bdp->cbd_sc = cpu_to_fec16(0);
1008 			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1009 		}
1010 
1011 		/* Set the last buffer to wrap */
1012 		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
1013 		bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
1014 
1015 		rxq->bd.cur = rxq->bd.base;
1016 	}
1017 
1018 	for (q = 0; q < fep->num_tx_queues; q++) {
1019 		/* ...and the same for transmit */
1020 		txq = fep->tx_queue[q];
1021 		bdp = txq->bd.base;
1022 		txq->bd.cur = bdp;
1023 
1024 		for (i = 0; i < txq->bd.ring_size; i++) {
1025 			/* Initialize the BD for every fragment in the page. */
1026 			bdp->cbd_sc = cpu_to_fec16(0);
1027 			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
1028 				if (bdp->cbd_bufaddr &&
1029 				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1030 					dma_unmap_single(&fep->pdev->dev,
1031 							 fec32_to_cpu(bdp->cbd_bufaddr),
1032 							 fec16_to_cpu(bdp->cbd_datlen),
1033 							 DMA_TO_DEVICE);
1034 				if (txq->tx_buf[i].buf_p)
1035 					dev_kfree_skb_any(txq->tx_buf[i].buf_p);
1036 			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
1037 				if (bdp->cbd_bufaddr)
1038 					dma_unmap_single(&fep->pdev->dev,
1039 							 fec32_to_cpu(bdp->cbd_bufaddr),
1040 							 fec16_to_cpu(bdp->cbd_datlen),
1041 							 DMA_TO_DEVICE);
1042 
1043 				if (txq->tx_buf[i].buf_p)
1044 					xdp_return_frame(txq->tx_buf[i].buf_p);
1045 			} else {
1046 				struct page *page = txq->tx_buf[i].buf_p;
1047 
1048 				if (page)
1049 					page_pool_put_page(pp_page_to_nmdesc(page)->pp,
1050 							   page, 0,
1051 							   false);
1052 			}
1053 
1054 			txq->tx_buf[i].buf_p = NULL;
1055 			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
1056 			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
1057 			bdp->cbd_bufaddr = cpu_to_fec32(0);
1058 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1059 		}
1060 
1061 		/* Set the last buffer to wrap */
1062 		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
1063 		bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
1064 		txq->dirty_tx = bdp;
1065 	}
1066 }
1067 
1068 static void fec_enet_active_rxring(struct net_device *ndev)
1069 {
1070 	struct fec_enet_private *fep = netdev_priv(ndev);
1071 	int i;
1072 
1073 	for (i = 0; i < fep->num_rx_queues; i++)
1074 		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
1075 }
1076 
1077 static void fec_enet_enable_ring(struct net_device *ndev)
1078 {
1079 	struct fec_enet_private *fep = netdev_priv(ndev);
1080 	struct fec_enet_priv_tx_q *txq;
1081 	struct fec_enet_priv_rx_q *rxq;
1082 	int i;
1083 
1084 	for (i = 0; i < fep->num_rx_queues; i++) {
1085 		rxq = fep->rx_queue[i];
1086 		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
1087 		writel(fep->max_buf_size, fep->hwp + FEC_R_BUFF_SIZE(i));
1088 
1089 		/* enable DMA1/2 */
1090 		if (i)
1091 			writel(RCMR_MATCHEN | RCMR_CMP(i),
1092 			       fep->hwp + FEC_RCMR(i));
1093 	}
1094 
1095 	for (i = 0; i < fep->num_tx_queues; i++) {
1096 		txq = fep->tx_queue[i];
1097 		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
1098 
1099 		/* enable DMA1/2 */
1100 		if (i)
1101 			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
1102 			       fep->hwp + FEC_DMA_CFG(i));
1103 	}
1104 }
1105 
/* Whack a reset.  We should wait for this.
 * On the i.MX6SX SoC the ENET block is on the AXI bus; we disable
 * the MAC instead of resetting the MAC itself.
 */
1110 static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol)
1111 {
1112 	u32 val;
1113 
1114 	if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1115 		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
1116 		    ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
1117 			writel(0, fep->hwp + FEC_ECNTRL);
1118 		} else {
1119 			writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
1120 			udelay(10);
1121 		}
1122 	} else {
1123 		val = readl(fep->hwp + FEC_ECNTRL);
1124 		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
1125 		writel(val, fep->hwp + FEC_ECNTRL);
1126 	}
1127 }
1128 
1129 static void fec_set_hw_mac_addr(struct net_device *ndev)
1130 {
1131 	struct fec_enet_private *fep = netdev_priv(ndev);
1132 
1133 	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
1134 	       (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
1135 	       fep->hwp + FEC_ADDR_LOW);
1136 	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
1137 	       fep->hwp + FEC_ADDR_HIGH);
1138 }
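/* Worked example: for MAC 00:11:22:33:44:55 this writes 0x00112233 to
 * FEC_ADDR_LOW and 0x44550000 to FEC_ADDR_HIGH; the low 16 bits of
 * ADDR_HIGH are not part of the station address and are left zero.
 */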
1139 
1140 /*
1141  * This function is called to start or restart the FEC during a link
1142  * change, transmit timeout, or to reconfigure the FEC.  The network
1143  * packet processing for this device must be stopped before this call.
1144  */
1145 static void
1146 fec_restart(struct net_device *ndev)
1147 {
1148 	struct fec_enet_private *fep = netdev_priv(ndev);
1149 	u32 ecntl = FEC_ECR_ETHEREN;
1150 	u32 rcntl = FEC_RCR_MII;
1151 
1152 	if (OPT_ARCH_HAS_MAX_FL)
1153 		rcntl |= (fep->netdev->mtu + ETH_HLEN + ETH_FCS_LEN) << 16;
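	/* On these parts the upper half of the RX control register holds
	 * the maximum frame length (MAX_FL) field, so the receive limit
	 * tracks the configured MTU plus Ethernet header and FCS.
	 */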
1154 
1155 	if (fep->bufdesc_ex)
1156 		fec_ptp_save_state(fep);
1157 
1158 	fec_ctrl_reset(fep, false);
1159 
	/*
	 * An enet-mac reset also resets the MAC address registers,
	 * so they need to be reconfigured.
	 */
1164 	fec_set_hw_mac_addr(ndev);
1165 
1166 	/* Clear any outstanding interrupt, except MDIO. */
1167 	writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
1168 
1169 	fec_enet_bd_init(ndev);
1170 
1171 	fec_enet_enable_ring(ndev);
1172 
	/* Configure duplex mode */
1174 	if (fep->full_duplex == DUPLEX_FULL) {
1175 		/* FD enable */
1176 		writel(0x04, fep->hwp + FEC_X_CNTRL);
1177 	} else {
1178 		/* No Rcv on Xmit */
1179 		rcntl |= FEC_RCR_DRT;
1180 		writel(0x0, fep->hwp + FEC_X_CNTRL);
1181 	}
1182 
1183 	/* Set MII speed */
1184 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1185 
1186 #if !defined(CONFIG_M5272)
1187 	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1188 		u32 val = readl(fep->hwp + FEC_RACC);
1189 
1190 		/* align IP header */
1191 		val |= FEC_RACC_SHIFT16;
1192 		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
1193 			/* set RX checksum */
1194 			val |= FEC_RACC_OPTIONS;
1195 		else
1196 			val &= ~FEC_RACC_OPTIONS;
1197 		writel(val, fep->hwp + FEC_RACC);
1198 		writel(min(fep->rx_frame_size, fep->max_buf_size), fep->hwp + FEC_FTRL);
1199 	}
1200 #endif
1201 
	/*
	 * The PHY interface and speed need to be configured
	 * differently on enet-mac.
	 */
1206 	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1207 		/* Enable flow control and length check */
1208 		rcntl |= FEC_RCR_NLC | FEC_RCR_FLOWCTL;
1209 
1210 		/* RGMII, RMII or MII */
1211 		if (phy_interface_mode_is_rgmii(fep->phy_interface))
1212 			rcntl |= FEC_RCR_RGMII;
1213 		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1214 			rcntl |= FEC_RCR_RMII;
1215 		else
1216 			rcntl &= ~FEC_RCR_RMII;
1217 
1218 		/* 1G, 100M or 10M */
1219 		if (ndev->phydev) {
1220 			if (ndev->phydev->speed == SPEED_1000)
1221 				ecntl |= FEC_ECR_SPEED;
1222 			else if (ndev->phydev->speed == SPEED_100)
1223 				rcntl &= ~FEC_RCR_10BASET;
1224 			else
1225 				rcntl |= FEC_RCR_10BASET;
1226 		}
1227 	} else {
1228 #ifdef FEC_MIIGSK_ENR
1229 		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
1230 			u32 cfgr;
1231 			/* disable the gasket and wait */
1232 			writel(0, fep->hwp + FEC_MIIGSK_ENR);
1233 			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1234 				udelay(1);
1235 
1236 			/*
1237 			 * configure the gasket:
1238 			 *   RMII, 50 MHz, no loopback, no echo
1239 			 *   MII, 25 MHz, no loopback, no echo
1240 			 */
1241 			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1242 				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
1243 			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
1244 				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
1245 			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1246 
1247 			/* re-enable the gasket */
1248 			writel(2, fep->hwp + FEC_MIIGSK_ENR);
1249 		}
1250 #endif
1251 	}
1252 
1253 #if !defined(CONFIG_M5272)
	/* Enable pause frames */
1255 	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
1256 	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1257 	     ndev->phydev && ndev->phydev->pause)) {
1258 		rcntl |= FEC_RCR_FLOWCTL;
1259 
1260 		/* set FIFO threshold parameter to reduce overrun */
1261 		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
1262 		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
1263 		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
1264 		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
1265 
1266 		/* OPD */
1267 		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
1268 	} else {
1269 		rcntl &= ~FEC_RCR_FLOWCTL;
1270 	}
1271 #endif /* !defined(CONFIG_M5272) */
1272 
1273 	writel(rcntl, fep->hwp + FEC_R_CNTRL);
1274 
1275 	/* Setup multicast filter. */
1276 	set_multicast_list(ndev);
1277 #ifndef CONFIG_M5272
1278 	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1279 	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1280 #endif
1281 
1282 	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1283 		/* enable ENET endian swap */
1284 		ecntl |= FEC_ECR_BYTESWP;
1285 
1286 		/* When Jumbo Frame is enabled, the FIFO may not be large enough
1287 		 * to hold an entire frame. In such cases, if the MTU exceeds
1288 		 * (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN), configure the interface
1289 		 * to operate in cut-through mode, triggered by the FIFO threshold.
1290 		 * Otherwise, enable the ENET store-and-forward mode.
1291 		 */
1292 		if ((fep->quirks & FEC_QUIRK_JUMBO_FRAME) &&
1293 		    (ndev->mtu > (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN)))
1294 			writel(0xF, fep->hwp + FEC_X_WMRK);
1295 		else
1296 			writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
1297 	}
1298 
1299 	if (fep->bufdesc_ex)
1300 		ecntl |= FEC_ECR_EN1588;
1301 
1302 	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1303 	    fep->rgmii_txc_dly)
1304 		ecntl |= FEC_ENET_TXC_DLY;
1305 	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1306 	    fep->rgmii_rxc_dly)
1307 		ecntl |= FEC_ENET_RXC_DLY;
1308 
1309 #ifndef CONFIG_M5272
1310 	/* Enable the MIB statistic event counters */
1311 	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
1312 #endif
1313 
1314 	/* And last, enable the transmit and receive processing */
1315 	writel(ecntl, fep->hwp + FEC_ECNTRL);
1316 	fec_enet_active_rxring(ndev);
1317 
1318 	if (fep->bufdesc_ex) {
1319 		fec_ptp_start_cyclecounter(ndev);
1320 		fec_ptp_restore_state(fep);
1321 	}
1322 
1323 	/* Enable interrupts we wish to service */
1324 	if (fep->link)
1325 		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1326 	else
1327 		writel(0, fep->hwp + FEC_IMASK);
1328 
1329 	/* Init the interrupt coalescing */
1330 	if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
1331 		fec_enet_itr_coal_set(ndev);
1332 }
1333 
1334 static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
1335 {
1336 	if (!(of_machine_is_compatible("fsl,imx8qm") ||
1337 	      of_machine_is_compatible("fsl,imx8qp") ||
1338 	      of_machine_is_compatible("fsl,imx8qxp") ||
1339 	      of_machine_is_compatible("fsl,imx8dx") ||
1340 	      of_machine_is_compatible("fsl,imx8dxl")))
1341 		return 0;
1342 
1343 	return imx_scu_get_handle(&fep->ipc_handle);
1344 }
1345 
1346 static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
1347 {
1348 	struct device_node *np = fep->pdev->dev.of_node;
1349 	u32 rsrc_id, val;
1350 	int idx;
1351 
1352 	if (!np || !fep->ipc_handle)
1353 		return;
1354 
1355 	idx = of_alias_get_id(np, "ethernet");
1356 	if (idx < 0)
1357 		idx = 0;
1358 	rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;
1359 
1360 	val = enabled ? 1 : 0;
1361 	imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
1362 }
1363 
1364 static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
1365 {
1366 	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
1367 	struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
1368 
1369 	if (stop_gpr->gpr) {
1370 		if (enabled)
1371 			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
1372 					   BIT(stop_gpr->bit),
1373 					   BIT(stop_gpr->bit));
1374 		else
1375 			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
1376 					   BIT(stop_gpr->bit), 0);
1377 	} else if (pdata && pdata->sleep_mode_enable) {
1378 		pdata->sleep_mode_enable(enabled);
1379 	} else {
1380 		fec_enet_ipg_stop_set(fep, enabled);
1381 	}
1382 }
1383 
1384 static void fec_irqs_disable(struct net_device *ndev)
1385 {
1386 	struct fec_enet_private *fep = netdev_priv(ndev);
1387 
1388 	writel(0, fep->hwp + FEC_IMASK);
1389 }
1390 
1391 static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
1392 {
1393 	struct fec_enet_private *fep = netdev_priv(ndev);
1394 
1395 	writel(0, fep->hwp + FEC_IMASK);
1396 	writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
1397 }
1398 
1399 static void
1400 fec_stop(struct net_device *ndev)
1401 {
1402 	struct fec_enet_private *fep = netdev_priv(ndev);
1403 	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
1404 	u32 val;
1405 
	/* We cannot expect a graceful transmit stop without link! */
1407 	if (fep->link) {
1408 		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1409 		udelay(10);
1410 		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1411 			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
1412 	}
1413 
1414 	if (fep->bufdesc_ex)
1415 		fec_ptp_save_state(fep);
1416 
1417 	fec_ctrl_reset(fep, true);
1418 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1419 	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1420 
	/* We have to keep ENET enabled for the MII interrupt to keep working */
1422 	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
1423 		!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1424 		writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
1425 		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
1426 	}
1427 
1428 	if (fep->bufdesc_ex) {
1429 		val = readl(fep->hwp + FEC_ECNTRL);
1430 		val |= FEC_ECR_EN1588;
1431 		writel(val, fep->hwp + FEC_ECNTRL);
1432 
1433 		fec_ptp_start_cyclecounter(ndev);
1434 		fec_ptp_restore_state(fep);
1435 	}
1436 }
1437 
1438 static void
1439 fec_timeout(struct net_device *ndev, unsigned int txqueue)
1440 {
1441 	struct fec_enet_private *fep = netdev_priv(ndev);
1442 
1443 	fec_dump(ndev);
1444 
1445 	ndev->stats.tx_errors++;
1446 
1447 	schedule_work(&fep->tx_timeout_work);
1448 }
1449 
1450 static void fec_enet_timeout_work(struct work_struct *work)
1451 {
1452 	struct fec_enet_private *fep =
1453 		container_of(work, struct fec_enet_private, tx_timeout_work);
1454 	struct net_device *ndev = fep->netdev;
1455 
1456 	rtnl_lock();
1457 	if (netif_device_present(ndev) || netif_running(ndev)) {
1458 		napi_disable(&fep->napi);
1459 		netif_tx_lock_bh(ndev);
1460 		fec_restart(ndev);
1461 		netif_tx_wake_all_queues(ndev);
1462 		netif_tx_unlock_bh(ndev);
1463 		napi_enable(&fep->napi);
1464 	}
1465 	rtnl_unlock();
1466 }
1467 
1468 static void
1469 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
1470 	struct skb_shared_hwtstamps *hwtstamps)
1471 {
1472 	unsigned long flags;
1473 	u64 ns;
1474 
1475 	spin_lock_irqsave(&fep->tmreg_lock, flags);
1476 	ns = timecounter_cyc2time(&fep->tc, ts);
1477 	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
1478 
1479 	memset(hwtstamps, 0, sizeof(*hwtstamps));
1480 	hwtstamps->hwtstamp = ns_to_ktime(ns);
1481 }
1482 
1483 static void
1484 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
1485 {
1486 	struct	fec_enet_private *fep;
1487 	struct xdp_frame *xdpf;
1488 	struct bufdesc *bdp;
1489 	unsigned short status;
1490 	struct	sk_buff	*skb;
1491 	struct fec_enet_priv_tx_q *txq;
1492 	struct netdev_queue *nq;
1493 	int	index = 0;
1494 	int	entries_free;
1495 	struct page *page;
1496 	int frame_len;
1497 
1498 	fep = netdev_priv(ndev);
1499 
1500 	txq = fep->tx_queue[queue_id];
1501 	/* get next bdp of dirty_tx */
1503 	bdp = txq->dirty_tx;
1504 
1505 	/* get next bdp of dirty_tx */
1506 	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1507 
1508 	while (bdp != READ_ONCE(txq->bd.cur)) {
1509 		/* Order the load of bd.cur and cbd_sc */
1510 		rmb();
1511 		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
1512 		if (status & BD_ENET_TX_READY)
1513 			break;
1514 
1515 		index = fec_enet_get_bd_index(bdp, &txq->bd);
1516 
1517 		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
1518 			skb = txq->tx_buf[index].buf_p;
1519 			if (bdp->cbd_bufaddr &&
1520 			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1521 				dma_unmap_single(&fep->pdev->dev,
1522 						 fec32_to_cpu(bdp->cbd_bufaddr),
1523 						 fec16_to_cpu(bdp->cbd_datlen),
1524 						 DMA_TO_DEVICE);
1525 			bdp->cbd_bufaddr = cpu_to_fec32(0);
1526 			if (!skb)
1527 				goto tx_buf_done;
1528 		} else {
			/* Tx processing cannot call any XDP (or page pool) APIs if
			 * the "budget" is 0, because NAPI being called with a budget
			 * of 0 (such as from netpoll) indicates we may be in IRQ
			 * context, and the page pool cannot be used from IRQ context.
			 */
1534 			if (unlikely(!budget))
1535 				break;
1536 
1537 			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
1538 				xdpf = txq->tx_buf[index].buf_p;
1539 				if (bdp->cbd_bufaddr)
1540 					dma_unmap_single(&fep->pdev->dev,
1541 							 fec32_to_cpu(bdp->cbd_bufaddr),
1542 							 fec16_to_cpu(bdp->cbd_datlen),
1543 							 DMA_TO_DEVICE);
1544 			} else {
1545 				page = txq->tx_buf[index].buf_p;
1546 			}
1547 
1548 			bdp->cbd_bufaddr = cpu_to_fec32(0);
1549 			if (unlikely(!txq->tx_buf[index].buf_p)) {
1550 				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1551 				goto tx_buf_done;
1552 			}
1553 
1554 			frame_len = fec16_to_cpu(bdp->cbd_datlen);
1555 		}
1556 
1557 		/* Check for errors. */
1558 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
1559 				   BD_ENET_TX_RL | BD_ENET_TX_UN |
1560 				   BD_ENET_TX_CSL)) {
1561 			ndev->stats.tx_errors++;
1562 			if (status & BD_ENET_TX_HB)  /* No heartbeat */
1563 				ndev->stats.tx_heartbeat_errors++;
1564 			if (status & BD_ENET_TX_LC)  /* Late collision */
1565 				ndev->stats.tx_window_errors++;
1566 			if (status & BD_ENET_TX_RL)  /* Retrans limit */
1567 				ndev->stats.tx_aborted_errors++;
1568 			if (status & BD_ENET_TX_UN)  /* Underrun */
1569 				ndev->stats.tx_fifo_errors++;
1570 			if (status & BD_ENET_TX_CSL) /* Carrier lost */
1571 				ndev->stats.tx_carrier_errors++;
1572 		} else {
1573 			ndev->stats.tx_packets++;
1574 
1575 			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
1576 				ndev->stats.tx_bytes += skb->len;
1577 			else
1578 				ndev->stats.tx_bytes += frame_len;
1579 		}
1580 
1581 		/* Deferred means some collisions occurred during transmit,
1582 		 * but we eventually sent the packet OK.
1583 		 */
1584 		if (status & BD_ENET_TX_DEF)
1585 			ndev->stats.collisions++;
1586 
1587 		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
			/* NOTE: SKBTX_IN_PROGRESS being set does not imply that it is
			 * we who are to timestamp the packet, so we still need to
			 * check the timestamping-enabled flag.
			 */
1592 			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
1593 				     fep->hwts_tx_en) && fep->bufdesc_ex) {
1594 				struct skb_shared_hwtstamps shhwtstamps;
1595 				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1596 
1597 				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1598 				skb_tstamp_tx(skb, &shhwtstamps);
1599 			}
1600 
1601 			/* Free the sk buffer associated with this last transmit */
1602 			napi_consume_skb(skb, budget);
1603 		} else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
1604 			xdp_return_frame_rx_napi(xdpf);
1605 		} else { /* recycle pages of XDP_TX frames */
1606 			/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
1607 			page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
1608 					   0, true);
1609 		}
1610 
1611 		txq->tx_buf[index].buf_p = NULL;
1612 		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
1613 		txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1614 
1615 tx_buf_done:
1616 		/* Make sure the update to bdp and tx_buf are performed
1617 		 * before dirty_tx
1618 		 */
1619 		wmb();
1620 		txq->dirty_tx = bdp;
1621 
1622 		/* Update pointer to next buffer descriptor to be transmitted */
1623 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1624 
1625 		/* Since we have freed up a buffer, the ring is no longer full
1626 		 */
1627 		if (netif_tx_queue_stopped(nq)) {
1628 			entries_free = fec_enet_get_free_txdesc_num(txq);
1629 			if (entries_free >= txq->tx_wake_threshold)
1630 				netif_tx_wake_queue(nq);
1631 		}
1632 	}
1633 
1634 	/* ERR006358: Keep the transmitter going */
1635 	if (bdp != txq->bd.cur &&
1636 	    readl(txq->bd.reg_desc_active) == 0)
1637 		writel(0, txq->bd.reg_desc_active);
1638 }
1639 
1640 static void fec_enet_tx(struct net_device *ndev, int budget)
1641 {
1642 	struct fec_enet_private *fep = netdev_priv(ndev);
1643 	int i;
1644 
1645 	/* Make sure that AVB queues are processed first. */
1646 	for (i = fep->num_tx_queues - 1; i >= 0; i--)
1647 		fec_enet_tx_queue(ndev, i, budget);
1648 }
1649 
1650 static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
1651 				struct bufdesc *bdp, int index)
1652 {
1653 	struct page *new_page;
1654 	dma_addr_t phys_addr;
1655 
1656 	new_page = page_pool_dev_alloc_pages(rxq->page_pool);
1657 	if (unlikely(!new_page))
1658 		return -ENOMEM;
1659 
1660 	rxq->rx_buf[index] = new_page;
1661 	phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
1662 	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
1663 
1664 	return 0;
1665 }
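/* Note the ordering in the RX path: a replacement page is committed to the
 * descriptor before the old page is handed up the stack; if allocation
 * fails, the caller keeps the old buffer in place and drops the frame
 * rather than leaving a hole in the ring.
 */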
1666 
1667 static u32
1668 fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
1669 		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
1670 {
1671 	unsigned int sync, len = xdp->data_end - xdp->data;
1672 	u32 ret = FEC_ENET_XDP_PASS;
1673 	struct page *page;
1674 	int err;
1675 	u32 act;
1676 
1677 	act = bpf_prog_run_xdp(prog, xdp);
1678 
	/* Because of xdp_adjust_tail and xdp_adjust_head, the DMA sync
	 * for_device must cover the maximum length the CPU has touched.
	 */
1682 	sync = xdp->data_end - xdp->data;
1683 	sync = max(sync, len);
1684 
1685 	switch (act) {
1686 	case XDP_PASS:
1687 		rxq->stats[RX_XDP_PASS]++;
1688 		ret = FEC_ENET_XDP_PASS;
1689 		break;
1690 
1691 	case XDP_REDIRECT:
1692 		rxq->stats[RX_XDP_REDIRECT]++;
1693 		err = xdp_do_redirect(fep->netdev, xdp, prog);
1694 		if (unlikely(err))
1695 			goto xdp_err;
1696 
1697 		ret = FEC_ENET_XDP_REDIR;
1698 		break;
1699 
1700 	case XDP_TX:
1701 		rxq->stats[RX_XDP_TX]++;
1702 		err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
1703 		if (unlikely(err)) {
1704 			rxq->stats[RX_XDP_TX_ERRORS]++;
1705 			goto xdp_err;
1706 		}
1707 
1708 		ret = FEC_ENET_XDP_TX;
1709 		break;
1710 
1711 	default:
1712 		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
1713 		fallthrough;
1714 
1715 	case XDP_ABORTED:
1716 		fallthrough;    /* handle aborts by dropping packet */
1717 
1718 	case XDP_DROP:
1719 		rxq->stats[RX_XDP_DROP]++;
1720 xdp_err:
1721 		ret = FEC_ENET_XDP_CONSUMED;
1722 		page = virt_to_head_page(xdp->data);
1723 		page_pool_put_page(rxq->page_pool, page, sync, true);
1724 		if (act != XDP_DROP)
1725 			trace_xdp_exception(fep->netdev, prog, act);
1726 		break;
1727 	}
1728 
1729 	return ret;
1730 }
1731 
1732 static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb)
1733 {
1734 	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
1735 		const struct vlan_ethhdr *vlan_header = skb_vlan_eth_hdr(skb);
1736 		const u16 vlan_tag = ntohs(vlan_header->h_vlan_TCI);
1737 
1738 		/* Push and remove the vlan tag */
1739 
1740 		memmove(skb->data + VLAN_HLEN, skb->data, ETH_ALEN * 2);
1741 		skb_pull(skb, VLAN_HLEN);
1742 		__vlan_hwaccel_put_tag(skb,
1743 				       htons(ETH_P_8021Q),
1744 				       vlan_tag);
1745 	}
1746 }
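/* Untagging happens in place: the 12 bytes of destination + source MAC are
 * slid forward over the 4-byte 802.1Q tag, roughly:
 *
 *	before: | dst | src | 0x8100 | TCI | type | payload ...
 *	after:  | dst | src | type | payload ...
 *
 * and the extracted TCI is handed to the stack via the hwaccel tag.
 */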
1747 
/* During a receive, rxq->bd.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
1753 static int
1754 fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
1755 {
1756 	struct fec_enet_private *fep = netdev_priv(ndev);
1757 	struct fec_enet_priv_rx_q *rxq;
1758 	struct bufdesc *bdp;
1759 	unsigned short status;
1760 	struct  sk_buff *skb;
1761 	ushort	pkt_len;
1762 	int	pkt_received = 0;
1763 	struct	bufdesc_ex *ebdp = NULL;
1764 	int	index = 0;
1765 	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
1766 	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
1767 	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
1768 	u32 data_start = FEC_ENET_XDP_HEADROOM;
1769 	int cpu = smp_processor_id();
1770 	struct xdp_buff xdp;
1771 	struct page *page;
1772 	__fec32 cbd_bufaddr;
1773 	u32 sub_len = 4;
1774 
1775 	/* If the controller has the FEC_QUIRK_HAS_RACC quirk, the
1776 	 * FEC_RACC_SHIFT16 bit is set by default in the probe function.
1777 	 */
1778 	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1779 		data_start += 2;
1780 		sub_len += 2;
1781 	}
1782 
1783 #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
1784 	/*
1785 	 * Hacky flush of all caches instead of using the DMA API for the TSO
1786 	 * headers.
1787 	 */
1788 	flush_cache_all();
1789 #endif
1790 	rxq = fep->rx_queue[queue_id];
1791 
1792 	/* First, grab all of the stats for the incoming packet.
1793 	 * These get messed up if we get called due to a busy condition.
1794 	 */
1795 	bdp = rxq->bd.cur;
1796 	xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
1797 
1798 	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
1799 
1800 		if (pkt_received >= budget)
1801 			break;
1802 		pkt_received++;
1803 
1804 		writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
1805 
1806 		/* Check for errors. */
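		/* A good frame always has BD_ENET_RX_LAST set; flipping the bit
		 * makes a missing LAST show up in the error mask tested below.
		 */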
1807 		status ^= BD_ENET_RX_LAST;
1808 		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
1809 			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
1810 			   BD_ENET_RX_CL)) {
1811 			ndev->stats.rx_errors++;
1812 			if (status & BD_ENET_RX_OV) {
1813 				/* FIFO overrun */
1814 				ndev->stats.rx_fifo_errors++;
1815 				goto rx_processing_done;
1816 			}
1817 			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
1818 						| BD_ENET_RX_LAST)) {
1819 				/* Frame too long or too short. */
1820 				ndev->stats.rx_length_errors++;
1821 				if (status & BD_ENET_RX_LAST)
1822 					netdev_err(ndev, "rcv is not +last\n");
1823 			}
1824 			if (status & BD_ENET_RX_CR)	/* CRC Error */
1825 				ndev->stats.rx_crc_errors++;
1826 			/* Report late collisions as a frame error. */
1827 			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
1828 				ndev->stats.rx_frame_errors++;
1829 			goto rx_processing_done;
1830 		}
1831 
1832 		/* Process the incoming frame. */
1833 		ndev->stats.rx_packets++;
1834 		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
1835 		ndev->stats.rx_bytes += pkt_len;
1836 		if (fep->quirks & FEC_QUIRK_HAS_RACC)
1837 			ndev->stats.rx_bytes -= 2;
1838 
1839 		index = fec_enet_get_bd_index(bdp, &rxq->bd);
1840 		page = rxq->rx_buf[index];
1841 		cbd_bufaddr = bdp->cbd_bufaddr;
1842 		if (fec_enet_update_cbd(rxq, bdp, index)) {
1843 			ndev->stats.rx_dropped++;
1844 			goto rx_processing_done;
1845 		}
1846 
1847 		dma_sync_single_for_cpu(&fep->pdev->dev,
1848 					fec32_to_cpu(cbd_bufaddr),
1849 					pkt_len,
1850 					DMA_FROM_DEVICE);
1851 		prefetch(page_address(page));
1852 
1853 		if (xdp_prog) {
1854 			xdp_buff_clear_frags_flag(&xdp);
1855 			/* subtract the 16-bit RACC shift (if any) and the FCS */
1856 			xdp_prepare_buff(&xdp, page_address(page),
1857 					 data_start, pkt_len - sub_len, false);
1858 			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
1859 			xdp_result |= ret;
1860 			if (ret != FEC_ENET_XDP_PASS)
1861 				goto rx_processing_done;
1862 		}
1863 
1864 		/* The packet length includes FCS, but we don't want to
1865 		 * include that when passing upstream as it messes up
1866 		 * bridging applications.
1867 		 */
1868 		skb = build_skb(page_address(page),
1869 				PAGE_SIZE << fep->pagepool_order);
1870 		if (unlikely(!skb)) {
1871 			page_pool_recycle_direct(rxq->page_pool, page);
1872 			ndev->stats.rx_dropped++;
1873 
1874 			netdev_err_once(ndev, "build_skb failed!\n");
1875 			goto rx_processing_done;
1876 		}
1877 
1878 		skb_reserve(skb, data_start);
1879 		skb_put(skb, pkt_len - sub_len);
1880 		skb_mark_for_recycle(skb);
1881 
1882 		if (unlikely(need_swap)) {
1883 			u8 *data;
1884 
1885 			data = page_address(page) + FEC_ENET_XDP_HEADROOM;
1886 			swap_buffer(data, pkt_len);
1887 		}
1888 
1889 		/* Extract the enhanced buffer descriptor */
1890 		ebdp = NULL;
1891 		if (fep->bufdesc_ex)
1892 			ebdp = (struct bufdesc_ex *)bdp;
1893 
1894 		/* If this is a VLAN packet remove the VLAN Tag */
1895 		if (fep->bufdesc_ex &&
1896 		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN)))
1897 			fec_enet_rx_vlan(ndev, skb);
1898 
1899 		skb->protocol = eth_type_trans(skb, ndev);
1900 
1901 		/* Get receive timestamp from the skb */
1902 		if (fep->hwts_rx_en && fep->bufdesc_ex)
1903 			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1904 					  skb_hwtstamps(skb));
1905 
1906 		if (fep->bufdesc_ex &&
1907 		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1908 			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
1909 				/* don't check it */
1910 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1911 			} else {
1912 				skb_checksum_none_assert(skb);
1913 			}
1914 		}
1915 
1916 		skb_record_rx_queue(skb, queue_id);
1917 		napi_gro_receive(&fep->napi, skb);
1918 
1919 rx_processing_done:
1920 		/* Clear the status flags for this buffer */
1921 		status &= ~BD_ENET_RX_STATS;
1922 
1923 		/* Mark the buffer empty */
1924 		status |= BD_ENET_RX_EMPTY;
1925 
1926 		if (fep->bufdesc_ex) {
1927 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1928 
1929 			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1930 			ebdp->cbd_prot = 0;
1931 			ebdp->cbd_bdu = 0;
1932 		}
1933 		/* Make sure the updates to the rest of the descriptor are
1934 		 * performed before transferring ownership.
1935 		 */
1936 		wmb();
1937 		bdp->cbd_sc = cpu_to_fec16(status);
1938 
1939 		/* Update BD pointer to next entry */
1940 		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1941 
1942 		/* Doing this here will keep the FEC running while we process
1943 		 * incoming frames.  On a heavily loaded network, we should be
1944 		 * able to keep up at the expense of system resources.
1945 		 */
1946 		writel(0, rxq->bd.reg_desc_active);
1947 	}
1948 	rxq->bd.cur = bdp;
1949 
1950 	if (xdp_result & FEC_ENET_XDP_REDIR)
1951 		xdp_do_flush();
1952 
1953 	return pkt_received;
1954 }
1955 
1956 static int fec_enet_rx(struct net_device *ndev, int budget)
1957 {
1958 	struct fec_enet_private *fep = netdev_priv(ndev);
1959 	int i, done = 0;
1960 
1961 	/* Make sure that AVB queues are processed first. */
1962 	for (i = fep->num_rx_queues - 1; i >= 0; i--)
1963 		done += fec_enet_rx_queue(ndev, i, budget - done);
1964 
1965 	return done;
1966 }
1967 
1968 static bool fec_enet_collect_events(struct fec_enet_private *fep)
1969 {
1970 	uint int_events;
1971 
1972 	int_events = readl(fep->hwp + FEC_IEVENT);
1973 
1974 	/* Don't clear MDIO events, we poll for those */
1975 	int_events &= ~FEC_ENET_MII;
1976 
1977 	writel(int_events, fep->hwp + FEC_IEVENT);
1978 
1979 	return int_events != 0;
1980 }
1981 
1982 static irqreturn_t
1983 fec_enet_interrupt(int irq, void *dev_id)
1984 {
1985 	struct net_device *ndev = dev_id;
1986 	struct fec_enet_private *fep = netdev_priv(ndev);
1987 	irqreturn_t ret = IRQ_NONE;
1988 
1989 	if (fec_enet_collect_events(fep) && fep->link) {
1990 		ret = IRQ_HANDLED;
1991 
1992 		if (napi_schedule_prep(&fep->napi)) {
1993 			/* Disable interrupts */
1994 			writel(0, fep->hwp + FEC_IMASK);
1995 			__napi_schedule(&fep->napi);
1996 		}
1997 	}
1998 
1999 	return ret;
2000 }
2001 
2002 static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
2003 {
2004 	struct net_device *ndev = napi->dev;
2005 	struct fec_enet_private *fep = netdev_priv(ndev);
2006 	int done = 0;
2007 
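	/* Keep polling until the budget is exhausted or IEVENT stays clear,
	 * so events that arrived while interrupts were masked are not lost
	 * before interrupts are re-enabled below.
	 */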
2008 	do {
2009 		done += fec_enet_rx(ndev, budget - done);
2010 		fec_enet_tx(ndev, budget);
2011 	} while ((done < budget) && fec_enet_collect_events(fep));
2012 
2013 	if (done < budget) {
2014 		napi_complete_done(napi, done);
2015 		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
2016 	}
2017 
2018 	return done;
2019 }
2020 
2021 /* ------------------------------------------------------------------------- */
2022 static int fec_get_mac(struct net_device *ndev)
2023 {
2024 	struct fec_enet_private *fep = netdev_priv(ndev);
2025 	unsigned char *iap, tmpaddr[ETH_ALEN];
2026 	int ret;
2027 
2028 	/*
2029 	 * Try to get the MAC address in the following order:
2030 	 *
2031 	 * 1) module parameter via kernel command line in form
2032 	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
2033 	 */
2034 	iap = macaddr;
2035 
2036 	/*
2037 	 * 2) from device tree data
2038 	 */
2039 	if (!is_valid_ether_addr(iap)) {
2040 		struct device_node *np = fep->pdev->dev.of_node;
2041 		if (np) {
2042 			ret = of_get_mac_address(np, tmpaddr);
2043 			if (!ret)
2044 				iap = tmpaddr;
2045 			else if (ret == -EPROBE_DEFER)
2046 				return ret;
2047 		}
2048 	}
2049 
2050 	/*
2051 	 * 3) from flash or fuse (via platform data)
2052 	 */
2053 	if (!is_valid_ether_addr(iap)) {
2054 #ifdef CONFIG_M5272
2055 		if (FEC_FLASHMAC)
2056 			iap = (unsigned char *)FEC_FLASHMAC;
2057 #else
2058 		struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
2059 
2060 		if (pdata)
2061 			iap = (unsigned char *)&pdata->mac;
2062 #endif
2063 	}
2064 
2065 	/*
2066 	 * 4) FEC mac registers set by bootloader
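	 *    (FEC_ADDR_LOW holds the first four address bytes and the
	 *    upper 16 bits of FEC_ADDR_HIGH the remaining two)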
2067 	 */
2068 	if (!is_valid_ether_addr(iap)) {
2069 		*((__be32 *) &tmpaddr[0]) =
2070 			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
2071 		*((__be16 *) &tmpaddr[4]) =
2072 			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
2073 		iap = &tmpaddr[0];
2074 	}
2075 
2076 	/*
2077 	 * 5) random mac address
2078 	 */
2079 	if (!is_valid_ether_addr(iap)) {
2080 		/* Report it and use a random ethernet address instead */
2081 		dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
2082 		eth_hw_addr_random(ndev);
2083 		dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
2084 			 ndev->dev_addr);
2085 		return 0;
2086 	}
2087 
2088 	/* Adjust MAC if using macaddr */
2089 	eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
2090 
2091 	return 0;
2092 }
2093 
2094 /* ------------------------------------------------------------------------- */
2095 
2096 /*
2097  * Phy section
2098  */
2099 
2100 /* The LPI sleep Ts count is based on the tx clock (clk_ref).
2101  * The LPI sleep count value = X us / cycle_ns.
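 * For example (rate assumed for illustration): with clk_ref at 125 MHz,
 * fec_enet_us_to_tx_cycle(ndev, 100) returns
 * 100 * (125000000 / 1000) / 1000 = 12500 cycles.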
2102  */
2103 static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
2104 {
2105 	struct fec_enet_private *fep = netdev_priv(ndev);
2106 
2107 	return us * (fep->clk_ref_rate / 1000) / 1000;
2108 }
2109 
2110 static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer,
2111 				 bool enable)
2112 {
2113 	struct fec_enet_private *fep = netdev_priv(ndev);
2114 	unsigned int sleep_cycle, wake_cycle;
2115 
2116 	if (enable) {
2117 		sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer);
2118 		wake_cycle = sleep_cycle;
2119 	} else {
2120 		sleep_cycle = 0;
2121 		wake_cycle = 0;
2122 	}
2123 
2124 	writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
2125 	writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
2126 
2127 	return 0;
2128 }
2129 
2130 static void fec_enet_adjust_link(struct net_device *ndev)
2131 {
2132 	struct fec_enet_private *fep = netdev_priv(ndev);
2133 	struct phy_device *phy_dev = ndev->phydev;
2134 	int status_change = 0;
2135 
2136 	/*
2137 	 * If the netdev is down, or is going down, we're not interested
2138 	 * in link state events, so just mark our idea of the link as down
2139 	 * and ignore the event.
2140 	 */
2141 	if (!netif_running(ndev) || !netif_device_present(ndev)) {
2142 		fep->link = 0;
2143 	} else if (phy_dev->link) {
2144 		if (!fep->link) {
2145 			fep->link = phy_dev->link;
2146 			status_change = 1;
2147 		}
2148 
2149 		if (fep->full_duplex != phy_dev->duplex) {
2150 			fep->full_duplex = phy_dev->duplex;
2151 			status_change = 1;
2152 		}
2153 
2154 		if (phy_dev->speed != fep->speed) {
2155 			fep->speed = phy_dev->speed;
2156 			status_change = 1;
2157 		}
2158 
2159 		/* if any of the above changed restart the FEC */
2160 		if (status_change) {
2161 			netif_stop_queue(ndev);
2162 			napi_disable(&fep->napi);
2163 			netif_tx_lock_bh(ndev);
2164 			fec_restart(ndev);
2165 			netif_tx_wake_all_queues(ndev);
2166 			netif_tx_unlock_bh(ndev);
2167 			napi_enable(&fep->napi);
2168 		}
2169 		if (fep->quirks & FEC_QUIRK_HAS_EEE)
2170 			fec_enet_eee_mode_set(ndev,
2171 					      phy_dev->eee_cfg.tx_lpi_timer,
2172 					      phy_dev->enable_tx_lpi);
2173 	} else {
2174 		if (fep->link) {
2175 			netif_stop_queue(ndev);
2176 			napi_disable(&fep->napi);
2177 			netif_tx_lock_bh(ndev);
2178 			fec_stop(ndev);
2179 			netif_tx_unlock_bh(ndev);
2180 			napi_enable(&fep->napi);
2181 			fep->link = phy_dev->link;
2182 			status_change = 1;
2183 		}
2184 	}
2185 
2186 	if (status_change)
2187 		phy_print_status(phy_dev);
2188 }
2189 
2190 static int fec_enet_mdio_wait(struct fec_enet_private *fep)
2191 {
2192 	uint ievent;
2193 	int ret;
2194 
2195 	ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
2196 					ievent & FEC_ENET_MII, 2, 30000);
2197 
2198 	if (!ret)
2199 		writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2200 
2201 	return ret;
2202 }
2203 
2204 static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
2205 {
2206 	struct fec_enet_private *fep = bus->priv;
2207 	struct device *dev = &fep->pdev->dev;
2208 	int ret = 0, frame_start, frame_addr, frame_op;
2209 
2210 	ret = pm_runtime_resume_and_get(dev);
2211 	if (ret < 0)
2212 		return ret;
2213 
2214 	/* C22 read */
2215 	frame_op = FEC_MMFR_OP_READ;
2216 	frame_start = FEC_MMFR_ST;
2217 	frame_addr = regnum;
2218 
2219 	/* start a read op */
2220 	writel(frame_start | frame_op |
2221 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
2222 	       FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
2223 
2224 	/* wait for end of transfer */
2225 	ret = fec_enet_mdio_wait(fep);
2226 	if (ret) {
2227 		netdev_err(fep->netdev, "MDIO read timeout\n");
2228 		goto out;
2229 	}
2230 
2231 	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
2232 
2233 out:
2234 	pm_runtime_put_autosuspend(dev);
2235 
2236 	return ret;
2237 }
2238 
2239 static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id,
2240 				  int devad, int regnum)
2241 {
2242 	struct fec_enet_private *fep = bus->priv;
2243 	struct device *dev = &fep->pdev->dev;
2244 	int ret = 0, frame_start, frame_op;
2245 
2246 	ret = pm_runtime_resume_and_get(dev);
2247 	if (ret < 0)
2248 		return ret;
2249 
2250 	frame_start = FEC_MMFR_ST_C45;
2251 
2252 	/* write address */
2253 	writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
2254 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2255 	       FEC_MMFR_TA | (regnum & 0xFFFF),
2256 	       fep->hwp + FEC_MII_DATA);
2257 
2258 	/* wait for end of transfer */
2259 	ret = fec_enet_mdio_wait(fep);
2260 	if (ret) {
2261 		netdev_err(fep->netdev, "MDIO address write timeout\n");
2262 		goto out;
2263 	}
2264 
2265 	frame_op = FEC_MMFR_OP_READ_C45;
2266 
2267 	/* start a read op */
2268 	writel(frame_start | frame_op |
2269 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2270 	       FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
2271 
2272 	/* wait for end of transfer */
2273 	ret = fec_enet_mdio_wait(fep);
2274 	if (ret) {
2275 		netdev_err(fep->netdev, "MDIO read timeout\n");
2276 		goto out;
2277 	}
2278 
2279 	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
2280 
2281 out:
2282 	pm_runtime_put_autosuspend(dev);
2283 
2284 	return ret;
2285 }
2286 
2287 static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
2288 				   u16 value)
2289 {
2290 	struct fec_enet_private *fep = bus->priv;
2291 	struct device *dev = &fep->pdev->dev;
2292 	int ret, frame_start, frame_addr;
2293 
2294 	ret = pm_runtime_resume_and_get(dev);
2295 	if (ret < 0)
2296 		return ret;
2297 
2298 	/* C22 write */
2299 	frame_start = FEC_MMFR_ST;
2300 	frame_addr = regnum;
2301 
2302 	/* start a write op */
2303 	writel(frame_start | FEC_MMFR_OP_WRITE |
2304 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
2305 	       FEC_MMFR_TA | FEC_MMFR_DATA(value),
2306 	       fep->hwp + FEC_MII_DATA);
2307 
2308 	/* wait for end of transfer */
2309 	ret = fec_enet_mdio_wait(fep);
2310 	if (ret)
2311 		netdev_err(fep->netdev, "MDIO write timeout\n");
2312 
2313 	pm_runtime_put_autosuspend(dev);
2314 
2315 	return ret;
2316 }
2317 
2318 static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id,
2319 				   int devad, int regnum, u16 value)
2320 {
2321 	struct fec_enet_private *fep = bus->priv;
2322 	struct device *dev = &fep->pdev->dev;
2323 	int ret, frame_start;
2324 
2325 	ret = pm_runtime_resume_and_get(dev);
2326 	if (ret < 0)
2327 		return ret;
2328 
2329 	frame_start = FEC_MMFR_ST_C45;
2330 
2331 	/* write address */
2332 	writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
2333 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2334 	       FEC_MMFR_TA | (regnum & 0xFFFF),
2335 	       fep->hwp + FEC_MII_DATA);
2336 
2337 	/* wait for end of transfer */
2338 	ret = fec_enet_mdio_wait(fep);
2339 	if (ret) {
2340 		netdev_err(fep->netdev, "MDIO address write timeout\n");
2341 		goto out;
2342 	}
2343 
2344 	/* start a write op */
2345 	writel(frame_start | FEC_MMFR_OP_WRITE |
2346 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2347 	       FEC_MMFR_TA | FEC_MMFR_DATA(value),
2348 	       fep->hwp + FEC_MII_DATA);
2349 
2350 	/* wait for end of transfer */
2351 	ret = fec_enet_mdio_wait(fep);
2352 	if (ret)
2353 		netdev_err(fep->netdev, "MDIO write timeout\n");
2354 
2355 out:
2356 	pm_runtime_put_autosuspend(dev);
2357 
2358 	return ret;
2359 }
2360 
2361 static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
2362 {
2363 	struct fec_enet_private *fep = netdev_priv(ndev);
2364 	struct phy_device *phy_dev = ndev->phydev;
2365 
2366 	if (phy_dev) {
2367 		phy_reset_after_clk_enable(phy_dev);
2368 	} else if (fep->phy_node) {
2369 		/*
2370 		 * If the PHY is not yet bound to the MAC, but an OF PHY
2371 		 * node and a matching PHY device instance already exist,
2372 		 * use the OF node to obtain the PHY device instance, and
2373 		 * then use that instance when triggering
2374 		 * the PHY reset.
2375 		 */
2376 		phy_dev = of_phy_find_device(fep->phy_node);
2377 		phy_reset_after_clk_enable(phy_dev);
2378 		if (phy_dev)
2379 			put_device(&phy_dev->mdio.dev);
2380 	}
2381 }
2382 
2383 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
2384 {
2385 	struct fec_enet_private *fep = netdev_priv(ndev);
2386 	int ret;
2387 
2388 	if (enable) {
2389 		ret = clk_prepare_enable(fep->clk_enet_out);
2390 		if (ret)
2391 			return ret;
2392 
2393 		if (fep->clk_ptp) {
2394 			mutex_lock(&fep->ptp_clk_mutex);
2395 			ret = clk_prepare_enable(fep->clk_ptp);
2396 			if (ret) {
2397 				mutex_unlock(&fep->ptp_clk_mutex);
2398 				goto failed_clk_ptp;
2399 			} else {
2400 				fep->ptp_clk_on = true;
2401 			}
2402 			mutex_unlock(&fep->ptp_clk_mutex);
2403 		}
2404 
2405 		ret = clk_prepare_enable(fep->clk_ref);
2406 		if (ret)
2407 			goto failed_clk_ref;
2408 
2409 		ret = clk_prepare_enable(fep->clk_2x_txclk);
2410 		if (ret)
2411 			goto failed_clk_2x_txclk;
2412 
2413 		fec_enet_phy_reset_after_clk_enable(ndev);
2414 	} else {
2415 		clk_disable_unprepare(fep->clk_enet_out);
2416 		if (fep->clk_ptp) {
2417 			mutex_lock(&fep->ptp_clk_mutex);
2418 			clk_disable_unprepare(fep->clk_ptp);
2419 			fep->ptp_clk_on = false;
2420 			mutex_unlock(&fep->ptp_clk_mutex);
2421 		}
2422 		clk_disable_unprepare(fep->clk_ref);
2423 		clk_disable_unprepare(fep->clk_2x_txclk);
2424 	}
2425 
2426 	return 0;
2427 
2428 failed_clk_2x_txclk:
2429 	if (fep->clk_ref)
2430 		clk_disable_unprepare(fep->clk_ref);
2431 failed_clk_ref:
2432 	if (fep->clk_ptp) {
2433 		mutex_lock(&fep->ptp_clk_mutex);
2434 		clk_disable_unprepare(fep->clk_ptp);
2435 		fep->ptp_clk_on = false;
2436 		mutex_unlock(&fep->ptp_clk_mutex);
2437 	}
2438 failed_clk_ptp:
2439 	clk_disable_unprepare(fep->clk_enet_out);
2440 
2441 	return ret;
2442 }
2443 
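/* Parse the optional RGMII internal delay properties. A matching
 * device-tree fragment could look like this (node name assumed for
 * illustration):
 *
 *	&fec1 {
 *		tx-internal-delay-ps = <2000>;
 *		rx-internal-delay-ps = <2000>;
 *	};
 */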
2444 static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
2445 				      struct device_node *np)
2446 {
2447 	u32 rgmii_tx_delay, rgmii_rx_delay;
2448 
2449 	/* For rgmii tx internal delay, valid values are 0ps and 2000ps */
2450 	if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) {
2451 		if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) {
2452 			dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps");
2453 			return -EINVAL;
2454 		} else if (rgmii_tx_delay == 2000) {
2455 			fep->rgmii_txc_dly = true;
2456 		}
2457 	}
2458 
2459 	/* For rgmii rx internal delay, valid values are 0ps and 2000ps */
2460 	if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) {
2461 		if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) {
2462 			dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps");
2463 			return -EINVAL;
2464 		} else if (rgmii_rx_delay == 2000) {
2465 			fep->rgmii_rxc_dly = true;
2466 		}
2467 	}
2468 
2469 	return 0;
2470 }
2471 
2472 static int fec_enet_mii_probe(struct net_device *ndev)
2473 {
2474 	struct fec_enet_private *fep = netdev_priv(ndev);
2475 	struct phy_device *phy_dev;
2476 	int ret;
2477 
2478 	if (fep->phy_node) {
2479 		phy_dev = of_phy_connect(ndev, fep->phy_node,
2480 					 &fec_enet_adjust_link, 0,
2481 					 fep->phy_interface);
2482 		if (!phy_dev) {
2483 			netdev_err(ndev, "Unable to connect to phy\n");
2484 			return -ENODEV;
2485 		}
2486 	} else {
2487 		/* check for attached phy */
2488 		phy_dev = phy_find_first(fep->mii_bus);
2489 		if (fep->dev_id && phy_dev)
2490 			phy_dev = phy_find_next(fep->mii_bus, phy_dev);
2491 
2492 		if (!phy_dev) {
2493 			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
2494 			phy_dev = fixed_phy_register_100fd();
2495 			if (IS_ERR(phy_dev)) {
2496 				netdev_err(ndev, "could not register fixed PHY\n");
2497 				return PTR_ERR(phy_dev);
2498 			}
2499 		}
2500 
2501 		ret = phy_connect_direct(ndev, phy_dev, &fec_enet_adjust_link,
2502 					 fep->phy_interface);
2503 		if (ret) {
2504 			if (phy_is_pseudo_fixed_link(phy_dev))
2505 				fixed_phy_unregister(phy_dev);
2506 			netdev_err(ndev, "could not attach to PHY\n");
2507 			return ret;
2508 		}
2509 
2510 	}
2511 
2512 	/* mask with MAC supported features */
2513 	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
2514 		phy_set_max_speed(phy_dev, 1000);
2515 		phy_remove_link_mode(phy_dev,
2516 				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2517 		phy_support_sym_pause(phy_dev);
2518 	}
2519 	else
2520 		phy_set_max_speed(phy_dev, 100);
2521 
2522 	if (fep->quirks & FEC_QUIRK_HAS_EEE)
2523 		phy_support_eee(phy_dev);
2524 
2525 	fep->link = 0;
2526 	fep->full_duplex = 0;
2527 
2528 	phy_attached_info(phy_dev);
2529 
2530 	return 0;
2531 }
2532 
2533 static int fec_enet_mii_init(struct platform_device *pdev)
2534 {
2535 	static struct mii_bus *fec0_mii_bus;
2536 	struct net_device *ndev = platform_get_drvdata(pdev);
2537 	struct fec_enet_private *fep = netdev_priv(ndev);
2538 	bool suppress_preamble = false;
2539 	struct phy_device *phydev;
2540 	struct device_node *node;
2541 	int err = -ENXIO;
2542 	u32 mii_speed, holdtime;
2543 	u32 bus_freq;
2544 
2545 	/*
2546 	 * The i.MX28 dual fec interfaces are not equal.
2547 	 * Here are the differences:
2548 	 *
2549 	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
2550 	 *  - fec0 acts as the 1588 time master while fec1 is slave
2551 	 *  - external phys can only be configured by fec0
2552 	 *
2553 	 * That is to say, fec1 cannot work independently. It only works
2554 	 * when fec0 is working. The reason behind this design is that the
2555 	 * second interface is added primarily for Switch mode.
2556 	 *
2557 	 * Because of the last point above, both phys are attached on fec0
2558 	 * mdio interface in board design, and need to be configured by
2559 	 * fec0 mii_bus.
2560 	 */
2561 	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
2562 		/* fec1 uses fec0 mii_bus */
2563 		if (mii_cnt && fec0_mii_bus) {
2564 			fep->mii_bus = fec0_mii_bus;
2565 			mii_cnt++;
2566 			return 0;
2567 		}
2568 		return -ENOENT;
2569 	}
2570 
2571 	bus_freq = 2500000; /* 2.5MHz by default */
2572 	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
2573 	if (node) {
2574 		of_property_read_u32(node, "clock-frequency", &bus_freq);
2575 		suppress_preamble = of_property_read_bool(node,
2576 							  "suppress-preamble");
2577 	}
2578 
2579 	/*
2580 	 * Set the MII speed (MDC frequency) from the IPG clock rate.
2581 	 *
2582 	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
2583 	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
2584 	 * Reference Manual documents this incorrectly; it is fixed in the
2585 	 * i.MX6Q documentation.
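	 *
	 * Worked example (rates assumed for illustration): with clk_ipg at
	 * 66 MHz and the default 2.5 MHz bus_freq, DIV_ROUND_UP() gives 14,
	 * and the ENET-MAC quirk lowers MII_SPEED to 13, so
	 * MDC = 66 MHz / ((13 + 1) * 2) ~= 2.36 MHz, just under the target.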
2586 	 */
2587 	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
2588 	if (fep->quirks & FEC_QUIRK_ENET_MAC)
2589 		mii_speed--;
2590 	if (mii_speed > 63) {
2591 		dev_err(&pdev->dev,
2592 			"fec clock (%lu) too fast to get right mii speed\n",
2593 			clk_get_rate(fep->clk_ipg));
2594 		err = -EINVAL;
2595 		goto err_out;
2596 	}
2597 
2598 	/*
2599 	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
2600 	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
2601 	 * versions are RAZ there, so just ignore the difference and write the
2602 	 * register always.
2603 	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
2604 	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
2605 	 * output.
2606 	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
2607 	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
2608 	 * holdtime cannot result in a value greater than 3.
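	 * For example (rate assumed for illustration): with clk_ipg at
	 * 66 MHz, DIV_ROUND_UP(66000000, 100000000) - 1 = 0, i.e. the output
	 * is held for one clock cycle (~15 ns), satisfying the 10 ns minimum.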
2609 	 */
2610 	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2611 
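	/* Compose MSCR: MII_SPEED sits in bits 6:1, HOLDTIME in bits 10:8;
	 * bit 7 (set below when requested) suppresses the preamble.
	 */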
2612 	fep->phy_speed = mii_speed << 1 | holdtime << 8;
2613 
2614 	if (suppress_preamble)
2615 		fep->phy_speed |= BIT(7);
2616 
2617 	if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
2618 		/* Clear MMFR to avoid generating an MII event when writing MSCR.
2619 		 * MII event generation condition:
2620 		 * - writing MSCR:
2621 		 *	- mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
2622 		 *	  mscr_reg_data_in[7:0] != 0
2623 		 * - writing MMFR:
2624 		 *	- mscr[7:0]_not_zero
2625 		 */
2626 		writel(0, fep->hwp + FEC_MII_DATA);
2627 	}
2628 
2629 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2630 
2631 	/* Clear any pending transaction complete indication */
2632 	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2633 
2634 	fep->mii_bus = mdiobus_alloc();
2635 	if (fep->mii_bus == NULL) {
2636 		err = -ENOMEM;
2637 		goto err_out;
2638 	}
2639 
2640 	fep->mii_bus->name = "fec_enet_mii_bus";
2641 	fep->mii_bus->read = fec_enet_mdio_read_c22;
2642 	fep->mii_bus->write = fec_enet_mdio_write_c22;
2643 	if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
2644 		fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
2645 		fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
2646 	}
2647 	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2648 		pdev->name, fep->dev_id + 1);
2649 	fep->mii_bus->priv = fep;
2650 	fep->mii_bus->parent = &pdev->dev;
2651 
2652 	err = of_mdiobus_register(fep->mii_bus, node);
2653 	if (err)
2654 		goto err_out_free_mdiobus;
2655 	of_node_put(node);
2656 
2657 	/* find all the PHY devices on the bus and set mac_managed_pm to true */
2658 	mdiobus_for_each_phy(fep->mii_bus, phydev)
2659 		phydev->mac_managed_pm = true;
2660 
2661 	mii_cnt++;
2662 
2663 	/* save fec0 mii_bus */
2664 	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2665 		fec0_mii_bus = fep->mii_bus;
2666 
2667 	return 0;
2668 
2669 err_out_free_mdiobus:
2670 	mdiobus_free(fep->mii_bus);
2671 err_out:
2672 	of_node_put(node);
2673 	return err;
2674 }
2675 
2676 static void fec_enet_mii_remove(struct fec_enet_private *fep)
2677 {
2678 	if (--mii_cnt == 0) {
2679 		mdiobus_unregister(fep->mii_bus);
2680 		mdiobus_free(fep->mii_bus);
2681 	}
2682 }
2683 
2684 static void fec_enet_get_drvinfo(struct net_device *ndev,
2685 				 struct ethtool_drvinfo *info)
2686 {
2687 	struct fec_enet_private *fep = netdev_priv(ndev);
2688 
2689 	strscpy(info->driver, fep->pdev->dev.driver->name,
2690 		sizeof(info->driver));
2691 	strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
2692 }
2693 
2694 static int fec_enet_get_regs_len(struct net_device *ndev)
2695 {
2696 	struct fec_enet_private *fep = netdev_priv(ndev);
2697 	struct resource *r;
2698 	int s = 0;
2699 
2700 	r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
2701 	if (r)
2702 		s = resource_size(r);
2703 
2704 	return s;
2705 }
2706 
2707 /* List of registers that can safely be read and dumped with ethtool */
2708 #if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
2709 static __u32 fec_enet_register_version = 2;
2710 static u32 fec_enet_register_offset[] = {
2711 	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2712 	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2713 	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
2714 	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
2715 	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
2716 	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
2717 	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
2718 	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
2719 	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2720 	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
2721 	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
2722 	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
2723 	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2724 	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2725 	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2726 	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2727 	RMON_T_P_GTE2048, RMON_T_OCTETS,
2728 	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2729 	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2730 	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2731 	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2732 	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2733 	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2734 	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2735 	RMON_R_P_GTE2048, RMON_R_OCTETS,
2736 	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2737 	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2738 };
2739 /* for i.MX6ul */
2740 static u32 fec_enet_register_offset_6ul[] = {
2741 	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2742 	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2743 	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
2744 	FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
2745 	FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
2746 	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2747 	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
2748 	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2749 	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2750 	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2751 	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2752 	RMON_T_P_GTE2048, RMON_T_OCTETS,
2753 	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2754 	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2755 	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2756 	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2757 	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2758 	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2759 	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2760 	RMON_R_P_GTE2048, RMON_R_OCTETS,
2761 	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2762 	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2763 };
2764 #else
2765 static __u32 fec_enet_register_version = 1;
2766 static u32 fec_enet_register_offset[] = {
2767 	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
2768 	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
2769 	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
2770 	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
2771 	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
2772 	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
2773 	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
2774 	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
2775 	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
2776 };
2777 #endif
2778 
2779 static void fec_enet_get_regs(struct net_device *ndev,
2780 			      struct ethtool_regs *regs, void *regbuf)
2781 {
2782 	u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
2783 	struct fec_enet_private *fep = netdev_priv(ndev);
2784 	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
2785 	u32 *reg_list = fec_enet_register_offset;
2786 	struct device *dev = &fep->pdev->dev;
2787 	u32 *buf = (u32 *)regbuf;
2788 	u32 i, off;
2789 	int ret;
2790 
2791 #if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
2792 	if (of_machine_is_compatible("fsl,imx6ul")) {
2793 		reg_list = fec_enet_register_offset_6ul;
2794 		reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
2795 	}
2796 #endif
2797 
2798 	ret = pm_runtime_resume_and_get(dev);
2799 	if (ret < 0)
2800 		return;
2801 
2802 	regs->version = fec_enet_register_version;
2803 
2804 	memset(buf, 0, regs->len);
2805 
2806 	for (i = 0; i < reg_cnt; i++) {
2807 		off = reg_list[i];
2808 
2809 		if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
2810 		    !(fep->quirks & FEC_QUIRK_HAS_FRREG))
2811 			continue;
2812 
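		/* off is a byte offset; convert it to a u32 index so each
		 * value lands at its natural position in the dump buffer.
		 */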
2813 		off >>= 2;
2814 		buf[off] = readl(&theregs[off]);
2815 	}
2816 
2817 	pm_runtime_put_autosuspend(dev);
2818 }
2819 
2820 static int fec_enet_get_ts_info(struct net_device *ndev,
2821 				struct kernel_ethtool_ts_info *info)
2822 {
2823 	struct fec_enet_private *fep = netdev_priv(ndev);
2824 
2825 	if (fep->bufdesc_ex) {
2826 
2827 		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2828 					SOF_TIMESTAMPING_TX_HARDWARE |
2829 					SOF_TIMESTAMPING_RX_HARDWARE |
2830 					SOF_TIMESTAMPING_RAW_HARDWARE;
2831 		if (fep->ptp_clock)
2832 			info->phc_index = ptp_clock_index(fep->ptp_clock);
2833 
2834 		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
2835 				 (1 << HWTSTAMP_TX_ON);
2836 
2837 		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2838 				   (1 << HWTSTAMP_FILTER_ALL);
2839 		return 0;
2840 	} else {
2841 		return ethtool_op_get_ts_info(ndev, info);
2842 	}
2843 }
2844 
2845 #if !defined(CONFIG_M5272)
2846 
2847 static void fec_enet_get_pauseparam(struct net_device *ndev,
2848 				    struct ethtool_pauseparam *pause)
2849 {
2850 	struct fec_enet_private *fep = netdev_priv(ndev);
2851 
2852 	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2853 	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2854 	pause->rx_pause = pause->tx_pause;
2855 }
2856 
2857 static int fec_enet_set_pauseparam(struct net_device *ndev,
2858 				   struct ethtool_pauseparam *pause)
2859 {
2860 	struct fec_enet_private *fep = netdev_priv(ndev);
2861 
2862 	if (!ndev->phydev)
2863 		return -ENODEV;
2864 
2865 	if (pause->tx_pause != pause->rx_pause) {
2866 		netdev_info(ndev,
2867 			"hardware only supports enabling/disabling tx and rx together");
2868 		return -EINVAL;
2869 	}
2870 
2871 	fep->pause_flag = 0;
2872 
2873 	/* tx pause must be same as rx pause */
2874 	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2875 	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2876 
2877 	phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
2878 			  pause->autoneg);
2879 
2880 	if (pause->autoneg) {
2881 		if (netif_running(ndev))
2882 			fec_stop(ndev);
2883 		phy_start_aneg(ndev->phydev);
2884 	}
2885 	if (netif_running(ndev)) {
2886 		napi_disable(&fep->napi);
2887 		netif_tx_lock_bh(ndev);
2888 		fec_restart(ndev);
2889 		netif_tx_wake_all_queues(ndev);
2890 		netif_tx_unlock_bh(ndev);
2891 		napi_enable(&fep->napi);
2892 	}
2893 
2894 	return 0;
2895 }
2896 
2897 static const struct fec_stat {
2898 	char name[ETH_GSTRING_LEN];
2899 	u16 offset;
2900 } fec_stats[] = {
2901 	/* RMON TX */
2902 	{ "tx_dropped", RMON_T_DROP },
2903 	{ "tx_packets", RMON_T_PACKETS },
2904 	{ "tx_broadcast", RMON_T_BC_PKT },
2905 	{ "tx_multicast", RMON_T_MC_PKT },
2906 	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
2907 	{ "tx_undersize", RMON_T_UNDERSIZE },
2908 	{ "tx_oversize", RMON_T_OVERSIZE },
2909 	{ "tx_fragment", RMON_T_FRAG },
2910 	{ "tx_jabber", RMON_T_JAB },
2911 	{ "tx_collision", RMON_T_COL },
2912 	{ "tx_64byte", RMON_T_P64 },
2913 	{ "tx_65to127byte", RMON_T_P65TO127 },
2914 	{ "tx_128to255byte", RMON_T_P128TO255 },
2915 	{ "tx_256to511byte", RMON_T_P256TO511 },
2916 	{ "tx_512to1023byte", RMON_T_P512TO1023 },
2917 	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
2918 	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
2919 	{ "tx_octets", RMON_T_OCTETS },
2920 
2921 	/* IEEE TX */
2922 	{ "IEEE_tx_drop", IEEE_T_DROP },
2923 	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2924 	{ "IEEE_tx_1col", IEEE_T_1COL },
2925 	{ "IEEE_tx_mcol", IEEE_T_MCOL },
2926 	{ "IEEE_tx_def", IEEE_T_DEF },
2927 	{ "IEEE_tx_lcol", IEEE_T_LCOL },
2928 	{ "IEEE_tx_excol", IEEE_T_EXCOL },
2929 	{ "IEEE_tx_macerr", IEEE_T_MACERR },
2930 	{ "IEEE_tx_cserr", IEEE_T_CSERR },
2931 	{ "IEEE_tx_sqe", IEEE_T_SQE },
2932 	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2933 	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2934 
2935 	/* RMON RX */
2936 	{ "rx_packets", RMON_R_PACKETS },
2937 	{ "rx_broadcast", RMON_R_BC_PKT },
2938 	{ "rx_multicast", RMON_R_MC_PKT },
2939 	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
2940 	{ "rx_undersize", RMON_R_UNDERSIZE },
2941 	{ "rx_oversize", RMON_R_OVERSIZE },
2942 	{ "rx_fragment", RMON_R_FRAG },
2943 	{ "rx_jabber", RMON_R_JAB },
2944 	{ "rx_64byte", RMON_R_P64 },
2945 	{ "rx_65to127byte", RMON_R_P65TO127 },
2946 	{ "rx_128to255byte", RMON_R_P128TO255 },
2947 	{ "rx_256to511byte", RMON_R_P256TO511 },
2948 	{ "rx_512to1023byte", RMON_R_P512TO1023 },
2949 	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
2950 	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
2951 	{ "rx_octets", RMON_R_OCTETS },
2952 
2953 	/* IEEE RX */
2954 	{ "IEEE_rx_drop", IEEE_R_DROP },
2955 	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2956 	{ "IEEE_rx_crc", IEEE_R_CRC },
2957 	{ "IEEE_rx_align", IEEE_R_ALIGN },
2958 	{ "IEEE_rx_macerr", IEEE_R_MACERR },
2959 	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2960 	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2961 };
2962 
2963 #define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))
2964 
2965 static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
2966 	"rx_xdp_redirect",           /* RX_XDP_REDIRECT = 0, */
2967 	"rx_xdp_pass",               /* RX_XDP_PASS, */
2968 	"rx_xdp_drop",               /* RX_XDP_DROP, */
2969 	"rx_xdp_tx",                 /* RX_XDP_TX, */
2970 	"rx_xdp_tx_errors",          /* RX_XDP_TX_ERRORS, */
2971 	"tx_xdp_xmit",               /* TX_XDP_XMIT, */
2972 	"tx_xdp_xmit_errors",        /* TX_XDP_XMIT_ERRORS, */
2973 };
2974 
2975 static void fec_enet_update_ethtool_stats(struct net_device *dev)
2976 {
2977 	struct fec_enet_private *fep = netdev_priv(dev);
2978 	int i;
2979 
2980 	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2981 		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2982 }
2983 
2984 static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
2985 {
2986 	u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
2987 	struct fec_enet_priv_rx_q *rxq;
2988 	int i, j;
2989 
2990 	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2991 		rxq = fep->rx_queue[i];
2992 
2993 		for (j = 0; j < XDP_STATS_TOTAL; j++)
2994 			xdp_stats[j] += rxq->stats[j];
2995 	}
2996 
2997 	memcpy(data, xdp_stats, sizeof(xdp_stats));
2998 }
2999 
3000 static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
3001 {
3002 #ifdef CONFIG_PAGE_POOL_STATS
3003 	struct page_pool_stats stats = {};
3004 	struct fec_enet_priv_rx_q *rxq;
3005 	int i;
3006 
3007 	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
3008 		rxq = fep->rx_queue[i];
3009 
3010 		if (!rxq->page_pool)
3011 			continue;
3012 
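		/* page_pool_get_stats() accumulates into @stats, so calling it
		 * once per queue sums the counters across all RX page pools.
		 */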
3013 		page_pool_get_stats(rxq->page_pool, &stats);
3014 	}
3015 
3016 	page_pool_ethtool_stats_get(data, &stats);
3017 #endif
3018 }
3019 
3020 static void fec_enet_get_ethtool_stats(struct net_device *dev,
3021 				       struct ethtool_stats *stats, u64 *data)
3022 {
3023 	struct fec_enet_private *fep = netdev_priv(dev);
3024 
3025 	if (netif_running(dev))
3026 		fec_enet_update_ethtool_stats(dev);
3027 
3028 	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
3029 	data += FEC_STATS_SIZE / sizeof(u64);
3030 
3031 	fec_enet_get_xdp_stats(fep, data);
3032 	data += XDP_STATS_TOTAL;
3033 
3034 	fec_enet_page_pool_stats(fep, data);
3035 }
3036 
3037 static void fec_enet_get_strings(struct net_device *netdev,
3038 	u32 stringset, u8 *data)
3039 {
3040 	int i;
3041 	switch (stringset) {
3042 	case ETH_SS_STATS:
3043 		for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
3044 			ethtool_puts(&data, fec_stats[i].name);
3045 		}
3046 		for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
3047 			ethtool_puts(&data, fec_xdp_stat_strs[i]);
3048 		}
3049 		page_pool_ethtool_stats_get_strings(data);
3050 
3051 		break;
3052 	case ETH_SS_TEST:
3053 		net_selftest_get_strings(data);
3054 		break;
3055 	}
3056 }
3057 
3058 static int fec_enet_get_sset_count(struct net_device *dev, int sset)
3059 {
3060 	int count;
3061 
3062 	switch (sset) {
3063 	case ETH_SS_STATS:
3064 		count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
3065 		count += page_pool_ethtool_stats_get_count();
3066 		return count;
3067 
3068 	case ETH_SS_TEST:
3069 		return net_selftest_get_count();
3070 	default:
3071 		return -EOPNOTSUPP;
3072 	}
3073 }
3074 
3075 static void fec_enet_clear_ethtool_stats(struct net_device *dev)
3076 {
3077 	struct fec_enet_private *fep = netdev_priv(dev);
3078 	struct fec_enet_priv_rx_q *rxq;
3079 	int i, j;
3080 
3081 	/* Disable MIB statistics counters */
3082 	writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
3083 
3084 	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
3085 		writel(0, fep->hwp + fec_stats[i].offset);
3086 
3087 	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
3088 		rxq = fep->rx_queue[i];
3089 		for (j = 0; j < XDP_STATS_TOTAL; j++)
3090 			rxq->stats[j] = 0;
3091 	}
3092 
3093 	/* Re-enable the MIB statistics counters */
3094 	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
3095 }
3096 
3097 #else	/* !defined(CONFIG_M5272) */
3098 #define FEC_STATS_SIZE	0
3099 static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
3100 {
3101 }
3102 
3103 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
3104 {
3105 }
3106 #endif /* !defined(CONFIG_M5272) */
3107 
3108 /* ITR clock source is enet system clock (clk_ahb).
3109  * The TCTT unit is 64 clock cycles (cycle_ns * 64),
3110  * so the ICTT value = X us / (cycle_ns * 64).
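 * For example (rate assumed for illustration): with clk_ahb at 66 MHz,
 * fec_enet_us_to_itr_clock(ndev, 30) returns
 * 30 * (66000000 / 64000) / 1000 = 30 ticks of ~0.97 us each.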
3111  */
3112 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
3113 {
3114 	struct fec_enet_private *fep = netdev_priv(ndev);
3115 
3116 	return us * (fep->itr_clk_rate / 64000) / 1000;
3117 }
3118 
3119 /* Set threshold for interrupt coalescing */
3120 static void fec_enet_itr_coal_set(struct net_device *ndev)
3121 {
3122 	struct fec_enet_private *fep = netdev_priv(ndev);
3123 	u32 rx_itr = 0, tx_itr = 0;
3124 	int rx_ictt, tx_ictt;
3125 
3126 	rx_ictt = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
3127 	tx_ictt = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
3128 
3129 	if (rx_ictt > 0 && fep->rx_pkts_itr > 1) {
3130 		/* Enable with the enet system clock as the interrupt coalescing timer clock source */
3131 		rx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
3132 		rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
3133 		rx_itr |= FEC_ITR_ICTT(rx_ictt);
3134 	}
3135 
3136 	if (tx_ictt > 0 && fep->tx_pkts_itr > 1) {
3137 		/* Enable with the enet system clock as the interrupt coalescing timer clock source */
3138 		tx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
3139 		tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
3140 		tx_itr |= FEC_ITR_ICTT(tx_ictt);
3141 	}
3142 
3143 	writel(tx_itr, fep->hwp + FEC_TXIC0);
3144 	writel(rx_itr, fep->hwp + FEC_RXIC0);
3145 	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
3146 		writel(tx_itr, fep->hwp + FEC_TXIC1);
3147 		writel(rx_itr, fep->hwp + FEC_RXIC1);
3148 		writel(tx_itr, fep->hwp + FEC_TXIC2);
3149 		writel(rx_itr, fep->hwp + FEC_RXIC2);
3150 	}
3151 }
3152 
3153 static int fec_enet_get_coalesce(struct net_device *ndev,
3154 				 struct ethtool_coalesce *ec,
3155 				 struct kernel_ethtool_coalesce *kernel_coal,
3156 				 struct netlink_ext_ack *extack)
3157 {
3158 	struct fec_enet_private *fep = netdev_priv(ndev);
3159 
3160 	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3161 		return -EOPNOTSUPP;
3162 
3163 	ec->rx_coalesce_usecs = fep->rx_time_itr;
3164 	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
3165 
3166 	ec->tx_coalesce_usecs = fep->tx_time_itr;
3167 	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
3168 
3169 	return 0;
3170 }
3171 
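/* Example (interface name assumed for illustration): make the RX interrupt
 * fire after 100 us or 32 frames, whichever comes first:
 *
 *	ethtool -C eth0 rx-usecs 100 rx-frames 32
 */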
3172 static int fec_enet_set_coalesce(struct net_device *ndev,
3173 				 struct ethtool_coalesce *ec,
3174 				 struct kernel_ethtool_coalesce *kernel_coal,
3175 				 struct netlink_ext_ack *extack)
3176 {
3177 	struct fec_enet_private *fep = netdev_priv(ndev);
3178 	struct device *dev = &fep->pdev->dev;
3179 	unsigned int cycle;
3180 
3181 	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3182 		return -EOPNOTSUPP;
3183 
3184 	if (ec->rx_max_coalesced_frames > 255) {
3185 		dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
3186 		return -EINVAL;
3187 	}
3188 
3189 	if (ec->tx_max_coalesced_frames > 255) {
3190 		dev_err(dev, "Tx coalesced frames exceed hardware limitation\n");
3191 		return -EINVAL;
3192 	}
3193 
3194 	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
3195 	if (cycle > 0xFFFF) {
3196 		dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
3197 		return -EINVAL;
3198 	}
3199 
3200 	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
3201 	if (cycle > 0xFFFF) {
3202 		dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
3203 		return -EINVAL;
3204 	}
3205 
3206 	fep->rx_time_itr = ec->rx_coalesce_usecs;
3207 	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
3208 
3209 	fep->tx_time_itr = ec->tx_coalesce_usecs;
3210 	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
3211 
3212 	fec_enet_itr_coal_set(ndev);
3213 
3214 	return 0;
3215 }
3216 
3217 static int
3218 fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
3219 {
3220 	struct fec_enet_private *fep = netdev_priv(ndev);
3221 
3222 	if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3223 		return -EOPNOTSUPP;
3224 
3225 	if (!netif_running(ndev))
3226 		return -ENETDOWN;
3227 
3228 	return phy_ethtool_get_eee(ndev->phydev, edata);
3229 }
3230 
3231 static int
3232 fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
3233 {
3234 	struct fec_enet_private *fep = netdev_priv(ndev);
3235 
3236 	if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3237 		return -EOPNOTSUPP;
3238 
3239 	if (!netif_running(ndev))
3240 		return -ENETDOWN;
3241 
3242 	return phy_ethtool_set_eee(ndev->phydev, edata);
3243 }
3244 
3245 static void
3246 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3247 {
3248 	struct fec_enet_private *fep = netdev_priv(ndev);
3249 
3250 	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
3251 		wol->supported = WAKE_MAGIC;
3252 		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
3253 	} else {
3254 		wol->supported = wol->wolopts = 0;
3255 	}
3256 }
3257 
3258 static int
3259 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3260 {
3261 	struct fec_enet_private *fep = netdev_priv(ndev);
3262 
3263 	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
3264 		return -EINVAL;
3265 
3266 	if (wol->wolopts & ~WAKE_MAGIC)
3267 		return -EINVAL;
3268 
3269 	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
3270 	if (device_may_wakeup(&ndev->dev))
3271 		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
3272 	else
3273 		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
3274 
3275 	return 0;
3276 }
3277 
3278 static const struct ethtool_ops fec_enet_ethtool_ops = {
3279 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3280 				     ETHTOOL_COALESCE_MAX_FRAMES,
3281 	.get_drvinfo		= fec_enet_get_drvinfo,
3282 	.get_regs_len		= fec_enet_get_regs_len,
3283 	.get_regs		= fec_enet_get_regs,
3284 	.nway_reset		= phy_ethtool_nway_reset,
3285 	.get_link		= ethtool_op_get_link,
3286 	.get_coalesce		= fec_enet_get_coalesce,
3287 	.set_coalesce		= fec_enet_set_coalesce,
3288 #ifndef CONFIG_M5272
3289 	.get_pauseparam		= fec_enet_get_pauseparam,
3290 	.set_pauseparam		= fec_enet_set_pauseparam,
3291 	.get_strings		= fec_enet_get_strings,
3292 	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
3293 	.get_sset_count		= fec_enet_get_sset_count,
3294 #endif
3295 	.get_ts_info		= fec_enet_get_ts_info,
3296 	.get_wol		= fec_enet_get_wol,
3297 	.set_wol		= fec_enet_set_wol,
3298 	.get_eee		= fec_enet_get_eee,
3299 	.set_eee		= fec_enet_set_eee,
3300 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
3301 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
3302 	.self_test		= net_selftest,
3303 };
3304 
3305 static void fec_enet_free_buffers(struct net_device *ndev)
3306 {
3307 	struct fec_enet_private *fep = netdev_priv(ndev);
3308 	unsigned int i;
3309 	struct fec_enet_priv_tx_q *txq;
3310 	struct fec_enet_priv_rx_q *rxq;
3311 	unsigned int q;
3312 
3313 	for (q = 0; q < fep->num_rx_queues; q++) {
3314 		rxq = fep->rx_queue[q];
3315 		for (i = 0; i < rxq->bd.ring_size; i++)
3316 			page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
3317 						false);
3318 
3319 		for (i = 0; i < XDP_STATS_TOTAL; i++)
3320 			rxq->stats[i] = 0;
3321 
3322 		if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
3323 			xdp_rxq_info_unreg(&rxq->xdp_rxq);
3324 		page_pool_destroy(rxq->page_pool);
3325 		rxq->page_pool = NULL;
3326 	}
3327 
3328 	for (q = 0; q < fep->num_tx_queues; q++) {
3329 		txq = fep->tx_queue[q];
3330 		for (i = 0; i < txq->bd.ring_size; i++) {
3331 			kfree(txq->tx_bounce[i]);
3332 			txq->tx_bounce[i] = NULL;
3333 
3334 			if (!txq->tx_buf[i].buf_p) {
3335 				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3336 				continue;
3337 			}
3338 
3339 			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
3340 				dev_kfree_skb(txq->tx_buf[i].buf_p);
3341 			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
3342 				xdp_return_frame(txq->tx_buf[i].buf_p);
3343 			} else {
3344 				struct page *page = txq->tx_buf[i].buf_p;
3345 
3346 				page_pool_put_page(pp_page_to_nmdesc(page)->pp,
3347 						   page, 0, false);
3348 			}
3349 
3350 			txq->tx_buf[i].buf_p = NULL;
3351 			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3352 		}
3353 	}
3354 }
3355 
3356 static void fec_enet_free_queue(struct net_device *ndev)
3357 {
3358 	struct fec_enet_private *fep = netdev_priv(ndev);
3359 	int i;
3360 	struct fec_enet_priv_tx_q *txq;
3361 
3362 	for (i = 0; i < fep->num_tx_queues; i++)
3363 		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
3364 			txq = fep->tx_queue[i];
3365 			fec_dma_free(&fep->pdev->dev,
3366 				     txq->bd.ring_size * TSO_HEADER_SIZE,
3367 				     txq->tso_hdrs, txq->tso_hdrs_dma);
3368 		}
3369 
3370 	for (i = 0; i < fep->num_rx_queues; i++)
3371 		kfree(fep->rx_queue[i]);
3372 	for (i = 0; i < fep->num_tx_queues; i++)
3373 		kfree(fep->tx_queue[i]);
3374 }
3375 
3376 static int fec_enet_alloc_queue(struct net_device *ndev)
3377 {
3378 	struct fec_enet_private *fep = netdev_priv(ndev);
3379 	int i;
3380 	int ret = 0;
3381 	struct fec_enet_priv_tx_q *txq;
3382 
3383 	for (i = 0; i < fep->num_tx_queues; i++) {
3384 		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
3385 		if (!txq) {
3386 			ret = -ENOMEM;
3387 			goto alloc_failed;
3388 		}
3389 
3390 		fep->tx_queue[i] = txq;
3391 		txq->bd.ring_size = TX_RING_SIZE;
3392 		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
3393 
3394 		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
3395 		txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
3396 
3397 		txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev,
3398 					txq->bd.ring_size * TSO_HEADER_SIZE,
3399 					&txq->tso_hdrs_dma, GFP_KERNEL);
3400 		if (!txq->tso_hdrs) {
3401 			ret = -ENOMEM;
3402 			goto alloc_failed;
3403 		}
3404 	}
3405 
3406 	for (i = 0; i < fep->num_rx_queues; i++) {
3407 		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
3408 					   GFP_KERNEL);
3409 		if (!fep->rx_queue[i]) {
3410 			ret = -ENOMEM;
3411 			goto alloc_failed;
3412 		}
3413 
3414 		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
3415 		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
3416 	}
3417 	return ret;
3418 
3419 alloc_failed:
3420 	fec_enet_free_queue(ndev);
3421 	return ret;
3422 }
3423 
3424 static int
3425 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
3426 {
3427 	struct fec_enet_private *fep = netdev_priv(ndev);
3428 	struct fec_enet_priv_rx_q *rxq;
3429 	dma_addr_t phys_addr;
3430 	struct bufdesc	*bdp;
3431 	struct page *page;
3432 	int i, err;
3433 
3434 	rxq = fep->rx_queue[queue];
3435 	bdp = rxq->bd.base;
3436 
3437 	err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
3438 	if (err < 0) {
3439 		netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
3440 		return err;
3441 	}
3442 
3443 	/* Some platforms require the RX buffer to be 64-byte aligned,
3444 	 * some require 16-byte alignment, and some require 4-byte
3445 	 * alignment. But since the page pool was introduced into the
3446 	 * driver, the RX buffer address is always the page address plus
3447 	 * FEC_ENET_XDP_HEADROOM, and FEC_ENET_XDP_HEADROOM is 256 bytes.
3448 	 * Therefore, this address satisfies all platforms. To prevent
3449 	 * future modifications to
3450 	 * FEC_ENET_XDP_HEADROOM from ignoring this hardware limitation, a
3451 	 * BUILD_BUG_ON() test has been added, which ensures that
3452 	 * FEC_ENET_XDP_HEADROOM provides the required alignment.
3453 	 */
3454 	BUILD_BUG_ON(FEC_ENET_XDP_HEADROOM & 0x3f);
3455 
3456 	for (i = 0; i < rxq->bd.ring_size; i++) {
3457 		page = page_pool_dev_alloc_pages(rxq->page_pool);
3458 		if (!page)
3459 			goto err_alloc;
3460 
3461 		phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
3462 		bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
3463 
3464 		rxq->rx_buf[i] = page;
3465 		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
3466 
3467 		if (fep->bufdesc_ex) {
3468 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3469 			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
3470 		}
3471 
3472 		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
3473 	}
3474 
3475 	/* Set the last buffer to wrap. */
3476 	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
3477 	bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
3478 	return 0;
3479 
3480  err_alloc:
3481 	fec_enet_free_buffers(ndev);
3482 	return -ENOMEM;
3483 }
3484 
3485 static int
3486 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
3487 {
3488 	struct fec_enet_private *fep = netdev_priv(ndev);
3489 	unsigned int i;
3490 	struct bufdesc  *bdp;
3491 	struct fec_enet_priv_tx_q *txq;
3492 
3493 	txq = fep->tx_queue[queue];
3494 	bdp = txq->bd.base;
3495 	for (i = 0; i < txq->bd.ring_size; i++) {
3496 		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
3497 		if (!txq->tx_bounce[i])
3498 			goto err_alloc;
3499 
3500 		bdp->cbd_sc = cpu_to_fec16(0);
3501 		bdp->cbd_bufaddr = cpu_to_fec32(0);
3502 
3503 		if (fep->bufdesc_ex) {
3504 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3505 			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
3506 		}
3507 
3508 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3509 	}
3510 
3511 	/* Set the last buffer to wrap. */
3512 	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
3513 	bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
3514 
3515 	return 0;
3516 
3517  err_alloc:
3518 	fec_enet_free_buffers(ndev);
3519 	return -ENOMEM;
3520 }
3521 
3522 static int fec_enet_alloc_buffers(struct net_device *ndev)
3523 {
3524 	struct fec_enet_private *fep = netdev_priv(ndev);
3525 	unsigned int i;
3526 
3527 	for (i = 0; i < fep->num_rx_queues; i++)
3528 		if (fec_enet_alloc_rxq_buffers(ndev, i))
3529 			return -ENOMEM;
3530 
3531 	for (i = 0; i < fep->num_tx_queues; i++)
3532 		if (fec_enet_alloc_txq_buffers(ndev, i))
3533 			return -ENOMEM;
3534 	return 0;
3535 }
3536 
3537 static int
3538 fec_enet_open(struct net_device *ndev)
3539 {
3540 	struct fec_enet_private *fep = netdev_priv(ndev);
3541 	int ret;
3542 	bool reset_again;
3543 
3544 	ret = pm_runtime_resume_and_get(&fep->pdev->dev);
3545 	if (ret < 0)
3546 		return ret;
3547 
3548 	pinctrl_pm_select_default_state(&fep->pdev->dev);
3549 	ret = fec_enet_clk_enable(ndev, true);
3550 	if (ret)
3551 		goto clk_enable;
3552 
3553 	/* During the first fec_enet_open() call the PHY isn't probed yet,
3554 	 * so the phy_reset_after_clk_enable() call within
3555 	 * fec_enet_clk_enable() fails. As we need this reset to be sure
3556 	 * the PHY is working correctly, note whether we have to reset
3557 	 * again later, once the PHY has been probed.
3558 	 */
3559 	if (ndev->phydev && ndev->phydev->drv)
3560 		reset_again = false;
3561 	else
3562 		reset_again = true;
3563 
3564 	/* I should reset the ring buffers here, but I don't yet know
3565 	 * a simple way to do that.
3566 	 */
3567 
3568 	ret = fec_enet_alloc_buffers(ndev);
3569 	if (ret)
3570 		goto err_enet_alloc;
3571 
3572 	/* Init MAC prior to mii bus probe */
3573 	fec_restart(ndev);
3574 
3575 	/* Call phy_reset_after_clk_enable() again if it failed during
3576 	 * fec_enet_clk_enable() earlier because the PHY wasn't probed yet.
3577 	 */
3578 	if (reset_again)
3579 		fec_enet_phy_reset_after_clk_enable(ndev);
3580 
3581 	/* Probe and connect to the PHY when opening the interface */
3582 	ret = fec_enet_mii_probe(ndev);
3583 	if (ret)
3584 		goto err_enet_mii_probe;
3585 
3586 	if (fep->quirks & FEC_QUIRK_ERR006687)
3587 		imx6q_cpuidle_fec_irqs_used();
3588 
3589 	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3590 		cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
3591 
3592 	napi_enable(&fep->napi);
3593 	phy_start(ndev->phydev);
3594 	netif_tx_start_all_queues(ndev);
3595 
3596 	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
3597 				 FEC_WOL_FLAG_ENABLE);
3598 
3599 	return 0;
3600 
3601 err_enet_mii_probe:
3602 	fec_enet_free_buffers(ndev);
3603 err_enet_alloc:
3604 	fec_enet_clk_enable(ndev, false);
3605 clk_enable:
3606 	pm_runtime_put_autosuspend(&fep->pdev->dev);
3607 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3608 	return ret;
3609 }
3610 
3611 static int
3612 fec_enet_close(struct net_device *ndev)
3613 {
3614 	struct fec_enet_private *fep = netdev_priv(ndev);
3615 	struct phy_device *phy_dev = ndev->phydev;
3616 
3617 	phy_stop(phy_dev);
3618 
3619 	if (netif_device_present(ndev)) {
3620 		napi_disable(&fep->napi);
3621 		netif_tx_disable(ndev);
3622 		fec_stop(ndev);
3623 	}
3624 
3625 	phy_disconnect(phy_dev);
3626 
3627 	if (!fep->phy_node && phy_is_pseudo_fixed_link(phy_dev))
3628 		fixed_phy_unregister(phy_dev);
3629 
3630 	if (fep->quirks & FEC_QUIRK_ERR006687)
3631 		imx6q_cpuidle_fec_irqs_unused();
3632 
3633 	fec_enet_update_ethtool_stats(ndev);
3634 
3635 	fec_enet_clk_enable(ndev, false);
3636 	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3637 		cpu_latency_qos_remove_request(&fep->pm_qos_req);
3638 
3639 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3640 	pm_runtime_put_autosuspend(&fep->pdev->dev);
3641 
3642 	fec_enet_free_buffers(ndev);
3643 
3644 	return 0;
3645 }
3646 
3647 /* Set or clear the multicast filter for this adaptor.
3648  * Skeleton taken from sunlance driver.
3649  * The CPM Ethernet implementation allows Multicast as well as individual
3650  * MAC address filtering.  Some of the drivers check to make sure it is
3651  * a group multicast address, and discard those that are not.  I guess I
3652  * will do the same for now, but just remove the test if you want
3653  * individual filtering as well (do the upper net layers want or support
3654  * this kind of feature?).
3655  */
3656 
3657 #define FEC_HASH_BITS	6		/* #bits in hash */
3658 
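/* Hash filtering sketch (illustrative, matching set_multicast_list()
 * below): the CRC-32 of the MAC address is computed LSB-first, and the
 * top FEC_HASH_BITS (6) bits select one of 64 filter bits, split across
 * the high and low group hash registers. For a hypothetical hash value
 * of 35, for example:
 *
 *	hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;	// hash = 35
 *	hash_high |= 1 << (hash - 32);			// bit 3 of the high register
 *
 * so a frame is accepted whenever the hardware computes a bucket whose
 * bit is set.
 */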
3659 static void set_multicast_list(struct net_device *ndev)
3660 {
3661 	struct fec_enet_private *fep = netdev_priv(ndev);
3662 	struct netdev_hw_addr *ha;
3663 	unsigned int crc, tmp;
3664 	unsigned char hash;
3665 	unsigned int hash_high = 0, hash_low = 0;
3666 
3667 	if (ndev->flags & IFF_PROMISC) {
3668 		tmp = readl(fep->hwp + FEC_R_CNTRL);
3669 		tmp |= 0x8;
3670 		writel(tmp, fep->hwp + FEC_R_CNTRL);
3671 		return;
3672 	}
3673 
3674 	tmp = readl(fep->hwp + FEC_R_CNTRL);
3675 	tmp &= ~0x8;
3676 	writel(tmp, fep->hwp + FEC_R_CNTRL);
3677 
3678 	if (ndev->flags & IFF_ALLMULTI) {
3679 		/* Catch all multicast addresses, so set the
3680 		 * filter to all 1's
3681 		 */
3682 		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3683 		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3684 
3685 		return;
3686 	}
3687 
3688 	/* Add the addresses to the hash registers */
3689 	netdev_for_each_mc_addr(ha, ndev) {
3690 		/* calculate the crc32 value of the MAC address */
3691 		crc = ether_crc_le(ndev->addr_len, ha->addr);
3692 
3693 		/* only the upper 6 bits (FEC_HASH_BITS) are used,
3694 		 * which select a specific bit in the hash registers
3695 		 */
3696 		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
3697 
3698 		if (hash > 31)
3699 			hash_high |= 1 << (hash - 32);
3700 		else
3701 			hash_low |= 1 << hash;
3702 	}
3703 
3704 	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3705 	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3706 }
3707 
3708 /* Set a MAC change in hardware. */
3709 static int
3710 fec_set_mac_address(struct net_device *ndev, void *p)
3711 {
3712 	struct sockaddr *addr = p;
3713 
3714 	if (addr) {
3715 		if (!is_valid_ether_addr(addr->sa_data))
3716 			return -EADDRNOTAVAIL;
3717 		eth_hw_addr_set(ndev, addr->sa_data);
3718 	}
3719 
3720 	/* Check the netif status here to avoid a system hang in this case:
3721 	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
3722 	 * After ethx is down, all FEC clocks are gated off, so any register
3723 	 * access would hang the system.
3724 	 */
3725 	if (!netif_running(ndev))
3726 		return 0;
3727 
3728 	fec_set_hw_mac_addr(ndev);
3729 
3730 	return 0;
3731 }
3732 
3733 static inline void fec_enet_set_netdev_features(struct net_device *netdev,
3734 	netdev_features_t features)
3735 {
3736 	struct fec_enet_private *fep = netdev_priv(netdev);
3737 	netdev_features_t changed = features ^ netdev->features;
3738 
3739 	netdev->features = features;
3740 
3741 	/* Receive checksum has been changed */
3742 	if (changed & NETIF_F_RXCSUM) {
3743 		if (features & NETIF_F_RXCSUM)
3744 			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3745 		else
3746 			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3747 	}
3748 }
3749 
3750 static int fec_set_features(struct net_device *netdev,
3751 	netdev_features_t features)
3752 {
3753 	struct fec_enet_private *fep = netdev_priv(netdev);
3754 	netdev_features_t changed = features ^ netdev->features;
3755 
3756 	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
3757 		napi_disable(&fep->napi);
3758 		netif_tx_lock_bh(netdev);
3759 		fec_stop(netdev);
3760 		fec_enet_set_netdev_features(netdev, features);
3761 		fec_restart(netdev);
3762 		netif_tx_wake_all_queues(netdev);
3763 		netif_tx_unlock_bh(netdev);
3764 		napi_enable(&fep->napi);
3765 	} else {
3766 		fec_enet_set_netdev_features(netdev, features);
3767 	}
3768 
3769 	return 0;
3770 }
3771 
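/* Queue selection sketch for the AVB-capable MACs handled below: the
 * 3-bit VLAN PCP field occupies the top bits of the TCI, hence the
 * "vlan_tag >> 13", and indexes fec_enet_vlan_pri_to_queue[]. With that
 * table, priorities 0-1 map to queue 0, 2-4 to queue 1 and 5-7 to
 * queue 2; untagged traffic stays on queue 0.
 */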
3772 static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
3773 				 struct net_device *sb_dev)
3774 {
3775 	struct fec_enet_private *fep = netdev_priv(ndev);
3776 	u16 vlan_tag = 0;
3777 
3778 	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
3779 		return netdev_pick_tx(ndev, skb, NULL);
3780 
3781 	/* VLAN is present in the payload. */
3782 	if (eth_type_vlan(skb->protocol)) {
3783 		struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
3784 
3785 		vlan_tag = ntohs(vhdr->h_vlan_TCI);
3786 	/* VLAN is present in the skb but not yet pushed into the payload. */
3787 	} else if (skb_vlan_tag_present(skb)) {
3788 		vlan_tag = skb->vlan_tci;
3789 	} else {
3790 		return vlan_tag;
3791 	}
3792 
3793 	return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
3794 }
3795 
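/* XDP attach sketch: the program pointer is swapped atomically with
 * xchg() so the RX path never sees a half-installed prog, but the
 * datapath is still quiesced (NAPI off, TX disabled) around
 * fec_restart(), presumably so the rings are re-initialized in a
 * consistent state before traffic resumes.
 */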
3796 static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
3797 {
3798 	struct fec_enet_private *fep = netdev_priv(dev);
3799 	bool is_run = netif_running(dev);
3800 	struct bpf_prog *old_prog;
3801 
3802 	switch (bpf->command) {
3803 	case XDP_SETUP_PROG:
3804 		/* No need to support SoCs that require the frame swap,
3805 		 * because the performance wouldn't be better than that of
3806 		 * skb mode.
3807 		 */
3808 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
3809 			return -EOPNOTSUPP;
3810 
3811 		if (!bpf->prog)
3812 			xdp_features_clear_redirect_target(dev);
3813 
3814 		if (is_run) {
3815 			napi_disable(&fep->napi);
3816 			netif_tx_disable(dev);
3817 		}
3818 
3819 		old_prog = xchg(&fep->xdp_prog, bpf->prog);
3820 		if (old_prog)
3821 			bpf_prog_put(old_prog);
3822 
3823 		fec_restart(dev);
3824 
3825 		if (is_run) {
3826 			napi_enable(&fep->napi);
3827 			netif_tx_start_all_queues(dev);
3828 		}
3829 
3830 		if (bpf->prog)
3831 			xdp_features_set_redirect_target(dev, false);
3832 
3833 		return 0;
3834 
3835 	case XDP_SETUP_XSK_POOL:
3836 		return -EOPNOTSUPP;
3837 
3838 	default:
3839 		return -EOPNOTSUPP;
3840 	}
3841 }
3842 
3843 static int
3844 fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
3845 {
3846 	if (unlikely(index < 0))
3847 		return 0;
3848 
3849 	return (index % fep->num_tx_queues);
3850 }
3851 
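/* Transmit-path sketch for the XDP helpers below: frames coming from
 * ndo_xdp_xmit() live outside the page pool and must be DMA-mapped per
 * frame, while XDP_TX frames reuse the RX page-pool mapping and only
 * need a partial dma_sync of the bytes the program may have written
 * (dma_sync_len).
 */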
3852 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
3853 				   struct fec_enet_priv_tx_q *txq,
3854 				   void *frame, u32 dma_sync_len,
3855 				   bool ndo_xmit)
3856 {
3857 	unsigned int index, status, estatus;
3858 	struct bufdesc *bdp;
3859 	dma_addr_t dma_addr;
3860 	int entries_free;
3861 	u16 frame_len;
3862 
3863 	entries_free = fec_enet_get_free_txdesc_num(txq);
3864 	if (entries_free < MAX_SKB_FRAGS + 1) {
3865 		netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
3866 		return -EBUSY;
3867 	}
3868 
3869 	/* Fill in a Tx ring entry */
3870 	bdp = txq->bd.cur;
3871 	status = fec16_to_cpu(bdp->cbd_sc);
3872 	status &= ~BD_ENET_TX_STATS;
3873 
3874 	index = fec_enet_get_bd_index(bdp, &txq->bd);
3875 
3876 	if (ndo_xmit) {
3877 		struct xdp_frame *xdpf = frame;
3878 
3879 		dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
3880 					  xdpf->len, DMA_TO_DEVICE);
3881 		if (dma_mapping_error(&fep->pdev->dev, dma_addr))
3882 			return -ENOMEM;
3883 
3884 		frame_len = xdpf->len;
3885 		txq->tx_buf[index].buf_p = xdpf;
3886 		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
3887 	} else {
3888 		struct xdp_buff *xdpb = frame;
3889 		struct page *page;
3890 
3891 		page = virt_to_page(xdpb->data);
3892 		dma_addr = page_pool_get_dma_addr(page) +
3893 			   (xdpb->data - xdpb->data_hard_start);
3894 		dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
3895 					   dma_sync_len, DMA_BIDIRECTIONAL);
3896 		frame_len = xdpb->data_end - xdpb->data;
3897 		txq->tx_buf[index].buf_p = page;
3898 		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
3899 	}
3900 
3901 	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
3902 	if (fep->bufdesc_ex)
3903 		estatus = BD_ENET_TX_INT;
3904 
3905 	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
3906 	bdp->cbd_datlen = cpu_to_fec16(frame_len);
3907 
3908 	if (fep->bufdesc_ex) {
3909 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3910 
3911 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
3912 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
3913 
3914 		ebdp->cbd_bdu = 0;
3915 		ebdp->cbd_esc = cpu_to_fec32(estatus);
3916 	}
3917 
3918 	/* Make sure the updates to the rest of the descriptor are performed
3919 	 * before transferring ownership.
3920 	 */
3921 	dma_wmb();
3922 
3923 	/* Send it on its way.  Tell the FEC that it's ready, to interrupt
3924 	 * when done, that it's the last BD of the frame, and to append the CRC.
3925 	 */
3926 	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
3927 	bdp->cbd_sc = cpu_to_fec16(status);
3928 
3929 	/* If this was the last BD in the ring, start at the beginning again. */
3930 	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3931 
3932 	/* Make sure the updates to bdp are performed before txq->bd.cur. */
3933 	dma_wmb();
3934 
3935 	txq->bd.cur = bdp;
3936 
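	/* ERR007885 workaround (assumed rationale, inferred from the quirk
	 * name): on affected SoCs a write to the descriptor-active register
	 * can be ignored, so sample it up to four times and only re-arm it
	 * when a read returns zero; without the quirk, always re-arm.
	 */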
3937 	/* Trigger transmission start */
3938 	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
3939 	    !readl(txq->bd.reg_desc_active) ||
3940 	    !readl(txq->bd.reg_desc_active) ||
3941 	    !readl(txq->bd.reg_desc_active) ||
3942 	    !readl(txq->bd.reg_desc_active))
3943 		writel(0, txq->bd.reg_desc_active);
3944 
3945 	return 0;
3946 }
3947 
3948 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
3949 				int cpu, struct xdp_buff *xdp,
3950 				u32 dma_sync_len)
3951 {
3952 	struct fec_enet_priv_tx_q *txq;
3953 	struct netdev_queue *nq;
3954 	int queue, ret;
3955 
3956 	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3957 	txq = fep->tx_queue[queue];
3958 	nq = netdev_get_tx_queue(fep->netdev, queue);
3959 
3960 	__netif_tx_lock(nq, cpu);
3961 
3962 	/* Avoid a tx timeout, as XDP shares the queue with the kernel stack */
3963 	txq_trans_cond_update(nq);
3964 	ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
3965 
3966 	__netif_tx_unlock(nq);
3967 
3968 	return ret;
3969 }
3970 
3971 static int fec_enet_xdp_xmit(struct net_device *dev,
3972 			     int num_frames,
3973 			     struct xdp_frame **frames,
3974 			     u32 flags)
3975 {
3976 	struct fec_enet_private *fep = netdev_priv(dev);
3977 	struct fec_enet_priv_tx_q *txq;
3978 	int cpu = smp_processor_id();
3979 	unsigned int sent_frames = 0;
3980 	struct netdev_queue *nq;
3981 	unsigned int queue;
3982 	int i;
3983 
3984 	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3985 	txq = fep->tx_queue[queue];
3986 	nq = netdev_get_tx_queue(fep->netdev, queue);
3987 
3988 	__netif_tx_lock(nq, cpu);
3989 
3990 	/* Avoid a tx timeout, as XDP shares the queue with the kernel stack */
3991 	txq_trans_cond_update(nq);
3992 	for (i = 0; i < num_frames; i++) {
3993 		if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
3994 			break;
3995 		sent_frames++;
3996 	}
3997 
3998 	__netif_tx_unlock(nq);
3999 
4000 	return sent_frames;
4001 }
4002 
4003 static int fec_hwtstamp_get(struct net_device *ndev,
4004 			    struct kernel_hwtstamp_config *config)
4005 {
4006 	struct fec_enet_private *fep = netdev_priv(ndev);
4007 
4008 	if (!netif_running(ndev))
4009 		return -EINVAL;
4010 
4011 	if (!fep->bufdesc_ex)
4012 		return -EOPNOTSUPP;
4013 
4014 	fec_ptp_get(ndev, config);
4015 
4016 	return 0;
4017 }
4018 
4019 static int fec_hwtstamp_set(struct net_device *ndev,
4020 			    struct kernel_hwtstamp_config *config,
4021 			    struct netlink_ext_ack *extack)
4022 {
4023 	struct fec_enet_private *fep = netdev_priv(ndev);
4024 
4025 	if (!netif_running(ndev))
4026 		return -EINVAL;
4027 
4028 	if (!fep->bufdesc_ex)
4029 		return -EOPNOTSUPP;
4030 
4031 	return fec_ptp_set(ndev, config, extack);
4032 }
4033 
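/* MTU sizing sketch for fec_change_mtu() below: the backing pages must
 * hold the frame plus the Ethernet header, the FCS and the driver's
 * reserved head/tail room (FEC_DRV_RESERVE_SPACE). Assuming 4 KiB pages,
 * a modest reserve and a hypothetical 9000-byte MTU, get_order() would
 * return 2, i.e. a 16 KiB page-pool buffer, and rx_frame_size becomes
 * that buffer size minus the reserved space.
 */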
4034 static int fec_change_mtu(struct net_device *ndev, int new_mtu)
4035 {
4036 	struct fec_enet_private *fep = netdev_priv(ndev);
4037 	int order;
4038 
4039 	if (netif_running(ndev))
4040 		return -EBUSY;
4041 
4042 	order = get_order(new_mtu + ETH_HLEN + ETH_FCS_LEN
4043 			  + FEC_DRV_RESERVE_SPACE);
4044 	fep->rx_frame_size = (PAGE_SIZE << order) - FEC_DRV_RESERVE_SPACE;
4045 	fep->pagepool_order = order;
4046 	WRITE_ONCE(ndev->mtu, new_mtu);
4047 
4048 	return 0;
4049 }
4050 
4051 static const struct net_device_ops fec_netdev_ops = {
4052 	.ndo_open		= fec_enet_open,
4053 	.ndo_stop		= fec_enet_close,
4054 	.ndo_start_xmit		= fec_enet_start_xmit,
4055 	.ndo_select_queue       = fec_enet_select_queue,
4056 	.ndo_set_rx_mode	= set_multicast_list,
4057 	.ndo_validate_addr	= eth_validate_addr,
4058 	.ndo_tx_timeout		= fec_timeout,
4059 	.ndo_set_mac_address	= fec_set_mac_address,
4060 	.ndo_change_mtu		= fec_change_mtu,
4061 	.ndo_eth_ioctl		= phy_do_ioctl_running,
4062 	.ndo_set_features	= fec_set_features,
4063 	.ndo_bpf		= fec_enet_bpf,
4064 	.ndo_xdp_xmit		= fec_enet_xdp_xmit,
4065 	.ndo_hwtstamp_get	= fec_hwtstamp_get,
4066 	.ndo_hwtstamp_set	= fec_hwtstamp_set,
4067 };
4068 
4069 static const unsigned short offset_des_active_rxq[] = {
4070 	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
4071 };
4072 
4073 static const unsigned short offset_des_active_txq[] = {
4074 	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
4075 };
4076 
4077 /*
4078  * XXX: We need to clean up on failure exits here.
4079  */
4081 static int fec_enet_init(struct net_device *ndev)
4082 {
4083 	struct fec_enet_private *fep = netdev_priv(ndev);
4084 	struct bufdesc *cbd_base;
4085 	dma_addr_t bd_dma;
4086 	int bd_size;
4087 	unsigned int i;
4088 	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
4089 			sizeof(struct bufdesc);
4090 	unsigned dsize_log2 = __fls(dsize);
4091 	int ret;
4092 
4093 	WARN_ON(dsize != (1 << dsize_log2));
4094 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
4095 	fep->tx_align = 0xf;
4096 #else
4097 	fep->tx_align = 0x3;
4098 #endif
4099 	fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4100 	fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4101 	fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
4102 	fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;
4103 
4104 	/* Check the DMA mask of the streaming and coherent APIs */
4105 	ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
4106 	if (ret < 0) {
4107 		dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
4108 		return ret;
4109 	}
4110 
4111 	ret = fec_enet_alloc_queue(ndev);
4112 	if (ret)
4113 		return ret;
4114 
4115 	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
4116 
4117 	/* Allocate memory for buffer descriptors. */
4118 	cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma,
4119 				  GFP_KERNEL);
4120 	if (!cbd_base) {
4121 		ret = -ENOMEM;
4122 		goto free_queue_mem;
4123 	}
4124 
4125 	/* Get the Ethernet address */
4126 	ret = fec_get_mac(ndev);
4127 	if (ret)
4128 		goto free_queue_mem;
4129 
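	/* Ring layout sketch: a single contiguous DMA allocation holds all
	 * RX descriptor rings followed by all TX rings. Each queue records
	 * its CPU base, DMA base and a pointer to its last descriptor (for
	 * the wrap bit), then cbd_base/bd_dma advance by ring_size * dsize.
	 */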
4130 	/* Set receive and transmit descriptor base. */
4131 	for (i = 0; i < fep->num_rx_queues; i++) {
4132 		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
4133 		unsigned size = dsize * rxq->bd.ring_size;
4134 
4135 		rxq->bd.qid = i;
4136 		rxq->bd.base = cbd_base;
4137 		rxq->bd.cur = cbd_base;
4138 		rxq->bd.dma = bd_dma;
4139 		rxq->bd.dsize = dsize;
4140 		rxq->bd.dsize_log2 = dsize_log2;
4141 		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
4142 		bd_dma += size;
4143 		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
4144 		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
4145 	}
4146 
4147 	for (i = 0; i < fep->num_tx_queues; i++) {
4148 		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
4149 		unsigned size = dsize * txq->bd.ring_size;
4150 
4151 		txq->bd.qid = i;
4152 		txq->bd.base = cbd_base;
4153 		txq->bd.cur = cbd_base;
4154 		txq->bd.dma = bd_dma;
4155 		txq->bd.dsize = dsize;
4156 		txq->bd.dsize_log2 = dsize_log2;
4157 		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
4158 		bd_dma += size;
4159 		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
4160 		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
4161 	}
4162
4164 	/* The FEC Ethernet specific entries in the device structure */
4165 	ndev->watchdog_timeo = TX_TIMEOUT;
4166 	ndev->netdev_ops = &fec_netdev_ops;
4167 	ndev->ethtool_ops = &fec_enet_ethtool_ops;
4168 
4169 	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
4170 	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
4171 
4172 	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
4173 		/* enable hw VLAN support */
4174 		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4175 
4176 	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
4177 		netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);
4178 
4179 		/* enable hw accelerator */
4180 		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
4181 				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
4182 		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
4183 	}
4184 
4185 	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES)
4186 		fep->tx_align = 0;
4187 
4188 	ndev->hw_features = ndev->features;
4189 
4190 	if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
4191 		ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
4192 				     NETDEV_XDP_ACT_REDIRECT;
4193 
4194 	fec_restart(ndev);
4195 
4196 	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
4197 		fec_enet_clear_ethtool_stats(ndev);
4198 	else
4199 		fec_enet_update_ethtool_stats(ndev);
4200 
4201 	return 0;
4202 
4203 free_queue_mem:
4204 	fec_enet_free_queue(ndev);
4205 	return ret;
4206 }
4207 
4208 static void fec_enet_deinit(struct net_device *ndev)
4209 {
4210 	struct fec_enet_private *fep = netdev_priv(ndev);
4211 
4212 	netif_napi_del(&fep->napi);
4213 	fec_enet_free_queue(ndev);
4214 }
4215 
4216 #ifdef CONFIG_OF
4217 static int fec_reset_phy(struct platform_device *pdev)
4218 {
4219 	struct gpio_desc *phy_reset;
4220 	int msec = 1, phy_post_delay = 0;
4221 	struct device_node *np = pdev->dev.of_node;
4222 	int err;
4223 
4224 	if (!np)
4225 		return 0;
4226 
4227 	err = of_property_read_u32(np, "phy-reset-duration", &msec);
4228 	/* A sane reset duration should not be longer than 1s */
4229 	if (!err && msec > 1000)
4230 		msec = 1;
4231 
4232 	err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
4233 	/* A valid reset post-delay should be less than 1s */
4234 	if (!err && phy_post_delay > 1000)
4235 		return -EINVAL;
4236 
4237 	phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset",
4238 					    GPIOD_OUT_HIGH);
4239 	if (IS_ERR(phy_reset))
4240 		return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset),
4241 				     "failed to get phy-reset-gpios\n");
4242 
4243 	if (!phy_reset)
4244 		return 0;
4245 
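	/* The kernel's timer guidance treats msleep() as too coarse for
	 * short delays, so sub-20ms reset pulses use usleep_range() below
	 * (and the same policy applies to the post-delay further down).
	 */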
4246 	if (msec > 20)
4247 		msleep(msec);
4248 	else
4249 		usleep_range(msec * 1000, msec * 1000 + 1000);
4250 
4251 	gpiod_set_value_cansleep(phy_reset, 0);
4252 
4253 	if (!phy_post_delay)
4254 		return 0;
4255 
4256 	if (phy_post_delay > 20)
4257 		msleep(phy_post_delay);
4258 	else
4259 		usleep_range(phy_post_delay * 1000,
4260 			     phy_post_delay * 1000 + 1000);
4261 
4262 	return 0;
4263 }
4264 #else /* CONFIG_OF */
4265 static int fec_reset_phy(struct platform_device *pdev)
4266 {
4267 	/*
4268 	 * In the case of a non-DT platform probe, the PHY reset has
4269 	 * already been done by the board setup code.
4270 	 */
4271 	return 0;
4272 }
4273 #endif /* CONFIG_OF */
4274 
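/* Queue-count parsing sketch: multi-queue boards may override the
 * single-queue default via DT, e.g. a hypothetical
 *
 *	fsl,num-tx-queues = <3>;
 *	fsl,num-rx-queues = <3>;
 *
 * and any value outside 1..FEC_ENET_MAX_{TX,RX}_QS falls back to 1.
 */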
4275 static void
4276 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
4277 {
4278 	struct device_node *np = pdev->dev.of_node;
4279 
4280 	*num_tx = *num_rx = 1;
4281 
4282 	if (!np || !of_device_is_available(np))
4283 		return;
4284 
4285 	/* parse the number of tx and rx queues */
4286 	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
4287 
4288 	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
4289 
4290 	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
4291 		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
4292 			 *num_tx);
4293 		*num_tx = 1;
4294 		return;
4295 	}
4296 
4297 	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
4298 		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
4299 			 *num_rx);
4300 		*num_rx = 1;
4301 		return;
4302 	}
4304 }
4305 
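/* IRQ-count sketch: the last interrupt line is reserved for the PPS
 * output, so a two-line platform description leaves one line for the
 * MAC itself; counts above FEC_IRQ_NUM are clamped, and a missing or
 * broken description still yields the one mandatory MAC interrupt.
 */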
4306 static int fec_enet_get_irq_cnt(struct platform_device *pdev)
4307 {
4308 	int irq_cnt = platform_irq_count(pdev);
4309 
4310 	if (irq_cnt > FEC_IRQ_NUM)
4311 		irq_cnt = FEC_IRQ_NUM;	/* last for pps */
4312 	else if (irq_cnt == 2)
4313 		irq_cnt = 1;	/* last for pps */
4314 	else if (irq_cnt <= 0)
4315 		irq_cnt = 1;	/* At least 1 irq is needed */
4316 	return irq_cnt;
4317 }
4318 
4319 static void fec_enet_get_wakeup_irq(struct platform_device *pdev)
4320 {
4321 	struct net_device *ndev = platform_get_drvdata(pdev);
4322 	struct fec_enet_private *fep = netdev_priv(ndev);
4323 
4324 	if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2)
4325 		fep->wake_irq = fep->irq[2];
4326 	else
4327 		fep->wake_irq = fep->irq[0];
4328 }
4329 
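/* Stop-mode wiring sketch (assumed binding layout, matching the parsing
 * below): "fsl,stop-mode" is a <phandle reg bit> triplet, e.g. a
 * hypothetical
 *
 *	fsl,stop-mode = <&gpr 0x34 27>;
 *
 * where out_val[1] is the GPR register offset and out_val[2] the bit
 * that gates the FEC in stop mode.
 */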
4330 static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
4331 				   struct device_node *np)
4332 {
4333 	struct device_node *gpr_np;
4334 	u32 out_val[3];
4335 	int ret = 0;
4336 
4337 	gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
4338 	if (!gpr_np)
4339 		return 0;
4340 
4341 	ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
4342 					 ARRAY_SIZE(out_val));
4343 	if (ret) {
4344 		dev_dbg(&fep->pdev->dev, "no stop mode property\n");
4345 		goto out;
4346 	}
4347 
4348 	fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
4349 	if (IS_ERR(fep->stop_gpr.gpr)) {
4350 		dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
4351 		ret = PTR_ERR(fep->stop_gpr.gpr);
4352 		fep->stop_gpr.gpr = NULL;
4353 		goto out;
4354 	}
4355 
4356 	fep->stop_gpr.reg = out_val[1];
4357 	fep->stop_gpr.bit = out_val[2];
4358 
4359 out:
4360 	of_node_put(gpr_np);
4361 
4362 	return ret;
4363 }
4364 
4365 static int
4366 fec_probe(struct platform_device *pdev)
4367 {
4368 	struct fec_enet_private *fep;
4369 	struct fec_platform_data *pdata;
4370 	phy_interface_t interface;
4371 	struct net_device *ndev;
4372 	int i, irq, ret = 0;
4373 	static int dev_id;
4374 	struct device_node *np = pdev->dev.of_node, *phy_node;
4375 	int num_tx_qs;
4376 	int num_rx_qs;
4377 	char irq_name[8];
4378 	int irq_cnt;
4379 	const struct fec_devinfo *dev_info;
4380 
4381 	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
4382 
4383 	/* Init network device */
4384 	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
4385 				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
4386 	if (!ndev)
4387 		return -ENOMEM;
4388 
4389 	SET_NETDEV_DEV(ndev, &pdev->dev);
4390 
4391 	/* setup board info structure */
4392 	fep = netdev_priv(ndev);
4393 
4394 	dev_info = device_get_match_data(&pdev->dev);
4395 	if (!dev_info)
4396 		dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data;
4397 	if (dev_info)
4398 		fep->quirks = dev_info->quirks;
4399 
4400 	fep->netdev = ndev;
4401 	fep->num_rx_queues = num_rx_qs;
4402 	fep->num_tx_queues = num_tx_qs;
4403 
4404 	/* enable pause frame autonegotiation by default */
4405 	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
4406 		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
4407 
4408 	/* Select default pin state */
4409 	pinctrl_pm_select_default_state(&pdev->dev);
4410 
4411 	fep->hwp = devm_platform_ioremap_resource(pdev, 0);
4412 	if (IS_ERR(fep->hwp)) {
4413 		ret = PTR_ERR(fep->hwp);
4414 		goto failed_ioremap;
4415 	}
4416 
4417 	fep->pdev = pdev;
4418 	fep->dev_id = dev_id++;
4419 
4420 	platform_set_drvdata(pdev, ndev);
4421 
4422 	if ((of_machine_is_compatible("fsl,imx6q") ||
4423 	     of_machine_is_compatible("fsl,imx6dl")) &&
4424 	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
4425 		fep->quirks |= FEC_QUIRK_ERR006687;
4426 
4427 	ret = fec_enet_ipc_handle_init(fep);
4428 	if (ret)
4429 		goto failed_ipc_init;
4430 
4431 	if (of_property_read_bool(np, "fsl,magic-packet"))
4432 		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
4433 
4434 	ret = fec_enet_init_stop_mode(fep, np);
4435 	if (ret)
4436 		goto failed_stop_mode;
4437 
4438 	phy_node = of_parse_phandle(np, "phy-handle", 0);
4439 	if (!phy_node && of_phy_is_fixed_link(np)) {
4440 		ret = of_phy_register_fixed_link(np);
4441 		if (ret < 0) {
4442 			dev_err(&pdev->dev,
4443 				"broken fixed-link specification\n");
4444 			goto failed_phy;
4445 		}
4446 		phy_node = of_node_get(np);
4447 	}
4448 	fep->phy_node = phy_node;
4449 
4450 	ret = of_get_phy_mode(pdev->dev.of_node, &interface);
4451 	if (ret) {
4452 		pdata = dev_get_platdata(&pdev->dev);
4453 		if (pdata)
4454 			fep->phy_interface = pdata->phy;
4455 		else
4456 			fep->phy_interface = PHY_INTERFACE_MODE_MII;
4457 	} else {
4458 		fep->phy_interface = interface;
4459 	}
4460 
4461 	ret = fec_enet_parse_rgmii_delay(fep, np);
4462 	if (ret)
4463 		goto failed_rgmii_delay;
4464 
4465 	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
4466 	if (IS_ERR(fep->clk_ipg)) {
4467 		ret = PTR_ERR(fep->clk_ipg);
4468 		goto failed_clk;
4469 	}
4470 
4471 	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
4472 	if (IS_ERR(fep->clk_ahb)) {
4473 		ret = PTR_ERR(fep->clk_ahb);
4474 		goto failed_clk;
4475 	}
4476 
4477 	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
4478 
4479 	/* enet_out is optional, depends on board */
4480 	fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
4481 	if (IS_ERR(fep->clk_enet_out)) {
4482 		ret = PTR_ERR(fep->clk_enet_out);
4483 		goto failed_clk;
4484 	}
4485 
4486 	fep->ptp_clk_on = false;
4487 	mutex_init(&fep->ptp_clk_mutex);
4488 
4489 	/* clk_ref is optional, depends on board */
4490 	fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
4491 	if (IS_ERR(fep->clk_ref)) {
4492 		ret = PTR_ERR(fep->clk_ref);
4493 		goto failed_clk;
4494 	}
4495 	fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
4496 
4497 	/* clk_2x_txclk is optional, depends on board */
4498 	if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
4499 		fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
4500 		if (IS_ERR(fep->clk_2x_txclk))
4501 			fep->clk_2x_txclk = NULL;
4502 	}
4503 
4504 	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
4505 	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
4506 	if (IS_ERR(fep->clk_ptp)) {
4507 		fep->clk_ptp = NULL;
4508 		fep->bufdesc_ex = false;
4509 	}
4510 
4511 	ret = fec_enet_clk_enable(ndev, true);
4512 	if (ret)
4513 		goto failed_clk;
4514 
4515 	ret = clk_prepare_enable(fep->clk_ipg);
4516 	if (ret)
4517 		goto failed_clk_ipg;
4518 	ret = clk_prepare_enable(fep->clk_ahb);
4519 	if (ret)
4520 		goto failed_clk_ahb;
4521 
4522 	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
4523 	if (!IS_ERR(fep->reg_phy)) {
4524 		ret = regulator_enable(fep->reg_phy);
4525 		if (ret) {
4526 			dev_err(&pdev->dev,
4527 				"Failed to enable phy regulator: %d\n", ret);
4528 			goto failed_regulator;
4529 		}
4530 	} else {
4531 		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
4532 			ret = -EPROBE_DEFER;
4533 			goto failed_regulator;
4534 		}
4535 		fep->reg_phy = NULL;
4536 	}
4537 
4538 	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
4539 	pm_runtime_use_autosuspend(&pdev->dev);
4540 	pm_runtime_get_noresume(&pdev->dev);
4541 	pm_runtime_set_active(&pdev->dev);
4542 	pm_runtime_enable(&pdev->dev);
4543 
4544 	ret = fec_reset_phy(pdev);
4545 	if (ret)
4546 		goto failed_reset;
4547 
4548 	irq_cnt = fec_enet_get_irq_cnt(pdev);
4549 	if (fep->bufdesc_ex)
4550 		fec_ptp_init(pdev, irq_cnt);
4551 
4552 	ret = fec_enet_init(ndev);
4553 	if (ret)
4554 		goto failed_init;
4555 
4556 	for (i = 0; i < irq_cnt; i++) {
4557 		snprintf(irq_name, sizeof(irq_name), "int%d", i);
4558 		irq = platform_get_irq_byname_optional(pdev, irq_name);
4559 		if (irq < 0)
4560 			irq = platform_get_irq(pdev, i);
4561 		if (irq < 0) {
4562 			ret = irq;
4563 			goto failed_irq;
4564 		}
4565 		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
4566 				       0, pdev->name, ndev);
4567 		if (ret)
4568 			goto failed_irq;
4569 
4570 		fep->irq[i] = irq;
4571 	}
4572 
4573 	/* Decide which interrupt line is wakeup capable */
4574 	fec_enet_get_wakeup_irq(pdev);
4575 
4576 	ret = fec_enet_mii_init(pdev);
4577 	if (ret)
4578 		goto failed_mii_init;
4579 
4580 	/* Carrier starts down, phylib will bring it up */
4581 	netif_carrier_off(ndev);
4582 	fec_enet_clk_enable(ndev, false);
4583 	pinctrl_pm_select_sleep_state(&pdev->dev);
4584 
4585 	fep->pagepool_order = 0;
4586 	fep->rx_frame_size = FEC_ENET_RX_FRSIZE;
4587 
4588 	if (fep->quirks & FEC_QUIRK_JUMBO_FRAME)
4589 		fep->max_buf_size = MAX_JUMBO_BUF_SIZE;
4590 	else
4591 		fep->max_buf_size = PKT_MAXBUF_SIZE;
4592 
4593 	ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
4594 
4595 	ret = register_netdev(ndev);
4596 	if (ret)
4597 		goto failed_register;
4598 
4599 	device_init_wakeup(&ndev->dev, fep->wol_flag &
4600 			   FEC_WOL_HAS_MAGIC_PACKET);
4601 
4602 	if (fep->bufdesc_ex && fep->ptp_clock)
4603 		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
4604 
4605 	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
4606 
4607 	pm_runtime_put_autosuspend(&pdev->dev);
4608 
4609 	return 0;
4610 
4611 failed_register:
4612 	fec_enet_mii_remove(fep);
4613 failed_mii_init:
4614 failed_irq:
4615 	fec_enet_deinit(ndev);
4616 failed_init:
4617 	fec_ptp_stop(pdev);
4618 failed_reset:
4619 	pm_runtime_put_noidle(&pdev->dev);
4620 	pm_runtime_disable(&pdev->dev);
4621 	if (fep->reg_phy)
4622 		regulator_disable(fep->reg_phy);
4623 failed_regulator:
4624 	clk_disable_unprepare(fep->clk_ahb);
4625 failed_clk_ahb:
4626 	clk_disable_unprepare(fep->clk_ipg);
4627 failed_clk_ipg:
4628 	fec_enet_clk_enable(ndev, false);
4629 failed_clk:
4630 failed_rgmii_delay:
4631 	if (of_phy_is_fixed_link(np))
4632 		of_phy_deregister_fixed_link(np);
4633 	of_node_put(phy_node);
4634 failed_stop_mode:
4635 failed_ipc_init:
4636 failed_phy:
4637 	dev_id--;
4638 failed_ioremap:
4639 	free_netdev(ndev);
4640 
4641 	return ret;
4642 }
4643 
4644 static void
4645 fec_drv_remove(struct platform_device *pdev)
4646 {
4647 	struct net_device *ndev = platform_get_drvdata(pdev);
4648 	struct fec_enet_private *fep = netdev_priv(ndev);
4649 	struct device_node *np = pdev->dev.of_node;
4650 	int ret;
4651 
4652 	ret = pm_runtime_get_sync(&pdev->dev);
4653 	if (ret < 0)
4654 		dev_err(&pdev->dev,
4655 			"Failed to resume device in remove callback (%pe)\n",
4656 			ERR_PTR(ret));
4657 
4658 	cancel_work_sync(&fep->tx_timeout_work);
4659 	fec_ptp_stop(pdev);
4660 	unregister_netdev(ndev);
4661 	fec_enet_mii_remove(fep);
4662 	if (fep->reg_phy)
4663 		regulator_disable(fep->reg_phy);
4664 
4665 	if (of_phy_is_fixed_link(np))
4666 		of_phy_deregister_fixed_link(np);
4667 	of_node_put(fep->phy_node);
4668 
4669 	/* If pm_runtime_get_sync() failed, the clocks are still off, so
4670 	 * skip disabling them again.
4671 	 */
4672 	if (ret >= 0) {
4673 		clk_disable_unprepare(fep->clk_ahb);
4674 		clk_disable_unprepare(fep->clk_ipg);
4675 	}
4676 	pm_runtime_put_noidle(&pdev->dev);
4677 	pm_runtime_disable(&pdev->dev);
4678 
4679 	fec_enet_deinit(ndev);
4680 	free_netdev(ndev);
4681 }
4682 
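/* Suspend flow sketch: with Wake-on-LAN armed, only the wakeup
 * interrupt stays enabled and the SoC-specific stop mode is entered so
 * magic packets can still be matched; without WoL, all FEC interrupts
 * are disabled and the pins drop to their sleep state before the
 * clocks are gated.
 */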
4683 static int fec_suspend(struct device *dev)
4684 {
4685 	struct net_device *ndev = dev_get_drvdata(dev);
4686 	struct fec_enet_private *fep = netdev_priv(ndev);
4687 	int ret;
4688 
4689 	rtnl_lock();
4690 	if (netif_running(ndev)) {
4691 		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
4692 			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
4693 		phy_stop(ndev->phydev);
4694 		napi_disable(&fep->napi);
4695 		netif_tx_lock_bh(ndev);
4696 		netif_device_detach(ndev);
4697 		netif_tx_unlock_bh(ndev);
4698 		fec_stop(ndev);
4699 		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
4700 			fec_irqs_disable(ndev);
4701 			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
4702 		} else {
4703 			fec_irqs_disable_except_wakeup(ndev);
4704 			if (fep->wake_irq > 0) {
4705 				disable_irq(fep->wake_irq);
4706 				enable_irq_wake(fep->wake_irq);
4707 			}
4708 			fec_enet_stop_mode(fep, true);
4709 		}
4710 		/* It's safe to disable clocks since interrupts are masked */
4711 		fec_enet_clk_enable(ndev, false);
4712 
4713 		fep->rpm_active = !pm_runtime_status_suspended(dev);
4714 		if (fep->rpm_active) {
4715 			ret = pm_runtime_force_suspend(dev);
4716 			if (ret < 0) {
4717 				rtnl_unlock();
4718 				return ret;
4719 			}
4720 		}
4721 	}
4722 	rtnl_unlock();
4723 
4724 	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
4725 		regulator_disable(fep->reg_phy);
4726 
4727 	/* If the SoC supplies the PHY clock, disabling that clock takes the
4728 	 * PHY link down; likewise, if the SoC controls the PHY regulator,
4729 	 * disabling the regulator takes the link down.
4730 	 */
4730 	if (fep->clk_enet_out || fep->reg_phy)
4731 		fep->link = 0;
4732 
4733 	return 0;
4734 }
4735 
4736 static int fec_resume(struct device *dev)
4737 {
4738 	struct net_device *ndev = dev_get_drvdata(dev);
4739 	struct fec_enet_private *fep = netdev_priv(ndev);
4740 	int ret;
4741 	int val;
4742 
4743 	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
4744 		ret = regulator_enable(fep->reg_phy);
4745 		if (ret)
4746 			return ret;
4747 	}
4748 
4749 	rtnl_lock();
4750 	if (netif_running(ndev)) {
4751 		if (fep->rpm_active)
4752 			pm_runtime_force_resume(dev);
4753 
4754 		ret = fec_enet_clk_enable(ndev, true);
4755 		if (ret) {
4756 			rtnl_unlock();
4757 			goto failed_clk;
4758 		}
4759 		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
4760 			fec_enet_stop_mode(fep, false);
4761 			if (fep->wake_irq) {
4762 				disable_irq_wake(fep->wake_irq);
4763 				enable_irq(fep->wake_irq);
4764 			}
4765 
4766 			val = readl(fep->hwp + FEC_ECNTRL);
4767 			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
4768 			writel(val, fep->hwp + FEC_ECNTRL);
4769 			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
4770 		} else {
4771 			pinctrl_pm_select_default_state(&fep->pdev->dev);
4772 		}
4773 		fec_restart(ndev);
4774 		netif_tx_lock_bh(ndev);
4775 		netif_device_attach(ndev);
4776 		netif_tx_unlock_bh(ndev);
4777 		napi_enable(&fep->napi);
4778 		phy_init_hw(ndev->phydev);
4779 		phy_start(ndev->phydev);
4780 	}
4781 	rtnl_unlock();
4782 
4783 	return 0;
4784 
4785 failed_clk:
4786 	if (fep->reg_phy)
4787 		regulator_disable(fep->reg_phy);
4788 	return ret;
4789 }
4790 
4791 static int fec_runtime_suspend(struct device *dev)
4792 {
4793 	struct net_device *ndev = dev_get_drvdata(dev);
4794 	struct fec_enet_private *fep = netdev_priv(ndev);
4795 
4796 	clk_disable_unprepare(fep->clk_ahb);
4797 	clk_disable_unprepare(fep->clk_ipg);
4798 
4799 	return 0;
4800 }
4801 
4802 static int fec_runtime_resume(struct device *dev)
4803 {
4804 	struct net_device *ndev = dev_get_drvdata(dev);
4805 	struct fec_enet_private *fep = netdev_priv(ndev);
4806 	int ret;
4807 
4808 	ret = clk_prepare_enable(fep->clk_ahb);
4809 	if (ret)
4810 		return ret;
4811 	ret = clk_prepare_enable(fep->clk_ipg);
4812 	if (ret)
4813 		goto failed_clk_ipg;
4814 
4815 	return 0;
4816 
4817 failed_clk_ipg:
4818 	clk_disable_unprepare(fep->clk_ahb);
4819 	return ret;
4820 }
4821 
4822 static const struct dev_pm_ops fec_pm_ops = {
4823 	SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
4824 	RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
4825 };
4826 
4827 static struct platform_driver fec_driver = {
4828 	.driver	= {
4829 		.name	= DRIVER_NAME,
4830 		.pm	= pm_ptr(&fec_pm_ops),
4831 		.of_match_table = fec_dt_ids,
4832 		.suppress_bind_attrs = true,
4833 	},
4834 	.id_table = fec_devtype,
4835 	.probe	= fec_probe,
4836 	.remove = fec_drv_remove,
4837 };
4838 
4839 module_platform_driver(fec_driver);
4840 
4841 MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
4842 MODULE_LICENSE("GPL");
4843