xref: /linux/drivers/net/ethernet/freescale/fec_main.c (revision 0ad9617c78acbc71373fb341a6f75d4012b01d69)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
4  * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
5  *
6  * Right now, I am very wasteful with the buffers.  I allocate memory
7  * pages and then divide them into 2K frame buffers.  This way I know I
8  * have buffers large enough to hold one frame within one buffer descriptor.
9  * Once I get this working, I will use 64 or 128 byte CPM buffers, which
10  * will be much more memory efficient and will easily handle lots of
11  * small packets.
12  *
13  * Much better multiple PHY support by Magnus Damm.
14  * Copyright (c) 2000 Ericsson Radio Systems AB.
15  *
16  * Support for FEC controller of ColdFire processors.
17  * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
18  *
19  * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
20  * Copyright (c) 2004-2006 Macq Electronique SA.
21  *
22  * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
23  */
24 
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/string.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/ptrace.h>
30 #include <linux/errno.h>
31 #include <linux/ioport.h>
32 #include <linux/slab.h>
33 #include <linux/interrupt.h>
34 #include <linux/delay.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/skbuff.h>
38 #include <linux/in.h>
39 #include <linux/ip.h>
40 #include <net/ip.h>
41 #include <net/page_pool/helpers.h>
42 #include <net/selftests.h>
43 #include <net/tso.h>
44 #include <linux/tcp.h>
45 #include <linux/udp.h>
46 #include <linux/icmp.h>
47 #include <linux/spinlock.h>
48 #include <linux/workqueue.h>
49 #include <linux/bitops.h>
50 #include <linux/io.h>
51 #include <linux/irq.h>
52 #include <linux/clk.h>
53 #include <linux/crc32.h>
54 #include <linux/platform_device.h>
55 #include <linux/property.h>
56 #include <linux/mdio.h>
57 #include <linux/phy.h>
58 #include <linux/fec.h>
59 #include <linux/of.h>
60 #include <linux/of_mdio.h>
61 #include <linux/of_net.h>
62 #include <linux/regulator/consumer.h>
63 #include <linux/if_vlan.h>
64 #include <linux/pinctrl/consumer.h>
65 #include <linux/gpio/consumer.h>
66 #include <linux/prefetch.h>
67 #include <linux/mfd/syscon.h>
68 #include <linux/regmap.h>
69 #include <soc/imx/cpuidle.h>
70 #include <linux/filter.h>
71 #include <linux/bpf.h>
72 #include <linux/bpf_trace.h>
73 
74 #include <asm/cacheflush.h>
75 
76 #include "fec.h"
77 
78 static void set_multicast_list(struct net_device *ndev);
79 static void fec_enet_itr_coal_set(struct net_device *ndev);
80 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
81 				int cpu, struct xdp_buff *xdp,
82 				u32 dma_sync_len);
83 
84 #define DRIVER_NAME	"fec"
85 
86 static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
87 
88 #define FEC_ENET_RSEM_V	0x84
89 #define FEC_ENET_RSFL_V	16
90 #define FEC_ENET_RAEM_V	0x8
91 #define FEC_ENET_RAFL_V	0x8
92 #define FEC_ENET_OPD_V	0xFFF0
93 #define FEC_MDIO_PM_TIMEOUT  100 /* ms */
94 
95 #define FEC_ENET_XDP_PASS          0
96 #define FEC_ENET_XDP_CONSUMED      BIT(0)
97 #define FEC_ENET_XDP_TX            BIT(1)
98 #define FEC_ENET_XDP_REDIR         BIT(2)
99 
100 struct fec_devinfo {
101 	u32 quirks;
102 };
103 
104 static const struct fec_devinfo fec_imx25_info = {
105 	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
106 		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
107 };
108 
109 static const struct fec_devinfo fec_imx27_info = {
110 	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
111 		  FEC_QUIRK_HAS_MDIO_C45,
112 };
113 
114 static const struct fec_devinfo fec_imx28_info = {
115 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
116 		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
117 		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
118 		  FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
119 };
120 
121 static const struct fec_devinfo fec_imx6q_info = {
122 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
123 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
124 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
125 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
126 		  FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
127 };
128 
129 static const struct fec_devinfo fec_mvf600_info = {
130 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
131 		  FEC_QUIRK_HAS_MDIO_C45,
132 };
133 
134 static const struct fec_devinfo fec_imx6x_info = {
135 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
136 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
137 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
138 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
139 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
140 		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
141 		  FEC_QUIRK_HAS_MDIO_C45,
142 };
143 
144 static const struct fec_devinfo fec_imx6ul_info = {
145 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
146 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
147 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
148 		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
149 		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
150 		  FEC_QUIRK_HAS_MDIO_C45,
151 };
152 
153 static const struct fec_devinfo fec_imx8mq_info = {
154 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
155 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
156 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
157 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
158 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
159 		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
160 		  FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
161 		  FEC_QUIRK_HAS_MDIO_C45,
162 };
163 
164 static const struct fec_devinfo fec_imx8qm_info = {
165 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
166 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
167 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
168 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
169 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
170 		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
171 		  FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
172 };
173 
174 static const struct fec_devinfo fec_s32v234_info = {
175 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
176 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
177 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
178 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
179 		  FEC_QUIRK_HAS_MDIO_C45,
180 };
181 
182 static struct platform_device_id fec_devtype[] = {
183 	{
184 		/* keep it for coldfire */
185 		.name = DRIVER_NAME,
186 		.driver_data = 0,
187 	}, {
188 		/* sentinel */
189 	}
190 };
191 MODULE_DEVICE_TABLE(platform, fec_devtype);
192 
193 static const struct of_device_id fec_dt_ids[] = {
194 	{ .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, },
195 	{ .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, },
196 	{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
197 	{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
198 	{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
199 	{ .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
200 	{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
201 	{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
202 	{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
203 	{ .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, },
204 	{ /* sentinel */ }
205 };
206 MODULE_DEVICE_TABLE(of, fec_dt_ids);
207 
208 static unsigned char macaddr[ETH_ALEN];
209 module_param_array(macaddr, byte, NULL, 0);
210 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
211 
212 #if defined(CONFIG_M5272)
213 /*
214  * Some hardware gets it MAC address out of local flash memory.
215  * if this is non-zero then assume it is the address to get MAC from.
216  */
217 #if defined(CONFIG_NETtel)
218 #define	FEC_FLASHMAC	0xf0006006
219 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
220 #define	FEC_FLASHMAC	0xf0006000
221 #elif defined(CONFIG_CANCam)
222 #define	FEC_FLASHMAC	0xf0020000
223 #elif defined (CONFIG_M5272C3)
224 #define	FEC_FLASHMAC	(0xffe04000 + 4)
225 #elif defined(CONFIG_MOD5272)
226 #define FEC_FLASHMAC	0xffc0406b
227 #else
228 #define	FEC_FLASHMAC	0
229 #endif
230 #endif /* CONFIG_M5272 */
231 
232 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
233  *
234  * 2048 byte skbufs are allocated. However, alignment requirements
235  * varies between FEC variants. Worst case is 64, so round down by 64.
236  */
237 #define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
238 #define PKT_MINBUF_SIZE		64
239 
240 /* FEC receive acceleration */
241 #define FEC_RACC_IPDIS		BIT(1)
242 #define FEC_RACC_PRODIS		BIT(2)
243 #define FEC_RACC_SHIFT16	BIT(7)
244 #define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
245 
246 /* MIB Control Register */
247 #define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)
248 
249 /*
250  * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
251  * size bits. Other FEC hardware does not, so we need to take that into
252  * account when setting it.
253  */
254 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
255     defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
256     defined(CONFIG_ARM64)
257 #define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
258 #else
259 #define	OPT_FRAME_SIZE	0
260 #endif
261 
262 /* FEC MII MMFR bits definition */
263 #define FEC_MMFR_ST		(1 << 30)
264 #define FEC_MMFR_ST_C45		(0)
265 #define FEC_MMFR_OP_READ	(2 << 28)
266 #define FEC_MMFR_OP_READ_C45	(3 << 28)
267 #define FEC_MMFR_OP_WRITE	(1 << 28)
268 #define FEC_MMFR_OP_ADDR_WRITE	(0)
269 #define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
270 #define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
271 #define FEC_MMFR_TA		(2 << 16)
272 #define FEC_MMFR_DATA(v)	(v & 0xffff)
273 /* FEC ECR bits definition */
274 #define FEC_ECR_RESET           BIT(0)
275 #define FEC_ECR_ETHEREN         BIT(1)
276 #define FEC_ECR_MAGICEN         BIT(2)
277 #define FEC_ECR_SLEEP           BIT(3)
278 #define FEC_ECR_EN1588          BIT(4)
279 #define FEC_ECR_BYTESWP         BIT(8)
280 /* FEC RCR bits definition */
281 #define FEC_RCR_LOOP            BIT(0)
282 #define FEC_RCR_HALFDPX         BIT(1)
283 #define FEC_RCR_MII             BIT(2)
284 #define FEC_RCR_PROMISC         BIT(3)
285 #define FEC_RCR_BC_REJ          BIT(4)
286 #define FEC_RCR_FLOWCTL         BIT(5)
287 #define FEC_RCR_RMII            BIT(8)
288 #define FEC_RCR_10BASET         BIT(9)
289 /* TX WMARK bits */
290 #define FEC_TXWMRK_STRFWD       BIT(8)
291 
292 #define FEC_MII_TIMEOUT		30000 /* us */
293 
294 /* Transmitter timeout */
295 #define TX_TIMEOUT (2 * HZ)
296 
297 #define FEC_PAUSE_FLAG_AUTONEG	0x1
298 #define FEC_PAUSE_FLAG_ENABLE	0x2
299 #define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
300 #define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
301 #define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)
302 
303 /* Max number of allowed TCP segments for software TSO */
304 #define FEC_MAX_TSO_SEGS	100
305 #define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
306 
307 #define IS_TSO_HEADER(txq, addr) \
308 	((addr >= txq->tso_hdrs_dma) && \
309 	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
310 
311 static int mii_cnt;
312 
313 static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
314 					     struct bufdesc_prop *bd)
315 {
316 	return (bdp >= bd->last) ? bd->base
317 			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
318 }
319 
320 static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
321 					     struct bufdesc_prop *bd)
322 {
323 	return (bdp <= bd->base) ? bd->last
324 			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
325 }
326 
327 static int fec_enet_get_bd_index(struct bufdesc *bdp,
328 				 struct bufdesc_prop *bd)
329 {
330 	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
331 }
332 
333 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
334 {
335 	int entries;
336 
337 	entries = (((const char *)txq->dirty_tx -
338 			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
339 
340 	return entries >= 0 ? entries : entries + txq->bd.ring_size;
341 }
342 
343 static void swap_buffer(void *bufaddr, int len)
344 {
345 	int i;
346 	unsigned int *buf = bufaddr;
347 
348 	for (i = 0; i < len; i += 4, buf++)
349 		swab32s(buf);
350 }
351 
352 static void fec_dump(struct net_device *ndev)
353 {
354 	struct fec_enet_private *fep = netdev_priv(ndev);
355 	struct bufdesc *bdp;
356 	struct fec_enet_priv_tx_q *txq;
357 	int index = 0;
358 
359 	netdev_info(ndev, "TX ring dump\n");
360 	pr_info("Nr     SC     addr       len  SKB\n");
361 
362 	txq = fep->tx_queue[0];
363 	bdp = txq->bd.base;
364 
365 	do {
366 		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
367 			index,
368 			bdp == txq->bd.cur ? 'S' : ' ',
369 			bdp == txq->dirty_tx ? 'H' : ' ',
370 			fec16_to_cpu(bdp->cbd_sc),
371 			fec32_to_cpu(bdp->cbd_bufaddr),
372 			fec16_to_cpu(bdp->cbd_datlen),
373 			txq->tx_buf[index].buf_p);
374 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
375 		index++;
376 	} while (bdp != txq->bd.base);
377 }
378 
379 /*
380  * Coldfire does not support DMA coherent allocations, and has historically used
381  * a band-aid with a manual flush in fec_enet_rx_queue.
382  */
383 #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
384 static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
385 		gfp_t gfp)
386 {
387 	return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
388 }
389 
390 static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
391 		dma_addr_t handle)
392 {
393 	dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL);
394 }
395 #else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
396 static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
397 		gfp_t gfp)
398 {
399 	return dma_alloc_coherent(dev, size, handle, gfp);
400 }
401 
402 static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
403 		dma_addr_t handle)
404 {
405 	dma_free_coherent(dev, size, cpu_addr, handle);
406 }
407 #endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
408 
409 struct fec_dma_devres {
410 	size_t		size;
411 	void		*vaddr;
412 	dma_addr_t	dma_handle;
413 };
414 
415 static void fec_dmam_release(struct device *dev, void *res)
416 {
417 	struct fec_dma_devres *this = res;
418 
419 	fec_dma_free(dev, this->size, this->vaddr, this->dma_handle);
420 }
421 
422 static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle,
423 		gfp_t gfp)
424 {
425 	struct fec_dma_devres *dr;
426 	void *vaddr;
427 
428 	dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp);
429 	if (!dr)
430 		return NULL;
431 	vaddr = fec_dma_alloc(dev, size, handle, gfp);
432 	if (!vaddr) {
433 		devres_free(dr);
434 		return NULL;
435 	}
436 	dr->vaddr = vaddr;
437 	dr->dma_handle = *handle;
438 	dr->size = size;
439 	devres_add(dev, dr);
440 	return vaddr;
441 }
442 
443 static inline bool is_ipv4_pkt(struct sk_buff *skb)
444 {
445 	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
446 }
447 
448 static int
449 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
450 {
451 	/* Only run for packets requiring a checksum. */
452 	if (skb->ip_summed != CHECKSUM_PARTIAL)
453 		return 0;
454 
455 	if (unlikely(skb_cow_head(skb, 0)))
456 		return -1;
457 
458 	if (is_ipv4_pkt(skb))
459 		ip_hdr(skb)->check = 0;
460 	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
461 
462 	return 0;
463 }
464 
465 static int
466 fec_enet_create_page_pool(struct fec_enet_private *fep,
467 			  struct fec_enet_priv_rx_q *rxq, int size)
468 {
469 	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
470 	struct page_pool_params pp_params = {
471 		.order = 0,
472 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
473 		.pool_size = size,
474 		.nid = dev_to_node(&fep->pdev->dev),
475 		.dev = &fep->pdev->dev,
476 		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
477 		.offset = FEC_ENET_XDP_HEADROOM,
478 		.max_len = FEC_ENET_RX_FRSIZE,
479 	};
480 	int err;
481 
482 	rxq->page_pool = page_pool_create(&pp_params);
483 	if (IS_ERR(rxq->page_pool)) {
484 		err = PTR_ERR(rxq->page_pool);
485 		rxq->page_pool = NULL;
486 		return err;
487 	}
488 
489 	err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
490 	if (err < 0)
491 		goto err_free_pp;
492 
493 	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
494 					 rxq->page_pool);
495 	if (err)
496 		goto err_unregister_rxq;
497 
498 	return 0;
499 
500 err_unregister_rxq:
501 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
502 err_free_pp:
503 	page_pool_destroy(rxq->page_pool);
504 	rxq->page_pool = NULL;
505 	return err;
506 }
507 
508 static struct bufdesc *
509 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
510 			     struct sk_buff *skb,
511 			     struct net_device *ndev)
512 {
513 	struct fec_enet_private *fep = netdev_priv(ndev);
514 	struct bufdesc *bdp = txq->bd.cur;
515 	struct bufdesc_ex *ebdp;
516 	int nr_frags = skb_shinfo(skb)->nr_frags;
517 	int frag, frag_len;
518 	unsigned short status;
519 	unsigned int estatus = 0;
520 	skb_frag_t *this_frag;
521 	unsigned int index;
522 	void *bufaddr;
523 	dma_addr_t addr;
524 	int i;
525 
526 	for (frag = 0; frag < nr_frags; frag++) {
527 		this_frag = &skb_shinfo(skb)->frags[frag];
528 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
529 		ebdp = (struct bufdesc_ex *)bdp;
530 
531 		status = fec16_to_cpu(bdp->cbd_sc);
532 		status &= ~BD_ENET_TX_STATS;
533 		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
534 		frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
535 
536 		/* Handle the last BD specially */
537 		if (frag == nr_frags - 1) {
538 			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
539 			if (fep->bufdesc_ex) {
540 				estatus |= BD_ENET_TX_INT;
541 				if (unlikely(skb_shinfo(skb)->tx_flags &
542 					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
543 					estatus |= BD_ENET_TX_TS;
544 			}
545 		}
546 
547 		if (fep->bufdesc_ex) {
548 			if (fep->quirks & FEC_QUIRK_HAS_AVB)
549 				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
550 			if (skb->ip_summed == CHECKSUM_PARTIAL)
551 				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
552 
553 			ebdp->cbd_bdu = 0;
554 			ebdp->cbd_esc = cpu_to_fec32(estatus);
555 		}
556 
557 		bufaddr = skb_frag_address(this_frag);
558 
559 		index = fec_enet_get_bd_index(bdp, &txq->bd);
560 		if (((unsigned long) bufaddr) & fep->tx_align ||
561 			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
562 			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
563 			bufaddr = txq->tx_bounce[index];
564 
565 			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
566 				swap_buffer(bufaddr, frag_len);
567 		}
568 
569 		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
570 				      DMA_TO_DEVICE);
571 		if (dma_mapping_error(&fep->pdev->dev, addr)) {
572 			if (net_ratelimit())
573 				netdev_err(ndev, "Tx DMA memory map failed\n");
574 			goto dma_mapping_error;
575 		}
576 
577 		bdp->cbd_bufaddr = cpu_to_fec32(addr);
578 		bdp->cbd_datlen = cpu_to_fec16(frag_len);
579 		/* Make sure the updates to rest of the descriptor are
580 		 * performed before transferring ownership.
581 		 */
582 		wmb();
583 		bdp->cbd_sc = cpu_to_fec16(status);
584 	}
585 
586 	return bdp;
587 dma_mapping_error:
588 	bdp = txq->bd.cur;
589 	for (i = 0; i < frag; i++) {
590 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
591 		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
592 				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
593 	}
594 	return ERR_PTR(-ENOMEM);
595 }
596 
597 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
598 				   struct sk_buff *skb, struct net_device *ndev)
599 {
600 	struct fec_enet_private *fep = netdev_priv(ndev);
601 	int nr_frags = skb_shinfo(skb)->nr_frags;
602 	struct bufdesc *bdp, *last_bdp;
603 	void *bufaddr;
604 	dma_addr_t addr;
605 	unsigned short status;
606 	unsigned short buflen;
607 	unsigned int estatus = 0;
608 	unsigned int index;
609 	int entries_free;
610 
611 	entries_free = fec_enet_get_free_txdesc_num(txq);
612 	if (entries_free < MAX_SKB_FRAGS + 1) {
613 		dev_kfree_skb_any(skb);
614 		if (net_ratelimit())
615 			netdev_err(ndev, "NOT enough BD for SG!\n");
616 		return NETDEV_TX_OK;
617 	}
618 
619 	/* Protocol checksum off-load for TCP and UDP. */
620 	if (fec_enet_clear_csum(skb, ndev)) {
621 		dev_kfree_skb_any(skb);
622 		return NETDEV_TX_OK;
623 	}
624 
625 	/* Fill in a Tx ring entry */
626 	bdp = txq->bd.cur;
627 	last_bdp = bdp;
628 	status = fec16_to_cpu(bdp->cbd_sc);
629 	status &= ~BD_ENET_TX_STATS;
630 
631 	/* Set buffer length and buffer pointer */
632 	bufaddr = skb->data;
633 	buflen = skb_headlen(skb);
634 
635 	index = fec_enet_get_bd_index(bdp, &txq->bd);
636 	if (((unsigned long) bufaddr) & fep->tx_align ||
637 		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
638 		memcpy(txq->tx_bounce[index], skb->data, buflen);
639 		bufaddr = txq->tx_bounce[index];
640 
641 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
642 			swap_buffer(bufaddr, buflen);
643 	}
644 
645 	/* Push the data cache so the CPM does not get stale memory data. */
646 	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
647 	if (dma_mapping_error(&fep->pdev->dev, addr)) {
648 		dev_kfree_skb_any(skb);
649 		if (net_ratelimit())
650 			netdev_err(ndev, "Tx DMA memory map failed\n");
651 		return NETDEV_TX_OK;
652 	}
653 
654 	if (nr_frags) {
655 		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
656 		if (IS_ERR(last_bdp)) {
657 			dma_unmap_single(&fep->pdev->dev, addr,
658 					 buflen, DMA_TO_DEVICE);
659 			dev_kfree_skb_any(skb);
660 			return NETDEV_TX_OK;
661 		}
662 	} else {
663 		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
664 		if (fep->bufdesc_ex) {
665 			estatus = BD_ENET_TX_INT;
666 			if (unlikely(skb_shinfo(skb)->tx_flags &
667 				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
668 				estatus |= BD_ENET_TX_TS;
669 		}
670 	}
671 	bdp->cbd_bufaddr = cpu_to_fec32(addr);
672 	bdp->cbd_datlen = cpu_to_fec16(buflen);
673 
674 	if (fep->bufdesc_ex) {
675 
676 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
677 
678 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
679 			fep->hwts_tx_en))
680 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
681 
682 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
683 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
684 
685 		if (skb->ip_summed == CHECKSUM_PARTIAL)
686 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
687 
688 		ebdp->cbd_bdu = 0;
689 		ebdp->cbd_esc = cpu_to_fec32(estatus);
690 	}
691 
692 	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
693 	/* Save skb pointer */
694 	txq->tx_buf[index].buf_p = skb;
695 
696 	/* Make sure the updates to rest of the descriptor are performed before
697 	 * transferring ownership.
698 	 */
699 	wmb();
700 
701 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
702 	 * it's the last BD of the frame, and to put the CRC on the end.
703 	 */
704 	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
705 	bdp->cbd_sc = cpu_to_fec16(status);
706 
707 	/* If this was the last BD in the ring, start at the beginning again. */
708 	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
709 
710 	skb_tx_timestamp(skb);
711 
712 	/* Make sure the update to bdp is performed before txq->bd.cur. */
713 	wmb();
714 	txq->bd.cur = bdp;
715 
716 	/* Trigger transmission start */
717 	writel(0, txq->bd.reg_desc_active);
718 
719 	return 0;
720 }
721 
722 static int
723 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
724 			  struct net_device *ndev,
725 			  struct bufdesc *bdp, int index, char *data,
726 			  int size, bool last_tcp, bool is_last)
727 {
728 	struct fec_enet_private *fep = netdev_priv(ndev);
729 	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
730 	unsigned short status;
731 	unsigned int estatus = 0;
732 	dma_addr_t addr;
733 
734 	status = fec16_to_cpu(bdp->cbd_sc);
735 	status &= ~BD_ENET_TX_STATS;
736 
737 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
738 
739 	if (((unsigned long) data) & fep->tx_align ||
740 		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
741 		memcpy(txq->tx_bounce[index], data, size);
742 		data = txq->tx_bounce[index];
743 
744 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
745 			swap_buffer(data, size);
746 	}
747 
748 	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
749 	if (dma_mapping_error(&fep->pdev->dev, addr)) {
750 		dev_kfree_skb_any(skb);
751 		if (net_ratelimit())
752 			netdev_err(ndev, "Tx DMA memory map failed\n");
753 		return NETDEV_TX_OK;
754 	}
755 
756 	bdp->cbd_datlen = cpu_to_fec16(size);
757 	bdp->cbd_bufaddr = cpu_to_fec32(addr);
758 
759 	if (fep->bufdesc_ex) {
760 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
761 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
762 		if (skb->ip_summed == CHECKSUM_PARTIAL)
763 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
764 		ebdp->cbd_bdu = 0;
765 		ebdp->cbd_esc = cpu_to_fec32(estatus);
766 	}
767 
768 	/* Handle the last BD specially */
769 	if (last_tcp)
770 		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
771 	if (is_last) {
772 		status |= BD_ENET_TX_INTR;
773 		if (fep->bufdesc_ex)
774 			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
775 	}
776 
777 	bdp->cbd_sc = cpu_to_fec16(status);
778 
779 	return 0;
780 }
781 
782 static int
783 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
784 			 struct sk_buff *skb, struct net_device *ndev,
785 			 struct bufdesc *bdp, int index)
786 {
787 	struct fec_enet_private *fep = netdev_priv(ndev);
788 	int hdr_len = skb_tcp_all_headers(skb);
789 	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
790 	void *bufaddr;
791 	unsigned long dmabuf;
792 	unsigned short status;
793 	unsigned int estatus = 0;
794 
795 	status = fec16_to_cpu(bdp->cbd_sc);
796 	status &= ~BD_ENET_TX_STATS;
797 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
798 
799 	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
800 	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
801 	if (((unsigned long)bufaddr) & fep->tx_align ||
802 		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
803 		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
804 		bufaddr = txq->tx_bounce[index];
805 
806 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
807 			swap_buffer(bufaddr, hdr_len);
808 
809 		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
810 					hdr_len, DMA_TO_DEVICE);
811 		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
812 			dev_kfree_skb_any(skb);
813 			if (net_ratelimit())
814 				netdev_err(ndev, "Tx DMA memory map failed\n");
815 			return NETDEV_TX_OK;
816 		}
817 	}
818 
819 	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
820 	bdp->cbd_datlen = cpu_to_fec16(hdr_len);
821 
822 	if (fep->bufdesc_ex) {
823 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
824 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
825 		if (skb->ip_summed == CHECKSUM_PARTIAL)
826 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
827 		ebdp->cbd_bdu = 0;
828 		ebdp->cbd_esc = cpu_to_fec32(estatus);
829 	}
830 
831 	bdp->cbd_sc = cpu_to_fec16(status);
832 
833 	return 0;
834 }
835 
836 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
837 				   struct sk_buff *skb,
838 				   struct net_device *ndev)
839 {
840 	struct fec_enet_private *fep = netdev_priv(ndev);
841 	int hdr_len, total_len, data_left;
842 	struct bufdesc *bdp = txq->bd.cur;
843 	struct tso_t tso;
844 	unsigned int index = 0;
845 	int ret;
846 
847 	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
848 		dev_kfree_skb_any(skb);
849 		if (net_ratelimit())
850 			netdev_err(ndev, "NOT enough BD for TSO!\n");
851 		return NETDEV_TX_OK;
852 	}
853 
854 	/* Protocol checksum off-load for TCP and UDP. */
855 	if (fec_enet_clear_csum(skb, ndev)) {
856 		dev_kfree_skb_any(skb);
857 		return NETDEV_TX_OK;
858 	}
859 
860 	/* Initialize the TSO handler, and prepare the first payload */
861 	hdr_len = tso_start(skb, &tso);
862 
863 	total_len = skb->len - hdr_len;
864 	while (total_len > 0) {
865 		char *hdr;
866 
867 		index = fec_enet_get_bd_index(bdp, &txq->bd);
868 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
869 		total_len -= data_left;
870 
871 		/* prepare packet headers: MAC + IP + TCP */
872 		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
873 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
874 		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
875 		if (ret)
876 			goto err_release;
877 
878 		while (data_left > 0) {
879 			int size;
880 
881 			size = min_t(int, tso.size, data_left);
882 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
883 			index = fec_enet_get_bd_index(bdp, &txq->bd);
884 			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
885 							bdp, index,
886 							tso.data, size,
887 							size == data_left,
888 							total_len == 0);
889 			if (ret)
890 				goto err_release;
891 
892 			data_left -= size;
893 			tso_build_data(skb, &tso, size);
894 		}
895 
896 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
897 	}
898 
899 	/* Save skb pointer */
900 	txq->tx_buf[index].buf_p = skb;
901 
902 	skb_tx_timestamp(skb);
903 	txq->bd.cur = bdp;
904 
905 	/* Trigger transmission start */
906 	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
907 	    !readl(txq->bd.reg_desc_active) ||
908 	    !readl(txq->bd.reg_desc_active) ||
909 	    !readl(txq->bd.reg_desc_active) ||
910 	    !readl(txq->bd.reg_desc_active))
911 		writel(0, txq->bd.reg_desc_active);
912 
913 	return 0;
914 
915 err_release:
916 	/* TODO: Release all used data descriptors for TSO */
917 	return ret;
918 }
919 
920 static netdev_tx_t
921 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
922 {
923 	struct fec_enet_private *fep = netdev_priv(ndev);
924 	int entries_free;
925 	unsigned short queue;
926 	struct fec_enet_priv_tx_q *txq;
927 	struct netdev_queue *nq;
928 	int ret;
929 
930 	queue = skb_get_queue_mapping(skb);
931 	txq = fep->tx_queue[queue];
932 	nq = netdev_get_tx_queue(ndev, queue);
933 
934 	if (skb_is_gso(skb))
935 		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
936 	else
937 		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
938 	if (ret)
939 		return ret;
940 
941 	entries_free = fec_enet_get_free_txdesc_num(txq);
942 	if (entries_free <= txq->tx_stop_threshold)
943 		netif_tx_stop_queue(nq);
944 
945 	return NETDEV_TX_OK;
946 }
947 
948 /* Init RX & TX buffer descriptors
949  */
950 static void fec_enet_bd_init(struct net_device *dev)
951 {
952 	struct fec_enet_private *fep = netdev_priv(dev);
953 	struct fec_enet_priv_tx_q *txq;
954 	struct fec_enet_priv_rx_q *rxq;
955 	struct bufdesc *bdp;
956 	unsigned int i;
957 	unsigned int q;
958 
959 	for (q = 0; q < fep->num_rx_queues; q++) {
960 		/* Initialize the receive buffer descriptors. */
961 		rxq = fep->rx_queue[q];
962 		bdp = rxq->bd.base;
963 
964 		for (i = 0; i < rxq->bd.ring_size; i++) {
965 
966 			/* Initialize the BD for every fragment in the page. */
967 			if (bdp->cbd_bufaddr)
968 				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
969 			else
970 				bdp->cbd_sc = cpu_to_fec16(0);
971 			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
972 		}
973 
974 		/* Set the last buffer to wrap */
975 		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
976 		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
977 
978 		rxq->bd.cur = rxq->bd.base;
979 	}
980 
981 	for (q = 0; q < fep->num_tx_queues; q++) {
982 		/* ...and the same for transmit */
983 		txq = fep->tx_queue[q];
984 		bdp = txq->bd.base;
985 		txq->bd.cur = bdp;
986 
987 		for (i = 0; i < txq->bd.ring_size; i++) {
988 			/* Initialize the BD for every fragment in the page. */
989 			bdp->cbd_sc = cpu_to_fec16(0);
990 			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
991 				if (bdp->cbd_bufaddr &&
992 				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
993 					dma_unmap_single(&fep->pdev->dev,
994 							 fec32_to_cpu(bdp->cbd_bufaddr),
995 							 fec16_to_cpu(bdp->cbd_datlen),
996 							 DMA_TO_DEVICE);
997 				if (txq->tx_buf[i].buf_p)
998 					dev_kfree_skb_any(txq->tx_buf[i].buf_p);
999 			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
1000 				if (bdp->cbd_bufaddr)
1001 					dma_unmap_single(&fep->pdev->dev,
1002 							 fec32_to_cpu(bdp->cbd_bufaddr),
1003 							 fec16_to_cpu(bdp->cbd_datlen),
1004 							 DMA_TO_DEVICE);
1005 
1006 				if (txq->tx_buf[i].buf_p)
1007 					xdp_return_frame(txq->tx_buf[i].buf_p);
1008 			} else {
1009 				struct page *page = txq->tx_buf[i].buf_p;
1010 
1011 				if (page)
1012 					page_pool_put_page(page->pp, page, 0, false);
1013 			}
1014 
1015 			txq->tx_buf[i].buf_p = NULL;
1016 			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
1017 			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
1018 			bdp->cbd_bufaddr = cpu_to_fec32(0);
1019 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1020 		}
1021 
1022 		/* Set the last buffer to wrap */
1023 		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
1024 		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
1025 		txq->dirty_tx = bdp;
1026 	}
1027 }
1028 
1029 static void fec_enet_active_rxring(struct net_device *ndev)
1030 {
1031 	struct fec_enet_private *fep = netdev_priv(ndev);
1032 	int i;
1033 
1034 	for (i = 0; i < fep->num_rx_queues; i++)
1035 		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
1036 }
1037 
1038 static void fec_enet_enable_ring(struct net_device *ndev)
1039 {
1040 	struct fec_enet_private *fep = netdev_priv(ndev);
1041 	struct fec_enet_priv_tx_q *txq;
1042 	struct fec_enet_priv_rx_q *rxq;
1043 	int i;
1044 
1045 	for (i = 0; i < fep->num_rx_queues; i++) {
1046 		rxq = fep->rx_queue[i];
1047 		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
1048 		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
1049 
1050 		/* enable DMA1/2 */
1051 		if (i)
1052 			writel(RCMR_MATCHEN | RCMR_CMP(i),
1053 			       fep->hwp + FEC_RCMR(i));
1054 	}
1055 
1056 	for (i = 0; i < fep->num_tx_queues; i++) {
1057 		txq = fep->tx_queue[i];
1058 		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
1059 
1060 		/* enable DMA1/2 */
1061 		if (i)
1062 			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
1063 			       fep->hwp + FEC_DMA_CFG(i));
1064 	}
1065 }
1066 
1067 /*
1068  * This function is called to start or restart the FEC during a link
1069  * change, transmit timeout, or to reconfigure the FEC.  The network
1070  * packet processing for this device must be stopped before this call.
1071  */
1072 static void
1073 fec_restart(struct net_device *ndev)
1074 {
1075 	struct fec_enet_private *fep = netdev_priv(ndev);
1076 	u32 temp_mac[2];
1077 	u32 rcntl = OPT_FRAME_SIZE | 0x04;
1078 	u32 ecntl = FEC_ECR_ETHEREN;
1079 
1080 	if (fep->bufdesc_ex)
1081 		fec_ptp_save_state(fep);
1082 
1083 	/* Whack a reset.  We should wait for this.
1084 	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
1085 	 * instead of reset MAC itself.
1086 	 */
1087 	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
1088 	    ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
1089 		writel(0, fep->hwp + FEC_ECNTRL);
1090 	} else {
1091 		writel(1, fep->hwp + FEC_ECNTRL);
1092 		udelay(10);
1093 	}
1094 
1095 	/*
1096 	 * enet-mac reset will reset mac address registers too,
1097 	 * so need to reconfigure it.
1098 	 */
1099 	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
1100 	writel((__force u32)cpu_to_be32(temp_mac[0]),
1101 	       fep->hwp + FEC_ADDR_LOW);
1102 	writel((__force u32)cpu_to_be32(temp_mac[1]),
1103 	       fep->hwp + FEC_ADDR_HIGH);
1104 
1105 	/* Clear any outstanding interrupt, except MDIO. */
1106 	writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
1107 
1108 	fec_enet_bd_init(ndev);
1109 
1110 	fec_enet_enable_ring(ndev);
1111 
1112 	/* Enable MII mode */
1113 	if (fep->full_duplex == DUPLEX_FULL) {
1114 		/* FD enable */
1115 		writel(0x04, fep->hwp + FEC_X_CNTRL);
1116 	} else {
1117 		/* No Rcv on Xmit */
1118 		rcntl |= 0x02;
1119 		writel(0x0, fep->hwp + FEC_X_CNTRL);
1120 	}
1121 
1122 	/* Set MII speed */
1123 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1124 
1125 #if !defined(CONFIG_M5272)
1126 	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1127 		u32 val = readl(fep->hwp + FEC_RACC);
1128 
1129 		/* align IP header */
1130 		val |= FEC_RACC_SHIFT16;
1131 		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
1132 			/* set RX checksum */
1133 			val |= FEC_RACC_OPTIONS;
1134 		else
1135 			val &= ~FEC_RACC_OPTIONS;
1136 		writel(val, fep->hwp + FEC_RACC);
1137 		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
1138 	}
1139 #endif
1140 
1141 	/*
1142 	 * The phy interface and speed need to get configured
1143 	 * differently on enet-mac.
1144 	 */
1145 	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1146 		/* Enable flow control and length check */
1147 		rcntl |= 0x40000000 | 0x00000020;
1148 
1149 		/* RGMII, RMII or MII */
1150 		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
1151 		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1152 		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
1153 		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
1154 			rcntl |= (1 << 6);
1155 		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1156 			rcntl |= FEC_RCR_RMII;
1157 		else
1158 			rcntl &= ~FEC_RCR_RMII;
1159 
1160 		/* 1G, 100M or 10M */
1161 		if (ndev->phydev) {
1162 			if (ndev->phydev->speed == SPEED_1000)
1163 				ecntl |= (1 << 5);
1164 			else if (ndev->phydev->speed == SPEED_100)
1165 				rcntl &= ~FEC_RCR_10BASET;
1166 			else
1167 				rcntl |= FEC_RCR_10BASET;
1168 		}
1169 	} else {
1170 #ifdef FEC_MIIGSK_ENR
1171 		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
1172 			u32 cfgr;
1173 			/* disable the gasket and wait */
1174 			writel(0, fep->hwp + FEC_MIIGSK_ENR);
1175 			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1176 				udelay(1);
1177 
1178 			/*
1179 			 * configure the gasket:
1180 			 *   RMII, 50 MHz, no loopback, no echo
1181 			 *   MII, 25 MHz, no loopback, no echo
1182 			 */
1183 			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1184 				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
1185 			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
1186 				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
1187 			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1188 
1189 			/* re-enable the gasket */
1190 			writel(2, fep->hwp + FEC_MIIGSK_ENR);
1191 		}
1192 #endif
1193 	}
1194 
1195 #if !defined(CONFIG_M5272)
1196 	/* enable pause frame*/
1197 	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
1198 	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1199 	     ndev->phydev && ndev->phydev->pause)) {
1200 		rcntl |= FEC_RCR_FLOWCTL;
1201 
1202 		/* set FIFO threshold parameter to reduce overrun */
1203 		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
1204 		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
1205 		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
1206 		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
1207 
1208 		/* OPD */
1209 		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
1210 	} else {
1211 		rcntl &= ~FEC_RCR_FLOWCTL;
1212 	}
1213 #endif /* !defined(CONFIG_M5272) */
1214 
1215 	writel(rcntl, fep->hwp + FEC_R_CNTRL);
1216 
1217 	/* Setup multicast filter. */
1218 	set_multicast_list(ndev);
1219 #ifndef CONFIG_M5272
1220 	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1221 	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1222 #endif
1223 
1224 	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1225 		/* enable ENET endian swap */
1226 		ecntl |= FEC_ECR_BYTESWP;
1227 		/* enable ENET store and forward mode */
1228 		writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
1229 	}
1230 
1231 	if (fep->bufdesc_ex)
1232 		ecntl |= FEC_ECR_EN1588;
1233 
1234 	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1235 	    fep->rgmii_txc_dly)
1236 		ecntl |= FEC_ENET_TXC_DLY;
1237 	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1238 	    fep->rgmii_rxc_dly)
1239 		ecntl |= FEC_ENET_RXC_DLY;
1240 
1241 #ifndef CONFIG_M5272
1242 	/* Enable the MIB statistic event counters */
1243 	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
1244 #endif
1245 
1246 	/* And last, enable the transmit and receive processing */
1247 	writel(ecntl, fep->hwp + FEC_ECNTRL);
1248 	fec_enet_active_rxring(ndev);
1249 
1250 	if (fep->bufdesc_ex) {
1251 		fec_ptp_start_cyclecounter(ndev);
1252 		fec_ptp_restore_state(fep);
1253 	}
1254 
1255 	/* Enable interrupts we wish to service */
1256 	if (fep->link)
1257 		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1258 	else
1259 		writel(0, fep->hwp + FEC_IMASK);
1260 
1261 	/* Init the interrupt coalescing */
1262 	if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
1263 		fec_enet_itr_coal_set(ndev);
1264 }
1265 
1266 static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
1267 {
1268 	if (!(of_machine_is_compatible("fsl,imx8qm") ||
1269 	      of_machine_is_compatible("fsl,imx8qxp") ||
1270 	      of_machine_is_compatible("fsl,imx8dxl")))
1271 		return 0;
1272 
1273 	return imx_scu_get_handle(&fep->ipc_handle);
1274 }
1275 
1276 static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
1277 {
1278 	struct device_node *np = fep->pdev->dev.of_node;
1279 	u32 rsrc_id, val;
1280 	int idx;
1281 
1282 	if (!np || !fep->ipc_handle)
1283 		return;
1284 
1285 	idx = of_alias_get_id(np, "ethernet");
1286 	if (idx < 0)
1287 		idx = 0;
1288 	rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;
1289 
1290 	val = enabled ? 1 : 0;
1291 	imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
1292 }
1293 
1294 static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
1295 {
1296 	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
1297 	struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
1298 
1299 	if (stop_gpr->gpr) {
1300 		if (enabled)
1301 			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
1302 					   BIT(stop_gpr->bit),
1303 					   BIT(stop_gpr->bit));
1304 		else
1305 			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
1306 					   BIT(stop_gpr->bit), 0);
1307 	} else if (pdata && pdata->sleep_mode_enable) {
1308 		pdata->sleep_mode_enable(enabled);
1309 	} else {
1310 		fec_enet_ipg_stop_set(fep, enabled);
1311 	}
1312 }
1313 
1314 static void fec_irqs_disable(struct net_device *ndev)
1315 {
1316 	struct fec_enet_private *fep = netdev_priv(ndev);
1317 
1318 	writel(0, fep->hwp + FEC_IMASK);
1319 }
1320 
1321 static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
1322 {
1323 	struct fec_enet_private *fep = netdev_priv(ndev);
1324 
1325 	writel(0, fep->hwp + FEC_IMASK);
1326 	writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
1327 }
1328 
1329 static void
1330 fec_stop(struct net_device *ndev)
1331 {
1332 	struct fec_enet_private *fep = netdev_priv(ndev);
1333 	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
1334 	u32 val;
1335 
1336 	/* We cannot expect a graceful transmit stop without link !!! */
1337 	if (fep->link) {
1338 		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1339 		udelay(10);
1340 		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1341 			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
1342 	}
1343 
1344 	if (fep->bufdesc_ex)
1345 		fec_ptp_save_state(fep);
1346 
1347 	/* Whack a reset.  We should wait for this.
1348 	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
1349 	 * instead of reset MAC itself.
1350 	 */
1351 	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1352 		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
1353 			writel(0, fep->hwp + FEC_ECNTRL);
1354 		} else {
1355 			writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
1356 			udelay(10);
1357 		}
1358 	} else {
1359 		val = readl(fep->hwp + FEC_ECNTRL);
1360 		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
1361 		writel(val, fep->hwp + FEC_ECNTRL);
1362 	}
1363 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1364 	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1365 
1366 	/* We have to keep ENET enabled to have MII interrupt stay working */
1367 	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
1368 		!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1369 		writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
1370 		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
1371 	}
1372 
1373 	if (fep->bufdesc_ex) {
1374 		val = readl(fep->hwp + FEC_ECNTRL);
1375 		val |= FEC_ECR_EN1588;
1376 		writel(val, fep->hwp + FEC_ECNTRL);
1377 
1378 		fec_ptp_start_cyclecounter(ndev);
1379 		fec_ptp_restore_state(fep);
1380 	}
1381 }
1382 
1383 static void
1384 fec_timeout(struct net_device *ndev, unsigned int txqueue)
1385 {
1386 	struct fec_enet_private *fep = netdev_priv(ndev);
1387 
1388 	fec_dump(ndev);
1389 
1390 	ndev->stats.tx_errors++;
1391 
1392 	schedule_work(&fep->tx_timeout_work);
1393 }
1394 
1395 static void fec_enet_timeout_work(struct work_struct *work)
1396 {
1397 	struct fec_enet_private *fep =
1398 		container_of(work, struct fec_enet_private, tx_timeout_work);
1399 	struct net_device *ndev = fep->netdev;
1400 
1401 	rtnl_lock();
1402 	if (netif_device_present(ndev) || netif_running(ndev)) {
1403 		napi_disable(&fep->napi);
1404 		netif_tx_lock_bh(ndev);
1405 		fec_restart(ndev);
1406 		netif_tx_wake_all_queues(ndev);
1407 		netif_tx_unlock_bh(ndev);
1408 		napi_enable(&fep->napi);
1409 	}
1410 	rtnl_unlock();
1411 }
1412 
1413 static void
1414 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
1415 	struct skb_shared_hwtstamps *hwtstamps)
1416 {
1417 	unsigned long flags;
1418 	u64 ns;
1419 
1420 	spin_lock_irqsave(&fep->tmreg_lock, flags);
1421 	ns = timecounter_cyc2time(&fep->tc, ts);
1422 	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
1423 
1424 	memset(hwtstamps, 0, sizeof(*hwtstamps));
1425 	hwtstamps->hwtstamp = ns_to_ktime(ns);
1426 }
1427 
1428 static void
1429 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
1430 {
1431 	struct	fec_enet_private *fep;
1432 	struct xdp_frame *xdpf;
1433 	struct bufdesc *bdp;
1434 	unsigned short status;
1435 	struct	sk_buff	*skb;
1436 	struct fec_enet_priv_tx_q *txq;
1437 	struct netdev_queue *nq;
1438 	int	index = 0;
1439 	int	entries_free;
1440 	struct page *page;
1441 	int frame_len;
1442 
1443 	fep = netdev_priv(ndev);
1444 
1445 	txq = fep->tx_queue[queue_id];
1446 	/* get next bdp of dirty_tx */
1447 	nq = netdev_get_tx_queue(ndev, queue_id);
1448 	bdp = txq->dirty_tx;
1449 
1450 	/* get next bdp of dirty_tx */
1451 	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1452 
1453 	while (bdp != READ_ONCE(txq->bd.cur)) {
1454 		/* Order the load of bd.cur and cbd_sc */
1455 		rmb();
1456 		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
1457 		if (status & BD_ENET_TX_READY)
1458 			break;
1459 
1460 		index = fec_enet_get_bd_index(bdp, &txq->bd);
1461 
1462 		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
1463 			skb = txq->tx_buf[index].buf_p;
1464 			if (bdp->cbd_bufaddr &&
1465 			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1466 				dma_unmap_single(&fep->pdev->dev,
1467 						 fec32_to_cpu(bdp->cbd_bufaddr),
1468 						 fec16_to_cpu(bdp->cbd_datlen),
1469 						 DMA_TO_DEVICE);
1470 			bdp->cbd_bufaddr = cpu_to_fec32(0);
1471 			if (!skb)
1472 				goto tx_buf_done;
1473 		} else {
1474 			/* Tx processing cannot call any XDP (or page pool) APIs if
1475 			 * the "budget" is 0. Because NAPI is called with budget of
1476 			 * 0 (such as netpoll) indicates we may be in an IRQ context,
1477 			 * however, we can't use the page pool from IRQ context.
1478 			 */
1479 			if (unlikely(!budget))
1480 				break;
1481 
1482 			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
1483 				xdpf = txq->tx_buf[index].buf_p;
1484 				if (bdp->cbd_bufaddr)
1485 					dma_unmap_single(&fep->pdev->dev,
1486 							 fec32_to_cpu(bdp->cbd_bufaddr),
1487 							 fec16_to_cpu(bdp->cbd_datlen),
1488 							 DMA_TO_DEVICE);
1489 			} else {
1490 				page = txq->tx_buf[index].buf_p;
1491 			}
1492 
1493 			bdp->cbd_bufaddr = cpu_to_fec32(0);
1494 			if (unlikely(!txq->tx_buf[index].buf_p)) {
1495 				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1496 				goto tx_buf_done;
1497 			}
1498 
1499 			frame_len = fec16_to_cpu(bdp->cbd_datlen);
1500 		}
1501 
1502 		/* Check for errors. */
1503 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
1504 				   BD_ENET_TX_RL | BD_ENET_TX_UN |
1505 				   BD_ENET_TX_CSL)) {
1506 			ndev->stats.tx_errors++;
1507 			if (status & BD_ENET_TX_HB)  /* No heartbeat */
1508 				ndev->stats.tx_heartbeat_errors++;
1509 			if (status & BD_ENET_TX_LC)  /* Late collision */
1510 				ndev->stats.tx_window_errors++;
1511 			if (status & BD_ENET_TX_RL)  /* Retrans limit */
1512 				ndev->stats.tx_aborted_errors++;
1513 			if (status & BD_ENET_TX_UN)  /* Underrun */
1514 				ndev->stats.tx_fifo_errors++;
1515 			if (status & BD_ENET_TX_CSL) /* Carrier lost */
1516 				ndev->stats.tx_carrier_errors++;
1517 		} else {
1518 			ndev->stats.tx_packets++;
1519 
1520 			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
1521 				ndev->stats.tx_bytes += skb->len;
1522 			else
1523 				ndev->stats.tx_bytes += frame_len;
1524 		}
1525 
1526 		/* Deferred means some collisions occurred during transmit,
1527 		 * but we eventually sent the packet OK.
1528 		 */
1529 		if (status & BD_ENET_TX_DEF)
1530 			ndev->stats.collisions++;
1531 
1532 		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
1533 			/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
1534 			 * are to time stamp the packet, so we still need to check time
1535 			 * stamping enabled flag.
1536 			 */
1537 			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
1538 				     fep->hwts_tx_en) && fep->bufdesc_ex) {
1539 				struct skb_shared_hwtstamps shhwtstamps;
1540 				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1541 
1542 				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1543 				skb_tstamp_tx(skb, &shhwtstamps);
1544 			}
1545 
1546 			/* Free the sk buffer associated with this last transmit */
1547 			napi_consume_skb(skb, budget);
1548 		} else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
1549 			xdp_return_frame_rx_napi(xdpf);
1550 		} else { /* recycle pages of XDP_TX frames */
1551 			/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
1552 			page_pool_put_page(page->pp, page, 0, true);
1553 		}
1554 
1555 		txq->tx_buf[index].buf_p = NULL;
1556 		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
1557 		txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1558 
1559 tx_buf_done:
1560 		/* Make sure the update to bdp and tx_buf are performed
1561 		 * before dirty_tx
1562 		 */
1563 		wmb();
1564 		txq->dirty_tx = bdp;
1565 
1566 		/* Update pointer to next buffer descriptor to be transmitted */
1567 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1568 
1569 		/* Since we have freed up a buffer, the ring is no longer full
1570 		 */
1571 		if (netif_tx_queue_stopped(nq)) {
1572 			entries_free = fec_enet_get_free_txdesc_num(txq);
1573 			if (entries_free >= txq->tx_wake_threshold)
1574 				netif_tx_wake_queue(nq);
1575 		}
1576 	}
1577 
1578 	/* ERR006358: Keep the transmitter going */
1579 	if (bdp != txq->bd.cur &&
1580 	    readl(txq->bd.reg_desc_active) == 0)
1581 		writel(0, txq->bd.reg_desc_active);
1582 }
1583 
1584 static void fec_enet_tx(struct net_device *ndev, int budget)
1585 {
1586 	struct fec_enet_private *fep = netdev_priv(ndev);
1587 	int i;
1588 
1589 	/* Make sure that AVB queues are processed first. */
1590 	for (i = fep->num_tx_queues - 1; i >= 0; i--)
1591 		fec_enet_tx_queue(ndev, i, budget);
1592 }
1593 
1594 static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
1595 				struct bufdesc *bdp, int index)
1596 {
1597 	struct page *new_page;
1598 	dma_addr_t phys_addr;
1599 
1600 	new_page = page_pool_dev_alloc_pages(rxq->page_pool);
1601 	if (unlikely(!new_page))
1602 		return -ENOMEM;
1603 
1604 	rxq->rx_skb_info[index].page = new_page;
1605 	rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
1606 	phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
1607 	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
1608 
1609 	return 0;
1610 }
1611 
1612 static u32
1613 fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
1614 		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
1615 {
1616 	unsigned int sync, len = xdp->data_end - xdp->data;
1617 	u32 ret = FEC_ENET_XDP_PASS;
1618 	struct page *page;
1619 	int err;
1620 	u32 act;
1621 
1622 	act = bpf_prog_run_xdp(prog, xdp);
1623 
1624 	/* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover
1625 	 * max len CPU touch
1626 	 */
1627 	sync = xdp->data_end - xdp->data;
1628 	sync = max(sync, len);
1629 
1630 	switch (act) {
1631 	case XDP_PASS:
1632 		rxq->stats[RX_XDP_PASS]++;
1633 		ret = FEC_ENET_XDP_PASS;
1634 		break;
1635 
1636 	case XDP_REDIRECT:
1637 		rxq->stats[RX_XDP_REDIRECT]++;
1638 		err = xdp_do_redirect(fep->netdev, xdp, prog);
1639 		if (unlikely(err))
1640 			goto xdp_err;
1641 
1642 		ret = FEC_ENET_XDP_REDIR;
1643 		break;
1644 
1645 	case XDP_TX:
1646 		rxq->stats[RX_XDP_TX]++;
1647 		err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
1648 		if (unlikely(err)) {
1649 			rxq->stats[RX_XDP_TX_ERRORS]++;
1650 			goto xdp_err;
1651 		}
1652 
1653 		ret = FEC_ENET_XDP_TX;
1654 		break;
1655 
1656 	default:
1657 		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
1658 		fallthrough;
1659 
1660 	case XDP_ABORTED:
1661 		fallthrough;    /* handle aborts by dropping packet */
1662 
1663 	case XDP_DROP:
1664 		rxq->stats[RX_XDP_DROP]++;
1665 xdp_err:
1666 		ret = FEC_ENET_XDP_CONSUMED;
1667 		page = virt_to_head_page(xdp->data);
1668 		page_pool_put_page(rxq->page_pool, page, sync, true);
1669 		if (act != XDP_DROP)
1670 			trace_xdp_exception(fep->netdev, prog, act);
1671 		break;
1672 	}
1673 
1674 	return ret;
1675 }
1676 
1677 /* During a receive, the bd_rx.cur points to the current incoming buffer.
1678  * When we update through the ring, if the next incoming buffer has
1679  * not been given to the system, we just set the empty indicator,
1680  * effectively tossing the packet.
1681  */
1682 static int
1683 fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1684 {
1685 	struct fec_enet_private *fep = netdev_priv(ndev);
1686 	struct fec_enet_priv_rx_q *rxq;
1687 	struct bufdesc *bdp;
1688 	unsigned short status;
1689 	struct  sk_buff *skb;
1690 	ushort	pkt_len;
1691 	__u8 *data;
1692 	int	pkt_received = 0;
1693 	struct	bufdesc_ex *ebdp = NULL;
1694 	bool	vlan_packet_rcvd = false;
1695 	u16	vlan_tag;
1696 	int	index = 0;
1697 	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
1698 	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
1699 	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
1700 	u32 data_start = FEC_ENET_XDP_HEADROOM;
1701 	int cpu = smp_processor_id();
1702 	struct xdp_buff xdp;
1703 	struct page *page;
1704 	__fec32 cbd_bufaddr;
1705 	u32 sub_len = 4;
1706 
1707 #if !defined(CONFIG_M5272)
1708 	/*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of
1709 	 * FEC_RACC_SHIFT16 is set by default in the probe function.
1710 	 */
1711 	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1712 		data_start += 2;
1713 		sub_len += 2;
1714 	}
1715 #endif
1716 
1717 #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
1718 	/*
1719 	 * Hacky flush of all caches instead of using the DMA API for the TSO
1720 	 * headers.
1721 	 */
1722 	flush_cache_all();
1723 #endif
1724 	rxq = fep->rx_queue[queue_id];
1725 
1726 	/* First, grab all of the stats for the incoming packet.
1727 	 * These get messed up if we get called due to a busy condition.
1728 	 */
1729 	bdp = rxq->bd.cur;
1730 	xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
1731 
1732 	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
1733 
1734 		if (pkt_received >= budget)
1735 			break;
1736 		pkt_received++;
1737 
1738 		writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
1739 
1740 		/* Check for errors. */
1741 		status ^= BD_ENET_RX_LAST;
1742 		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
1743 			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
1744 			   BD_ENET_RX_CL)) {
1745 			ndev->stats.rx_errors++;
1746 			if (status & BD_ENET_RX_OV) {
1747 				/* FIFO overrun */
1748 				ndev->stats.rx_fifo_errors++;
1749 				goto rx_processing_done;
1750 			}
1751 			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
1752 						| BD_ENET_RX_LAST)) {
1753 				/* Frame too long or too short. */
1754 				ndev->stats.rx_length_errors++;
1755 				if (status & BD_ENET_RX_LAST)
1756 					netdev_err(ndev, "rcv is not +last\n");
1757 			}
1758 			if (status & BD_ENET_RX_CR)	/* CRC Error */
1759 				ndev->stats.rx_crc_errors++;
1760 			/* Report late collisions as a frame error. */
1761 			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
1762 				ndev->stats.rx_frame_errors++;
1763 			goto rx_processing_done;
1764 		}
1765 
1766 		/* Process the incoming frame. */
1767 		ndev->stats.rx_packets++;
1768 		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
1769 		ndev->stats.rx_bytes += pkt_len;
1770 
1771 		index = fec_enet_get_bd_index(bdp, &rxq->bd);
1772 		page = rxq->rx_skb_info[index].page;
1773 		cbd_bufaddr = bdp->cbd_bufaddr;
1774 		if (fec_enet_update_cbd(rxq, bdp, index)) {
1775 			ndev->stats.rx_dropped++;
1776 			goto rx_processing_done;
1777 		}
1778 
1779 		dma_sync_single_for_cpu(&fep->pdev->dev,
1780 					fec32_to_cpu(cbd_bufaddr),
1781 					pkt_len,
1782 					DMA_FROM_DEVICE);
1783 		prefetch(page_address(page));
1784 
1785 		if (xdp_prog) {
1786 			xdp_buff_clear_frags_flag(&xdp);
1787 			/* subtract 16bit shift and FCS */
1788 			xdp_prepare_buff(&xdp, page_address(page),
1789 					 data_start, pkt_len - sub_len, false);
1790 			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
1791 			xdp_result |= ret;
1792 			if (ret != FEC_ENET_XDP_PASS)
1793 				goto rx_processing_done;
1794 		}
1795 
1796 		/* The packet length includes FCS, but we don't want to
1797 		 * include that when passing upstream as it messes up
1798 		 * bridging applications.
1799 		 */
1800 		skb = build_skb(page_address(page), PAGE_SIZE);
1801 		if (unlikely(!skb)) {
1802 			page_pool_recycle_direct(rxq->page_pool, page);
1803 			ndev->stats.rx_dropped++;
1804 
1805 			netdev_err_once(ndev, "build_skb failed!\n");
1806 			goto rx_processing_done;
1807 		}
1808 
1809 		skb_reserve(skb, data_start);
1810 		skb_put(skb, pkt_len - sub_len);
1811 		skb_mark_for_recycle(skb);
1812 
1813 		if (unlikely(need_swap)) {
1814 			data = page_address(page) + FEC_ENET_XDP_HEADROOM;
1815 			swap_buffer(data, pkt_len);
1816 		}
1817 		data = skb->data;
1818 
1819 		/* Extract the enhanced buffer descriptor */
1820 		ebdp = NULL;
1821 		if (fep->bufdesc_ex)
1822 			ebdp = (struct bufdesc_ex *)bdp;
1823 
1824 		/* If this is a VLAN packet remove the VLAN Tag */
1825 		vlan_packet_rcvd = false;
1826 		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1827 		    fep->bufdesc_ex &&
1828 		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
1829 			/* Push and remove the vlan tag */
1830 			struct vlan_hdr *vlan_header =
1831 					(struct vlan_hdr *) (data + ETH_HLEN);
1832 			vlan_tag = ntohs(vlan_header->h_vlan_TCI);
1833 
1834 			vlan_packet_rcvd = true;
1835 
1836 			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
1837 			skb_pull(skb, VLAN_HLEN);
1838 		}
1839 
1840 		skb->protocol = eth_type_trans(skb, ndev);
1841 
1842 		/* Get receive timestamp from the skb */
1843 		if (fep->hwts_rx_en && fep->bufdesc_ex)
1844 			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1845 					  skb_hwtstamps(skb));
1846 
1847 		if (fep->bufdesc_ex &&
1848 		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1849 			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
1850 				/* don't check it */
1851 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1852 			} else {
1853 				skb_checksum_none_assert(skb);
1854 			}
1855 		}
1856 
1857 		/* Handle received VLAN packets */
1858 		if (vlan_packet_rcvd)
1859 			__vlan_hwaccel_put_tag(skb,
1860 					       htons(ETH_P_8021Q),
1861 					       vlan_tag);
1862 
1863 		skb_record_rx_queue(skb, queue_id);
1864 		napi_gro_receive(&fep->napi, skb);
1865 
1866 rx_processing_done:
1867 		/* Clear the status flags for this buffer */
1868 		status &= ~BD_ENET_RX_STATS;
1869 
1870 		/* Mark the buffer empty */
1871 		status |= BD_ENET_RX_EMPTY;
1872 
1873 		if (fep->bufdesc_ex) {
1874 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1875 
1876 			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1877 			ebdp->cbd_prot = 0;
1878 			ebdp->cbd_bdu = 0;
1879 		}
1880 		/* Make sure the updates to rest of the descriptor are
1881 		 * performed before transferring ownership.
1882 		 */
1883 		wmb();
1884 		bdp->cbd_sc = cpu_to_fec16(status);
1885 
1886 		/* Update BD pointer to next entry */
1887 		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1888 
1889 		/* Doing this here will keep the FEC running while we process
1890 		 * incoming frames.  On a heavily loaded network, we should be
1891 		 * able to keep up at the expense of system resources.
1892 		 */
1893 		writel(0, rxq->bd.reg_desc_active);
1894 	}
1895 	rxq->bd.cur = bdp;
1896 
1897 	if (xdp_result & FEC_ENET_XDP_REDIR)
1898 		xdp_do_flush();
1899 
1900 	return pkt_received;
1901 }
1902 
1903 static int fec_enet_rx(struct net_device *ndev, int budget)
1904 {
1905 	struct fec_enet_private *fep = netdev_priv(ndev);
1906 	int i, done = 0;
1907 
1908 	/* Make sure that AVB queues are processed first. */
1909 	for (i = fep->num_rx_queues - 1; i >= 0; i--)
1910 		done += fec_enet_rx_queue(ndev, budget - done, i);
1911 
1912 	return done;
1913 }
1914 
1915 static bool fec_enet_collect_events(struct fec_enet_private *fep)
1916 {
1917 	uint int_events;
1918 
1919 	int_events = readl(fep->hwp + FEC_IEVENT);
1920 
1921 	/* Don't clear MDIO events, we poll for those */
1922 	int_events &= ~FEC_ENET_MII;
1923 
1924 	writel(int_events, fep->hwp + FEC_IEVENT);
1925 
1926 	return int_events != 0;
1927 }
1928 
1929 static irqreturn_t
1930 fec_enet_interrupt(int irq, void *dev_id)
1931 {
1932 	struct net_device *ndev = dev_id;
1933 	struct fec_enet_private *fep = netdev_priv(ndev);
1934 	irqreturn_t ret = IRQ_NONE;
1935 
1936 	if (fec_enet_collect_events(fep) && fep->link) {
1937 		ret = IRQ_HANDLED;
1938 
1939 		if (napi_schedule_prep(&fep->napi)) {
1940 			/* Disable interrupts */
1941 			writel(0, fep->hwp + FEC_IMASK);
1942 			__napi_schedule(&fep->napi);
1943 		}
1944 	}
1945 
1946 	return ret;
1947 }
1948 
1949 static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
1950 {
1951 	struct net_device *ndev = napi->dev;
1952 	struct fec_enet_private *fep = netdev_priv(ndev);
1953 	int done = 0;
1954 
1955 	do {
1956 		done += fec_enet_rx(ndev, budget - done);
1957 		fec_enet_tx(ndev, budget);
1958 	} while ((done < budget) && fec_enet_collect_events(fep));
1959 
1960 	if (done < budget) {
1961 		napi_complete_done(napi, done);
1962 		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1963 	}
1964 
1965 	return done;
1966 }
1967 
1968 /* ------------------------------------------------------------------------- */
1969 static int fec_get_mac(struct net_device *ndev)
1970 {
1971 	struct fec_enet_private *fep = netdev_priv(ndev);
1972 	unsigned char *iap, tmpaddr[ETH_ALEN];
1973 	int ret;
1974 
1975 	/*
1976 	 * try to get mac address in following order:
1977 	 *
1978 	 * 1) module parameter via kernel command line in form
1979 	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
1980 	 */
1981 	iap = macaddr;
1982 
1983 	/*
1984 	 * 2) from device tree data
1985 	 */
1986 	if (!is_valid_ether_addr(iap)) {
1987 		struct device_node *np = fep->pdev->dev.of_node;
1988 		if (np) {
1989 			ret = of_get_mac_address(np, tmpaddr);
1990 			if (!ret)
1991 				iap = tmpaddr;
1992 			else if (ret == -EPROBE_DEFER)
1993 				return ret;
1994 		}
1995 	}
1996 
1997 	/*
1998 	 * 3) from flash or fuse (via platform data)
1999 	 */
2000 	if (!is_valid_ether_addr(iap)) {
2001 #ifdef CONFIG_M5272
2002 		if (FEC_FLASHMAC)
2003 			iap = (unsigned char *)FEC_FLASHMAC;
2004 #else
2005 		struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
2006 
2007 		if (pdata)
2008 			iap = (unsigned char *)&pdata->mac;
2009 #endif
2010 	}
2011 
2012 	/*
2013 	 * 4) FEC mac registers set by bootloader
2014 	 */
2015 	if (!is_valid_ether_addr(iap)) {
2016 		*((__be32 *) &tmpaddr[0]) =
2017 			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
2018 		*((__be16 *) &tmpaddr[4]) =
2019 			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
2020 		iap = &tmpaddr[0];
2021 	}
2022 
2023 	/*
2024 	 * 5) random mac address
2025 	 */
2026 	if (!is_valid_ether_addr(iap)) {
2027 		/* Report it and use a random ethernet address instead */
2028 		dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
2029 		eth_hw_addr_random(ndev);
2030 		dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
2031 			 ndev->dev_addr);
2032 		return 0;
2033 	}
2034 
2035 	/* Adjust MAC if using macaddr */
2036 	eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
2037 
2038 	return 0;
2039 }
2040 
2041 /* ------------------------------------------------------------------------- */
2042 
2043 /*
2044  * Phy section
2045  */
2046 
2047 /* LPI Sleep Ts count base on tx clk (clk_ref).
2048  * The lpi sleep cnt value = X us / (cycle_ns).
2049  */
2050 static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
2051 {
2052 	struct fec_enet_private *fep = netdev_priv(ndev);
2053 
2054 	return us * (fep->clk_ref_rate / 1000) / 1000;
2055 }
2056 
2057 static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer,
2058 				 bool enable)
2059 {
2060 	struct fec_enet_private *fep = netdev_priv(ndev);
2061 	unsigned int sleep_cycle, wake_cycle;
2062 
2063 	if (enable) {
2064 		sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer);
2065 		wake_cycle = sleep_cycle;
2066 	} else {
2067 		sleep_cycle = 0;
2068 		wake_cycle = 0;
2069 	}
2070 
2071 	writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
2072 	writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
2073 
2074 	return 0;
2075 }
2076 
2077 static void fec_enet_adjust_link(struct net_device *ndev)
2078 {
2079 	struct fec_enet_private *fep = netdev_priv(ndev);
2080 	struct phy_device *phy_dev = ndev->phydev;
2081 	int status_change = 0;
2082 
2083 	/*
2084 	 * If the netdev is down, or is going down, we're not interested
2085 	 * in link state events, so just mark our idea of the link as down
2086 	 * and ignore the event.
2087 	 */
2088 	if (!netif_running(ndev) || !netif_device_present(ndev)) {
2089 		fep->link = 0;
2090 	} else if (phy_dev->link) {
2091 		if (!fep->link) {
2092 			fep->link = phy_dev->link;
2093 			status_change = 1;
2094 		}
2095 
2096 		if (fep->full_duplex != phy_dev->duplex) {
2097 			fep->full_duplex = phy_dev->duplex;
2098 			status_change = 1;
2099 		}
2100 
2101 		if (phy_dev->speed != fep->speed) {
2102 			fep->speed = phy_dev->speed;
2103 			status_change = 1;
2104 		}
2105 
2106 		/* if any of the above changed restart the FEC */
2107 		if (status_change) {
2108 			netif_stop_queue(ndev);
2109 			napi_disable(&fep->napi);
2110 			netif_tx_lock_bh(ndev);
2111 			fec_restart(ndev);
2112 			netif_tx_wake_all_queues(ndev);
2113 			netif_tx_unlock_bh(ndev);
2114 			napi_enable(&fep->napi);
2115 		}
2116 		if (fep->quirks & FEC_QUIRK_HAS_EEE)
2117 			fec_enet_eee_mode_set(ndev,
2118 					      phy_dev->eee_cfg.tx_lpi_timer,
2119 					      phy_dev->enable_tx_lpi);
2120 	} else {
2121 		if (fep->link) {
2122 			netif_stop_queue(ndev);
2123 			napi_disable(&fep->napi);
2124 			netif_tx_lock_bh(ndev);
2125 			fec_stop(ndev);
2126 			netif_tx_unlock_bh(ndev);
2127 			napi_enable(&fep->napi);
2128 			fep->link = phy_dev->link;
2129 			status_change = 1;
2130 		}
2131 	}
2132 
2133 	if (status_change)
2134 		phy_print_status(phy_dev);
2135 }
2136 
2137 static int fec_enet_mdio_wait(struct fec_enet_private *fep)
2138 {
2139 	uint ievent;
2140 	int ret;
2141 
2142 	ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
2143 					ievent & FEC_ENET_MII, 2, 30000);
2144 
2145 	if (!ret)
2146 		writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2147 
2148 	return ret;
2149 }
2150 
2151 static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
2152 {
2153 	struct fec_enet_private *fep = bus->priv;
2154 	struct device *dev = &fep->pdev->dev;
2155 	int ret = 0, frame_start, frame_addr, frame_op;
2156 
2157 	ret = pm_runtime_resume_and_get(dev);
2158 	if (ret < 0)
2159 		return ret;
2160 
2161 	/* C22 read */
2162 	frame_op = FEC_MMFR_OP_READ;
2163 	frame_start = FEC_MMFR_ST;
2164 	frame_addr = regnum;
2165 
2166 	/* start a read op */
2167 	writel(frame_start | frame_op |
2168 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
2169 	       FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
2170 
2171 	/* wait for end of transfer */
2172 	ret = fec_enet_mdio_wait(fep);
2173 	if (ret) {
2174 		netdev_err(fep->netdev, "MDIO read timeout\n");
2175 		goto out;
2176 	}
2177 
2178 	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
2179 
2180 out:
2181 	pm_runtime_mark_last_busy(dev);
2182 	pm_runtime_put_autosuspend(dev);
2183 
2184 	return ret;
2185 }
2186 
2187 static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id,
2188 				  int devad, int regnum)
2189 {
2190 	struct fec_enet_private *fep = bus->priv;
2191 	struct device *dev = &fep->pdev->dev;
2192 	int ret = 0, frame_start, frame_op;
2193 
2194 	ret = pm_runtime_resume_and_get(dev);
2195 	if (ret < 0)
2196 		return ret;
2197 
2198 	frame_start = FEC_MMFR_ST_C45;
2199 
2200 	/* write address */
2201 	writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
2202 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2203 	       FEC_MMFR_TA | (regnum & 0xFFFF),
2204 	       fep->hwp + FEC_MII_DATA);
2205 
2206 	/* wait for end of transfer */
2207 	ret = fec_enet_mdio_wait(fep);
2208 	if (ret) {
2209 		netdev_err(fep->netdev, "MDIO address write timeout\n");
2210 		goto out;
2211 	}
2212 
2213 	frame_op = FEC_MMFR_OP_READ_C45;
2214 
2215 	/* start a read op */
2216 	writel(frame_start | frame_op |
2217 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2218 	       FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
2219 
2220 	/* wait for end of transfer */
2221 	ret = fec_enet_mdio_wait(fep);
2222 	if (ret) {
2223 		netdev_err(fep->netdev, "MDIO read timeout\n");
2224 		goto out;
2225 	}
2226 
2227 	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
2228 
2229 out:
2230 	pm_runtime_mark_last_busy(dev);
2231 	pm_runtime_put_autosuspend(dev);
2232 
2233 	return ret;
2234 }
2235 
2236 static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
2237 				   u16 value)
2238 {
2239 	struct fec_enet_private *fep = bus->priv;
2240 	struct device *dev = &fep->pdev->dev;
2241 	int ret, frame_start, frame_addr;
2242 
2243 	ret = pm_runtime_resume_and_get(dev);
2244 	if (ret < 0)
2245 		return ret;
2246 
2247 	/* C22 write */
2248 	frame_start = FEC_MMFR_ST;
2249 	frame_addr = regnum;
2250 
2251 	/* start a write op */
2252 	writel(frame_start | FEC_MMFR_OP_WRITE |
2253 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
2254 	       FEC_MMFR_TA | FEC_MMFR_DATA(value),
2255 	       fep->hwp + FEC_MII_DATA);
2256 
2257 	/* wait for end of transfer */
2258 	ret = fec_enet_mdio_wait(fep);
2259 	if (ret)
2260 		netdev_err(fep->netdev, "MDIO write timeout\n");
2261 
2262 	pm_runtime_mark_last_busy(dev);
2263 	pm_runtime_put_autosuspend(dev);
2264 
2265 	return ret;
2266 }
2267 
2268 static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id,
2269 				   int devad, int regnum, u16 value)
2270 {
2271 	struct fec_enet_private *fep = bus->priv;
2272 	struct device *dev = &fep->pdev->dev;
2273 	int ret, frame_start;
2274 
2275 	ret = pm_runtime_resume_and_get(dev);
2276 	if (ret < 0)
2277 		return ret;
2278 
2279 	frame_start = FEC_MMFR_ST_C45;
2280 
2281 	/* write address */
2282 	writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
2283 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2284 	       FEC_MMFR_TA | (regnum & 0xFFFF),
2285 	       fep->hwp + FEC_MII_DATA);
2286 
2287 	/* wait for end of transfer */
2288 	ret = fec_enet_mdio_wait(fep);
2289 	if (ret) {
2290 		netdev_err(fep->netdev, "MDIO address write timeout\n");
2291 		goto out;
2292 	}
2293 
2294 	/* start a write op */
2295 	writel(frame_start | FEC_MMFR_OP_WRITE |
2296 	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2297 	       FEC_MMFR_TA | FEC_MMFR_DATA(value),
2298 	       fep->hwp + FEC_MII_DATA);
2299 
2300 	/* wait for end of transfer */
2301 	ret = fec_enet_mdio_wait(fep);
2302 	if (ret)
2303 		netdev_err(fep->netdev, "MDIO write timeout\n");
2304 
2305 out:
2306 	pm_runtime_mark_last_busy(dev);
2307 	pm_runtime_put_autosuspend(dev);
2308 
2309 	return ret;
2310 }
2311 
2312 static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
2313 {
2314 	struct fec_enet_private *fep = netdev_priv(ndev);
2315 	struct phy_device *phy_dev = ndev->phydev;
2316 
2317 	if (phy_dev) {
2318 		phy_reset_after_clk_enable(phy_dev);
2319 	} else if (fep->phy_node) {
2320 		/*
2321 		 * If the PHY still is not bound to the MAC, but there is
2322 		 * OF PHY node and a matching PHY device instance already,
2323 		 * use the OF PHY node to obtain the PHY device instance,
2324 		 * and then use that PHY device instance when triggering
2325 		 * the PHY reset.
2326 		 */
2327 		phy_dev = of_phy_find_device(fep->phy_node);
2328 		phy_reset_after_clk_enable(phy_dev);
2329 		put_device(&phy_dev->mdio.dev);
2330 	}
2331 }
2332 
2333 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
2334 {
2335 	struct fec_enet_private *fep = netdev_priv(ndev);
2336 	int ret;
2337 
2338 	if (enable) {
2339 		ret = clk_prepare_enable(fep->clk_enet_out);
2340 		if (ret)
2341 			return ret;
2342 
2343 		if (fep->clk_ptp) {
2344 			mutex_lock(&fep->ptp_clk_mutex);
2345 			ret = clk_prepare_enable(fep->clk_ptp);
2346 			if (ret) {
2347 				mutex_unlock(&fep->ptp_clk_mutex);
2348 				goto failed_clk_ptp;
2349 			} else {
2350 				fep->ptp_clk_on = true;
2351 			}
2352 			mutex_unlock(&fep->ptp_clk_mutex);
2353 		}
2354 
2355 		ret = clk_prepare_enable(fep->clk_ref);
2356 		if (ret)
2357 			goto failed_clk_ref;
2358 
2359 		ret = clk_prepare_enable(fep->clk_2x_txclk);
2360 		if (ret)
2361 			goto failed_clk_2x_txclk;
2362 
2363 		fec_enet_phy_reset_after_clk_enable(ndev);
2364 	} else {
2365 		clk_disable_unprepare(fep->clk_enet_out);
2366 		if (fep->clk_ptp) {
2367 			mutex_lock(&fep->ptp_clk_mutex);
2368 			clk_disable_unprepare(fep->clk_ptp);
2369 			fep->ptp_clk_on = false;
2370 			mutex_unlock(&fep->ptp_clk_mutex);
2371 		}
2372 		clk_disable_unprepare(fep->clk_ref);
2373 		clk_disable_unprepare(fep->clk_2x_txclk);
2374 	}
2375 
2376 	return 0;
2377 
2378 failed_clk_2x_txclk:
2379 	if (fep->clk_ref)
2380 		clk_disable_unprepare(fep->clk_ref);
2381 failed_clk_ref:
2382 	if (fep->clk_ptp) {
2383 		mutex_lock(&fep->ptp_clk_mutex);
2384 		clk_disable_unprepare(fep->clk_ptp);
2385 		fep->ptp_clk_on = false;
2386 		mutex_unlock(&fep->ptp_clk_mutex);
2387 	}
2388 failed_clk_ptp:
2389 	clk_disable_unprepare(fep->clk_enet_out);
2390 
2391 	return ret;
2392 }
2393 
2394 static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
2395 				      struct device_node *np)
2396 {
2397 	u32 rgmii_tx_delay, rgmii_rx_delay;
2398 
2399 	/* For rgmii tx internal delay, valid values are 0ps and 2000ps */
2400 	if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) {
2401 		if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) {
2402 			dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps");
2403 			return -EINVAL;
2404 		} else if (rgmii_tx_delay == 2000) {
2405 			fep->rgmii_txc_dly = true;
2406 		}
2407 	}
2408 
2409 	/* For rgmii rx internal delay, valid values are 0ps and 2000ps */
2410 	if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) {
2411 		if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) {
2412 			dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps");
2413 			return -EINVAL;
2414 		} else if (rgmii_rx_delay == 2000) {
2415 			fep->rgmii_rxc_dly = true;
2416 		}
2417 	}
2418 
2419 	return 0;
2420 }
2421 
2422 static int fec_enet_mii_probe(struct net_device *ndev)
2423 {
2424 	struct fec_enet_private *fep = netdev_priv(ndev);
2425 	struct phy_device *phy_dev = NULL;
2426 	char mdio_bus_id[MII_BUS_ID_SIZE];
2427 	char phy_name[MII_BUS_ID_SIZE + 3];
2428 	int phy_id;
2429 	int dev_id = fep->dev_id;
2430 
2431 	if (fep->phy_node) {
2432 		phy_dev = of_phy_connect(ndev, fep->phy_node,
2433 					 &fec_enet_adjust_link, 0,
2434 					 fep->phy_interface);
2435 		if (!phy_dev) {
2436 			netdev_err(ndev, "Unable to connect to phy\n");
2437 			return -ENODEV;
2438 		}
2439 	} else {
2440 		/* check for attached phy */
2441 		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
2442 			if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
2443 				continue;
2444 			if (dev_id--)
2445 				continue;
2446 			strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
2447 			break;
2448 		}
2449 
2450 		if (phy_id >= PHY_MAX_ADDR) {
2451 			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
2452 			strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
2453 			phy_id = 0;
2454 		}
2455 
2456 		snprintf(phy_name, sizeof(phy_name),
2457 			 PHY_ID_FMT, mdio_bus_id, phy_id);
2458 		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
2459 				      fep->phy_interface);
2460 	}
2461 
2462 	if (IS_ERR(phy_dev)) {
2463 		netdev_err(ndev, "could not attach to PHY\n");
2464 		return PTR_ERR(phy_dev);
2465 	}
2466 
2467 	/* mask with MAC supported features */
2468 	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
2469 		phy_set_max_speed(phy_dev, 1000);
2470 		phy_remove_link_mode(phy_dev,
2471 				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2472 #if !defined(CONFIG_M5272)
2473 		phy_support_sym_pause(phy_dev);
2474 #endif
2475 	}
2476 	else
2477 		phy_set_max_speed(phy_dev, 100);
2478 
2479 	if (fep->quirks & FEC_QUIRK_HAS_EEE)
2480 		phy_support_eee(phy_dev);
2481 
2482 	fep->link = 0;
2483 	fep->full_duplex = 0;
2484 
2485 	phy_attached_info(phy_dev);
2486 
2487 	return 0;
2488 }
2489 
2490 static int fec_enet_mii_init(struct platform_device *pdev)
2491 {
2492 	static struct mii_bus *fec0_mii_bus;
2493 	struct net_device *ndev = platform_get_drvdata(pdev);
2494 	struct fec_enet_private *fep = netdev_priv(ndev);
2495 	bool suppress_preamble = false;
2496 	struct phy_device *phydev;
2497 	struct device_node *node;
2498 	int err = -ENXIO;
2499 	u32 mii_speed, holdtime;
2500 	u32 bus_freq;
2501 	int addr;
2502 
2503 	/*
2504 	 * The i.MX28 dual fec interfaces are not equal.
2505 	 * Here are the differences:
2506 	 *
2507 	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
2508 	 *  - fec0 acts as the 1588 time master while fec1 is slave
2509 	 *  - external phys can only be configured by fec0
2510 	 *
2511 	 * That is to say fec1 can not work independently. It only works
2512 	 * when fec0 is working. The reason behind this design is that the
2513 	 * second interface is added primarily for Switch mode.
2514 	 *
2515 	 * Because of the last point above, both phys are attached on fec0
2516 	 * mdio interface in board design, and need to be configured by
2517 	 * fec0 mii_bus.
2518 	 */
2519 	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
2520 		/* fec1 uses fec0 mii_bus */
2521 		if (mii_cnt && fec0_mii_bus) {
2522 			fep->mii_bus = fec0_mii_bus;
2523 			mii_cnt++;
2524 			return 0;
2525 		}
2526 		return -ENOENT;
2527 	}
2528 
2529 	bus_freq = 2500000; /* 2.5MHz by default */
2530 	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
2531 	if (node) {
2532 		of_property_read_u32(node, "clock-frequency", &bus_freq);
2533 		suppress_preamble = of_property_read_bool(node,
2534 							  "suppress-preamble");
2535 	}
2536 
2537 	/*
2538 	 * Set MII speed (= clk_get_rate() / 2 * phy_speed)
2539 	 *
2540 	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
2541 	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
2542 	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
2543 	 * document.
2544 	 */
2545 	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
2546 	if (fep->quirks & FEC_QUIRK_ENET_MAC)
2547 		mii_speed--;
2548 	if (mii_speed > 63) {
2549 		dev_err(&pdev->dev,
2550 			"fec clock (%lu) too fast to get right mii speed\n",
2551 			clk_get_rate(fep->clk_ipg));
2552 		err = -EINVAL;
2553 		goto err_out;
2554 	}
2555 
2556 	/*
2557 	 * The i.MX28 and i.MX6 types have another filed in the MSCR (aka
2558 	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
2559 	 * versions are RAZ there, so just ignore the difference and write the
2560 	 * register always.
2561 	 * The minimal hold time according to IEE802.3 (clause 22) is 10 ns.
2562 	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
2563 	 * output.
2564 	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
2565 	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
2566 	 * holdtime cannot result in a value greater than 3.
2567 	 */
2568 	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2569 
2570 	fep->phy_speed = mii_speed << 1 | holdtime << 8;
2571 
2572 	if (suppress_preamble)
2573 		fep->phy_speed |= BIT(7);
2574 
2575 	if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
2576 		/* Clear MMFR to avoid to generate MII event by writing MSCR.
2577 		 * MII event generation condition:
2578 		 * - writing MSCR:
2579 		 *	- mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
2580 		 *	  mscr_reg_data_in[7:0] != 0
2581 		 * - writing MMFR:
2582 		 *	- mscr[7:0]_not_zero
2583 		 */
2584 		writel(0, fep->hwp + FEC_MII_DATA);
2585 	}
2586 
2587 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2588 
2589 	/* Clear any pending transaction complete indication */
2590 	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2591 
2592 	fep->mii_bus = mdiobus_alloc();
2593 	if (fep->mii_bus == NULL) {
2594 		err = -ENOMEM;
2595 		goto err_out;
2596 	}
2597 
2598 	fep->mii_bus->name = "fec_enet_mii_bus";
2599 	fep->mii_bus->read = fec_enet_mdio_read_c22;
2600 	fep->mii_bus->write = fec_enet_mdio_write_c22;
2601 	if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
2602 		fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
2603 		fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
2604 	}
2605 	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2606 		pdev->name, fep->dev_id + 1);
2607 	fep->mii_bus->priv = fep;
2608 	fep->mii_bus->parent = &pdev->dev;
2609 
2610 	err = of_mdiobus_register(fep->mii_bus, node);
2611 	if (err)
2612 		goto err_out_free_mdiobus;
2613 	of_node_put(node);
2614 
2615 	/* find all the PHY devices on the bus and set mac_managed_pm to true */
2616 	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
2617 		phydev = mdiobus_get_phy(fep->mii_bus, addr);
2618 		if (phydev)
2619 			phydev->mac_managed_pm = true;
2620 	}
2621 
2622 	mii_cnt++;
2623 
2624 	/* save fec0 mii_bus */
2625 	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2626 		fec0_mii_bus = fep->mii_bus;
2627 
2628 	return 0;
2629 
2630 err_out_free_mdiobus:
2631 	mdiobus_free(fep->mii_bus);
2632 err_out:
2633 	of_node_put(node);
2634 	return err;
2635 }
2636 
2637 static void fec_enet_mii_remove(struct fec_enet_private *fep)
2638 {
2639 	if (--mii_cnt == 0) {
2640 		mdiobus_unregister(fep->mii_bus);
2641 		mdiobus_free(fep->mii_bus);
2642 	}
2643 }
2644 
2645 static void fec_enet_get_drvinfo(struct net_device *ndev,
2646 				 struct ethtool_drvinfo *info)
2647 {
2648 	struct fec_enet_private *fep = netdev_priv(ndev);
2649 
2650 	strscpy(info->driver, fep->pdev->dev.driver->name,
2651 		sizeof(info->driver));
2652 	strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
2653 }
2654 
2655 static int fec_enet_get_regs_len(struct net_device *ndev)
2656 {
2657 	struct fec_enet_private *fep = netdev_priv(ndev);
2658 	struct resource *r;
2659 	int s = 0;
2660 
2661 	r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
2662 	if (r)
2663 		s = resource_size(r);
2664 
2665 	return s;
2666 }
2667 
2668 /* List of registers that can be safety be read to dump them with ethtool */
2669 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2670 	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2671 	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2672 static __u32 fec_enet_register_version = 2;
2673 static u32 fec_enet_register_offset[] = {
2674 	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2675 	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2676 	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
2677 	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
2678 	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
2679 	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
2680 	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
2681 	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
2682 	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2683 	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
2684 	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
2685 	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
2686 	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2687 	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2688 	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2689 	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2690 	RMON_T_P_GTE2048, RMON_T_OCTETS,
2691 	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2692 	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2693 	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2694 	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2695 	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2696 	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2697 	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2698 	RMON_R_P_GTE2048, RMON_R_OCTETS,
2699 	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2700 	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2701 };
2702 /* for i.MX6ul */
2703 static u32 fec_enet_register_offset_6ul[] = {
2704 	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2705 	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2706 	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
2707 	FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
2708 	FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
2709 	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2710 	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
2711 	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2712 	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2713 	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2714 	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2715 	RMON_T_P_GTE2048, RMON_T_OCTETS,
2716 	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2717 	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2718 	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2719 	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2720 	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2721 	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2722 	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2723 	RMON_R_P_GTE2048, RMON_R_OCTETS,
2724 	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2725 	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2726 };
2727 #else
2728 static __u32 fec_enet_register_version = 1;
2729 static u32 fec_enet_register_offset[] = {
2730 	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
2731 	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
2732 	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
2733 	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
2734 	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
2735 	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
2736 	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
2737 	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
2738 	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
2739 };
2740 #endif
2741 
2742 static void fec_enet_get_regs(struct net_device *ndev,
2743 			      struct ethtool_regs *regs, void *regbuf)
2744 {
2745 	struct fec_enet_private *fep = netdev_priv(ndev);
2746 	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
2747 	struct device *dev = &fep->pdev->dev;
2748 	u32 *buf = (u32 *)regbuf;
2749 	u32 i, off;
2750 	int ret;
2751 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2752 	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2753 	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2754 	u32 *reg_list;
2755 	u32 reg_cnt;
2756 
2757 	if (!of_machine_is_compatible("fsl,imx6ul")) {
2758 		reg_list = fec_enet_register_offset;
2759 		reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
2760 	} else {
2761 		reg_list = fec_enet_register_offset_6ul;
2762 		reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
2763 	}
2764 #else
2765 	/* coldfire */
2766 	static u32 *reg_list = fec_enet_register_offset;
2767 	static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
2768 #endif
2769 	ret = pm_runtime_resume_and_get(dev);
2770 	if (ret < 0)
2771 		return;
2772 
2773 	regs->version = fec_enet_register_version;
2774 
2775 	memset(buf, 0, regs->len);
2776 
2777 	for (i = 0; i < reg_cnt; i++) {
2778 		off = reg_list[i];
2779 
2780 		if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
2781 		    !(fep->quirks & FEC_QUIRK_HAS_FRREG))
2782 			continue;
2783 
2784 		off >>= 2;
2785 		buf[off] = readl(&theregs[off]);
2786 	}
2787 
2788 	pm_runtime_mark_last_busy(dev);
2789 	pm_runtime_put_autosuspend(dev);
2790 }
2791 
2792 static int fec_enet_get_ts_info(struct net_device *ndev,
2793 				struct kernel_ethtool_ts_info *info)
2794 {
2795 	struct fec_enet_private *fep = netdev_priv(ndev);
2796 
2797 	if (fep->bufdesc_ex) {
2798 
2799 		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2800 					SOF_TIMESTAMPING_TX_HARDWARE |
2801 					SOF_TIMESTAMPING_RX_HARDWARE |
2802 					SOF_TIMESTAMPING_RAW_HARDWARE;
2803 		if (fep->ptp_clock)
2804 			info->phc_index = ptp_clock_index(fep->ptp_clock);
2805 
2806 		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
2807 				 (1 << HWTSTAMP_TX_ON);
2808 
2809 		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2810 				   (1 << HWTSTAMP_FILTER_ALL);
2811 		return 0;
2812 	} else {
2813 		return ethtool_op_get_ts_info(ndev, info);
2814 	}
2815 }
2816 
2817 #if !defined(CONFIG_M5272)
2818 
2819 static void fec_enet_get_pauseparam(struct net_device *ndev,
2820 				    struct ethtool_pauseparam *pause)
2821 {
2822 	struct fec_enet_private *fep = netdev_priv(ndev);
2823 
2824 	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2825 	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2826 	pause->rx_pause = pause->tx_pause;
2827 }
2828 
2829 static int fec_enet_set_pauseparam(struct net_device *ndev,
2830 				   struct ethtool_pauseparam *pause)
2831 {
2832 	struct fec_enet_private *fep = netdev_priv(ndev);
2833 
2834 	if (!ndev->phydev)
2835 		return -ENODEV;
2836 
2837 	if (pause->tx_pause != pause->rx_pause) {
2838 		netdev_info(ndev,
2839 			"hardware only support enable/disable both tx and rx");
2840 		return -EINVAL;
2841 	}
2842 
2843 	fep->pause_flag = 0;
2844 
2845 	/* tx pause must be same as rx pause */
2846 	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2847 	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2848 
2849 	phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
2850 			  pause->autoneg);
2851 
2852 	if (pause->autoneg) {
2853 		if (netif_running(ndev))
2854 			fec_stop(ndev);
2855 		phy_start_aneg(ndev->phydev);
2856 	}
2857 	if (netif_running(ndev)) {
2858 		napi_disable(&fep->napi);
2859 		netif_tx_lock_bh(ndev);
2860 		fec_restart(ndev);
2861 		netif_tx_wake_all_queues(ndev);
2862 		netif_tx_unlock_bh(ndev);
2863 		napi_enable(&fep->napi);
2864 	}
2865 
2866 	return 0;
2867 }
2868 
2869 static const struct fec_stat {
2870 	char name[ETH_GSTRING_LEN];
2871 	u16 offset;
2872 } fec_stats[] = {
2873 	/* RMON TX */
2874 	{ "tx_dropped", RMON_T_DROP },
2875 	{ "tx_packets", RMON_T_PACKETS },
2876 	{ "tx_broadcast", RMON_T_BC_PKT },
2877 	{ "tx_multicast", RMON_T_MC_PKT },
2878 	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
2879 	{ "tx_undersize", RMON_T_UNDERSIZE },
2880 	{ "tx_oversize", RMON_T_OVERSIZE },
2881 	{ "tx_fragment", RMON_T_FRAG },
2882 	{ "tx_jabber", RMON_T_JAB },
2883 	{ "tx_collision", RMON_T_COL },
2884 	{ "tx_64byte", RMON_T_P64 },
2885 	{ "tx_65to127byte", RMON_T_P65TO127 },
2886 	{ "tx_128to255byte", RMON_T_P128TO255 },
2887 	{ "tx_256to511byte", RMON_T_P256TO511 },
2888 	{ "tx_512to1023byte", RMON_T_P512TO1023 },
2889 	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
2890 	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
2891 	{ "tx_octets", RMON_T_OCTETS },
2892 
2893 	/* IEEE TX */
2894 	{ "IEEE_tx_drop", IEEE_T_DROP },
2895 	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2896 	{ "IEEE_tx_1col", IEEE_T_1COL },
2897 	{ "IEEE_tx_mcol", IEEE_T_MCOL },
2898 	{ "IEEE_tx_def", IEEE_T_DEF },
2899 	{ "IEEE_tx_lcol", IEEE_T_LCOL },
2900 	{ "IEEE_tx_excol", IEEE_T_EXCOL },
2901 	{ "IEEE_tx_macerr", IEEE_T_MACERR },
2902 	{ "IEEE_tx_cserr", IEEE_T_CSERR },
2903 	{ "IEEE_tx_sqe", IEEE_T_SQE },
2904 	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2905 	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2906 
2907 	/* RMON RX */
2908 	{ "rx_packets", RMON_R_PACKETS },
2909 	{ "rx_broadcast", RMON_R_BC_PKT },
2910 	{ "rx_multicast", RMON_R_MC_PKT },
2911 	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
2912 	{ "rx_undersize", RMON_R_UNDERSIZE },
2913 	{ "rx_oversize", RMON_R_OVERSIZE },
2914 	{ "rx_fragment", RMON_R_FRAG },
2915 	{ "rx_jabber", RMON_R_JAB },
2916 	{ "rx_64byte", RMON_R_P64 },
2917 	{ "rx_65to127byte", RMON_R_P65TO127 },
2918 	{ "rx_128to255byte", RMON_R_P128TO255 },
2919 	{ "rx_256to511byte", RMON_R_P256TO511 },
2920 	{ "rx_512to1023byte", RMON_R_P512TO1023 },
2921 	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
2922 	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
2923 	{ "rx_octets", RMON_R_OCTETS },
2924 
2925 	/* IEEE RX */
2926 	{ "IEEE_rx_drop", IEEE_R_DROP },
2927 	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2928 	{ "IEEE_rx_crc", IEEE_R_CRC },
2929 	{ "IEEE_rx_align", IEEE_R_ALIGN },
2930 	{ "IEEE_rx_macerr", IEEE_R_MACERR },
2931 	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2932 	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2933 };
2934 
2935 #define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))
2936 
2937 static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
2938 	"rx_xdp_redirect",           /* RX_XDP_REDIRECT = 0, */
2939 	"rx_xdp_pass",               /* RX_XDP_PASS, */
2940 	"rx_xdp_drop",               /* RX_XDP_DROP, */
2941 	"rx_xdp_tx",                 /* RX_XDP_TX, */
2942 	"rx_xdp_tx_errors",          /* RX_XDP_TX_ERRORS, */
2943 	"tx_xdp_xmit",               /* TX_XDP_XMIT, */
2944 	"tx_xdp_xmit_errors",        /* TX_XDP_XMIT_ERRORS, */
2945 };
2946 
2947 static void fec_enet_update_ethtool_stats(struct net_device *dev)
2948 {
2949 	struct fec_enet_private *fep = netdev_priv(dev);
2950 	int i;
2951 
2952 	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2953 		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2954 }
2955 
2956 static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
2957 {
2958 	u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
2959 	struct fec_enet_priv_rx_q *rxq;
2960 	int i, j;
2961 
2962 	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2963 		rxq = fep->rx_queue[i];
2964 
2965 		for (j = 0; j < XDP_STATS_TOTAL; j++)
2966 			xdp_stats[j] += rxq->stats[j];
2967 	}
2968 
2969 	memcpy(data, xdp_stats, sizeof(xdp_stats));
2970 }
2971 
2972 static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
2973 {
2974 #ifdef CONFIG_PAGE_POOL_STATS
2975 	struct page_pool_stats stats = {};
2976 	struct fec_enet_priv_rx_q *rxq;
2977 	int i;
2978 
2979 	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2980 		rxq = fep->rx_queue[i];
2981 
2982 		if (!rxq->page_pool)
2983 			continue;
2984 
2985 		page_pool_get_stats(rxq->page_pool, &stats);
2986 	}
2987 
2988 	page_pool_ethtool_stats_get(data, &stats);
2989 #endif
2990 }
2991 
2992 static void fec_enet_get_ethtool_stats(struct net_device *dev,
2993 				       struct ethtool_stats *stats, u64 *data)
2994 {
2995 	struct fec_enet_private *fep = netdev_priv(dev);
2996 
2997 	if (netif_running(dev))
2998 		fec_enet_update_ethtool_stats(dev);
2999 
3000 	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
3001 	data += FEC_STATS_SIZE / sizeof(u64);
3002 
3003 	fec_enet_get_xdp_stats(fep, data);
3004 	data += XDP_STATS_TOTAL;
3005 
3006 	fec_enet_page_pool_stats(fep, data);
3007 }
3008 
3009 static void fec_enet_get_strings(struct net_device *netdev,
3010 	u32 stringset, u8 *data)
3011 {
3012 	int i;
3013 	switch (stringset) {
3014 	case ETH_SS_STATS:
3015 		for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
3016 			ethtool_puts(&data, fec_stats[i].name);
3017 		}
3018 		for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
3019 			ethtool_puts(&data, fec_xdp_stat_strs[i]);
3020 		}
3021 		page_pool_ethtool_stats_get_strings(data);
3022 
3023 		break;
3024 	case ETH_SS_TEST:
3025 		net_selftest_get_strings(data);
3026 		break;
3027 	}
3028 }
3029 
3030 static int fec_enet_get_sset_count(struct net_device *dev, int sset)
3031 {
3032 	int count;
3033 
3034 	switch (sset) {
3035 	case ETH_SS_STATS:
3036 		count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
3037 		count += page_pool_ethtool_stats_get_count();
3038 		return count;
3039 
3040 	case ETH_SS_TEST:
3041 		return net_selftest_get_count();
3042 	default:
3043 		return -EOPNOTSUPP;
3044 	}
3045 }
3046 
3047 static void fec_enet_clear_ethtool_stats(struct net_device *dev)
3048 {
3049 	struct fec_enet_private *fep = netdev_priv(dev);
3050 	struct fec_enet_priv_rx_q *rxq;
3051 	int i, j;
3052 
3053 	/* Disable MIB statistics counters */
3054 	writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
3055 
3056 	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
3057 		writel(0, fep->hwp + fec_stats[i].offset);
3058 
3059 	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
3060 		rxq = fep->rx_queue[i];
3061 		for (j = 0; j < XDP_STATS_TOTAL; j++)
3062 			rxq->stats[j] = 0;
3063 	}
3064 
3065 	/* Don't disable MIB statistics counters */
3066 	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
3067 }
3068 
3069 #else	/* !defined(CONFIG_M5272) */
3070 #define FEC_STATS_SIZE	0
3071 static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
3072 {
3073 }
3074 
3075 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
3076 {
3077 }
3078 #endif /* !defined(CONFIG_M5272) */
3079 
3080 /* ITR clock source is enet system clock (clk_ahb).
3081  * TCTT unit is cycle_ns * 64 cycle
3082  * So, the ICTT value = X us / (cycle_ns * 64)
3083  */
3084 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
3085 {
3086 	struct fec_enet_private *fep = netdev_priv(ndev);
3087 
3088 	return us * (fep->itr_clk_rate / 64000) / 1000;
3089 }
3090 
3091 /* Set threshold for interrupt coalescing */
3092 static void fec_enet_itr_coal_set(struct net_device *ndev)
3093 {
3094 	struct fec_enet_private *fep = netdev_priv(ndev);
3095 	int rx_itr, tx_itr;
3096 
3097 	/* Must be greater than zero to avoid unpredictable behavior */
3098 	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
3099 	    !fep->tx_time_itr || !fep->tx_pkts_itr)
3100 		return;
3101 
3102 	/* Select enet system clock as Interrupt Coalescing
3103 	 * timer Clock Source
3104 	 */
3105 	rx_itr = FEC_ITR_CLK_SEL;
3106 	tx_itr = FEC_ITR_CLK_SEL;
3107 
3108 	/* set ICFT and ICTT */
3109 	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
3110 	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
3111 	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
3112 	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
3113 
3114 	rx_itr |= FEC_ITR_EN;
3115 	tx_itr |= FEC_ITR_EN;
3116 
3117 	writel(tx_itr, fep->hwp + FEC_TXIC0);
3118 	writel(rx_itr, fep->hwp + FEC_RXIC0);
3119 	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
3120 		writel(tx_itr, fep->hwp + FEC_TXIC1);
3121 		writel(rx_itr, fep->hwp + FEC_RXIC1);
3122 		writel(tx_itr, fep->hwp + FEC_TXIC2);
3123 		writel(rx_itr, fep->hwp + FEC_RXIC2);
3124 	}
3125 }
3126 
3127 static int fec_enet_get_coalesce(struct net_device *ndev,
3128 				 struct ethtool_coalesce *ec,
3129 				 struct kernel_ethtool_coalesce *kernel_coal,
3130 				 struct netlink_ext_ack *extack)
3131 {
3132 	struct fec_enet_private *fep = netdev_priv(ndev);
3133 
3134 	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3135 		return -EOPNOTSUPP;
3136 
3137 	ec->rx_coalesce_usecs = fep->rx_time_itr;
3138 	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
3139 
3140 	ec->tx_coalesce_usecs = fep->tx_time_itr;
3141 	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
3142 
3143 	return 0;
3144 }
3145 
3146 static int fec_enet_set_coalesce(struct net_device *ndev,
3147 				 struct ethtool_coalesce *ec,
3148 				 struct kernel_ethtool_coalesce *kernel_coal,
3149 				 struct netlink_ext_ack *extack)
3150 {
3151 	struct fec_enet_private *fep = netdev_priv(ndev);
3152 	struct device *dev = &fep->pdev->dev;
3153 	unsigned int cycle;
3154 
3155 	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3156 		return -EOPNOTSUPP;
3157 
3158 	if (ec->rx_max_coalesced_frames > 255) {
3159 		dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
3160 		return -EINVAL;
3161 	}
3162 
3163 	if (ec->tx_max_coalesced_frames > 255) {
3164 		dev_err(dev, "Tx coalesced frame exceed hardware limitation\n");
3165 		return -EINVAL;
3166 	}
3167 
3168 	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
3169 	if (cycle > 0xFFFF) {
3170 		dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
3171 		return -EINVAL;
3172 	}
3173 
3174 	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
3175 	if (cycle > 0xFFFF) {
3176 		dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
3177 		return -EINVAL;
3178 	}
3179 
3180 	fep->rx_time_itr = ec->rx_coalesce_usecs;
3181 	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
3182 
3183 	fep->tx_time_itr = ec->tx_coalesce_usecs;
3184 	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
3185 
3186 	fec_enet_itr_coal_set(ndev);
3187 
3188 	return 0;
3189 }
3190 
3191 static int
3192 fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
3193 {
3194 	struct fec_enet_private *fep = netdev_priv(ndev);
3195 
3196 	if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3197 		return -EOPNOTSUPP;
3198 
3199 	if (!netif_running(ndev))
3200 		return -ENETDOWN;
3201 
3202 	return phy_ethtool_get_eee(ndev->phydev, edata);
3203 }
3204 
3205 static int
3206 fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
3207 {
3208 	struct fec_enet_private *fep = netdev_priv(ndev);
3209 
3210 	if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3211 		return -EOPNOTSUPP;
3212 
3213 	if (!netif_running(ndev))
3214 		return -ENETDOWN;
3215 
3216 	return phy_ethtool_set_eee(ndev->phydev, edata);
3217 }
3218 
3219 static void
3220 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3221 {
3222 	struct fec_enet_private *fep = netdev_priv(ndev);
3223 
3224 	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
3225 		wol->supported = WAKE_MAGIC;
3226 		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
3227 	} else {
3228 		wol->supported = wol->wolopts = 0;
3229 	}
3230 }
3231 
3232 static int
3233 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3234 {
3235 	struct fec_enet_private *fep = netdev_priv(ndev);
3236 
3237 	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
3238 		return -EINVAL;
3239 
3240 	if (wol->wolopts & ~WAKE_MAGIC)
3241 		return -EINVAL;
3242 
3243 	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
3244 	if (device_may_wakeup(&ndev->dev))
3245 		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
3246 	else
3247 		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
3248 
3249 	return 0;
3250 }
3251 
3252 static const struct ethtool_ops fec_enet_ethtool_ops = {
3253 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3254 				     ETHTOOL_COALESCE_MAX_FRAMES,
3255 	.get_drvinfo		= fec_enet_get_drvinfo,
3256 	.get_regs_len		= fec_enet_get_regs_len,
3257 	.get_regs		= fec_enet_get_regs,
3258 	.nway_reset		= phy_ethtool_nway_reset,
3259 	.get_link		= ethtool_op_get_link,
3260 	.get_coalesce		= fec_enet_get_coalesce,
3261 	.set_coalesce		= fec_enet_set_coalesce,
3262 #ifndef CONFIG_M5272
3263 	.get_pauseparam		= fec_enet_get_pauseparam,
3264 	.set_pauseparam		= fec_enet_set_pauseparam,
3265 	.get_strings		= fec_enet_get_strings,
3266 	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
3267 	.get_sset_count		= fec_enet_get_sset_count,
3268 #endif
3269 	.get_ts_info		= fec_enet_get_ts_info,
3270 	.get_wol		= fec_enet_get_wol,
3271 	.set_wol		= fec_enet_set_wol,
3272 	.get_eee		= fec_enet_get_eee,
3273 	.set_eee		= fec_enet_set_eee,
3274 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
3275 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
3276 	.self_test		= net_selftest,
3277 };
3278 
3279 static void fec_enet_free_buffers(struct net_device *ndev)
3280 {
3281 	struct fec_enet_private *fep = netdev_priv(ndev);
3282 	unsigned int i;
3283 	struct fec_enet_priv_tx_q *txq;
3284 	struct fec_enet_priv_rx_q *rxq;
3285 	unsigned int q;
3286 
3287 	for (q = 0; q < fep->num_rx_queues; q++) {
3288 		rxq = fep->rx_queue[q];
3289 		for (i = 0; i < rxq->bd.ring_size; i++)
3290 			page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
3291 
3292 		for (i = 0; i < XDP_STATS_TOTAL; i++)
3293 			rxq->stats[i] = 0;
3294 
3295 		if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
3296 			xdp_rxq_info_unreg(&rxq->xdp_rxq);
3297 		page_pool_destroy(rxq->page_pool);
3298 		rxq->page_pool = NULL;
3299 	}
3300 
3301 	for (q = 0; q < fep->num_tx_queues; q++) {
3302 		txq = fep->tx_queue[q];
3303 		for (i = 0; i < txq->bd.ring_size; i++) {
3304 			kfree(txq->tx_bounce[i]);
3305 			txq->tx_bounce[i] = NULL;
3306 
3307 			if (!txq->tx_buf[i].buf_p) {
3308 				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3309 				continue;
3310 			}
3311 
3312 			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
3313 				dev_kfree_skb(txq->tx_buf[i].buf_p);
3314 			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
3315 				xdp_return_frame(txq->tx_buf[i].buf_p);
3316 			} else {
3317 				struct page *page = txq->tx_buf[i].buf_p;
3318 
3319 				page_pool_put_page(page->pp, page, 0, false);
3320 			}
3321 
3322 			txq->tx_buf[i].buf_p = NULL;
3323 			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3324 		}
3325 	}
3326 }
3327 
3328 static void fec_enet_free_queue(struct net_device *ndev)
3329 {
3330 	struct fec_enet_private *fep = netdev_priv(ndev);
3331 	int i;
3332 	struct fec_enet_priv_tx_q *txq;
3333 
3334 	for (i = 0; i < fep->num_tx_queues; i++)
3335 		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
3336 			txq = fep->tx_queue[i];
3337 			fec_dma_free(&fep->pdev->dev,
3338 				     txq->bd.ring_size * TSO_HEADER_SIZE,
3339 				     txq->tso_hdrs, txq->tso_hdrs_dma);
3340 		}
3341 
3342 	for (i = 0; i < fep->num_rx_queues; i++)
3343 		kfree(fep->rx_queue[i]);
3344 	for (i = 0; i < fep->num_tx_queues; i++)
3345 		kfree(fep->tx_queue[i]);
3346 }
3347 
3348 static int fec_enet_alloc_queue(struct net_device *ndev)
3349 {
3350 	struct fec_enet_private *fep = netdev_priv(ndev);
3351 	int i;
3352 	int ret = 0;
3353 	struct fec_enet_priv_tx_q *txq;
3354 
3355 	for (i = 0; i < fep->num_tx_queues; i++) {
3356 		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
3357 		if (!txq) {
3358 			ret = -ENOMEM;
3359 			goto alloc_failed;
3360 		}
3361 
3362 		fep->tx_queue[i] = txq;
3363 		txq->bd.ring_size = TX_RING_SIZE;
3364 		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
3365 
3366 		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
3367 		txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
3368 
3369 		txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev,
3370 					txq->bd.ring_size * TSO_HEADER_SIZE,
3371 					&txq->tso_hdrs_dma, GFP_KERNEL);
3372 		if (!txq->tso_hdrs) {
3373 			ret = -ENOMEM;
3374 			goto alloc_failed;
3375 		}
3376 	}
3377 
3378 	for (i = 0; i < fep->num_rx_queues; i++) {
3379 		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
3380 					   GFP_KERNEL);
3381 		if (!fep->rx_queue[i]) {
3382 			ret = -ENOMEM;
3383 			goto alloc_failed;
3384 		}
3385 
3386 		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
3387 		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
3388 	}
3389 	return ret;
3390 
3391 alloc_failed:
3392 	fec_enet_free_queue(ndev);
3393 	return ret;
3394 }
3395 
3396 static int
3397 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
3398 {
3399 	struct fec_enet_private *fep = netdev_priv(ndev);
3400 	struct fec_enet_priv_rx_q *rxq;
3401 	dma_addr_t phys_addr;
3402 	struct bufdesc	*bdp;
3403 	struct page *page;
3404 	int i, err;
3405 
3406 	rxq = fep->rx_queue[queue];
3407 	bdp = rxq->bd.base;
3408 
3409 	err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
3410 	if (err < 0) {
3411 		netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
3412 		return err;
3413 	}
3414 
3415 	for (i = 0; i < rxq->bd.ring_size; i++) {
3416 		page = page_pool_dev_alloc_pages(rxq->page_pool);
3417 		if (!page)
3418 			goto err_alloc;
3419 
3420 		phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
3421 		bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
3422 
3423 		rxq->rx_skb_info[i].page = page;
3424 		rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
3425 		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
3426 
3427 		if (fep->bufdesc_ex) {
3428 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3429 			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
3430 		}
3431 
3432 		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
3433 	}
3434 
3435 	/* Set the last buffer to wrap. */
3436 	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
3437 	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
3438 	return 0;
3439 
3440  err_alloc:
3441 	fec_enet_free_buffers(ndev);
3442 	return -ENOMEM;
3443 }
3444 
3445 static int
3446 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
3447 {
3448 	struct fec_enet_private *fep = netdev_priv(ndev);
3449 	unsigned int i;
3450 	struct bufdesc  *bdp;
3451 	struct fec_enet_priv_tx_q *txq;
3452 
3453 	txq = fep->tx_queue[queue];
3454 	bdp = txq->bd.base;
3455 	for (i = 0; i < txq->bd.ring_size; i++) {
3456 		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
3457 		if (!txq->tx_bounce[i])
3458 			goto err_alloc;
3459 
3460 		bdp->cbd_sc = cpu_to_fec16(0);
3461 		bdp->cbd_bufaddr = cpu_to_fec32(0);
3462 
3463 		if (fep->bufdesc_ex) {
3464 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3465 			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
3466 		}
3467 
3468 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3469 	}
3470 
3471 	/* Set the last buffer to wrap. */
3472 	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
3473 	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
3474 
3475 	return 0;
3476 
3477  err_alloc:
3478 	fec_enet_free_buffers(ndev);
3479 	return -ENOMEM;
3480 }
3481 
3482 static int fec_enet_alloc_buffers(struct net_device *ndev)
3483 {
3484 	struct fec_enet_private *fep = netdev_priv(ndev);
3485 	unsigned int i;
3486 
3487 	for (i = 0; i < fep->num_rx_queues; i++)
3488 		if (fec_enet_alloc_rxq_buffers(ndev, i))
3489 			return -ENOMEM;
3490 
3491 	for (i = 0; i < fep->num_tx_queues; i++)
3492 		if (fec_enet_alloc_txq_buffers(ndev, i))
3493 			return -ENOMEM;
3494 	return 0;
3495 }
3496 
3497 static int
3498 fec_enet_open(struct net_device *ndev)
3499 {
3500 	struct fec_enet_private *fep = netdev_priv(ndev);
3501 	int ret;
3502 	bool reset_again;
3503 
3504 	ret = pm_runtime_resume_and_get(&fep->pdev->dev);
3505 	if (ret < 0)
3506 		return ret;
3507 
3508 	pinctrl_pm_select_default_state(&fep->pdev->dev);
3509 	ret = fec_enet_clk_enable(ndev, true);
3510 	if (ret)
3511 		goto clk_enable;
3512 
3513 	/* During the first fec_enet_open call the PHY isn't probed at this
3514 	 * point. Therefore the phy_reset_after_clk_enable() call within
3515 	 * fec_enet_clk_enable() fails. As we need this reset in order to be
3516 	 * sure the PHY is working correctly we check if we need to reset again
3517 	 * later when the PHY is probed
3518 	 */
3519 	if (ndev->phydev && ndev->phydev->drv)
3520 		reset_again = false;
3521 	else
3522 		reset_again = true;
3523 
3524 	/* I should reset the ring buffers here, but I don't yet know
3525 	 * a simple way to do that.
3526 	 */
3527 
3528 	ret = fec_enet_alloc_buffers(ndev);
3529 	if (ret)
3530 		goto err_enet_alloc;
3531 
3532 	/* Init MAC prior to mii bus probe */
3533 	fec_restart(ndev);
3534 
3535 	/* Call phy_reset_after_clk_enable() again if it failed during
3536 	 * phy_reset_after_clk_enable() before because the PHY wasn't probed.
3537 	 */
3538 	if (reset_again)
3539 		fec_enet_phy_reset_after_clk_enable(ndev);
3540 
3541 	/* Probe and connect to PHY when open the interface */
3542 	ret = fec_enet_mii_probe(ndev);
3543 	if (ret)
3544 		goto err_enet_mii_probe;
3545 
3546 	if (fep->quirks & FEC_QUIRK_ERR006687)
3547 		imx6q_cpuidle_fec_irqs_used();
3548 
3549 	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3550 		cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
3551 
3552 	napi_enable(&fep->napi);
3553 	phy_start(ndev->phydev);
3554 	netif_tx_start_all_queues(ndev);
3555 
3556 	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
3557 				 FEC_WOL_FLAG_ENABLE);
3558 
3559 	return 0;
3560 
3561 err_enet_mii_probe:
3562 	fec_enet_free_buffers(ndev);
3563 err_enet_alloc:
3564 	fec_enet_clk_enable(ndev, false);
3565 clk_enable:
3566 	pm_runtime_mark_last_busy(&fep->pdev->dev);
3567 	pm_runtime_put_autosuspend(&fep->pdev->dev);
3568 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3569 	return ret;
3570 }
3571 
3572 static int
3573 fec_enet_close(struct net_device *ndev)
3574 {
3575 	struct fec_enet_private *fep = netdev_priv(ndev);
3576 
3577 	phy_stop(ndev->phydev);
3578 
3579 	if (netif_device_present(ndev)) {
3580 		napi_disable(&fep->napi);
3581 		netif_tx_disable(ndev);
3582 		fec_stop(ndev);
3583 	}
3584 
3585 	phy_disconnect(ndev->phydev);
3586 
3587 	if (fep->quirks & FEC_QUIRK_ERR006687)
3588 		imx6q_cpuidle_fec_irqs_unused();
3589 
3590 	fec_enet_update_ethtool_stats(ndev);
3591 
3592 	fec_enet_clk_enable(ndev, false);
3593 	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3594 		cpu_latency_qos_remove_request(&fep->pm_qos_req);
3595 
3596 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3597 	pm_runtime_mark_last_busy(&fep->pdev->dev);
3598 	pm_runtime_put_autosuspend(&fep->pdev->dev);
3599 
3600 	fec_enet_free_buffers(ndev);
3601 
3602 	return 0;
3603 }
3604 
3605 /* Set or clear the multicast filter for this adaptor.
3606  * Skeleton taken from sunlance driver.
3607  * The CPM Ethernet implementation allows Multicast as well as individual
3608  * MAC address filtering.  Some of the drivers check to make sure it is
3609  * a group multicast address, and discard those that are not.  I guess I
3610  * will do the same for now, but just remove the test if you want
3611  * individual filtering as well (do the upper net layers want or support
3612  * this kind of feature?).
3613  */
3614 
3615 #define FEC_HASH_BITS	6		/* #bits in hash */
3616 
3617 static void set_multicast_list(struct net_device *ndev)
3618 {
3619 	struct fec_enet_private *fep = netdev_priv(ndev);
3620 	struct netdev_hw_addr *ha;
3621 	unsigned int crc, tmp;
3622 	unsigned char hash;
3623 	unsigned int hash_high = 0, hash_low = 0;
3624 
3625 	if (ndev->flags & IFF_PROMISC) {
3626 		tmp = readl(fep->hwp + FEC_R_CNTRL);
3627 		tmp |= 0x8;
3628 		writel(tmp, fep->hwp + FEC_R_CNTRL);
3629 		return;
3630 	}
3631 
3632 	tmp = readl(fep->hwp + FEC_R_CNTRL);
3633 	tmp &= ~0x8;
3634 	writel(tmp, fep->hwp + FEC_R_CNTRL);
3635 
3636 	if (ndev->flags & IFF_ALLMULTI) {
3637 		/* Catch all multicast addresses, so set the
3638 		 * filter to all 1's
3639 		 */
3640 		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3641 		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3642 
3643 		return;
3644 	}
3645 
3646 	/* Add the addresses in hash register */
3647 	netdev_for_each_mc_addr(ha, ndev) {
3648 		/* calculate crc32 value of mac address */
3649 		crc = ether_crc_le(ndev->addr_len, ha->addr);
3650 
3651 		/* only upper 6 bits (FEC_HASH_BITS) are used
3652 		 * which point to specific bit in the hash registers
3653 		 */
3654 		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
3655 
3656 		if (hash > 31)
3657 			hash_high |= 1 << (hash - 32);
3658 		else
3659 			hash_low |= 1 << hash;
3660 	}
3661 
3662 	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3663 	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3664 }
3665 
3666 /* Set a MAC change in hardware. */
3667 static int
3668 fec_set_mac_address(struct net_device *ndev, void *p)
3669 {
3670 	struct fec_enet_private *fep = netdev_priv(ndev);
3671 	struct sockaddr *addr = p;
3672 
3673 	if (addr) {
3674 		if (!is_valid_ether_addr(addr->sa_data))
3675 			return -EADDRNOTAVAIL;
3676 		eth_hw_addr_set(ndev, addr->sa_data);
3677 	}
3678 
3679 	/* Add netif status check here to avoid system hang in below case:
3680 	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
3681 	 * After ethx down, fec all clocks are gated off and then register
3682 	 * access causes system hang.
3683 	 */
3684 	if (!netif_running(ndev))
3685 		return 0;
3686 
3687 	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
3688 		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
3689 		fep->hwp + FEC_ADDR_LOW);
3690 	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
3691 		fep->hwp + FEC_ADDR_HIGH);
3692 	return 0;
3693 }
3694 
3695 static inline void fec_enet_set_netdev_features(struct net_device *netdev,
3696 	netdev_features_t features)
3697 {
3698 	struct fec_enet_private *fep = netdev_priv(netdev);
3699 	netdev_features_t changed = features ^ netdev->features;
3700 
3701 	netdev->features = features;
3702 
3703 	/* Receive checksum has been changed */
3704 	if (changed & NETIF_F_RXCSUM) {
3705 		if (features & NETIF_F_RXCSUM)
3706 			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3707 		else
3708 			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3709 	}
3710 }
3711 
3712 static int fec_set_features(struct net_device *netdev,
3713 	netdev_features_t features)
3714 {
3715 	struct fec_enet_private *fep = netdev_priv(netdev);
3716 	netdev_features_t changed = features ^ netdev->features;
3717 
3718 	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
3719 		napi_disable(&fep->napi);
3720 		netif_tx_lock_bh(netdev);
3721 		fec_stop(netdev);
3722 		fec_enet_set_netdev_features(netdev, features);
3723 		fec_restart(netdev);
3724 		netif_tx_wake_all_queues(netdev);
3725 		netif_tx_unlock_bh(netdev);
3726 		napi_enable(&fep->napi);
3727 	} else {
3728 		fec_enet_set_netdev_features(netdev, features);
3729 	}
3730 
3731 	return 0;
3732 }
3733 
3734 static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
3735 				 struct net_device *sb_dev)
3736 {
3737 	struct fec_enet_private *fep = netdev_priv(ndev);
3738 	u16 vlan_tag = 0;
3739 
3740 	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
3741 		return netdev_pick_tx(ndev, skb, NULL);
3742 
3743 	/* VLAN is present in the payload.*/
3744 	if (eth_type_vlan(skb->protocol)) {
3745 		struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
3746 
3747 		vlan_tag = ntohs(vhdr->h_vlan_TCI);
3748 	/*  VLAN is present in the skb but not yet pushed in the payload.*/
3749 	} else if (skb_vlan_tag_present(skb)) {
3750 		vlan_tag = skb->vlan_tci;
3751 	} else {
3752 		return vlan_tag;
3753 	}
3754 
3755 	return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
3756 }
3757 
3758 static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
3759 {
3760 	struct fec_enet_private *fep = netdev_priv(dev);
3761 	bool is_run = netif_running(dev);
3762 	struct bpf_prog *old_prog;
3763 
3764 	switch (bpf->command) {
3765 	case XDP_SETUP_PROG:
3766 		/* No need to support the SoCs that require to
3767 		 * do the frame swap because the performance wouldn't be
3768 		 * better than the skb mode.
3769 		 */
3770 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
3771 			return -EOPNOTSUPP;
3772 
3773 		if (!bpf->prog)
3774 			xdp_features_clear_redirect_target(dev);
3775 
3776 		if (is_run) {
3777 			napi_disable(&fep->napi);
3778 			netif_tx_disable(dev);
3779 		}
3780 
3781 		old_prog = xchg(&fep->xdp_prog, bpf->prog);
3782 		if (old_prog)
3783 			bpf_prog_put(old_prog);
3784 
3785 		fec_restart(dev);
3786 
3787 		if (is_run) {
3788 			napi_enable(&fep->napi);
3789 			netif_tx_start_all_queues(dev);
3790 		}
3791 
3792 		if (bpf->prog)
3793 			xdp_features_set_redirect_target(dev, false);
3794 
3795 		return 0;
3796 
3797 	case XDP_SETUP_XSK_POOL:
3798 		return -EOPNOTSUPP;
3799 
3800 	default:
3801 		return -EOPNOTSUPP;
3802 	}
3803 }
3804 
3805 static int
3806 fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
3807 {
3808 	if (unlikely(index < 0))
3809 		return 0;
3810 
3811 	return (index % fep->num_tx_queues);
3812 }
3813 
3814 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
3815 				   struct fec_enet_priv_tx_q *txq,
3816 				   void *frame, u32 dma_sync_len,
3817 				   bool ndo_xmit)
3818 {
3819 	unsigned int index, status, estatus;
3820 	struct bufdesc *bdp;
3821 	dma_addr_t dma_addr;
3822 	int entries_free;
3823 	u16 frame_len;
3824 
3825 	entries_free = fec_enet_get_free_txdesc_num(txq);
3826 	if (entries_free < MAX_SKB_FRAGS + 1) {
3827 		netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
3828 		return -EBUSY;
3829 	}
3830 
3831 	/* Fill in a Tx ring entry */
3832 	bdp = txq->bd.cur;
3833 	status = fec16_to_cpu(bdp->cbd_sc);
3834 	status &= ~BD_ENET_TX_STATS;
3835 
3836 	index = fec_enet_get_bd_index(bdp, &txq->bd);
3837 
3838 	if (ndo_xmit) {
3839 		struct xdp_frame *xdpf = frame;
3840 
3841 		dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
3842 					  xdpf->len, DMA_TO_DEVICE);
3843 		if (dma_mapping_error(&fep->pdev->dev, dma_addr))
3844 			return -ENOMEM;
3845 
3846 		frame_len = xdpf->len;
3847 		txq->tx_buf[index].buf_p = xdpf;
3848 		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
3849 	} else {
3850 		struct xdp_buff *xdpb = frame;
3851 		struct page *page;
3852 
3853 		page = virt_to_page(xdpb->data);
3854 		dma_addr = page_pool_get_dma_addr(page) +
3855 			   (xdpb->data - xdpb->data_hard_start);
3856 		dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
3857 					   dma_sync_len, DMA_BIDIRECTIONAL);
3858 		frame_len = xdpb->data_end - xdpb->data;
3859 		txq->tx_buf[index].buf_p = page;
3860 		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
3861 	}
3862 
3863 	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
3864 	if (fep->bufdesc_ex)
3865 		estatus = BD_ENET_TX_INT;
3866 
3867 	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
3868 	bdp->cbd_datlen = cpu_to_fec16(frame_len);
3869 
3870 	if (fep->bufdesc_ex) {
3871 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3872 
3873 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
3874 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
3875 
3876 		ebdp->cbd_bdu = 0;
3877 		ebdp->cbd_esc = cpu_to_fec32(estatus);
3878 	}
3879 
3880 	/* Make sure the updates to rest of the descriptor are performed before
3881 	 * transferring ownership.
3882 	 */
3883 	dma_wmb();
3884 
3885 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
3886 	 * it's the last BD of the frame, and to put the CRC on the end.
3887 	 */
3888 	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
3889 	bdp->cbd_sc = cpu_to_fec16(status);
3890 
3891 	/* If this was the last BD in the ring, start at the beginning again. */
3892 	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3893 
3894 	/* Make sure the update to bdp are performed before txq->bd.cur. */
3895 	dma_wmb();
3896 
3897 	txq->bd.cur = bdp;
3898 
3899 	/* Trigger transmission start */
3900 	writel(0, txq->bd.reg_desc_active);
3901 
3902 	return 0;
3903 }
3904 
3905 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
3906 				int cpu, struct xdp_buff *xdp,
3907 				u32 dma_sync_len)
3908 {
3909 	struct fec_enet_priv_tx_q *txq;
3910 	struct netdev_queue *nq;
3911 	int queue, ret;
3912 
3913 	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3914 	txq = fep->tx_queue[queue];
3915 	nq = netdev_get_tx_queue(fep->netdev, queue);
3916 
3917 	__netif_tx_lock(nq, cpu);
3918 
3919 	/* Avoid tx timeout as XDP shares the queue with kernel stack */
3920 	txq_trans_cond_update(nq);
3921 	ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
3922 
3923 	__netif_tx_unlock(nq);
3924 
3925 	return ret;
3926 }
3927 
3928 static int fec_enet_xdp_xmit(struct net_device *dev,
3929 			     int num_frames,
3930 			     struct xdp_frame **frames,
3931 			     u32 flags)
3932 {
3933 	struct fec_enet_private *fep = netdev_priv(dev);
3934 	struct fec_enet_priv_tx_q *txq;
3935 	int cpu = smp_processor_id();
3936 	unsigned int sent_frames = 0;
3937 	struct netdev_queue *nq;
3938 	unsigned int queue;
3939 	int i;
3940 
3941 	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3942 	txq = fep->tx_queue[queue];
3943 	nq = netdev_get_tx_queue(fep->netdev, queue);
3944 
3945 	__netif_tx_lock(nq, cpu);
3946 
3947 	/* Avoid tx timeout as XDP shares the queue with kernel stack */
3948 	txq_trans_cond_update(nq);
3949 	for (i = 0; i < num_frames; i++) {
3950 		if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
3951 			break;
3952 		sent_frames++;
3953 	}
3954 
3955 	__netif_tx_unlock(nq);
3956 
3957 	return sent_frames;
3958 }
3959 
3960 static int fec_hwtstamp_get(struct net_device *ndev,
3961 			    struct kernel_hwtstamp_config *config)
3962 {
3963 	struct fec_enet_private *fep = netdev_priv(ndev);
3964 
3965 	if (!netif_running(ndev))
3966 		return -EINVAL;
3967 
3968 	if (!fep->bufdesc_ex)
3969 		return -EOPNOTSUPP;
3970 
3971 	fec_ptp_get(ndev, config);
3972 
3973 	return 0;
3974 }
3975 
3976 static int fec_hwtstamp_set(struct net_device *ndev,
3977 			    struct kernel_hwtstamp_config *config,
3978 			    struct netlink_ext_ack *extack)
3979 {
3980 	struct fec_enet_private *fep = netdev_priv(ndev);
3981 
3982 	if (!netif_running(ndev))
3983 		return -EINVAL;
3984 
3985 	if (!fep->bufdesc_ex)
3986 		return -EOPNOTSUPP;
3987 
3988 	return fec_ptp_set(ndev, config, extack);
3989 }
3990 
3991 static const struct net_device_ops fec_netdev_ops = {
3992 	.ndo_open		= fec_enet_open,
3993 	.ndo_stop		= fec_enet_close,
3994 	.ndo_start_xmit		= fec_enet_start_xmit,
3995 	.ndo_select_queue       = fec_enet_select_queue,
3996 	.ndo_set_rx_mode	= set_multicast_list,
3997 	.ndo_validate_addr	= eth_validate_addr,
3998 	.ndo_tx_timeout		= fec_timeout,
3999 	.ndo_set_mac_address	= fec_set_mac_address,
4000 	.ndo_eth_ioctl		= phy_do_ioctl_running,
4001 	.ndo_set_features	= fec_set_features,
4002 	.ndo_bpf		= fec_enet_bpf,
4003 	.ndo_xdp_xmit		= fec_enet_xdp_xmit,
4004 	.ndo_hwtstamp_get	= fec_hwtstamp_get,
4005 	.ndo_hwtstamp_set	= fec_hwtstamp_set,
4006 };
4007 
4008 static const unsigned short offset_des_active_rxq[] = {
4009 	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
4010 };
4011 
4012 static const unsigned short offset_des_active_txq[] = {
4013 	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
4014 };
4015 
4016  /*
4017   * XXX:  We need to clean up on failure exits here.
4018   *
4019   */
4020 static int fec_enet_init(struct net_device *ndev)
4021 {
4022 	struct fec_enet_private *fep = netdev_priv(ndev);
4023 	struct bufdesc *cbd_base;
4024 	dma_addr_t bd_dma;
4025 	int bd_size;
4026 	unsigned int i;
4027 	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
4028 			sizeof(struct bufdesc);
4029 	unsigned dsize_log2 = __fls(dsize);
4030 	int ret;
4031 
4032 	WARN_ON(dsize != (1 << dsize_log2));
4033 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
4034 	fep->rx_align = 0xf;
4035 	fep->tx_align = 0xf;
4036 #else
4037 	fep->rx_align = 0x3;
4038 	fep->tx_align = 0x3;
4039 #endif
4040 	fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4041 	fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4042 	fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
4043 	fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;
4044 
4045 	/* Check mask of the streaming and coherent API */
4046 	ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
4047 	if (ret < 0) {
4048 		dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
4049 		return ret;
4050 	}
4051 
4052 	ret = fec_enet_alloc_queue(ndev);
4053 	if (ret)
4054 		return ret;
4055 
4056 	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
4057 
4058 	/* Allocate memory for buffer descriptors. */
4059 	cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma,
4060 				  GFP_KERNEL);
4061 	if (!cbd_base) {
4062 		ret = -ENOMEM;
4063 		goto free_queue_mem;
4064 	}
4065 
4066 	/* Get the Ethernet address */
4067 	ret = fec_get_mac(ndev);
4068 	if (ret)
4069 		goto free_queue_mem;
4070 
4071 	/* Set receive and transmit descriptor base. */
4072 	for (i = 0; i < fep->num_rx_queues; i++) {
4073 		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
4074 		unsigned size = dsize * rxq->bd.ring_size;
4075 
4076 		rxq->bd.qid = i;
4077 		rxq->bd.base = cbd_base;
4078 		rxq->bd.cur = cbd_base;
4079 		rxq->bd.dma = bd_dma;
4080 		rxq->bd.dsize = dsize;
4081 		rxq->bd.dsize_log2 = dsize_log2;
4082 		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
4083 		bd_dma += size;
4084 		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
4085 		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
4086 	}
4087 
4088 	for (i = 0; i < fep->num_tx_queues; i++) {
4089 		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
4090 		unsigned size = dsize * txq->bd.ring_size;
4091 
4092 		txq->bd.qid = i;
4093 		txq->bd.base = cbd_base;
4094 		txq->bd.cur = cbd_base;
4095 		txq->bd.dma = bd_dma;
4096 		txq->bd.dsize = dsize;
4097 		txq->bd.dsize_log2 = dsize_log2;
4098 		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
4099 		bd_dma += size;
4100 		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
4101 		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
4102 	}
4103 
4104 
4105 	/* The FEC Ethernet specific entries in the device structure */
4106 	ndev->watchdog_timeo = TX_TIMEOUT;
4107 	ndev->netdev_ops = &fec_netdev_ops;
4108 	ndev->ethtool_ops = &fec_enet_ethtool_ops;
4109 
4110 	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
4111 	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
4112 
4113 	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
4114 		/* enable hw VLAN support */
4115 		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4116 
4117 	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
4118 		netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);
4119 
4120 		/* enable hw accelerator */
4121 		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
4122 				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
4123 		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
4124 	}
4125 
4126 	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
4127 		fep->tx_align = 0;
4128 		fep->rx_align = 0x3f;
4129 	}
4130 
4131 	ndev->hw_features = ndev->features;
4132 
4133 	if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
4134 		ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
4135 				     NETDEV_XDP_ACT_REDIRECT;
4136 
4137 	fec_restart(ndev);
4138 
4139 	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
4140 		fec_enet_clear_ethtool_stats(ndev);
4141 	else
4142 		fec_enet_update_ethtool_stats(ndev);
4143 
4144 	return 0;
4145 
4146 free_queue_mem:
4147 	fec_enet_free_queue(ndev);
4148 	return ret;
4149 }
4150 
4151 static void fec_enet_deinit(struct net_device *ndev)
4152 {
4153 	struct fec_enet_private *fep = netdev_priv(ndev);
4154 
4155 	netif_napi_del(&fep->napi);
4156 	fec_enet_free_queue(ndev);
4157 }
4158 
4159 #ifdef CONFIG_OF
4160 static int fec_reset_phy(struct platform_device *pdev)
4161 {
4162 	struct gpio_desc *phy_reset;
4163 	int msec = 1, phy_post_delay = 0;
4164 	struct device_node *np = pdev->dev.of_node;
4165 	int err;
4166 
4167 	if (!np)
4168 		return 0;
4169 
4170 	err = of_property_read_u32(np, "phy-reset-duration", &msec);
4171 	/* A sane reset duration should not be longer than 1s */
4172 	if (!err && msec > 1000)
4173 		msec = 1;
4174 
4175 	err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
4176 	/* valid reset duration should be less than 1s */
4177 	if (!err && phy_post_delay > 1000)
4178 		return -EINVAL;
4179 
4180 	phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset",
4181 					    GPIOD_OUT_HIGH);
4182 	if (IS_ERR(phy_reset))
4183 		return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset),
4184 				     "failed to get phy-reset-gpios\n");
4185 
4186 	if (!phy_reset)
4187 		return 0;
4188 
4189 	if (msec > 20)
4190 		msleep(msec);
4191 	else
4192 		usleep_range(msec * 1000, msec * 1000 + 1000);
4193 
4194 	gpiod_set_value_cansleep(phy_reset, 0);
4195 
4196 	if (!phy_post_delay)
4197 		return 0;
4198 
4199 	if (phy_post_delay > 20)
4200 		msleep(phy_post_delay);
4201 	else
4202 		usleep_range(phy_post_delay * 1000,
4203 			     phy_post_delay * 1000 + 1000);
4204 
4205 	return 0;
4206 }
4207 #else /* CONFIG_OF */
4208 static int fec_reset_phy(struct platform_device *pdev)
4209 {
4210 	/*
4211 	 * In case of platform probe, the reset has been done
4212 	 * by machine code.
4213 	 */
4214 	return 0;
4215 }
4216 #endif /* CONFIG_OF */
4217 
4218 static void
4219 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
4220 {
4221 	struct device_node *np = pdev->dev.of_node;
4222 
4223 	*num_tx = *num_rx = 1;
4224 
4225 	if (!np || !of_device_is_available(np))
4226 		return;
4227 
4228 	/* parse the num of tx and rx queues */
4229 	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
4230 
4231 	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
4232 
4233 	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
4234 		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
4235 			 *num_tx);
4236 		*num_tx = 1;
4237 		return;
4238 	}
4239 
4240 	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
4241 		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
4242 			 *num_rx);
4243 		*num_rx = 1;
4244 		return;
4245 	}
4246 
4247 }
4248 
4249 static int fec_enet_get_irq_cnt(struct platform_device *pdev)
4250 {
4251 	int irq_cnt = platform_irq_count(pdev);
4252 
4253 	if (irq_cnt > FEC_IRQ_NUM)
4254 		irq_cnt = FEC_IRQ_NUM;	/* last for pps */
4255 	else if (irq_cnt == 2)
4256 		irq_cnt = 1;	/* last for pps */
4257 	else if (irq_cnt <= 0)
4258 		irq_cnt = 1;	/* At least 1 irq is needed */
4259 	return irq_cnt;
4260 }
4261 
4262 static void fec_enet_get_wakeup_irq(struct platform_device *pdev)
4263 {
4264 	struct net_device *ndev = platform_get_drvdata(pdev);
4265 	struct fec_enet_private *fep = netdev_priv(ndev);
4266 
4267 	if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2)
4268 		fep->wake_irq = fep->irq[2];
4269 	else
4270 		fep->wake_irq = fep->irq[0];
4271 }
4272 
4273 static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
4274 				   struct device_node *np)
4275 {
4276 	struct device_node *gpr_np;
4277 	u32 out_val[3];
4278 	int ret = 0;
4279 
4280 	gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
4281 	if (!gpr_np)
4282 		return 0;
4283 
4284 	ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
4285 					 ARRAY_SIZE(out_val));
4286 	if (ret) {
4287 		dev_dbg(&fep->pdev->dev, "no stop mode property\n");
4288 		goto out;
4289 	}
4290 
4291 	fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
4292 	if (IS_ERR(fep->stop_gpr.gpr)) {
4293 		dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
4294 		ret = PTR_ERR(fep->stop_gpr.gpr);
4295 		fep->stop_gpr.gpr = NULL;
4296 		goto out;
4297 	}
4298 
4299 	fep->stop_gpr.reg = out_val[1];
4300 	fep->stop_gpr.bit = out_val[2];
4301 
4302 out:
4303 	of_node_put(gpr_np);
4304 
4305 	return ret;
4306 }
4307 
4308 static int
4309 fec_probe(struct platform_device *pdev)
4310 {
4311 	struct fec_enet_private *fep;
4312 	struct fec_platform_data *pdata;
4313 	phy_interface_t interface;
4314 	struct net_device *ndev;
4315 	int i, irq, ret = 0;
4316 	static int dev_id;
4317 	struct device_node *np = pdev->dev.of_node, *phy_node;
4318 	int num_tx_qs;
4319 	int num_rx_qs;
4320 	char irq_name[8];
4321 	int irq_cnt;
4322 	const struct fec_devinfo *dev_info;
4323 
4324 	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
4325 
4326 	/* Init network device */
4327 	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
4328 				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
4329 	if (!ndev)
4330 		return -ENOMEM;
4331 
4332 	SET_NETDEV_DEV(ndev, &pdev->dev);
4333 
4334 	/* setup board info structure */
4335 	fep = netdev_priv(ndev);
4336 
4337 	dev_info = device_get_match_data(&pdev->dev);
4338 	if (!dev_info)
4339 		dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data;
4340 	if (dev_info)
4341 		fep->quirks = dev_info->quirks;
4342 
4343 	fep->netdev = ndev;
4344 	fep->num_rx_queues = num_rx_qs;
4345 	fep->num_tx_queues = num_tx_qs;
4346 
4347 #if !defined(CONFIG_M5272)
4348 	/* default enable pause frame auto negotiation */
4349 	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
4350 		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
4351 #endif
4352 
4353 	/* Select default pin state */
4354 	pinctrl_pm_select_default_state(&pdev->dev);
4355 
4356 	fep->hwp = devm_platform_ioremap_resource(pdev, 0);
4357 	if (IS_ERR(fep->hwp)) {
4358 		ret = PTR_ERR(fep->hwp);
4359 		goto failed_ioremap;
4360 	}
4361 
4362 	fep->pdev = pdev;
4363 	fep->dev_id = dev_id++;
4364 
4365 	platform_set_drvdata(pdev, ndev);
4366 
4367 	if ((of_machine_is_compatible("fsl,imx6q") ||
4368 	     of_machine_is_compatible("fsl,imx6dl")) &&
4369 	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
4370 		fep->quirks |= FEC_QUIRK_ERR006687;
4371 
4372 	ret = fec_enet_ipc_handle_init(fep);
4373 	if (ret)
4374 		goto failed_ipc_init;
4375 
4376 	if (of_property_read_bool(np, "fsl,magic-packet"))
4377 		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
4378 
4379 	ret = fec_enet_init_stop_mode(fep, np);
4380 	if (ret)
4381 		goto failed_stop_mode;
4382 
4383 	phy_node = of_parse_phandle(np, "phy-handle", 0);
4384 	if (!phy_node && of_phy_is_fixed_link(np)) {
4385 		ret = of_phy_register_fixed_link(np);
4386 		if (ret < 0) {
4387 			dev_err(&pdev->dev,
4388 				"broken fixed-link specification\n");
4389 			goto failed_phy;
4390 		}
4391 		phy_node = of_node_get(np);
4392 	}
4393 	fep->phy_node = phy_node;
4394 
4395 	ret = of_get_phy_mode(pdev->dev.of_node, &interface);
4396 	if (ret) {
4397 		pdata = dev_get_platdata(&pdev->dev);
4398 		if (pdata)
4399 			fep->phy_interface = pdata->phy;
4400 		else
4401 			fep->phy_interface = PHY_INTERFACE_MODE_MII;
4402 	} else {
4403 		fep->phy_interface = interface;
4404 	}
4405 
4406 	ret = fec_enet_parse_rgmii_delay(fep, np);
4407 	if (ret)
4408 		goto failed_rgmii_delay;
4409 
4410 	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
4411 	if (IS_ERR(fep->clk_ipg)) {
4412 		ret = PTR_ERR(fep->clk_ipg);
4413 		goto failed_clk;
4414 	}
4415 
4416 	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
4417 	if (IS_ERR(fep->clk_ahb)) {
4418 		ret = PTR_ERR(fep->clk_ahb);
4419 		goto failed_clk;
4420 	}
4421 
4422 	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
4423 
4424 	/* enet_out is optional, depends on board */
4425 	fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
4426 	if (IS_ERR(fep->clk_enet_out)) {
4427 		ret = PTR_ERR(fep->clk_enet_out);
4428 		goto failed_clk;
4429 	}
4430 
4431 	fep->ptp_clk_on = false;
4432 	mutex_init(&fep->ptp_clk_mutex);
4433 
4434 	/* clk_ref is optional, depends on board */
4435 	fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
4436 	if (IS_ERR(fep->clk_ref)) {
4437 		ret = PTR_ERR(fep->clk_ref);
4438 		goto failed_clk;
4439 	}
4440 	fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
4441 
4442 	/* clk_2x_txclk is optional, depends on board */
4443 	if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
4444 		fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
4445 		if (IS_ERR(fep->clk_2x_txclk))
4446 			fep->clk_2x_txclk = NULL;
4447 	}
4448 
4449 	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
4450 	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
4451 	if (IS_ERR(fep->clk_ptp)) {
4452 		fep->clk_ptp = NULL;
4453 		fep->bufdesc_ex = false;
4454 	}
4455 
4456 	ret = fec_enet_clk_enable(ndev, true);
4457 	if (ret)
4458 		goto failed_clk;
4459 
4460 	ret = clk_prepare_enable(fep->clk_ipg);
4461 	if (ret)
4462 		goto failed_clk_ipg;
4463 	ret = clk_prepare_enable(fep->clk_ahb);
4464 	if (ret)
4465 		goto failed_clk_ahb;
4466 
4467 	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
4468 	if (!IS_ERR(fep->reg_phy)) {
4469 		ret = regulator_enable(fep->reg_phy);
4470 		if (ret) {
4471 			dev_err(&pdev->dev,
4472 				"Failed to enable phy regulator: %d\n", ret);
4473 			goto failed_regulator;
4474 		}
4475 	} else {
4476 		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
4477 			ret = -EPROBE_DEFER;
4478 			goto failed_regulator;
4479 		}
4480 		fep->reg_phy = NULL;
4481 	}
4482 
4483 	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
4484 	pm_runtime_use_autosuspend(&pdev->dev);
4485 	pm_runtime_get_noresume(&pdev->dev);
4486 	pm_runtime_set_active(&pdev->dev);
4487 	pm_runtime_enable(&pdev->dev);
4488 
4489 	ret = fec_reset_phy(pdev);
4490 	if (ret)
4491 		goto failed_reset;
4492 
4493 	irq_cnt = fec_enet_get_irq_cnt(pdev);
4494 	if (fep->bufdesc_ex)
4495 		fec_ptp_init(pdev, irq_cnt);
4496 
4497 	ret = fec_enet_init(ndev);
4498 	if (ret)
4499 		goto failed_init;
4500 
4501 	for (i = 0; i < irq_cnt; i++) {
4502 		snprintf(irq_name, sizeof(irq_name), "int%d", i);
4503 		irq = platform_get_irq_byname_optional(pdev, irq_name);
4504 		if (irq < 0)
4505 			irq = platform_get_irq(pdev, i);
4506 		if (irq < 0) {
4507 			ret = irq;
4508 			goto failed_irq;
4509 		}
4510 		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
4511 				       0, pdev->name, ndev);
4512 		if (ret)
4513 			goto failed_irq;
4514 
4515 		fep->irq[i] = irq;
4516 	}
4517 
4518 	/* Decide which interrupt line is wakeup capable */
4519 	fec_enet_get_wakeup_irq(pdev);
4520 
4521 	ret = fec_enet_mii_init(pdev);
4522 	if (ret)
4523 		goto failed_mii_init;
4524 
4525 	/* Carrier starts down, phylib will bring it up */
4526 	netif_carrier_off(ndev);
4527 	fec_enet_clk_enable(ndev, false);
4528 	pinctrl_pm_select_sleep_state(&pdev->dev);
4529 
4530 	ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
4531 
4532 	ret = register_netdev(ndev);
4533 	if (ret)
4534 		goto failed_register;
4535 
4536 	device_init_wakeup(&ndev->dev, fep->wol_flag &
4537 			   FEC_WOL_HAS_MAGIC_PACKET);
4538 
4539 	if (fep->bufdesc_ex && fep->ptp_clock)
4540 		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
4541 
4542 	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
4543 
4544 	pm_runtime_mark_last_busy(&pdev->dev);
4545 	pm_runtime_put_autosuspend(&pdev->dev);
4546 
4547 	return 0;
4548 
4549 failed_register:
4550 	fec_enet_mii_remove(fep);
4551 failed_mii_init:
4552 failed_irq:
4553 	fec_enet_deinit(ndev);
4554 failed_init:
4555 	fec_ptp_stop(pdev);
4556 failed_reset:
4557 	pm_runtime_put_noidle(&pdev->dev);
4558 	pm_runtime_disable(&pdev->dev);
4559 	if (fep->reg_phy)
4560 		regulator_disable(fep->reg_phy);
4561 failed_regulator:
4562 	clk_disable_unprepare(fep->clk_ahb);
4563 failed_clk_ahb:
4564 	clk_disable_unprepare(fep->clk_ipg);
4565 failed_clk_ipg:
4566 	fec_enet_clk_enable(ndev, false);
4567 failed_clk:
4568 failed_rgmii_delay:
4569 	if (of_phy_is_fixed_link(np))
4570 		of_phy_deregister_fixed_link(np);
4571 	of_node_put(phy_node);
4572 failed_stop_mode:
4573 failed_ipc_init:
4574 failed_phy:
4575 	dev_id--;
4576 failed_ioremap:
4577 	free_netdev(ndev);
4578 
4579 	return ret;
4580 }
4581 
4582 static void
4583 fec_drv_remove(struct platform_device *pdev)
4584 {
4585 	struct net_device *ndev = platform_get_drvdata(pdev);
4586 	struct fec_enet_private *fep = netdev_priv(ndev);
4587 	struct device_node *np = pdev->dev.of_node;
4588 	int ret;
4589 
4590 	ret = pm_runtime_get_sync(&pdev->dev);
4591 	if (ret < 0)
4592 		dev_err(&pdev->dev,
4593 			"Failed to resume device in remove callback (%pe)\n",
4594 			ERR_PTR(ret));
4595 
4596 	cancel_work_sync(&fep->tx_timeout_work);
4597 	fec_ptp_stop(pdev);
4598 	unregister_netdev(ndev);
4599 	fec_enet_mii_remove(fep);
4600 	if (fep->reg_phy)
4601 		regulator_disable(fep->reg_phy);
4602 
4603 	if (of_phy_is_fixed_link(np))
4604 		of_phy_deregister_fixed_link(np);
4605 	of_node_put(fep->phy_node);
4606 
4607 	/* After pm_runtime_get_sync() failed, the clks are still off, so skip
4608 	 * disabling them again.
4609 	 */
4610 	if (ret >= 0) {
4611 		clk_disable_unprepare(fep->clk_ahb);
4612 		clk_disable_unprepare(fep->clk_ipg);
4613 	}
4614 	pm_runtime_put_noidle(&pdev->dev);
4615 	pm_runtime_disable(&pdev->dev);
4616 
4617 	fec_enet_deinit(ndev);
4618 	free_netdev(ndev);
4619 }
4620 
4621 static int fec_suspend(struct device *dev)
4622 {
4623 	struct net_device *ndev = dev_get_drvdata(dev);
4624 	struct fec_enet_private *fep = netdev_priv(ndev);
4625 	int ret;
4626 
4627 	rtnl_lock();
4628 	if (netif_running(ndev)) {
4629 		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
4630 			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
4631 		phy_stop(ndev->phydev);
4632 		napi_disable(&fep->napi);
4633 		netif_tx_lock_bh(ndev);
4634 		netif_device_detach(ndev);
4635 		netif_tx_unlock_bh(ndev);
4636 		fec_stop(ndev);
4637 		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
4638 			fec_irqs_disable(ndev);
4639 			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
4640 		} else {
4641 			fec_irqs_disable_except_wakeup(ndev);
4642 			if (fep->wake_irq > 0) {
4643 				disable_irq(fep->wake_irq);
4644 				enable_irq_wake(fep->wake_irq);
4645 			}
4646 			fec_enet_stop_mode(fep, true);
4647 		}
4648 		/* It's safe to disable clocks since interrupts are masked */
4649 		fec_enet_clk_enable(ndev, false);
4650 
4651 		fep->rpm_active = !pm_runtime_status_suspended(dev);
4652 		if (fep->rpm_active) {
4653 			ret = pm_runtime_force_suspend(dev);
4654 			if (ret < 0) {
4655 				rtnl_unlock();
4656 				return ret;
4657 			}
4658 		}
4659 	}
4660 	rtnl_unlock();
4661 
4662 	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
4663 		regulator_disable(fep->reg_phy);
4664 
4665 	/* SOC supply clock to phy, when clock is disabled, phy link down
4666 	 * SOC control phy regulator, when regulator is disabled, phy link down
4667 	 */
4668 	if (fep->clk_enet_out || fep->reg_phy)
4669 		fep->link = 0;
4670 
4671 	return 0;
4672 }
4673 
4674 static int fec_resume(struct device *dev)
4675 {
4676 	struct net_device *ndev = dev_get_drvdata(dev);
4677 	struct fec_enet_private *fep = netdev_priv(ndev);
4678 	int ret;
4679 	int val;
4680 
4681 	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
4682 		ret = regulator_enable(fep->reg_phy);
4683 		if (ret)
4684 			return ret;
4685 	}
4686 
4687 	rtnl_lock();
4688 	if (netif_running(ndev)) {
4689 		if (fep->rpm_active)
4690 			pm_runtime_force_resume(dev);
4691 
4692 		ret = fec_enet_clk_enable(ndev, true);
4693 		if (ret) {
4694 			rtnl_unlock();
4695 			goto failed_clk;
4696 		}
4697 		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
4698 			fec_enet_stop_mode(fep, false);
4699 			if (fep->wake_irq) {
4700 				disable_irq_wake(fep->wake_irq);
4701 				enable_irq(fep->wake_irq);
4702 			}
4703 
4704 			val = readl(fep->hwp + FEC_ECNTRL);
4705 			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
4706 			writel(val, fep->hwp + FEC_ECNTRL);
4707 			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
4708 		} else {
4709 			pinctrl_pm_select_default_state(&fep->pdev->dev);
4710 		}
4711 		fec_restart(ndev);
4712 		netif_tx_lock_bh(ndev);
4713 		netif_device_attach(ndev);
4714 		netif_tx_unlock_bh(ndev);
4715 		napi_enable(&fep->napi);
4716 		phy_init_hw(ndev->phydev);
4717 		phy_start(ndev->phydev);
4718 	}
4719 	rtnl_unlock();
4720 
4721 	return 0;
4722 
4723 failed_clk:
4724 	if (fep->reg_phy)
4725 		regulator_disable(fep->reg_phy);
4726 	return ret;
4727 }
4728 
4729 static int fec_runtime_suspend(struct device *dev)
4730 {
4731 	struct net_device *ndev = dev_get_drvdata(dev);
4732 	struct fec_enet_private *fep = netdev_priv(ndev);
4733 
4734 	clk_disable_unprepare(fep->clk_ahb);
4735 	clk_disable_unprepare(fep->clk_ipg);
4736 
4737 	return 0;
4738 }
4739 
4740 static int fec_runtime_resume(struct device *dev)
4741 {
4742 	struct net_device *ndev = dev_get_drvdata(dev);
4743 	struct fec_enet_private *fep = netdev_priv(ndev);
4744 	int ret;
4745 
4746 	ret = clk_prepare_enable(fep->clk_ahb);
4747 	if (ret)
4748 		return ret;
4749 	ret = clk_prepare_enable(fep->clk_ipg);
4750 	if (ret)
4751 		goto failed_clk_ipg;
4752 
4753 	return 0;
4754 
4755 failed_clk_ipg:
4756 	clk_disable_unprepare(fep->clk_ahb);
4757 	return ret;
4758 }
4759 
4760 static const struct dev_pm_ops fec_pm_ops = {
4761 	SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
4762 	RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
4763 };
4764 
4765 static struct platform_driver fec_driver = {
4766 	.driver	= {
4767 		.name	= DRIVER_NAME,
4768 		.pm	= pm_ptr(&fec_pm_ops),
4769 		.of_match_table = fec_dt_ids,
4770 		.suppress_bind_attrs = true,
4771 	},
4772 	.id_table = fec_devtype,
4773 	.probe	= fec_probe,
4774 	.remove = fec_drv_remove,
4775 };
4776 
4777 module_platform_driver(fec_driver);
4778 
4779 MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
4780 MODULE_LICENSE("GPL");
4781