xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision b6459415b384cb829f0b2a4268f211c789f6cf0b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/pkt_cls.h>
43 #include <net/xdp_sock_drv.h>
44 #include "stmmac_ptp.h"
45 #include "stmmac.h"
46 #include "stmmac_xdp.h"
47 #include <linux/reset.h>
48 #include <linux/of_mdio.h>
49 #include "dwmac1000.h"
50 #include "dwxgmac2.h"
51 #include "hwif.h"
52 
53 /* As long as the interface is active, we keep the timestamping counter enabled
54  * with fine resolution and binary rollover. This avoids non-monotonic behavior
55  * (clock jumps) when changing timestamping settings at runtime.
56  */
57 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
58 				 PTP_TCR_TSCTRLSSR)
59 
60 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
61 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
62 
63 /* Module parameters */
64 #define TX_TIMEO	5000
65 static int watchdog = TX_TIMEO;
66 module_param(watchdog, int, 0644);
67 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
68 
69 static int debug = -1;
70 module_param(debug, int, 0644);
71 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
72 
73 static int phyaddr = -1;
74 module_param(phyaddr, int, 0444);
75 MODULE_PARM_DESC(phyaddr, "Physical device address");
76 
77 #define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
78 #define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
79 
80 /* Limit to make sure XDP TX and slow path can coexist */
81 #define STMMAC_XSK_TX_BUDGET_MAX	256
82 #define STMMAC_TX_XSK_AVAIL		16
83 #define STMMAC_RX_FILL_BATCH		16
84 
85 #define STMMAC_XDP_PASS		0
86 #define STMMAC_XDP_CONSUMED	BIT(0)
87 #define STMMAC_XDP_TX		BIT(1)
88 #define STMMAC_XDP_REDIRECT	BIT(2)
89 
90 static int flow_ctrl = FLOW_AUTO;
91 module_param(flow_ctrl, int, 0644);
92 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
93 
94 static int pause = PAUSE_TIME;
95 module_param(pause, int, 0644);
96 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
97 
98 #define TC_DEFAULT 64
99 static int tc = TC_DEFAULT;
100 module_param(tc, int, 0644);
101 MODULE_PARM_DESC(tc, "DMA threshold control value");
102 
103 #define	DEFAULT_BUFSIZE	1536
104 static int buf_sz = DEFAULT_BUFSIZE;
105 module_param(buf_sz, int, 0644);
106 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
107 
108 #define	STMMAC_RX_COPYBREAK	256
109 
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
112 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113 
114 #define STMMAC_DEFAULT_LPI_TIMER	1000
115 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, int, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119 
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
121  * but the user can force use of the chain mode instead of the ring.
122  */
123 static unsigned int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
126 
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
135 
136 #ifdef CONFIG_DEBUG_FS
137 static const struct net_device_ops stmmac_netdev_ops;
138 static void stmmac_init_fs(struct net_device *dev);
139 static void stmmac_exit_fs(struct net_device *dev);
140 #endif
141 
142 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
143 
144 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
145 {
146 	int ret = 0;
147 
148 	if (enabled) {
149 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
150 		if (ret)
151 			return ret;
152 		ret = clk_prepare_enable(priv->plat->pclk);
153 		if (ret) {
154 			clk_disable_unprepare(priv->plat->stmmac_clk);
155 			return ret;
156 		}
157 		if (priv->plat->clks_config) {
158 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
159 			if (ret) {
160 				clk_disable_unprepare(priv->plat->stmmac_clk);
161 				clk_disable_unprepare(priv->plat->pclk);
162 				return ret;
163 			}
164 		}
165 	} else {
166 		clk_disable_unprepare(priv->plat->stmmac_clk);
167 		clk_disable_unprepare(priv->plat->pclk);
168 		if (priv->plat->clks_config)
169 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
170 	}
171 
172 	return ret;
173 }
174 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
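
/* Usage sketch (illustrative, not a verbatim excerpt of the call sites): the
 * suspend/resume and runtime-PM paths are expected to gate both bus clocks
 * through this helper, e.g.:
 *
 *	stmmac_bus_clks_config(priv, false);		// entering low power
 *	...
 *	ret = stmmac_bus_clks_config(priv, true);	// leaving low power
 *	if (ret)
 *		return ret;
 *
 * The enable path rolls back any clock it already enabled on error, so
 * callers only need to check the return value.
 */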
175 
176 /**
177  * stmmac_verify_args - verify the driver parameters.
178  * Description: it checks the driver parameters and sets a default in case of
179  * errors.
180  */
181 static void stmmac_verify_args(void)
182 {
183 	if (unlikely(watchdog < 0))
184 		watchdog = TX_TIMEO;
185 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
186 		buf_sz = DEFAULT_BUFSIZE;
187 	if (unlikely(flow_ctrl > 1))
188 		flow_ctrl = FLOW_AUTO;
189 	else if (likely(flow_ctrl < 0))
190 		flow_ctrl = FLOW_OFF;
191 	if (unlikely((pause < 0) || (pause > 0xffff)))
192 		pause = PAUSE_TIME;
193 	if (eee_timer < 0)
194 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
195 }
196 
197 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
198 {
199 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
200 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
201 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
202 	u32 queue;
203 
204 	for (queue = 0; queue < maxq; queue++) {
205 		struct stmmac_channel *ch = &priv->channel[queue];
206 
207 		if (stmmac_xdp_is_enabled(priv) &&
208 		    test_bit(queue, priv->af_xdp_zc_qps)) {
209 			napi_disable(&ch->rxtx_napi);
210 			continue;
211 		}
212 
213 		if (queue < rx_queues_cnt)
214 			napi_disable(&ch->rx_napi);
215 		if (queue < tx_queues_cnt)
216 			napi_disable(&ch->tx_napi);
217 	}
218 }
219 
220 /**
221  * stmmac_disable_all_queues - Disable all queues
222  * @priv: driver private structure
223  */
224 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
225 {
226 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
227 	struct stmmac_rx_queue *rx_q;
228 	u32 queue;
229 
230 	/* synchronize_rcu() needed for pending XDP buffers to drain */
231 	for (queue = 0; queue < rx_queues_cnt; queue++) {
232 		rx_q = &priv->rx_queue[queue];
233 		if (rx_q->xsk_pool) {
234 			synchronize_rcu();
235 			break;
236 		}
237 	}
238 
239 	__stmmac_disable_all_queues(priv);
240 }
241 
242 /**
243  * stmmac_enable_all_queues - Enable all queues
244  * @priv: driver private structure
245  */
246 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
247 {
248 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
249 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
250 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
251 	u32 queue;
252 
253 	for (queue = 0; queue < maxq; queue++) {
254 		struct stmmac_channel *ch = &priv->channel[queue];
255 
256 		if (stmmac_xdp_is_enabled(priv) &&
257 		    test_bit(queue, priv->af_xdp_zc_qps)) {
258 			napi_enable(&ch->rxtx_napi);
259 			continue;
260 		}
261 
262 		if (queue < rx_queues_cnt)
263 			napi_enable(&ch->rx_napi);
264 		if (queue < tx_queues_cnt)
265 			napi_enable(&ch->tx_napi);
266 	}
267 }
268 
269 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
270 {
271 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
272 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
273 		queue_work(priv->wq, &priv->service_task);
274 }
275 
276 static void stmmac_global_err(struct stmmac_priv *priv)
277 {
278 	netif_carrier_off(priv->dev);
279 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
280 	stmmac_service_event_schedule(priv);
281 }
282 
283 /**
284  * stmmac_clk_csr_set - dynamically set the MDC clock
285  * @priv: driver private structure
286  * Description: this is to dynamically set the MDC clock according to the csr
287  * clock input.
288  * Note:
289  *	If a specific clk_csr value is passed from the platform
290  *	this means that the CSR Clock Range selection cannot be
291  *	changed at run-time and it is fixed (as reported in the driver
292  * documentation). Otherwise, the driver will try to set the MDC
293  *	clock dynamically according to the actual clock input.
294  */
295 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
296 {
297 	u32 clk_rate;
298 
299 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
300 
301 	/* The platform-provided default clk_csr is assumed valid in all
302 	 * cases except the ones handled below.
303 	 * For rates above the IEEE 802.3 specified frequency range we
304 	 * cannot estimate the proper divider because the frequency of
305 	 * clk_csr_i is not known, so the default divider is left
306 	 * unchanged.
307 	 */
308 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
309 		if (clk_rate < CSR_F_35M)
310 			priv->clk_csr = STMMAC_CSR_20_35M;
311 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
312 			priv->clk_csr = STMMAC_CSR_35_60M;
313 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
314 			priv->clk_csr = STMMAC_CSR_60_100M;
315 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
316 			priv->clk_csr = STMMAC_CSR_100_150M;
317 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
318 			priv->clk_csr = STMMAC_CSR_150_250M;
319 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
320 			priv->clk_csr = STMMAC_CSR_250_300M;
321 	}
322 
323 	if (priv->plat->has_sun8i) {
324 		if (clk_rate > 160000000)
325 			priv->clk_csr = 0x03;
326 		else if (clk_rate > 80000000)
327 			priv->clk_csr = 0x02;
328 		else if (clk_rate > 40000000)
329 			priv->clk_csr = 0x01;
330 		else
331 			priv->clk_csr = 0;
332 	}
333 
334 	if (priv->plat->has_xgmac) {
335 		if (clk_rate > 400000000)
336 			priv->clk_csr = 0x5;
337 		else if (clk_rate > 350000000)
338 			priv->clk_csr = 0x4;
339 		else if (clk_rate > 300000000)
340 			priv->clk_csr = 0x3;
341 		else if (clk_rate > 250000000)
342 			priv->clk_csr = 0x2;
343 		else if (clk_rate > 150000000)
344 			priv->clk_csr = 0x1;
345 		else
346 			priv->clk_csr = 0x0;
347 	}
348 }
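
/* Worked example (illustrative numbers): with a 125 MHz stmmac_clk and no
 * platform-fixed clk_csr, the 100-150 MHz branch above is taken and
 * priv->clk_csr becomes STMMAC_CSR_100_150M; the MDIO code later shifts this
 * value into the MII address register to select the MDC divider for that
 * CSR clock range.
 */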
349 
350 static void print_pkt(unsigned char *buf, int len)
351 {
352 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
353 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
354 }
355 
356 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
357 {
358 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
359 	u32 avail;
360 
361 	if (tx_q->dirty_tx > tx_q->cur_tx)
362 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
363 	else
364 		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
365 
366 	return avail;
367 }
368 
369 /**
370  * stmmac_rx_dirty - Get the number of dirty RX descriptors in the queue
371  * @priv: driver private structure
372  * @queue: RX queue index
373  */
374 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
375 {
376 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
377 	u32 dirty;
378 
379 	if (rx_q->dirty_rx <= rx_q->cur_rx)
380 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
381 	else
382 		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
383 
384 	return dirty;
385 }
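
/* Worked example of the ring arithmetic above (illustrative numbers): with
 * dma_tx_size = 512, cur_tx = 10 and dirty_tx = 5, stmmac_tx_avail() returns
 *
 *	512 - 10 + 5 - 1 = 506
 *
 * free TX descriptors. Likewise, with dma_rx_size = 512, cur_rx = 7 and
 * dirty_rx = 500, stmmac_rx_dirty() returns 512 - 500 + 7 = 19 descriptors
 * waiting to be refilled.
 */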
386 
387 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
388 {
389 	int tx_lpi_timer;
390 
391 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
392 	priv->eee_sw_timer_en = en ? 0 : 1;
393 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
394 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
395 }
396 
397 /**
398  * stmmac_enable_eee_mode - check and enter LPI mode
399  * @priv: driver private structure
400  * Description: this function checks that all TX queues have finished their
401  * work and, if so, enters LPI mode when EEE is enabled.
402  */
403 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
404 {
405 	u32 tx_cnt = priv->plat->tx_queues_to_use;
406 	u32 queue;
407 
408 	/* check if all TX queues have the work finished */
409 	for (queue = 0; queue < tx_cnt; queue++) {
410 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
411 
412 		if (tx_q->dirty_tx != tx_q->cur_tx)
413 			return; /* still unfinished work */
414 	}
415 
416 	/* Check and enter LPI mode */
417 	if (!priv->tx_path_in_lpi_mode)
418 		stmmac_set_eee_mode(priv, priv->hw,
419 				priv->plat->en_tx_lpi_clockgating);
420 }
421 
422 /**
423  * stmmac_disable_eee_mode - disable and exit from LPI mode
424  * @priv: driver private structure
425  * Description: this function exits the LPI state and disables EEE. It is
426  * called from the xmit path.
427  */
428 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
429 {
430 	if (!priv->eee_sw_timer_en) {
431 		stmmac_lpi_entry_timer_config(priv, 0);
432 		return;
433 	}
434 
435 	stmmac_reset_eee_mode(priv, priv->hw);
436 	del_timer_sync(&priv->eee_ctrl_timer);
437 	priv->tx_path_in_lpi_mode = false;
438 }
439 
440 /**
441  * stmmac_eee_ctrl_timer - EEE TX SW timer.
442  * @t:  timer_list struct containing private info
443  * Description:
444  *  if there is no data transfer and if we are not in LPI state,
445  *  then the MAC transmitter can be moved to the LPI state.
446  */
447 static void stmmac_eee_ctrl_timer(struct timer_list *t)
448 {
449 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
450 
451 	stmmac_enable_eee_mode(priv);
452 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
453 }
454 
455 /**
456  * stmmac_eee_init - init EEE
457  * @priv: driver private structure
458  * Description:
459  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
460  *  can also manage EEE, this function enables the LPI state and starts the
461  *  related timer.
462  */
463 bool stmmac_eee_init(struct stmmac_priv *priv)
464 {
465 	int eee_tw_timer = priv->eee_tw_timer;
466 
467 	/* When using the PCS we cannot deal with the PHY registers at this
468 	 * stage, so we do not support extra features like EEE.
469 	 */
470 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
471 	    priv->hw->pcs == STMMAC_PCS_RTBI)
472 		return false;
473 
474 	/* Check if MAC core supports the EEE feature. */
475 	if (!priv->dma_cap.eee)
476 		return false;
477 
478 	mutex_lock(&priv->lock);
479 
480 	/* Check if it needs to be deactivated */
481 	if (!priv->eee_active) {
482 		if (priv->eee_enabled) {
483 			netdev_dbg(priv->dev, "disable EEE\n");
484 			stmmac_lpi_entry_timer_config(priv, 0);
485 			del_timer_sync(&priv->eee_ctrl_timer);
486 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
487 			if (priv->hw->xpcs)
488 				xpcs_config_eee(priv->hw->xpcs,
489 						priv->plat->mult_fact_100ns,
490 						false);
491 		}
492 		mutex_unlock(&priv->lock);
493 		return false;
494 	}
495 
496 	if (priv->eee_active && !priv->eee_enabled) {
497 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
498 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
499 				     eee_tw_timer);
500 		if (priv->hw->xpcs)
501 			xpcs_config_eee(priv->hw->xpcs,
502 					priv->plat->mult_fact_100ns,
503 					true);
504 	}
505 
506 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
507 		del_timer_sync(&priv->eee_ctrl_timer);
508 		priv->tx_path_in_lpi_mode = false;
509 		stmmac_lpi_entry_timer_config(priv, 1);
510 	} else {
511 		stmmac_lpi_entry_timer_config(priv, 0);
512 		mod_timer(&priv->eee_ctrl_timer,
513 			  STMMAC_LPI_T(priv->tx_lpi_timer));
514 	}
515 
516 	mutex_unlock(&priv->lock);
517 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
518 	return true;
519 }
520 
521 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
522  * @priv: driver private structure
523  * @p : descriptor pointer
524  * @skb : the socket buffer
525  * Description :
526  * This function reads the timestamp from the descriptor and passes it to
527  * the stack. It also performs some sanity checks.
528  */
529 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
530 				   struct dma_desc *p, struct sk_buff *skb)
531 {
532 	struct skb_shared_hwtstamps shhwtstamp;
533 	bool found = false;
534 	u64 ns = 0;
535 
536 	if (!priv->hwts_tx_en)
537 		return;
538 
539 	/* exit if skb doesn't support hw tstamp */
540 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
541 		return;
542 
543 	/* check tx tstamp status */
544 	if (stmmac_get_tx_timestamp_status(priv, p)) {
545 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
546 		found = true;
547 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
548 		found = true;
549 	}
550 
551 	if (found) {
552 		ns -= priv->plat->cdc_error_adj;
553 
554 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
555 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
556 
557 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
558 		/* pass tstamp to stack */
559 		skb_tstamp_tx(skb, &shhwtstamp);
560 	}
561 }
562 
563 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
564  * @priv: driver private structure
565  * @p : descriptor pointer
566  * @np : next descriptor pointer
567  * @skb : the socket buffer
568  * Description :
569  * This function reads the received packet's timestamp from the descriptor
570  * and passes it to the stack. It also performs some sanity checks.
571  */
572 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
573 				   struct dma_desc *np, struct sk_buff *skb)
574 {
575 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
576 	struct dma_desc *desc = p;
577 	u64 ns = 0;
578 
579 	if (!priv->hwts_rx_en)
580 		return;
581 	/* For GMAC4, the valid timestamp is from CTX next desc. */
582 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
583 		desc = np;
584 
585 	/* Check if timestamp is available */
586 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
587 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
588 
589 		ns -= priv->plat->cdc_error_adj;
590 
591 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
592 		shhwtstamp = skb_hwtstamps(skb);
593 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
594 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
595 	} else  {
596 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
597 	}
598 }
599 
600 /**
601  *  stmmac_hwtstamp_set - control hardware timestamping.
602  *  @dev: device pointer.
603  *  @ifr: An IOCTL specific structure, that can contain a pointer to
604  *  a proprietary structure used to pass information to the driver.
605  *  Description:
606  *  This function configures the MAC to enable/disable both outgoing (TX)
607  *  and incoming (RX) packet timestamping based on user input.
608  *  Return Value:
609  *  0 on success and an appropriate -ve integer on failure.
610  */
611 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
612 {
613 	struct stmmac_priv *priv = netdev_priv(dev);
614 	struct hwtstamp_config config;
615 	u32 ptp_v2 = 0;
616 	u32 tstamp_all = 0;
617 	u32 ptp_over_ipv4_udp = 0;
618 	u32 ptp_over_ipv6_udp = 0;
619 	u32 ptp_over_ethernet = 0;
620 	u32 snap_type_sel = 0;
621 	u32 ts_master_en = 0;
622 	u32 ts_event_en = 0;
623 
624 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
625 		netdev_alert(priv->dev, "No support for HW time stamping\n");
626 		priv->hwts_tx_en = 0;
627 		priv->hwts_rx_en = 0;
628 
629 		return -EOPNOTSUPP;
630 	}
631 
632 	if (copy_from_user(&config, ifr->ifr_data,
633 			   sizeof(config)))
634 		return -EFAULT;
635 
636 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
637 		   __func__, config.flags, config.tx_type, config.rx_filter);
638 
639 	/* reserved for future extensions */
640 	if (config.flags)
641 		return -EINVAL;
642 
643 	if (config.tx_type != HWTSTAMP_TX_OFF &&
644 	    config.tx_type != HWTSTAMP_TX_ON)
645 		return -ERANGE;
646 
647 	if (priv->adv_ts) {
648 		switch (config.rx_filter) {
649 		case HWTSTAMP_FILTER_NONE:
650 			/* time stamp no incoming packet at all */
651 			config.rx_filter = HWTSTAMP_FILTER_NONE;
652 			break;
653 
654 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
655 			/* PTP v1, UDP, any kind of event packet */
656 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
657 			/* 'xmac' hardware can support Sync, Pdelay_Req and
658 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
659 			 * This leaves Delay_Req timestamps out.
660 			 * Enable all events *and* general purpose message
661 			 * timestamping.
662 			 */
663 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
664 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
665 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
666 			break;
667 
668 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
669 			/* PTP v1, UDP, Sync packet */
670 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
671 			/* take time stamp for SYNC messages only */
672 			ts_event_en = PTP_TCR_TSEVNTENA;
673 
674 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
675 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
676 			break;
677 
678 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
679 			/* PTP v1, UDP, Delay_req packet */
680 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
681 			/* take time stamp for Delay_Req messages only */
682 			ts_master_en = PTP_TCR_TSMSTRENA;
683 			ts_event_en = PTP_TCR_TSEVNTENA;
684 
685 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
686 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
687 			break;
688 
689 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
690 			/* PTP v2, UDP, any kind of event packet */
691 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
692 			ptp_v2 = PTP_TCR_TSVER2ENA;
693 			/* take time stamp for all event messages */
694 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
695 
696 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
697 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
698 			break;
699 
700 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
701 			/* PTP v2, UDP, Sync packet */
702 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
703 			ptp_v2 = PTP_TCR_TSVER2ENA;
704 			/* take time stamp for SYNC messages only */
705 			ts_event_en = PTP_TCR_TSEVNTENA;
706 
707 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
708 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
709 			break;
710 
711 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
712 			/* PTP v2, UDP, Delay_req packet */
713 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
714 			ptp_v2 = PTP_TCR_TSVER2ENA;
715 			/* take time stamp for Delay_Req messages only */
716 			ts_master_en = PTP_TCR_TSMSTRENA;
717 			ts_event_en = PTP_TCR_TSEVNTENA;
718 
719 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
720 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
721 			break;
722 
723 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
724 			/* PTP v2/802.1AS any layer, any kind of event packet */
725 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
726 			ptp_v2 = PTP_TCR_TSVER2ENA;
727 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
728 			if (priv->synopsys_id < DWMAC_CORE_4_10)
729 				ts_event_en = PTP_TCR_TSEVNTENA;
730 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
731 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
732 			ptp_over_ethernet = PTP_TCR_TSIPENA;
733 			break;
734 
735 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
736 			/* PTP v2/802.1AS, any layer, Sync packet */
737 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
738 			ptp_v2 = PTP_TCR_TSVER2ENA;
739 			/* take time stamp for SYNC messages only */
740 			ts_event_en = PTP_TCR_TSEVNTENA;
741 
742 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
743 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
744 			ptp_over_ethernet = PTP_TCR_TSIPENA;
745 			break;
746 
747 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
748 			/* PTP v2/802.1AS, any layer, Delay_req packet */
749 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
750 			ptp_v2 = PTP_TCR_TSVER2ENA;
751 			/* take time stamp for Delay_Req messages only */
752 			ts_master_en = PTP_TCR_TSMSTRENA;
753 			ts_event_en = PTP_TCR_TSEVNTENA;
754 
755 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
756 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
757 			ptp_over_ethernet = PTP_TCR_TSIPENA;
758 			break;
759 
760 		case HWTSTAMP_FILTER_NTP_ALL:
761 		case HWTSTAMP_FILTER_ALL:
762 			/* time stamp any incoming packet */
763 			config.rx_filter = HWTSTAMP_FILTER_ALL;
764 			tstamp_all = PTP_TCR_TSENALL;
765 			break;
766 
767 		default:
768 			return -ERANGE;
769 		}
770 	} else {
771 		switch (config.rx_filter) {
772 		case HWTSTAMP_FILTER_NONE:
773 			config.rx_filter = HWTSTAMP_FILTER_NONE;
774 			break;
775 		default:
776 			/* PTP v1, UDP, any kind of event packet */
777 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
778 			break;
779 		}
780 	}
781 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
782 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
783 
784 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
785 
786 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
787 		priv->systime_flags |= tstamp_all | ptp_v2 |
788 				       ptp_over_ethernet | ptp_over_ipv6_udp |
789 				       ptp_over_ipv4_udp | ts_event_en |
790 				       ts_master_en | snap_type_sel;
791 	}
792 
793 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
794 
795 	memcpy(&priv->tstamp_config, &config, sizeof(config));
796 
797 	return copy_to_user(ifr->ifr_data, &config,
798 			    sizeof(config)) ? -EFAULT : 0;
799 }
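
/* Userspace usage sketch (illustrative only; "eth0" and sock_fd are
 * placeholders): an application reaches this handler through the
 * SIOCSHWTSTAMP ioctl declared in <linux/sockios.h>, passing a
 * struct hwtstamp_config from <linux/net_tstamp.h>:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 *
 * On return the driver may have rewritten cfg.rx_filter to the filter it
 * actually programmed (see the switch above), so callers should re-read it.
 */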
800 
801 /**
802  *  stmmac_hwtstamp_get - read hardware timestamping.
803  *  @dev: device pointer.
804  *  @ifr: An IOCTL specific structure, that can contain a pointer to
805  *  a proprietary structure used to pass information to the driver.
806  *  Description:
807  *  This function obtains the current hardware timestamping settings
808  *  as requested.
809  */
810 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
811 {
812 	struct stmmac_priv *priv = netdev_priv(dev);
813 	struct hwtstamp_config *config = &priv->tstamp_config;
814 
815 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
816 		return -EOPNOTSUPP;
817 
818 	return copy_to_user(ifr->ifr_data, config,
819 			    sizeof(*config)) ? -EFAULT : 0;
820 }
821 
822 /**
823  * stmmac_init_tstamp_counter - init hardware timestamping counter
824  * @priv: driver private structure
825  * @systime_flags: timestamping flags
826  * Description:
827  * Initialize hardware counter for packet timestamping.
828  * This is valid as long as the interface is open and not suspended.
829  * It is rerun after resuming from suspend, in which case the timestamping
830  * flags updated by stmmac_hwtstamp_set() also need to be restored.
831  */
832 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
833 {
834 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
835 	struct timespec64 now;
836 	u32 sec_inc = 0;
837 	u64 temp = 0;
838 	int ret;
839 
840 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
841 		return -EOPNOTSUPP;
842 
843 	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
844 	if (ret < 0) {
845 		netdev_warn(priv->dev,
846 			    "failed to enable PTP reference clock: %pe\n",
847 			    ERR_PTR(ret));
848 		return ret;
849 	}
850 
851 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
852 	priv->systime_flags = systime_flags;
853 
854 	/* program Sub Second Increment reg */
855 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
856 					   priv->plat->clk_ptp_rate,
857 					   xmac, &sec_inc);
858 	temp = div_u64(1000000000ULL, sec_inc);
859 
860 	/* Store sub second increment for later use */
861 	priv->sub_second_inc = sec_inc;
862 
863 	/* Calculate the default addend value:
864 	 * addend = 2^32 / freq_div_ratio
865 	 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc), i.e.
866 	 * addend = ((1e9 / sec_inc) * 2^32) / clk_ptp_rate
867 	 */
868 	temp = (u64)(temp << 32);
869 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
870 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
871 
872 	/* initialize system time */
873 	ktime_get_real_ts64(&now);
874 
875 	/* lower 32 bits of tv_sec are safe until y2106 */
876 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
877 
878 	return 0;
879 }
880 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
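
/* Worked example for the addend computation above (illustrative clock
 * values): assuming clk_ptp_rate = 62500000 Hz and a sub-second increment
 * of sec_inc = 32 ns,
 *
 *	default_addend = ((10^9 / 32) << 32) / 62500000 = 2^31 = 0x80000000
 *
 * so the 32-bit accumulator overflows every other reference-clock cycle,
 * advancing the sub-second register by 32 ns each time, i.e. exactly
 * 10^9 ns per second.
 */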
881 
882 /**
883  * stmmac_init_ptp - init PTP
884  * @priv: driver private structure
885  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
886  * This is done by looking at the HW cap. register.
887  * This function also registers the ptp driver.
888  */
889 static int stmmac_init_ptp(struct stmmac_priv *priv)
890 {
891 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
892 	int ret;
893 
894 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
895 	if (ret)
896 		return ret;
897 
898 	priv->adv_ts = 0;
899 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
900 	if (xmac && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 	/* Dwmac 3.x core with extend_desc can support adv_ts */
903 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
904 		priv->adv_ts = 1;
905 
906 	if (priv->dma_cap.time_stamp)
907 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
908 
909 	if (priv->adv_ts)
910 		netdev_info(priv->dev,
911 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
912 
913 	priv->hwts_tx_en = 0;
914 	priv->hwts_rx_en = 0;
915 
916 	stmmac_ptp_register(priv);
917 
918 	return 0;
919 }
920 
921 static void stmmac_release_ptp(struct stmmac_priv *priv)
922 {
923 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
924 	stmmac_ptp_unregister(priv);
925 }
926 
927 /**
928  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
929  *  @priv: driver private structure
930  *  @duplex: duplex passed to the next function
931  *  Description: It is used for configuring the flow control in all queues
932  */
933 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
934 {
935 	u32 tx_cnt = priv->plat->tx_queues_to_use;
936 
937 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
938 			priv->pause, tx_cnt);
939 }
940 
941 static void stmmac_validate(struct phylink_config *config,
942 			    unsigned long *supported,
943 			    struct phylink_link_state *state)
944 {
945 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
946 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
947 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
948 	int tx_cnt = priv->plat->tx_queues_to_use;
949 	int max_speed = priv->plat->max_speed;
950 
951 	phylink_set(mac_supported, 10baseT_Half);
952 	phylink_set(mac_supported, 10baseT_Full);
953 	phylink_set(mac_supported, 100baseT_Half);
954 	phylink_set(mac_supported, 100baseT_Full);
955 	phylink_set(mac_supported, 1000baseT_Half);
956 	phylink_set(mac_supported, 1000baseT_Full);
957 	phylink_set(mac_supported, 1000baseKX_Full);
958 
959 	phylink_set(mac_supported, Autoneg);
960 	phylink_set(mac_supported, Pause);
961 	phylink_set(mac_supported, Asym_Pause);
962 	phylink_set_port_modes(mac_supported);
963 
964 	/* Cut down 1G if asked to */
965 	if ((max_speed > 0) && (max_speed < 1000)) {
966 		phylink_set(mask, 1000baseT_Full);
967 		phylink_set(mask, 1000baseX_Full);
968 	} else if (priv->plat->has_gmac4) {
969 		if (!max_speed || max_speed >= 2500) {
970 			phylink_set(mac_supported, 2500baseT_Full);
971 			phylink_set(mac_supported, 2500baseX_Full);
972 		}
973 	} else if (priv->plat->has_xgmac) {
974 		if (!max_speed || (max_speed >= 2500)) {
975 			phylink_set(mac_supported, 2500baseT_Full);
976 			phylink_set(mac_supported, 2500baseX_Full);
977 		}
978 		if (!max_speed || (max_speed >= 5000)) {
979 			phylink_set(mac_supported, 5000baseT_Full);
980 		}
981 		if (!max_speed || (max_speed >= 10000)) {
982 			phylink_set(mac_supported, 10000baseSR_Full);
983 			phylink_set(mac_supported, 10000baseLR_Full);
984 			phylink_set(mac_supported, 10000baseER_Full);
985 			phylink_set(mac_supported, 10000baseLRM_Full);
986 			phylink_set(mac_supported, 10000baseT_Full);
987 			phylink_set(mac_supported, 10000baseKX4_Full);
988 			phylink_set(mac_supported, 10000baseKR_Full);
989 		}
990 		if (!max_speed || (max_speed >= 25000)) {
991 			phylink_set(mac_supported, 25000baseCR_Full);
992 			phylink_set(mac_supported, 25000baseKR_Full);
993 			phylink_set(mac_supported, 25000baseSR_Full);
994 		}
995 		if (!max_speed || (max_speed >= 40000)) {
996 			phylink_set(mac_supported, 40000baseKR4_Full);
997 			phylink_set(mac_supported, 40000baseCR4_Full);
998 			phylink_set(mac_supported, 40000baseSR4_Full);
999 			phylink_set(mac_supported, 40000baseLR4_Full);
1000 		}
1001 		if (!max_speed || (max_speed >= 50000)) {
1002 			phylink_set(mac_supported, 50000baseCR2_Full);
1003 			phylink_set(mac_supported, 50000baseKR2_Full);
1004 			phylink_set(mac_supported, 50000baseSR2_Full);
1005 			phylink_set(mac_supported, 50000baseKR_Full);
1006 			phylink_set(mac_supported, 50000baseSR_Full);
1007 			phylink_set(mac_supported, 50000baseCR_Full);
1008 			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
1009 			phylink_set(mac_supported, 50000baseDR_Full);
1010 		}
1011 		if (!max_speed || (max_speed >= 100000)) {
1012 			phylink_set(mac_supported, 100000baseKR4_Full);
1013 			phylink_set(mac_supported, 100000baseSR4_Full);
1014 			phylink_set(mac_supported, 100000baseCR4_Full);
1015 			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
1016 			phylink_set(mac_supported, 100000baseKR2_Full);
1017 			phylink_set(mac_supported, 100000baseSR2_Full);
1018 			phylink_set(mac_supported, 100000baseCR2_Full);
1019 			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
1020 			phylink_set(mac_supported, 100000baseDR2_Full);
1021 		}
1022 	}
1023 
1024 	/* Half-duplex can only work with a single queue */
1025 	if (tx_cnt > 1) {
1026 		phylink_set(mask, 10baseT_Half);
1027 		phylink_set(mask, 100baseT_Half);
1028 		phylink_set(mask, 1000baseT_Half);
1029 	}
1030 
1031 	linkmode_and(supported, supported, mac_supported);
1032 	linkmode_andnot(supported, supported, mask);
1033 
1034 	linkmode_and(state->advertising, state->advertising, mac_supported);
1035 	linkmode_andnot(state->advertising, state->advertising, mask);
1036 
1037 	/* If PCS is supported, check which modes it supports. */
1038 	if (priv->hw->xpcs)
1039 		xpcs_validate(priv->hw->xpcs, supported, state);
1040 }
1041 
1042 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
1043 			      const struct phylink_link_state *state)
1044 {
1045 	/* Nothing to do, xpcs_config() handles everything */
1046 }
1047 
1048 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
1049 {
1050 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
1051 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
1052 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
1053 	bool *hs_enable = &fpe_cfg->hs_enable;
1054 
1055 	if (is_up && *hs_enable) {
1056 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
1057 	} else {
1058 		*lo_state = FPE_STATE_OFF;
1059 		*lp_state = FPE_STATE_OFF;
1060 	}
1061 }
1062 
1063 static void stmmac_mac_link_down(struct phylink_config *config,
1064 				 unsigned int mode, phy_interface_t interface)
1065 {
1066 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1067 
1068 	stmmac_mac_set(priv, priv->ioaddr, false);
1069 	priv->eee_active = false;
1070 	priv->tx_lpi_enabled = false;
1071 	priv->eee_enabled = stmmac_eee_init(priv);
1072 	stmmac_set_eee_pls(priv, priv->hw, false);
1073 
1074 	if (priv->dma_cap.fpesel)
1075 		stmmac_fpe_link_state_handle(priv, false);
1076 }
1077 
1078 static void stmmac_mac_link_up(struct phylink_config *config,
1079 			       struct phy_device *phy,
1080 			       unsigned int mode, phy_interface_t interface,
1081 			       int speed, int duplex,
1082 			       bool tx_pause, bool rx_pause)
1083 {
1084 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1085 	u32 ctrl;
1086 
1087 	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1088 	ctrl &= ~priv->hw->link.speed_mask;
1089 
1090 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1091 		switch (speed) {
1092 		case SPEED_10000:
1093 			ctrl |= priv->hw->link.xgmii.speed10000;
1094 			break;
1095 		case SPEED_5000:
1096 			ctrl |= priv->hw->link.xgmii.speed5000;
1097 			break;
1098 		case SPEED_2500:
1099 			ctrl |= priv->hw->link.xgmii.speed2500;
1100 			break;
1101 		default:
1102 			return;
1103 		}
1104 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1105 		switch (speed) {
1106 		case SPEED_100000:
1107 			ctrl |= priv->hw->link.xlgmii.speed100000;
1108 			break;
1109 		case SPEED_50000:
1110 			ctrl |= priv->hw->link.xlgmii.speed50000;
1111 			break;
1112 		case SPEED_40000:
1113 			ctrl |= priv->hw->link.xlgmii.speed40000;
1114 			break;
1115 		case SPEED_25000:
1116 			ctrl |= priv->hw->link.xlgmii.speed25000;
1117 			break;
1118 		case SPEED_10000:
1119 			ctrl |= priv->hw->link.xgmii.speed10000;
1120 			break;
1121 		case SPEED_2500:
1122 			ctrl |= priv->hw->link.speed2500;
1123 			break;
1124 		case SPEED_1000:
1125 			ctrl |= priv->hw->link.speed1000;
1126 			break;
1127 		default:
1128 			return;
1129 		}
1130 	} else {
1131 		switch (speed) {
1132 		case SPEED_2500:
1133 			ctrl |= priv->hw->link.speed2500;
1134 			break;
1135 		case SPEED_1000:
1136 			ctrl |= priv->hw->link.speed1000;
1137 			break;
1138 		case SPEED_100:
1139 			ctrl |= priv->hw->link.speed100;
1140 			break;
1141 		case SPEED_10:
1142 			ctrl |= priv->hw->link.speed10;
1143 			break;
1144 		default:
1145 			return;
1146 		}
1147 	}
1148 
1149 	priv->speed = speed;
1150 
1151 	if (priv->plat->fix_mac_speed)
1152 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1153 
1154 	if (!duplex)
1155 		ctrl &= ~priv->hw->link.duplex;
1156 	else
1157 		ctrl |= priv->hw->link.duplex;
1158 
1159 	/* Flow Control operation */
1160 	if (tx_pause && rx_pause)
1161 		stmmac_mac_flow_ctrl(priv, duplex);
1162 
1163 	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1164 
1165 	stmmac_mac_set(priv, priv->ioaddr, true);
1166 	if (phy && priv->dma_cap.eee) {
1167 		priv->eee_active = phy_init_eee(phy, 1) >= 0;
1168 		priv->eee_enabled = stmmac_eee_init(priv);
1169 		priv->tx_lpi_enabled = priv->eee_enabled;
1170 		stmmac_set_eee_pls(priv, priv->hw, true);
1171 	}
1172 
1173 	if (priv->dma_cap.fpesel)
1174 		stmmac_fpe_link_state_handle(priv, true);
1175 }
1176 
1177 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1178 	.validate = stmmac_validate,
1179 	.mac_config = stmmac_mac_config,
1180 	.mac_link_down = stmmac_mac_link_down,
1181 	.mac_link_up = stmmac_mac_link_up,
1182 };
1183 
1184 /**
1185  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1186  * @priv: driver private structure
1187  * Description: this is to verify if the HW supports the PCS.
1188  * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1189  * configured for the TBI, RTBI, or SGMII PHY interface.
1190  */
1191 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1192 {
1193 	int interface = priv->plat->interface;
1194 
1195 	if (priv->dma_cap.pcs) {
1196 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1197 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1198 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1199 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1200 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1201 			priv->hw->pcs = STMMAC_PCS_RGMII;
1202 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1203 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1204 			priv->hw->pcs = STMMAC_PCS_SGMII;
1205 		}
1206 	}
1207 }
1208 
1209 /**
1210  * stmmac_init_phy - PHY initialization
1211  * @dev: net device structure
1212  * Description: it initializes the driver's PHY state, and attaches the PHY
1213  * to the mac driver.
1214  *  Return value:
1215  *  0 on success
1216  */
1217 static int stmmac_init_phy(struct net_device *dev)
1218 {
1219 	struct stmmac_priv *priv = netdev_priv(dev);
1220 	struct device_node *node;
1221 	int ret;
1222 
1223 	node = priv->plat->phylink_node;
1224 
1225 	if (node)
1226 		ret = phylink_of_phy_connect(priv->phylink, node, 0);
1227 
1228 	/* Some DT bindings do not set up the PHY handle. Let's try to
1229 	 * manually parse it
1230 	 */
1231 	if (!node || ret) {
1232 		int addr = priv->plat->phy_addr;
1233 		struct phy_device *phydev;
1234 
1235 		phydev = mdiobus_get_phy(priv->mii, addr);
1236 		if (!phydev) {
1237 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1238 			return -ENODEV;
1239 		}
1240 
1241 		ret = phylink_connect_phy(priv->phylink, phydev);
1242 	}
1243 
1244 	if (!priv->plat->pmt) {
1245 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1246 
1247 		phylink_ethtool_get_wol(priv->phylink, &wol);
1248 		device_set_wakeup_capable(priv->device, !!wol.supported);
1249 	}
1250 
1251 	return ret;
1252 }
1253 
1254 static int stmmac_phy_setup(struct stmmac_priv *priv)
1255 {
1256 	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
1257 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1258 	int mode = priv->plat->phy_interface;
1259 	struct phylink *phylink;
1260 
1261 	priv->phylink_config.dev = &priv->dev->dev;
1262 	priv->phylink_config.type = PHYLINK_NETDEV;
1263 	priv->phylink_config.pcs_poll = true;
1264 	if (priv->plat->mdio_bus_data)
1265 		priv->phylink_config.ovr_an_inband =
1266 			mdio_bus_data->xpcs_an_inband;
1267 
1268 	if (!fwnode)
1269 		fwnode = dev_fwnode(priv->device);
1270 
1271 	phylink = phylink_create(&priv->phylink_config, fwnode,
1272 				 mode, &stmmac_phylink_mac_ops);
1273 	if (IS_ERR(phylink))
1274 		return PTR_ERR(phylink);
1275 
1276 	if (priv->hw->xpcs)
1277 		phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);
1278 
1279 	priv->phylink = phylink;
1280 	return 0;
1281 }
1282 
1283 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1284 {
1285 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1286 	unsigned int desc_size;
1287 	void *head_rx;
1288 	u32 queue;
1289 
1290 	/* Display RX rings */
1291 	for (queue = 0; queue < rx_cnt; queue++) {
1292 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1293 
1294 		pr_info("\tRX Queue %u rings\n", queue);
1295 
1296 		if (priv->extend_desc) {
1297 			head_rx = (void *)rx_q->dma_erx;
1298 			desc_size = sizeof(struct dma_extended_desc);
1299 		} else {
1300 			head_rx = (void *)rx_q->dma_rx;
1301 			desc_size = sizeof(struct dma_desc);
1302 		}
1303 
1304 		/* Display RX ring */
1305 		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1306 				    rx_q->dma_rx_phy, desc_size);
1307 	}
1308 }
1309 
1310 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1311 {
1312 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1313 	unsigned int desc_size;
1314 	void *head_tx;
1315 	u32 queue;
1316 
1317 	/* Display TX rings */
1318 	for (queue = 0; queue < tx_cnt; queue++) {
1319 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1320 
1321 		pr_info("\tTX Queue %d rings\n", queue);
1322 
1323 		if (priv->extend_desc) {
1324 			head_tx = (void *)tx_q->dma_etx;
1325 			desc_size = sizeof(struct dma_extended_desc);
1326 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1327 			head_tx = (void *)tx_q->dma_entx;
1328 			desc_size = sizeof(struct dma_edesc);
1329 		} else {
1330 			head_tx = (void *)tx_q->dma_tx;
1331 			desc_size = sizeof(struct dma_desc);
1332 		}
1333 
1334 		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1335 				    tx_q->dma_tx_phy, desc_size);
1336 	}
1337 }
1338 
1339 static void stmmac_display_rings(struct stmmac_priv *priv)
1340 {
1341 	/* Display RX ring */
1342 	stmmac_display_rx_rings(priv);
1343 
1344 	/* Display TX ring */
1345 	stmmac_display_tx_rings(priv);
1346 }
1347 
1348 static int stmmac_set_bfsize(int mtu, int bufsize)
1349 {
1350 	int ret = bufsize;
1351 
1352 	if (mtu >= BUF_SIZE_8KiB)
1353 		ret = BUF_SIZE_16KiB;
1354 	else if (mtu >= BUF_SIZE_4KiB)
1355 		ret = BUF_SIZE_8KiB;
1356 	else if (mtu >= BUF_SIZE_2KiB)
1357 		ret = BUF_SIZE_4KiB;
1358 	else if (mtu > DEFAULT_BUFSIZE)
1359 		ret = BUF_SIZE_2KiB;
1360 	else
1361 		ret = DEFAULT_BUFSIZE;
1362 
1363 	return ret;
1364 }
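
/* Worked example: an MTU of 3000 bytes is below BUF_SIZE_4KiB but at least
 * BUF_SIZE_2KiB, so stmmac_set_bfsize(3000, bufsize) returns BUF_SIZE_4KiB;
 * only an MTU at or below DEFAULT_BUFSIZE keeps the default 1536-byte
 * buffers. Every branch above overwrites the initial ret = bufsize, so the
 * hint passed in does not affect the result.
 */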
1365 
1366 /**
1367  * stmmac_clear_rx_descriptors - clear RX descriptors
1368  * @priv: driver private structure
1369  * @queue: RX queue index
1370  * Description: this function is called to clear the RX descriptors
1371  * whether basic or extended descriptors are in use.
1372  */
1373 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1374 {
1375 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1376 	int i;
1377 
1378 	/* Clear the RX descriptors */
1379 	for (i = 0; i < priv->dma_rx_size; i++)
1380 		if (priv->extend_desc)
1381 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1382 					priv->use_riwt, priv->mode,
1383 					(i == priv->dma_rx_size - 1),
1384 					priv->dma_buf_sz);
1385 		else
1386 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1387 					priv->use_riwt, priv->mode,
1388 					(i == priv->dma_rx_size - 1),
1389 					priv->dma_buf_sz);
1390 }
1391 
1392 /**
1393  * stmmac_clear_tx_descriptors - clear tx descriptors
1394  * @priv: driver private structure
1395  * @queue: TX queue index.
1396  * Description: this function is called to clear the TX descriptors
1397  * whether basic or extended descriptors are in use.
1398  */
1399 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1400 {
1401 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1402 	int i;
1403 
1404 	/* Clear the TX descriptors */
1405 	for (i = 0; i < priv->dma_tx_size; i++) {
1406 		int last = (i == (priv->dma_tx_size - 1));
1407 		struct dma_desc *p;
1408 
1409 		if (priv->extend_desc)
1410 			p = &tx_q->dma_etx[i].basic;
1411 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1412 			p = &tx_q->dma_entx[i].basic;
1413 		else
1414 			p = &tx_q->dma_tx[i];
1415 
1416 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1417 	}
1418 }
1419 
1420 /**
1421  * stmmac_clear_descriptors - clear descriptors
1422  * @priv: driver private structure
1423  * Description: this function is called to clear the TX and RX descriptors
1424  * whether basic or extended descriptors are in use.
1425  */
1426 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1427 {
1428 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1429 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1430 	u32 queue;
1431 
1432 	/* Clear the RX descriptors */
1433 	for (queue = 0; queue < rx_queue_cnt; queue++)
1434 		stmmac_clear_rx_descriptors(priv, queue);
1435 
1436 	/* Clear the TX descriptors */
1437 	for (queue = 0; queue < tx_queue_cnt; queue++)
1438 		stmmac_clear_tx_descriptors(priv, queue);
1439 }
1440 
1441 /**
1442  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1443  * @priv: driver private structure
1444  * @p: descriptor pointer
1445  * @i: descriptor index
1446  * @flags: gfp flag
1447  * @queue: RX queue index
1448  * Description: this function is called to allocate a receive buffer, perform
1449  * the DMA mapping and init the descriptor.
1450  */
1451 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1452 				  int i, gfp_t flags, u32 queue)
1453 {
1454 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1455 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1456 
1457 	if (!buf->page) {
1458 		buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1459 		if (!buf->page)
1460 			return -ENOMEM;
1461 		buf->page_offset = stmmac_rx_offset(priv);
1462 	}
1463 
1464 	if (priv->sph && !buf->sec_page) {
1465 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1466 		if (!buf->sec_page)
1467 			return -ENOMEM;
1468 
1469 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1470 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1471 	} else {
1472 		buf->sec_page = NULL;
1473 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1474 	}
1475 
1476 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1477 
1478 	stmmac_set_desc_addr(priv, p, buf->addr);
1479 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1480 		stmmac_init_desc3(priv, p);
1481 
1482 	return 0;
1483 }
1484 
1485 /**
1486  * stmmac_free_rx_buffer - free an RX DMA buffer
1487  * @priv: private structure
1488  * @queue: RX queue index
1489  * @i: buffer index.
1490  */
1491 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1492 {
1493 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1494 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1495 
1496 	if (buf->page)
1497 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1498 	buf->page = NULL;
1499 
1500 	if (buf->sec_page)
1501 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1502 	buf->sec_page = NULL;
1503 }
1504 
1505 /**
1506  * stmmac_free_tx_buffer - free a TX DMA buffer
1507  * @priv: private structure
1508  * @queue: TX queue index
1509  * @i: buffer index.
1510  */
1511 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1512 {
1513 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1514 
1515 	if (tx_q->tx_skbuff_dma[i].buf &&
1516 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1517 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1518 			dma_unmap_page(priv->device,
1519 				       tx_q->tx_skbuff_dma[i].buf,
1520 				       tx_q->tx_skbuff_dma[i].len,
1521 				       DMA_TO_DEVICE);
1522 		else
1523 			dma_unmap_single(priv->device,
1524 					 tx_q->tx_skbuff_dma[i].buf,
1525 					 tx_q->tx_skbuff_dma[i].len,
1526 					 DMA_TO_DEVICE);
1527 	}
1528 
1529 	if (tx_q->xdpf[i] &&
1530 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1531 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1532 		xdp_return_frame(tx_q->xdpf[i]);
1533 		tx_q->xdpf[i] = NULL;
1534 	}
1535 
1536 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1537 		tx_q->xsk_frames_done++;
1538 
1539 	if (tx_q->tx_skbuff[i] &&
1540 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1541 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1542 		tx_q->tx_skbuff[i] = NULL;
1543 	}
1544 
1545 	tx_q->tx_skbuff_dma[i].buf = 0;
1546 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1547 }
1548 
1549 /**
1550  * dma_free_rx_skbufs - free RX dma buffers
1551  * @priv: private structure
1552  * @queue: RX queue index
1553  */
1554 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1555 {
1556 	int i;
1557 
1558 	for (i = 0; i < priv->dma_rx_size; i++)
1559 		stmmac_free_rx_buffer(priv, queue, i);
1560 }
1561 
1562 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
1563 				   gfp_t flags)
1564 {
1565 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1566 	int i;
1567 
1568 	for (i = 0; i < priv->dma_rx_size; i++) {
1569 		struct dma_desc *p;
1570 		int ret;
1571 
1572 		if (priv->extend_desc)
1573 			p = &((rx_q->dma_erx + i)->basic);
1574 		else
1575 			p = rx_q->dma_rx + i;
1576 
1577 		ret = stmmac_init_rx_buffers(priv, p, i, flags,
1578 					     queue);
1579 		if (ret)
1580 			return ret;
1581 
1582 		rx_q->buf_alloc_num++;
1583 	}
1584 
1585 	return 0;
1586 }
1587 
1588 /**
1589  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1590  * @priv: private structure
1591  * @queue: RX queue index
1592  */
1593 static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
1594 {
1595 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1596 	int i;
1597 
1598 	for (i = 0; i < priv->dma_rx_size; i++) {
1599 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1600 
1601 		if (!buf->xdp)
1602 			continue;
1603 
1604 		xsk_buff_free(buf->xdp);
1605 		buf->xdp = NULL;
1606 	}
1607 }
1608 
1609 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
1610 {
1611 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1612 	int i;
1613 
1614 	for (i = 0; i < priv->dma_rx_size; i++) {
1615 		struct stmmac_rx_buffer *buf;
1616 		dma_addr_t dma_addr;
1617 		struct dma_desc *p;
1618 
1619 		if (priv->extend_desc)
1620 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1621 		else
1622 			p = rx_q->dma_rx + i;
1623 
1624 		buf = &rx_q->buf_pool[i];
1625 
1626 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1627 		if (!buf->xdp)
1628 			return -ENOMEM;
1629 
1630 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1631 		stmmac_set_desc_addr(priv, p, dma_addr);
1632 		rx_q->buf_alloc_num++;
1633 	}
1634 
1635 	return 0;
1636 }
1637 
1638 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1639 {
1640 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1641 		return NULL;
1642 
1643 	return xsk_get_pool_from_qid(priv->dev, queue);
1644 }
1645 
1646 /**
1647  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1648  * @priv: driver private structure
1649  * @queue: RX queue index
1650  * @flags: gfp flag.
1651  * Description: this function initializes the DMA RX descriptors
1652  * and allocates the socket buffers. It supports the chained and ring
1653  * modes.
1654  */
1655 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
1656 {
1657 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1658 	int ret;
1659 
1660 	netif_dbg(priv, probe, priv->dev,
1661 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1662 		  (u32)rx_q->dma_rx_phy);
1663 
1664 	stmmac_clear_rx_descriptors(priv, queue);
1665 
1666 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1667 
1668 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1669 
1670 	if (rx_q->xsk_pool) {
1671 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1672 						   MEM_TYPE_XSK_BUFF_POOL,
1673 						   NULL));
1674 		netdev_info(priv->dev,
1675 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1676 			    rx_q->queue_index);
1677 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1678 	} else {
1679 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1680 						   MEM_TYPE_PAGE_POOL,
1681 						   rx_q->page_pool));
1682 		netdev_info(priv->dev,
1683 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1684 			    rx_q->queue_index);
1685 	}
1686 
1687 	if (rx_q->xsk_pool) {
1688 		/* RX XDP ZC buffer pool may not be populated, e.g.
1689 		 * xdpsock TX-only.
1690 		 */
1691 		stmmac_alloc_rx_buffers_zc(priv, queue);
1692 	} else {
1693 		ret = stmmac_alloc_rx_buffers(priv, queue, flags);
1694 		if (ret < 0)
1695 			return -ENOMEM;
1696 	}
1697 
1698 	rx_q->cur_rx = 0;
1699 	rx_q->dirty_rx = 0;
1700 
1701 	/* Setup the chained descriptor addresses */
1702 	if (priv->mode == STMMAC_CHAIN_MODE) {
1703 		if (priv->extend_desc)
1704 			stmmac_mode_init(priv, rx_q->dma_erx,
1705 					 rx_q->dma_rx_phy,
1706 					 priv->dma_rx_size, 1);
1707 		else
1708 			stmmac_mode_init(priv, rx_q->dma_rx,
1709 					 rx_q->dma_rx_phy,
1710 					 priv->dma_rx_size, 0);
1711 	}
1712 
1713 	return 0;
1714 }
1715 
1716 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1717 {
1718 	struct stmmac_priv *priv = netdev_priv(dev);
1719 	u32 rx_count = priv->plat->rx_queues_to_use;
1720 	u32 queue;
1721 	int ret;
1722 
1723 	/* RX INITIALIZATION */
1724 	netif_dbg(priv, probe, priv->dev,
1725 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1726 
1727 	for (queue = 0; queue < rx_count; queue++) {
1728 		ret = __init_dma_rx_desc_rings(priv, queue, flags);
1729 		if (ret)
1730 			goto err_init_rx_buffers;
1731 	}
1732 
1733 	return 0;
1734 
1735 err_init_rx_buffers:
1736 	while (queue >= 0) {
1737 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1738 
1739 		if (rx_q->xsk_pool)
1740 			dma_free_rx_xskbufs(priv, queue);
1741 		else
1742 			dma_free_rx_skbufs(priv, queue);
1743 
1744 		rx_q->buf_alloc_num = 0;
1745 		rx_q->xsk_pool = NULL;
1746 
1747 		if (queue == 0)
1748 			break;
1749 
1750 		queue--;
1751 	}
1752 
1753 	return ret;
1754 }
1755 
1756 /**
1757  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1758  * @priv: driver private structure
1759  * @queue: TX queue index
1760  * Description: this function initializes the DMA TX descriptors
1761  * and allocates the socket buffers. It supports the chained and ring
1762  * modes.
1763  */
1764 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
1765 {
1766 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1767 	int i;
1768 
1769 	netif_dbg(priv, probe, priv->dev,
1770 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1771 		  (u32)tx_q->dma_tx_phy);
1772 
1773 	/* Setup the chained descriptor addresses */
1774 	if (priv->mode == STMMAC_CHAIN_MODE) {
1775 		if (priv->extend_desc)
1776 			stmmac_mode_init(priv, tx_q->dma_etx,
1777 					 tx_q->dma_tx_phy,
1778 					 priv->dma_tx_size, 1);
1779 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1780 			stmmac_mode_init(priv, tx_q->dma_tx,
1781 					 tx_q->dma_tx_phy,
1782 					 priv->dma_tx_size, 0);
1783 	}
1784 
1785 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1786 
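	/* Clear every TX descriptor and reset the per-entry bookkeeping */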
1787 	for (i = 0; i < priv->dma_tx_size; i++) {
1788 		struct dma_desc *p;
1789 
1790 		if (priv->extend_desc)
1791 			p = &((tx_q->dma_etx + i)->basic);
1792 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1793 			p = &((tx_q->dma_entx + i)->basic);
1794 		else
1795 			p = tx_q->dma_tx + i;
1796 
1797 		stmmac_clear_desc(priv, p);
1798 
1799 		tx_q->tx_skbuff_dma[i].buf = 0;
1800 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1801 		tx_q->tx_skbuff_dma[i].len = 0;
1802 		tx_q->tx_skbuff_dma[i].last_segment = false;
1803 		tx_q->tx_skbuff[i] = NULL;
1804 	}
1805 
1806 	tx_q->dirty_tx = 0;
1807 	tx_q->cur_tx = 0;
1808 	tx_q->mss = 0;
1809 
1810 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1811 
1812 	return 0;
1813 }
1814 
1815 static int init_dma_tx_desc_rings(struct net_device *dev)
1816 {
1817 	struct stmmac_priv *priv = netdev_priv(dev);
1818 	u32 tx_queue_cnt;
1819 	u32 queue;
1820 
1821 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1822 
1823 	for (queue = 0; queue < tx_queue_cnt; queue++)
1824 		__init_dma_tx_desc_rings(priv, queue);
1825 
1826 	return 0;
1827 }
1828 
1829 /**
1830  * init_dma_desc_rings - init the RX/TX descriptor rings
1831  * @dev: net device structure
1832  * @flags: gfp flag.
1833  * Description: this function initializes the DMA RX/TX descriptors
1834  * and allocates the socket buffers. It supports the chained and ring
1835  * modes.
1836  */
1837 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1838 {
1839 	struct stmmac_priv *priv = netdev_priv(dev);
1840 	int ret;
1841 
1842 	ret = init_dma_rx_desc_rings(dev, flags);
1843 	if (ret)
1844 		return ret;
1845 
1846 	ret = init_dma_tx_desc_rings(dev);
1847 
1848 	stmmac_clear_descriptors(priv);
1849 
1850 	if (netif_msg_hw(priv))
1851 		stmmac_display_rings(priv);
1852 
1853 	return ret;
1854 }
1855 
1856 /**
1857  * dma_free_tx_skbufs - free TX dma buffers
1858  * @priv: private structure
1859  * @queue: TX queue index
1860  */
1861 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1862 {
1863 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1864 	int i;
1865 
1866 	tx_q->xsk_frames_done = 0;
1867 
1868 	for (i = 0; i < priv->dma_tx_size; i++)
1869 		stmmac_free_tx_buffer(priv, queue, i);
1870 
1871 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1872 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1873 		tx_q->xsk_frames_done = 0;
1874 		tx_q->xsk_pool = NULL;
1875 	}
1876 }
1877 
1878 /**
1879  * stmmac_free_tx_skbufs - free TX skb buffers
1880  * @priv: private structure
1881  */
1882 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1883 {
1884 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1885 	u32 queue;
1886 
1887 	for (queue = 0; queue < tx_queue_cnt; queue++)
1888 		dma_free_tx_skbufs(priv, queue);
1889 }
1890 
1891 /**
1892  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1893  * @priv: private structure
1894  * @queue: RX queue index
1895  */
1896 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1897 {
1898 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1899 
1900 	/* Release the DMA RX socket buffers */
1901 	if (rx_q->xsk_pool)
1902 		dma_free_rx_xskbufs(priv, queue);
1903 	else
1904 		dma_free_rx_skbufs(priv, queue);
1905 
1906 	rx_q->buf_alloc_num = 0;
1907 	rx_q->xsk_pool = NULL;
1908 
1909 	/* Free DMA regions of consistent memory previously allocated */
1910 	if (!priv->extend_desc)
1911 		dma_free_coherent(priv->device, priv->dma_rx_size *
1912 				  sizeof(struct dma_desc),
1913 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1914 	else
1915 		dma_free_coherent(priv->device, priv->dma_rx_size *
1916 				  sizeof(struct dma_extended_desc),
1917 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1918 
1919 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1920 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1921 
1922 	kfree(rx_q->buf_pool);
1923 	if (rx_q->page_pool)
1924 		page_pool_destroy(rx_q->page_pool);
1925 }
1926 
1927 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1928 {
1929 	u32 rx_count = priv->plat->rx_queues_to_use;
1930 	u32 queue;
1931 
1932 	/* Free RX queue resources */
1933 	for (queue = 0; queue < rx_count; queue++)
1934 		__free_dma_rx_desc_resources(priv, queue);
1935 }
1936 
1937 /**
1938  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1939  * @priv: private structure
1940  * @queue: TX queue index
1941  */
1942 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
1943 {
1944 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1945 	size_t size;
1946 	void *addr;
1947 
1948 	/* Release the DMA TX socket buffers */
1949 	dma_free_tx_skbufs(priv, queue);
1950 
1951 	if (priv->extend_desc) {
1952 		size = sizeof(struct dma_extended_desc);
1953 		addr = tx_q->dma_etx;
1954 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1955 		size = sizeof(struct dma_edesc);
1956 		addr = tx_q->dma_entx;
1957 	} else {
1958 		size = sizeof(struct dma_desc);
1959 		addr = tx_q->dma_tx;
1960 	}
1961 
1962 	size *= priv->dma_tx_size;
1963 
1964 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1965 
1966 	kfree(tx_q->tx_skbuff_dma);
1967 	kfree(tx_q->tx_skbuff);
1968 }
1969 
1970 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1971 {
1972 	u32 tx_count = priv->plat->tx_queues_to_use;
1973 	u32 queue;
1974 
1975 	/* Free TX queue resources */
1976 	for (queue = 0; queue < tx_count; queue++)
1977 		__free_dma_tx_desc_resources(priv, queue);
1978 }
1979 
1980 /**
1981  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1982  * @priv: private structure
1983  * @queue: RX queue index
1984  * Description: according to which descriptor can be used (extended or basic)
1985  * this function allocates the resources for the RX path: the descriptor ring,
1986  * the buffer bookkeeping array and the page pool from which the RX buffers
1987  * are later drawn.
1988  */
1989 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1990 {
1991 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1992 	struct stmmac_channel *ch = &priv->channel[queue];
1993 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
1994 	struct page_pool_params pp_params = { 0 };
1995 	unsigned int num_pages;
1996 	unsigned int napi_id;
1997 	int ret;
1998 
1999 	rx_q->queue_index = queue;
2000 	rx_q->priv_data = priv;
2001 
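	/* One page_pool per RX queue; map buffers bidirectionally when an XDP
	 * program is attached so that XDP_TX can reuse them without remapping.
	 */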
2002 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2003 	pp_params.pool_size = priv->dma_rx_size;
2004 	num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
2005 	pp_params.order = ilog2(num_pages);
2006 	pp_params.nid = dev_to_node(priv->device);
2007 	pp_params.dev = priv->device;
2008 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2009 	pp_params.offset = stmmac_rx_offset(priv);
2010 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2011 
2012 	rx_q->page_pool = page_pool_create(&pp_params);
2013 	if (IS_ERR(rx_q->page_pool)) {
2014 		ret = PTR_ERR(rx_q->page_pool);
2015 		rx_q->page_pool = NULL;
2016 		return ret;
2017 	}
2018 
2019 	rx_q->buf_pool = kcalloc(priv->dma_rx_size,
2020 				 sizeof(*rx_q->buf_pool),
2021 				 GFP_KERNEL);
2022 	if (!rx_q->buf_pool)
2023 		return -ENOMEM;
2024 
2025 	if (priv->extend_desc) {
2026 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2027 						   priv->dma_rx_size *
2028 						   sizeof(struct dma_extended_desc),
2029 						   &rx_q->dma_rx_phy,
2030 						   GFP_KERNEL);
2031 		if (!rx_q->dma_erx)
2032 			return -ENOMEM;
2033 
2034 	} else {
2035 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2036 						  priv->dma_rx_size *
2037 						  sizeof(struct dma_desc),
2038 						  &rx_q->dma_rx_phy,
2039 						  GFP_KERNEL);
2040 		if (!rx_q->dma_rx)
2041 			return -ENOMEM;
2042 	}
2043 
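	/* Register the XDP RxQ against the NAPI context that will service it:
	 * the combined rx/tx NAPI for AF_XDP zero-copy queues, the plain RX
	 * NAPI otherwise.
	 */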
2044 	if (stmmac_xdp_is_enabled(priv) &&
2045 	    test_bit(queue, priv->af_xdp_zc_qps))
2046 		napi_id = ch->rxtx_napi.napi_id;
2047 	else
2048 		napi_id = ch->rx_napi.napi_id;
2049 
2050 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2051 			       rx_q->queue_index,
2052 			       napi_id);
2053 	if (ret) {
2054 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2055 		return -EINVAL;
2056 	}
2057 
2058 	return 0;
2059 }
2060 
2061 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
2062 {
2063 	u32 rx_count = priv->plat->rx_queues_to_use;
2064 	u32 queue;
2065 	int ret;
2066 
2067 	/* RX queues buffers and DMA */
2068 	for (queue = 0; queue < rx_count; queue++) {
2069 		ret = __alloc_dma_rx_desc_resources(priv, queue);
2070 		if (ret)
2071 			goto err_dma;
2072 	}
2073 
2074 	return 0;
2075 
2076 err_dma:
2077 	free_dma_rx_desc_resources(priv);
2078 
2079 	return ret;
2080 }
2081 
2082 /**
2083  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2084  * @priv: private structure
2085  * @queue: TX queue index
2086  * Description: according to which descriptor can be used (extended or basic)
2087  * this function allocates the resources for the TX path: the descriptor
2088  * ring and the per-descriptor bookkeeping arrays (tx_skbuff and
2089  * tx_skbuff_dma).
2090  */
2091 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
2092 {
2093 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2094 	size_t size;
2095 	void *addr;
2096 
2097 	tx_q->queue_index = queue;
2098 	tx_q->priv_data = priv;
2099 
2100 	tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
2101 				      sizeof(*tx_q->tx_skbuff_dma),
2102 				      GFP_KERNEL);
2103 	if (!tx_q->tx_skbuff_dma)
2104 		return -ENOMEM;
2105 
2106 	tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
2107 				  sizeof(struct sk_buff *),
2108 				  GFP_KERNEL);
2109 	if (!tx_q->tx_skbuff)
2110 		return -ENOMEM;
2111 
2112 	if (priv->extend_desc)
2113 		size = sizeof(struct dma_extended_desc);
2114 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2115 		size = sizeof(struct dma_edesc);
2116 	else
2117 		size = sizeof(struct dma_desc);
2118 
2119 	size *= priv->dma_tx_size;
2120 
2121 	addr = dma_alloc_coherent(priv->device, size,
2122 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2123 	if (!addr)
2124 		return -ENOMEM;
2125 
2126 	if (priv->extend_desc)
2127 		tx_q->dma_etx = addr;
2128 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2129 		tx_q->dma_entx = addr;
2130 	else
2131 		tx_q->dma_tx = addr;
2132 
2133 	return 0;
2134 }
2135 
2136 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
2137 {
2138 	u32 tx_count = priv->plat->tx_queues_to_use;
2139 	u32 queue;
2140 	int ret;
2141 
2142 	/* TX queues buffers and DMA */
2143 	for (queue = 0; queue < tx_count; queue++) {
2144 		ret = __alloc_dma_tx_desc_resources(priv, queue);
2145 		if (ret)
2146 			goto err_dma;
2147 	}
2148 
2149 	return 0;
2150 
2151 err_dma:
2152 	free_dma_tx_desc_resources(priv);
2153 	return ret;
2154 }
2155 
2156 /**
2157  * alloc_dma_desc_resources - alloc TX/RX resources.
2158  * @priv: private structure
2159  * Description: according to which descriptor can be used (extended or basic)
2160  * this function allocates the resources for the TX and RX paths. In case of
2161  * reception, for example, it pre-allocates the RX buffers in order to
2162  * allow a zero-copy mechanism.
2163  */
2164 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
2165 {
2166 	/* RX Allocation */
2167 	int ret = alloc_dma_rx_desc_resources(priv);
2168 
2169 	if (ret)
2170 		return ret;
2171 
2172 	ret = alloc_dma_tx_desc_resources(priv);
2173 
2174 	return ret;
2175 }
2176 
2177 /**
2178  * free_dma_desc_resources - free dma desc resources
2179  * @priv: private structure
2180  */
2181 static void free_dma_desc_resources(struct stmmac_priv *priv)
2182 {
2183 	/* Release the DMA TX socket buffers */
2184 	free_dma_tx_desc_resources(priv);
2185 
2186 	/* Release the DMA RX socket buffers later
2187 	 * to ensure all pending XDP_TX buffers are returned.
2188 	 */
2189 	free_dma_rx_desc_resources(priv);
2190 }
2191 
2192 /**
2193  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2194  *  @priv: driver private structure
2195  *  Description: It is used for enabling the rx queues in the MAC
2196  */
2197 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2198 {
2199 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2200 	int queue;
2201 	u8 mode;
2202 
2203 	for (queue = 0; queue < rx_queues_count; queue++) {
2204 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2205 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2206 	}
2207 }
2208 
2209 /**
2210  * stmmac_start_rx_dma - start RX DMA channel
2211  * @priv: driver private structure
2212  * @chan: RX channel index
2213  * Description:
2214  * This starts a RX DMA channel
2215  */
2216 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2217 {
2218 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2219 	stmmac_start_rx(priv, priv->ioaddr, chan);
2220 }
2221 
2222 /**
2223  * stmmac_start_tx_dma - start TX DMA channel
2224  * @priv: driver private structure
2225  * @chan: TX channel index
2226  * Description:
2227  * This starts a TX DMA channel
2228  */
2229 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2230 {
2231 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2232 	stmmac_start_tx(priv, priv->ioaddr, chan);
2233 }
2234 
2235 /**
2236  * stmmac_stop_rx_dma - stop RX DMA channel
2237  * @priv: driver private structure
2238  * @chan: RX channel index
2239  * Description:
2240  * This stops a RX DMA channel
2241  */
2242 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2243 {
2244 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2245 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2246 }
2247 
2248 /**
2249  * stmmac_stop_tx_dma - stop TX DMA channel
2250  * @priv: driver private structure
2251  * @chan: TX channel index
2252  * Description:
2253  * This stops a TX DMA channel
2254  */
2255 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2256 {
2257 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2258 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2259 }
2260 
2261 /**
2262  * stmmac_start_all_dma - start all RX and TX DMA channels
2263  * @priv: driver private structure
2264  * Description:
2265  * This starts all the RX and TX DMA channels
2266  */
2267 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2268 {
2269 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2270 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2271 	u32 chan = 0;
2272 
2273 	for (chan = 0; chan < rx_channels_count; chan++)
2274 		stmmac_start_rx_dma(priv, chan);
2275 
2276 	for (chan = 0; chan < tx_channels_count; chan++)
2277 		stmmac_start_tx_dma(priv, chan);
2278 }
2279 
2280 /**
2281  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2282  * @priv: driver private structure
2283  * Description:
2284  * This stops the RX and TX DMA channels
2285  */
2286 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2287 {
2288 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2289 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2290 	u32 chan = 0;
2291 
2292 	for (chan = 0; chan < rx_channels_count; chan++)
2293 		stmmac_stop_rx_dma(priv, chan);
2294 
2295 	for (chan = 0; chan < tx_channels_count; chan++)
2296 		stmmac_stop_tx_dma(priv, chan);
2297 }
2298 
2299 /**
2300  *  stmmac_dma_operation_mode - HW DMA operation mode
2301  *  @priv: driver private structure
2302  *  Description: it is used for configuring the DMA operation mode register in
2303  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2304  */
2305 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2306 {
2307 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2308 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2309 	int rxfifosz = priv->plat->rx_fifo_size;
2310 	int txfifosz = priv->plat->tx_fifo_size;
2311 	u32 txmode = 0;
2312 	u32 rxmode = 0;
2313 	u32 chan = 0;
2314 	u8 qmode = 0;
2315 
2316 	if (rxfifosz == 0)
2317 		rxfifosz = priv->dma_cap.rx_fifo_size;
2318 	if (txfifosz == 0)
2319 		txfifosz = priv->dma_cap.tx_fifo_size;
2320 
2321 	/* Adjust for real per queue fifo size */
2322 	rxfifosz /= rx_channels_count;
2323 	txfifosz /= tx_channels_count;
2324 
2325 	if (priv->plat->force_thresh_dma_mode) {
2326 		txmode = tc;
2327 		rxmode = tc;
2328 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2329 		/*
2330 		 * In case of GMAC, SF mode can be enabled
2331 		 * to perform the TX COE in HW. This depends on:
2332 		 * 1) TX COE being actually supported
2333 		 * 2) there being no buggy Jumbo frame support
2334 		 *    that requires not inserting the csum in the TDES.
2335 		 */
2336 		txmode = SF_DMA_MODE;
2337 		rxmode = SF_DMA_MODE;
2338 		priv->xstats.threshold = SF_DMA_MODE;
2339 	} else {
2340 		txmode = tc;
2341 		rxmode = SF_DMA_MODE;
2342 	}
2343 
2344 	/* configure all channels */
2345 	for (chan = 0; chan < rx_channels_count; chan++) {
2346 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2347 		u32 buf_size;
2348 
2349 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2350 
2351 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2352 				rxfifosz, qmode);
2353 
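		/* Program the DMA buffer size from the XSK pool frame size
		 * when zero-copy is active, otherwise use the default driver
		 * buffer size.
		 */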
2354 		if (rx_q->xsk_pool) {
2355 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2356 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2357 					      buf_size,
2358 					      chan);
2359 		} else {
2360 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2361 					      priv->dma_buf_sz,
2362 					      chan);
2363 		}
2364 	}
2365 
2366 	for (chan = 0; chan < tx_channels_count; chan++) {
2367 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2368 
2369 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2370 				txfifosz, qmode);
2371 	}
2372 }
2373 
2374 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2375 {
2376 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2377 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2378 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2379 	unsigned int entry = tx_q->cur_tx;
2380 	struct dma_desc *tx_desc = NULL;
2381 	struct xdp_desc xdp_desc;
2382 	bool work_done = true;
2383 
2384 	/* Avoids TX time-out as we are sharing with slow path */
2385 	txq_trans_cond_update(nq);
2386 
2387 	budget = min(budget, stmmac_tx_avail(priv, queue));
2388 
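	/* Peek descriptors from the XSK TX ring and map them onto free DMA
	 * descriptors until the budget, the ring threshold or the XSK ring
	 * itself runs out.
	 */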
2389 	while (budget-- > 0) {
2390 		dma_addr_t dma_addr;
2391 		bool set_ic;
2392 
2393 		/* We are sharing with the slow path and stop XSK TX desc submission when
2394 		 * the available TX ring space drops below the threshold.
2395 		 */
2396 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2397 		    !netif_carrier_ok(priv->dev)) {
2398 			work_done = false;
2399 			break;
2400 		}
2401 
2402 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2403 			break;
2404 
2405 		if (likely(priv->extend_desc))
2406 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2407 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2408 			tx_desc = &tx_q->dma_entx[entry].basic;
2409 		else
2410 			tx_desc = tx_q->dma_tx + entry;
2411 
2412 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2413 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2414 
2415 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2416 
2417 		/* To return the XDP buffer to the XSK pool, we simply call
2418 		 * xsk_tx_completed(), so we don't need to fill up
2419 		 * 'buf' and 'xdpf'.
2420 		 */
2421 		tx_q->tx_skbuff_dma[entry].buf = 0;
2422 		tx_q->xdpf[entry] = NULL;
2423 
2424 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2425 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2426 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2427 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2428 
2429 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2430 
2431 		tx_q->tx_count_frames++;
2432 
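		/* Raise a completion interrupt only every tx_coal_frames
		 * descriptors to limit the IRQ rate.
		 */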
2433 		if (!priv->tx_coal_frames[queue])
2434 			set_ic = false;
2435 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2436 			set_ic = true;
2437 		else
2438 			set_ic = false;
2439 
2440 		if (set_ic) {
2441 			tx_q->tx_count_frames = 0;
2442 			stmmac_set_tx_ic(priv, tx_desc);
2443 			priv->xstats.tx_set_ic_bit++;
2444 		}
2445 
2446 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2447 				       true, priv->mode, true, true,
2448 				       xdp_desc.len);
2449 
2450 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2451 
2452 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
2453 		entry = tx_q->cur_tx;
2454 	}
2455 
2456 	if (tx_desc) {
2457 		stmmac_flush_tx_descriptors(priv, queue);
2458 		xsk_tx_release(pool);
2459 	}
2460 
2461 	/* Return true only if both of the following conditions are met:
2462 	 *  a) TX budget is still available
2463 	 *  b) work_done is true, i.e. the XSK TX desc peek came up empty (no
2464 	 *     more pending XSK TX frames for transmission)
2465 	 */
2466 	return !!budget && work_done;
2467 }
2468 
2469 /**
2470  * stmmac_tx_clean - to manage the transmission completion
2471  * @priv: driver private structure
2472  * @budget: napi budget limiting this function's packet handling
2473  * @queue: TX queue index
2474  * Description: it reclaims the transmit resources after transmission completes.
2475  */
2476 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2477 {
2478 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2479 	unsigned int bytes_compl = 0, pkts_compl = 0;
2480 	unsigned int entry, xmits = 0, count = 0;
2481 
2482 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2483 
2484 	priv->xstats.tx_clean++;
2485 
2486 	tx_q->xsk_frames_done = 0;
2487 
2488 	entry = tx_q->dirty_tx;
2489 
2490 	/* Try to clean all TX complete frame in 1 shot */
2491 	while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
2492 		struct xdp_frame *xdpf;
2493 		struct sk_buff *skb;
2494 		struct dma_desc *p;
2495 		int status;
2496 
2497 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2498 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2499 			xdpf = tx_q->xdpf[entry];
2500 			skb = NULL;
2501 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2502 			xdpf = NULL;
2503 			skb = tx_q->tx_skbuff[entry];
2504 		} else {
2505 			xdpf = NULL;
2506 			skb = NULL;
2507 		}
2508 
2509 		if (priv->extend_desc)
2510 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2511 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2512 			p = &tx_q->dma_entx[entry].basic;
2513 		else
2514 			p = tx_q->dma_tx + entry;
2515 
2516 		status = stmmac_tx_status(priv, &priv->dev->stats,
2517 				&priv->xstats, p, priv->ioaddr);
2518 		/* Check if the descriptor is owned by the DMA */
2519 		if (unlikely(status & tx_dma_own))
2520 			break;
2521 
2522 		count++;
2523 
2524 		/* Make sure descriptor fields are read after reading
2525 		 * the own bit.
2526 		 */
2527 		dma_rmb();
2528 
2529 		/* Just consider the last segment and ...*/
2530 		if (likely(!(status & tx_not_ls))) {
2531 			/* ... verify the status error condition */
2532 			if (unlikely(status & tx_err)) {
2533 				priv->dev->stats.tx_errors++;
2534 			} else {
2535 				priv->dev->stats.tx_packets++;
2536 				priv->xstats.tx_pkt_n++;
2537 				priv->xstats.txq_stats[queue].tx_pkt_n++;
2538 			}
2539 			if (skb)
2540 				stmmac_get_tx_hwtstamp(priv, p, skb);
2541 		}
2542 
2543 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2544 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2545 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2546 				dma_unmap_page(priv->device,
2547 					       tx_q->tx_skbuff_dma[entry].buf,
2548 					       tx_q->tx_skbuff_dma[entry].len,
2549 					       DMA_TO_DEVICE);
2550 			else
2551 				dma_unmap_single(priv->device,
2552 						 tx_q->tx_skbuff_dma[entry].buf,
2553 						 tx_q->tx_skbuff_dma[entry].len,
2554 						 DMA_TO_DEVICE);
2555 			tx_q->tx_skbuff_dma[entry].buf = 0;
2556 			tx_q->tx_skbuff_dma[entry].len = 0;
2557 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2558 		}
2559 
2560 		stmmac_clean_desc3(priv, tx_q, p);
2561 
2562 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2563 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2564 
2565 		if (xdpf &&
2566 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2567 			xdp_return_frame_rx_napi(xdpf);
2568 			tx_q->xdpf[entry] = NULL;
2569 		}
2570 
2571 		if (xdpf &&
2572 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2573 			xdp_return_frame(xdpf);
2574 			tx_q->xdpf[entry] = NULL;
2575 		}
2576 
2577 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2578 			tx_q->xsk_frames_done++;
2579 
2580 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2581 			if (likely(skb)) {
2582 				pkts_compl++;
2583 				bytes_compl += skb->len;
2584 				dev_consume_skb_any(skb);
2585 				tx_q->tx_skbuff[entry] = NULL;
2586 			}
2587 		}
2588 
2589 		stmmac_release_tx_desc(priv, p, priv->mode);
2590 
2591 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2592 	}
2593 	tx_q->dirty_tx = entry;
2594 
2595 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2596 				  pkts_compl, bytes_compl);
2597 
2598 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2599 								queue))) &&
2600 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2601 
2602 		netif_dbg(priv, tx_done, priv->dev,
2603 			  "%s: restart transmit\n", __func__);
2604 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2605 	}
2606 
2607 	if (tx_q->xsk_pool) {
2608 		bool work_done;
2609 
2610 		if (tx_q->xsk_frames_done)
2611 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2612 
2613 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2614 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2615 
2616 		/* For XSK TX, we try to send as many as possible.
2617 		 * If XSK work done (XSK TX desc empty and budget still
2618 		 * available), return "budget - 1" to reenable TX IRQ.
2619 		 * Else, return "budget" to make NAPI continue polling.
2620 		 */
2621 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2622 					       STMMAC_XSK_TX_BUDGET_MAX);
2623 		if (work_done)
2624 			xmits = budget - 1;
2625 		else
2626 			xmits = budget;
2627 	}
2628 
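	/* The TX path is going idle: if software-timer based EEE is in use,
	 * try to enter LPI mode and re-arm the EEE control timer.
	 */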
2629 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2630 	    priv->eee_sw_timer_en) {
2631 		stmmac_enable_eee_mode(priv);
2632 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2633 	}
2634 
2635 	/* We still have pending packets, let's call for a new scheduling */
2636 	if (tx_q->dirty_tx != tx_q->cur_tx)
2637 		hrtimer_start(&tx_q->txtimer,
2638 			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2639 			      HRTIMER_MODE_REL);
2640 
2641 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2642 
2643 	/* Combine decisions from TX clean and XSK TX */
2644 	return max(count, xmits);
2645 }
2646 
2647 /**
2648  * stmmac_tx_err - to manage the tx error
2649  * @priv: driver private structure
2650  * @chan: channel index
2651  * Description: it cleans the descriptors and restarts the transmission
2652  * in case of transmission errors.
2653  */
2654 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2655 {
2656 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2657 
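	/* Stop the queue, drain and re-initialize the TX ring, then restart
	 * the DMA channel from a clean state.
	 */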
2658 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2659 
2660 	stmmac_stop_tx_dma(priv, chan);
2661 	dma_free_tx_skbufs(priv, chan);
2662 	stmmac_clear_tx_descriptors(priv, chan);
2663 	tx_q->dirty_tx = 0;
2664 	tx_q->cur_tx = 0;
2665 	tx_q->mss = 0;
2666 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2667 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2668 			    tx_q->dma_tx_phy, chan);
2669 	stmmac_start_tx_dma(priv, chan);
2670 
2671 	priv->dev->stats.tx_errors++;
2672 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2673 }
2674 
2675 /**
2676  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2677  *  @priv: driver private structure
2678  *  @txmode: TX operating mode
2679  *  @rxmode: RX operating mode
2680  *  @chan: channel index
2681  *  Description: it is used for configuring the DMA operation mode at
2682  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2683  *  mode.
2684  */
2685 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2686 					  u32 rxmode, u32 chan)
2687 {
2688 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2689 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2690 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2691 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2692 	int rxfifosz = priv->plat->rx_fifo_size;
2693 	int txfifosz = priv->plat->tx_fifo_size;
2694 
2695 	if (rxfifosz == 0)
2696 		rxfifosz = priv->dma_cap.rx_fifo_size;
2697 	if (txfifosz == 0)
2698 		txfifosz = priv->dma_cap.tx_fifo_size;
2699 
2700 	/* Adjust for real per queue fifo size */
2701 	rxfifosz /= rx_channels_count;
2702 	txfifosz /= tx_channels_count;
2703 
2704 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2705 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2706 }
2707 
2708 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2709 {
2710 	int ret;
2711 
2712 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2713 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2714 	if (ret && (ret != -EINVAL)) {
2715 		stmmac_global_err(priv);
2716 		return true;
2717 	}
2718 
2719 	return false;
2720 }
2721 
2722 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2723 {
2724 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2725 						 &priv->xstats, chan, dir);
2726 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2727 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2728 	struct stmmac_channel *ch = &priv->channel[chan];
2729 	struct napi_struct *rx_napi;
2730 	struct napi_struct *tx_napi;
2731 	unsigned long flags;
2732 
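	/* AF_XDP zero-copy queues are serviced by the combined rx/tx NAPI */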
2733 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2734 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2735 
2736 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2737 		if (napi_schedule_prep(rx_napi)) {
2738 			spin_lock_irqsave(&ch->lock, flags);
2739 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2740 			spin_unlock_irqrestore(&ch->lock, flags);
2741 			__napi_schedule(rx_napi);
2742 		}
2743 	}
2744 
2745 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2746 		if (napi_schedule_prep(tx_napi)) {
2747 			spin_lock_irqsave(&ch->lock, flags);
2748 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2749 			spin_unlock_irqrestore(&ch->lock, flags);
2750 			__napi_schedule(tx_napi);
2751 		}
2752 	}
2753 
2754 	return status;
2755 }
2756 
2757 /**
2758  * stmmac_dma_interrupt - DMA ISR
2759  * @priv: driver private structure
2760  * Description: this is the DMA ISR. It is called by the main ISR.
2761  * It calls the dwmac DMA routine and schedules the poll method in case some
2762  * work can be done.
2763  */
2764 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2765 {
2766 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2767 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2768 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2769 				tx_channel_count : rx_channel_count;
2770 	u32 chan;
2771 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2772 
2773 	/* Make sure we never check beyond our status buffer. */
2774 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2775 		channels_to_check = ARRAY_SIZE(status);
2776 
2777 	for (chan = 0; chan < channels_to_check; chan++)
2778 		status[chan] = stmmac_napi_check(priv, chan,
2779 						 DMA_DIR_RXTX);
2780 
2781 	for (chan = 0; chan < tx_channel_count; chan++) {
2782 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2783 			/* Try to bump up the dma threshold on this failure */
2784 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2785 			    (tc <= 256)) {
2786 				tc += 64;
2787 				if (priv->plat->force_thresh_dma_mode)
2788 					stmmac_set_dma_operation_mode(priv,
2789 								      tc,
2790 								      tc,
2791 								      chan);
2792 				else
2793 					stmmac_set_dma_operation_mode(priv,
2794 								    tc,
2795 								    SF_DMA_MODE,
2796 								    chan);
2797 				priv->xstats.threshold = tc;
2798 			}
2799 		} else if (unlikely(status[chan] == tx_hard_error)) {
2800 			stmmac_tx_err(priv, chan);
2801 		}
2802 	}
2803 }
2804 
2805 /**
2806  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2807  * @priv: driver private structure
2808  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2809  */
2810 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2811 {
2812 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2813 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2814 
2815 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2816 
2817 	if (priv->dma_cap.rmon) {
2818 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2819 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2820 	} else
2821 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2822 }
2823 
2824 /**
2825  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2826  * @priv: driver private structure
2827  * Description:
2828  *  new GMAC chip generations have a new register to indicate the
2829  *  presence of the optional feature/functions.
2830  *  This can also be used to override the value passed through the
2831  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2832  */
2833 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2834 {
2835 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2836 }
2837 
2838 /**
2839  * stmmac_check_ether_addr - check if the MAC addr is valid
2840  * @priv: driver private structure
2841  * Description:
2842  * it verifies that the MAC address is valid; if it is not, the address is
2843  * read back from the HW and, failing that, a random MAC address is generated
2844  */
2845 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2846 {
2847 	u8 addr[ETH_ALEN];
2848 
2849 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2850 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2851 		if (is_valid_ether_addr(addr))
2852 			eth_hw_addr_set(priv->dev, addr);
2853 		else
2854 			eth_hw_addr_random(priv->dev);
2855 		dev_info(priv->device, "device MAC address %pM\n",
2856 			 priv->dev->dev_addr);
2857 	}
2858 }
2859 
2860 /**
2861  * stmmac_init_dma_engine - DMA init.
2862  * @priv: driver private structure
2863  * Description:
2864  * It inits the DMA invoking the specific MAC/GMAC callback.
2865  * Some DMA parameters can be passed from the platform;
2866  * in case these are not passed, a default is kept for the MAC or GMAC.
2867  */
2868 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2869 {
2870 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2871 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2872 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2873 	struct stmmac_rx_queue *rx_q;
2874 	struct stmmac_tx_queue *tx_q;
2875 	u32 chan = 0;
2876 	int atds = 0;
2877 	int ret = 0;
2878 
2879 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2880 		dev_err(priv->device, "Invalid DMA configuration\n");
2881 		return -EINVAL;
2882 	}
2883 
2884 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2885 		atds = 1;
2886 
2887 	ret = stmmac_reset(priv, priv->ioaddr);
2888 	if (ret) {
2889 		dev_err(priv->device, "Failed to reset the dma\n");
2890 		return ret;
2891 	}
2892 
2893 	/* DMA Configuration */
2894 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2895 
2896 	if (priv->plat->axi)
2897 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2898 
2899 	/* DMA CSR Channel configuration */
2900 	for (chan = 0; chan < dma_csr_ch; chan++)
2901 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2902 
2903 	/* DMA RX Channel Configuration */
2904 	for (chan = 0; chan < rx_channels_count; chan++) {
2905 		rx_q = &priv->rx_queue[chan];
2906 
2907 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2908 				    rx_q->dma_rx_phy, chan);
2909 
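		/* Place the RX tail pointer just after the descriptor of the
		 * last pre-allocated buffer.
		 */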
2910 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2911 				     (rx_q->buf_alloc_num *
2912 				      sizeof(struct dma_desc));
2913 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2914 				       rx_q->rx_tail_addr, chan);
2915 	}
2916 
2917 	/* DMA TX Channel Configuration */
2918 	for (chan = 0; chan < tx_channels_count; chan++) {
2919 		tx_q = &priv->tx_queue[chan];
2920 
2921 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2922 				    tx_q->dma_tx_phy, chan);
2923 
2924 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2925 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2926 				       tx_q->tx_tail_addr, chan);
2927 	}
2928 
2929 	return ret;
2930 }
2931 
2932 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2933 {
2934 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2935 
2936 	hrtimer_start(&tx_q->txtimer,
2937 		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2938 		      HRTIMER_MODE_REL);
2939 }
2940 
2941 /**
2942  * stmmac_tx_timer - mitigation sw timer for tx.
2943  * @t: data pointer
2944  * Description:
2945  * This is the timer handler that schedules the TX NAPI to run stmmac_tx_clean.
2946  */
2947 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2948 {
2949 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2950 	struct stmmac_priv *priv = tx_q->priv_data;
2951 	struct stmmac_channel *ch;
2952 	struct napi_struct *napi;
2953 
2954 	ch = &priv->channel[tx_q->queue_index];
2955 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2956 
2957 	if (likely(napi_schedule_prep(napi))) {
2958 		unsigned long flags;
2959 
2960 		spin_lock_irqsave(&ch->lock, flags);
2961 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2962 		spin_unlock_irqrestore(&ch->lock, flags);
2963 		__napi_schedule(napi);
2964 	}
2965 
2966 	return HRTIMER_NORESTART;
2967 }
2968 
2969 /**
2970  * stmmac_init_coalesce - init mitigation options.
2971  * @priv: driver private structure
2972  * Description:
2973  * This inits the coalesce parameters: i.e. timer rate,
2974  * timer handler and default threshold used for enabling the
2975  * interrupt on completion bit.
2976  */
2977 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2978 {
2979 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2980 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2981 	u32 chan;
2982 
2983 	for (chan = 0; chan < tx_channel_count; chan++) {
2984 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2985 
2986 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
2987 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
2988 
2989 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2990 		tx_q->txtimer.function = stmmac_tx_timer;
2991 	}
2992 
2993 	for (chan = 0; chan < rx_channel_count; chan++)
2994 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
2995 }
2996 
2997 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2998 {
2999 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3000 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3001 	u32 chan;
3002 
3003 	/* set TX ring length */
3004 	for (chan = 0; chan < tx_channels_count; chan++)
3005 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3006 				       (priv->dma_tx_size - 1), chan);
3007 
3008 	/* set RX ring length */
3009 	for (chan = 0; chan < rx_channels_count; chan++)
3010 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3011 				       (priv->dma_rx_size - 1), chan);
3012 }
3013 
3014 /**
3015  *  stmmac_set_tx_queue_weight - Set TX queue weight
3016  *  @priv: driver private structure
3017  *  Description: It is used for setting TX queues weight
3018  */
3019 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3020 {
3021 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3022 	u32 weight;
3023 	u32 queue;
3024 
3025 	for (queue = 0; queue < tx_queues_count; queue++) {
3026 		weight = priv->plat->tx_queues_cfg[queue].weight;
3027 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3028 	}
3029 }
3030 
3031 /**
3032  *  stmmac_configure_cbs - Configure CBS in TX queue
3033  *  @priv: driver private structure
3034  *  Description: It is used for configuring CBS in AVB TX queues
3035  */
3036 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3037 {
3038 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3039 	u32 mode_to_use;
3040 	u32 queue;
3041 
3042 	/* queue 0 is reserved for legacy traffic */
3043 	for (queue = 1; queue < tx_queues_count; queue++) {
3044 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3045 		if (mode_to_use == MTL_QUEUE_DCB)
3046 			continue;
3047 
3048 		stmmac_config_cbs(priv, priv->hw,
3049 				priv->plat->tx_queues_cfg[queue].send_slope,
3050 				priv->plat->tx_queues_cfg[queue].idle_slope,
3051 				priv->plat->tx_queues_cfg[queue].high_credit,
3052 				priv->plat->tx_queues_cfg[queue].low_credit,
3053 				queue);
3054 	}
3055 }
3056 
3057 /**
3058  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3059  *  @priv: driver private structure
3060  *  Description: It is used for mapping RX queues to RX dma channels
3061  */
3062 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3063 {
3064 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3065 	u32 queue;
3066 	u32 chan;
3067 
3068 	for (queue = 0; queue < rx_queues_count; queue++) {
3069 		chan = priv->plat->rx_queues_cfg[queue].chan;
3070 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3071 	}
3072 }
3073 
3074 /**
3075  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3076  *  @priv: driver private structure
3077  *  Description: It is used for configuring the RX Queue Priority
3078  */
3079 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3080 {
3081 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3082 	u32 queue;
3083 	u32 prio;
3084 
3085 	for (queue = 0; queue < rx_queues_count; queue++) {
3086 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3087 			continue;
3088 
3089 		prio = priv->plat->rx_queues_cfg[queue].prio;
3090 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3091 	}
3092 }
3093 
3094 /**
3095  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3096  *  @priv: driver private structure
3097  *  Description: It is used for configuring the TX Queue Priority
3098  */
3099 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3100 {
3101 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3102 	u32 queue;
3103 	u32 prio;
3104 
3105 	for (queue = 0; queue < tx_queues_count; queue++) {
3106 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3107 			continue;
3108 
3109 		prio = priv->plat->tx_queues_cfg[queue].prio;
3110 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3111 	}
3112 }
3113 
3114 /**
3115  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3116  *  @priv: driver private structure
3117  *  Description: It is used for configuring the RX queue routing
3118  */
3119 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3120 {
3121 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3122 	u32 queue;
3123 	u8 packet;
3124 
3125 	for (queue = 0; queue < rx_queues_count; queue++) {
3126 		/* no specific packet type routing specified for the queue */
3127 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3128 			continue;
3129 
3130 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3131 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3132 	}
3133 }
3134 
3135 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3136 {
3137 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3138 		priv->rss.enable = false;
3139 		return;
3140 	}
3141 
3142 	if (priv->dev->features & NETIF_F_RXHASH)
3143 		priv->rss.enable = true;
3144 	else
3145 		priv->rss.enable = false;
3146 
3147 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3148 			     priv->plat->rx_queues_to_use);
3149 }
3150 
3151 /**
3152  *  stmmac_mtl_configuration - Configure MTL
3153  *  @priv: driver private structure
3154  *  Description: It is used for configuring MTL
3155  */
3156 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3157 {
3158 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3159 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3160 
3161 	if (tx_queues_count > 1)
3162 		stmmac_set_tx_queue_weight(priv);
3163 
3164 	/* Configure MTL RX algorithms */
3165 	if (rx_queues_count > 1)
3166 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3167 				priv->plat->rx_sched_algorithm);
3168 
3169 	/* Configure MTL TX algorithms */
3170 	if (tx_queues_count > 1)
3171 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3172 				priv->plat->tx_sched_algorithm);
3173 
3174 	/* Configure CBS in AVB TX queues */
3175 	if (tx_queues_count > 1)
3176 		stmmac_configure_cbs(priv);
3177 
3178 	/* Map RX MTL to DMA channels */
3179 	stmmac_rx_queue_dma_chan_map(priv);
3180 
3181 	/* Enable MAC RX Queues */
3182 	stmmac_mac_enable_rx_queues(priv);
3183 
3184 	/* Set RX priorities */
3185 	if (rx_queues_count > 1)
3186 		stmmac_mac_config_rx_queues_prio(priv);
3187 
3188 	/* Set TX priorities */
3189 	if (tx_queues_count > 1)
3190 		stmmac_mac_config_tx_queues_prio(priv);
3191 
3192 	/* Set RX routing */
3193 	if (rx_queues_count > 1)
3194 		stmmac_mac_config_rx_queues_routing(priv);
3195 
3196 	/* Receive Side Scaling */
3197 	if (rx_queues_count > 1)
3198 		stmmac_mac_config_rss(priv);
3199 }
3200 
3201 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3202 {
3203 	if (priv->dma_cap.asp) {
3204 		netdev_info(priv->dev, "Enabling Safety Features\n");
3205 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3206 					  priv->plat->safety_feat_cfg);
3207 	} else {
3208 		netdev_info(priv->dev, "No Safety Features support found\n");
3209 	}
3210 }
3211 
3212 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3213 {
3214 	char *name;
3215 
3216 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3217 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3218 
3219 	name = priv->wq_name;
3220 	sprintf(name, "%s-fpe", priv->dev->name);
3221 
3222 	priv->fpe_wq = create_singlethread_workqueue(name);
3223 	if (!priv->fpe_wq) {
3224 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3225 
3226 		return -ENOMEM;
3227 	}
3228 	netdev_info(priv->dev, "FPE workqueue start");
3229 
3230 	return 0;
3231 }
3232 
3233 /**
3234  * stmmac_hw_setup - setup mac in a usable state.
3235  *  @dev : pointer to the device structure.
3236  *  @init_ptp: initialize PTP if set
3237  *  Description:
3238  *  this is the main function to setup the HW in a usable state because the
3239  *  dma engine is reset, the core registers are configured (e.g. AXI,
3240  *  Checksum features, timers). The DMA is ready to start receiving and
3241  *  transmitting.
3242  *  Return value:
3243  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3244  *  file on failure.
3245  */
3246 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
3247 {
3248 	struct stmmac_priv *priv = netdev_priv(dev);
3249 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3250 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3251 	bool sph_en;
3252 	u32 chan;
3253 	int ret;
3254 
3255 	/* DMA initialization and SW reset */
3256 	ret = stmmac_init_dma_engine(priv);
3257 	if (ret < 0) {
3258 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3259 			   __func__);
3260 		return ret;
3261 	}
3262 
3263 	/* Copy the MAC addr into the HW  */
3264 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3265 
3266 	/* PS and related bits will be programmed according to the speed */
3267 	if (priv->hw->pcs) {
3268 		int speed = priv->plat->mac_port_sel_speed;
3269 
3270 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3271 		    (speed == SPEED_1000)) {
3272 			priv->hw->ps = speed;
3273 		} else {
3274 			dev_warn(priv->device, "invalid port speed\n");
3275 			priv->hw->ps = 0;
3276 		}
3277 	}
3278 
3279 	/* Initialize the MAC Core */
3280 	stmmac_core_init(priv, priv->hw, dev);
3281 
3282 	/* Initialize MTL*/
3283 	stmmac_mtl_configuration(priv);
3284 
3285 	/* Initialize Safety Features */
3286 	stmmac_safety_feat_configuration(priv);
3287 
3288 	ret = stmmac_rx_ipc(priv, priv->hw);
3289 	if (!ret) {
3290 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3291 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3292 		priv->hw->rx_csum = 0;
3293 	}
3294 
3295 	/* Enable the MAC Rx/Tx */
3296 	stmmac_mac_set(priv, priv->ioaddr, true);
3297 
3298 	/* Set the HW DMA mode and the COE */
3299 	stmmac_dma_operation_mode(priv);
3300 
3301 	stmmac_mmc_setup(priv);
3302 
3303 	if (init_ptp) {
3304 		ret = stmmac_init_ptp(priv);
3305 		if (ret == -EOPNOTSUPP)
3306 			netdev_warn(priv->dev, "PTP not supported by HW\n");
3307 		else if (ret)
3308 			netdev_warn(priv->dev, "PTP init failed\n");
3309 	}
3310 
3311 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3312 
3313 	/* Convert the timer from msec to usec */
3314 	if (!priv->tx_lpi_timer)
3315 		priv->tx_lpi_timer = eee_timer * 1000;
3316 
3317 	if (priv->use_riwt) {
3318 		u32 queue;
3319 
3320 		for (queue = 0; queue < rx_cnt; queue++) {
3321 			if (!priv->rx_riwt[queue])
3322 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3323 
3324 			stmmac_rx_watchdog(priv, priv->ioaddr,
3325 					   priv->rx_riwt[queue], queue);
3326 		}
3327 	}
3328 
3329 	if (priv->hw->pcs)
3330 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3331 
3332 	/* set TX and RX rings length */
3333 	stmmac_set_rings_length(priv);
3334 
3335 	/* Enable TSO */
3336 	if (priv->tso) {
3337 		for (chan = 0; chan < tx_cnt; chan++) {
3338 			struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3339 
3340 			/* TSO and TBS cannot co-exist */
3341 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3342 				continue;
3343 
3344 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3345 		}
3346 	}
3347 
3348 	/* Enable Split Header */
3349 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3350 	for (chan = 0; chan < rx_cnt; chan++)
3351 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3352 
3353 
3354 	/* VLAN Tag Insertion */
3355 	if (priv->dma_cap.vlins)
3356 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3357 
3358 	/* TBS */
3359 	for (chan = 0; chan < tx_cnt; chan++) {
3360 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3361 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3362 
3363 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3364 	}
3365 
3366 	/* Configure real RX and TX queues */
3367 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3368 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3369 
3370 	/* Start the ball rolling... */
3371 	stmmac_start_all_dma(priv);
3372 
3373 	if (priv->dma_cap.fpesel) {
3374 		stmmac_fpe_start_wq(priv);
3375 
3376 		if (priv->plat->fpe_cfg->enable)
3377 			stmmac_fpe_handshake(priv, true);
3378 	}
3379 
3380 	return 0;
3381 }
3382 
3383 static void stmmac_hw_teardown(struct net_device *dev)
3384 {
3385 	struct stmmac_priv *priv = netdev_priv(dev);
3386 
3387 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3388 }
3389 
3390 static void stmmac_free_irq(struct net_device *dev,
3391 			    enum request_irq_err irq_err, int irq_idx)
3392 {
3393 	struct stmmac_priv *priv = netdev_priv(dev);
3394 	int j;
3395 
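	/* Tear down in the reverse order of the request path; each case falls
	 * through so everything requested before the given point is released.
	 */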
3396 	switch (irq_err) {
3397 	case REQ_IRQ_ERR_ALL:
3398 		irq_idx = priv->plat->tx_queues_to_use;
3399 		fallthrough;
3400 	case REQ_IRQ_ERR_TX:
3401 		for (j = irq_idx - 1; j >= 0; j--) {
3402 			if (priv->tx_irq[j] > 0) {
3403 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3404 				free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
3405 			}
3406 		}
3407 		irq_idx = priv->plat->rx_queues_to_use;
3408 		fallthrough;
3409 	case REQ_IRQ_ERR_RX:
3410 		for (j = irq_idx - 1; j >= 0; j--) {
3411 			if (priv->rx_irq[j] > 0) {
3412 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3413 				free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
3414 			}
3415 		}
3416 
3417 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3418 			free_irq(priv->sfty_ue_irq, dev);
3419 		fallthrough;
3420 	case REQ_IRQ_ERR_SFTY_UE:
3421 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3422 			free_irq(priv->sfty_ce_irq, dev);
3423 		fallthrough;
3424 	case REQ_IRQ_ERR_SFTY_CE:
3425 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3426 			free_irq(priv->lpi_irq, dev);
3427 		fallthrough;
3428 	case REQ_IRQ_ERR_LPI:
3429 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3430 			free_irq(priv->wol_irq, dev);
3431 		fallthrough;
3432 	case REQ_IRQ_ERR_WOL:
3433 		free_irq(dev->irq, dev);
3434 		fallthrough;
3435 	case REQ_IRQ_ERR_MAC:
3436 	case REQ_IRQ_ERR_NO:
3437 		/* If MAC IRQ request error, no more IRQ to free */
3438 		break;
3439 	}
3440 }
3441 
3442 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3443 {
3444 	struct stmmac_priv *priv = netdev_priv(dev);
3445 	enum request_irq_err irq_err;
3446 	cpumask_t cpu_mask;
3447 	int irq_idx = 0;
3448 	char *int_name;
3449 	int ret;
3450 	int i;
3451 
3452 	/* For common interrupt */
3453 	int_name = priv->int_name_mac;
3454 	sprintf(int_name, "%s:%s", dev->name, "mac");
3455 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3456 			  0, int_name, dev);
3457 	if (unlikely(ret < 0)) {
3458 		netdev_err(priv->dev,
3459 			   "%s: alloc mac MSI %d (error: %d)\n",
3460 			   __func__, dev->irq, ret);
3461 		irq_err = REQ_IRQ_ERR_MAC;
3462 		goto irq_error;
3463 	}
3464 
3465 	/* Request the Wake IRQ in case another line
3466 	 * is used for WoL
3467 	 */
3468 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3469 		int_name = priv->int_name_wol;
3470 		sprintf(int_name, "%s:%s", dev->name, "wol");
3471 		ret = request_irq(priv->wol_irq,
3472 				  stmmac_mac_interrupt,
3473 				  0, int_name, dev);
3474 		if (unlikely(ret < 0)) {
3475 			netdev_err(priv->dev,
3476 				   "%s: alloc wol MSI %d (error: %d)\n",
3477 				   __func__, priv->wol_irq, ret);
3478 			irq_err = REQ_IRQ_ERR_WOL;
3479 			goto irq_error;
3480 		}
3481 	}
3482 
3483 	/* Request the LPI IRQ in case another line
3484 	 * is used for LPI
3485 	 */
3486 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3487 		int_name = priv->int_name_lpi;
3488 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3489 		ret = request_irq(priv->lpi_irq,
3490 				  stmmac_mac_interrupt,
3491 				  0, int_name, dev);
3492 		if (unlikely(ret < 0)) {
3493 			netdev_err(priv->dev,
3494 				   "%s: alloc lpi MSI %d (error: %d)\n",
3495 				   __func__, priv->lpi_irq, ret);
3496 			irq_err = REQ_IRQ_ERR_LPI;
3497 			goto irq_error;
3498 		}
3499 	}
3500 
3501 	/* Request the Safety Feature Correctable Error line in
3502 	 * case a separate line is used for it
3503 	 */
3504 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3505 		int_name = priv->int_name_sfty_ce;
3506 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3507 		ret = request_irq(priv->sfty_ce_irq,
3508 				  stmmac_safety_interrupt,
3509 				  0, int_name, dev);
3510 		if (unlikely(ret < 0)) {
3511 			netdev_err(priv->dev,
3512 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3513 				   __func__, priv->sfty_ce_irq, ret);
3514 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3515 			goto irq_error;
3516 		}
3517 	}
3518 
3519 	/* Request the Safety Feature Uncorrectable Error line in
3520 	 * case a separate line is used for it
3521 	 */
3522 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3523 		int_name = priv->int_name_sfty_ue;
3524 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3525 		ret = request_irq(priv->sfty_ue_irq,
3526 				  stmmac_safety_interrupt,
3527 				  0, int_name, dev);
3528 		if (unlikely(ret < 0)) {
3529 			netdev_err(priv->dev,
3530 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3531 				   __func__, priv->sfty_ue_irq, ret);
3532 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3533 			goto irq_error;
3534 		}
3535 	}
3536 
3537 	/* Request Rx MSI irq */
3538 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3539 		if (i >= MTL_MAX_RX_QUEUES)
3540 			break;
3541 		if (priv->rx_irq[i] == 0)
3542 			continue;
3543 
3544 		int_name = priv->int_name_rx_irq[i];
3545 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3546 		ret = request_irq(priv->rx_irq[i],
3547 				  stmmac_msi_intr_rx,
3548 				  0, int_name, &priv->rx_queue[i]);
3549 		if (unlikely(ret < 0)) {
3550 			netdev_err(priv->dev,
3551 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3552 				   __func__, i, priv->rx_irq[i], ret);
3553 			irq_err = REQ_IRQ_ERR_RX;
3554 			irq_idx = i;
3555 			goto irq_error;
3556 		}
3557 		cpumask_clear(&cpu_mask);
3558 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3559 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3560 	}
3561 
3562 	/* Request Tx MSI irq */
3563 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3564 		if (i >= MTL_MAX_TX_QUEUES)
3565 			break;
3566 		if (priv->tx_irq[i] == 0)
3567 			continue;
3568 
3569 		int_name = priv->int_name_tx_irq[i];
3570 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3571 		ret = request_irq(priv->tx_irq[i],
3572 				  stmmac_msi_intr_tx,
3573 				  0, int_name, &priv->tx_queue[i]);
3574 		if (unlikely(ret < 0)) {
3575 			netdev_err(priv->dev,
3576 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3577 				   __func__, i, priv->tx_irq[i], ret);
3578 			irq_err = REQ_IRQ_ERR_TX;
3579 			irq_idx = i;
3580 			goto irq_error;
3581 		}
3582 		cpumask_clear(&cpu_mask);
3583 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3584 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3585 	}
3586 
3587 	return 0;
3588 
3589 irq_error:
3590 	stmmac_free_irq(dev, irq_err, irq_idx);
3591 	return ret;
3592 }
3593 
3594 static int stmmac_request_irq_single(struct net_device *dev)
3595 {
3596 	struct stmmac_priv *priv = netdev_priv(dev);
3597 	enum request_irq_err irq_err;
3598 	int ret;
3599 
3600 	ret = request_irq(dev->irq, stmmac_interrupt,
3601 			  IRQF_SHARED, dev->name, dev);
3602 	if (unlikely(ret < 0)) {
3603 		netdev_err(priv->dev,
3604 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3605 			   __func__, dev->irq, ret);
3606 		irq_err = REQ_IRQ_ERR_MAC;
3607 		goto irq_error;
3608 	}
3609 
3610 	/* Request the Wake IRQ in case a separate line
3611 	 * is used for WoL
3612 	 */
3613 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3614 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3615 				  IRQF_SHARED, dev->name, dev);
3616 		if (unlikely(ret < 0)) {
3617 			netdev_err(priv->dev,
3618 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3619 				   __func__, priv->wol_irq, ret);
3620 			irq_err = REQ_IRQ_ERR_WOL;
3621 			goto irq_error;
3622 		}
3623 	}
3624 
3625 	/* Request the LPI IRQ in case a separate line is used for LPI */
3626 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3627 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3628 				  IRQF_SHARED, dev->name, dev);
3629 		if (unlikely(ret < 0)) {
3630 			netdev_err(priv->dev,
3631 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3632 				   __func__, priv->lpi_irq, ret);
3633 			irq_err = REQ_IRQ_ERR_LPI;
3634 			goto irq_error;
3635 		}
3636 	}
3637 
3638 	return 0;
3639 
3640 irq_error:
3641 	stmmac_free_irq(dev, irq_err, 0);
3642 	return ret;
3643 }
3644 
3645 static int stmmac_request_irq(struct net_device *dev)
3646 {
3647 	struct stmmac_priv *priv = netdev_priv(dev);
3648 	int ret;
3649 
3650 	/* Request the IRQ lines */
3651 	if (priv->plat->multi_msi_en)
3652 		ret = stmmac_request_irq_multi_msi(dev);
3653 	else
3654 		ret = stmmac_request_irq_single(dev);
3655 
3656 	return ret;
3657 }
3658 
3659 /**
3660  *  stmmac_open - open entry point of the driver
3661  *  @dev : pointer to the device structure.
3662  *  Description:
3663  *  This function is the open entry point of the driver.
3664  *  Return value:
3665  *  0 on success and an appropriate negative error code, as defined in
3666  *  errno.h, on failure.
3667  */
3668 static int stmmac_open(struct net_device *dev)
3669 {
3670 	struct stmmac_priv *priv = netdev_priv(dev);
3671 	int mode = priv->plat->phy_interface;
3672 	int bfsize = 0;
3673 	u32 chan;
3674 	int ret;
3675 
3676 	ret = pm_runtime_get_sync(priv->device);
3677 	if (ret < 0) {
3678 		pm_runtime_put_noidle(priv->device);
3679 		return ret;
3680 	}
3681 
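	/* A PHY is only attached when the link is not fully handled by an
	 * internal PCS (TBI/RTBI) or by the XPCS running Clause 73
	 * autonegotiation.
	 */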
3682 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3683 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3684 	    (!priv->hw->xpcs ||
3685 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3686 		ret = stmmac_init_phy(dev);
3687 		if (ret) {
3688 			netdev_err(priv->dev,
3689 				   "%s: Cannot attach to PHY (error: %d)\n",
3690 				   __func__, ret);
3691 			goto init_phy_error;
3692 		}
3693 	}
3694 
3695 	/* Extra statistics */
3696 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3697 	priv->xstats.threshold = tc;
3698 
3699 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
3700 	if (bfsize < 0)
3701 		bfsize = 0;
3702 
3703 	if (bfsize < BUF_SIZE_16KiB)
3704 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
3705 
3706 	priv->dma_buf_sz = bfsize;
3707 	buf_sz = bfsize;
3708 
3709 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3710 
3711 	if (!priv->dma_tx_size)
3712 		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3713 	if (!priv->dma_rx_size)
3714 		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3715 
3716 	/* Early check for TBS, before the TX descriptors are allocated */
3717 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3718 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3719 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3720 
3721 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3722 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3723 	}
3724 
3725 	ret = alloc_dma_desc_resources(priv);
3726 	if (ret < 0) {
3727 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3728 			   __func__);
3729 		goto dma_desc_error;
3730 	}
3731 
3732 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
3733 	if (ret < 0) {
3734 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3735 			   __func__);
3736 		goto init_error;
3737 	}
3738 
3739 	ret = stmmac_hw_setup(dev, true);
3740 	if (ret < 0) {
3741 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3742 		goto init_error;
3743 	}
3744 
3745 	stmmac_init_coalesce(priv);
3746 
3747 	phylink_start(priv->phylink);
3748 	/* We may have called phylink_speed_down before */
3749 	phylink_speed_up(priv->phylink);
3750 
3751 	ret = stmmac_request_irq(dev);
3752 	if (ret)
3753 		goto irq_error;
3754 
3755 	stmmac_enable_all_queues(priv);
3756 	netif_tx_start_all_queues(priv->dev);
3757 
3758 	return 0;
3759 
3760 irq_error:
3761 	phylink_stop(priv->phylink);
3762 
3763 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3764 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3765 
3766 	stmmac_hw_teardown(dev);
3767 init_error:
3768 	free_dma_desc_resources(priv);
3769 dma_desc_error:
3770 	phylink_disconnect_phy(priv->phylink);
3771 init_phy_error:
3772 	pm_runtime_put(priv->device);
3773 	return ret;
3774 }
3775 
3776 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3777 {
3778 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3779 
3780 	if (priv->fpe_wq)
3781 		destroy_workqueue(priv->fpe_wq);
3782 
3783 	netdev_info(priv->dev, "FPE workqueue stop");
3784 }
3785 
3786 /**
3787  *  stmmac_release - close entry point of the driver
3788  *  @dev : device pointer.
3789  *  Description:
3790  *  This is the stop entry point of the driver.
3791  */
3792 static int stmmac_release(struct net_device *dev)
3793 {
3794 	struct stmmac_priv *priv = netdev_priv(dev);
3795 	u32 chan;
3796 
3797 	netif_tx_disable(dev);
3798 
3799 	if (device_may_wakeup(priv->device))
3800 		phylink_speed_down(priv->phylink, false);
3801 	/* Stop and disconnect the PHY */
3802 	phylink_stop(priv->phylink);
3803 	phylink_disconnect_phy(priv->phylink);
3804 
3805 	stmmac_disable_all_queues(priv);
3806 
3807 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3808 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3809 
3810 	/* Free the IRQ lines */
3811 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3812 
3813 	if (priv->eee_enabled) {
3814 		priv->tx_path_in_lpi_mode = false;
3815 		del_timer_sync(&priv->eee_ctrl_timer);
3816 	}
3817 
3818 	/* Stop TX/RX DMA and clear the descriptors */
3819 	stmmac_stop_all_dma(priv);
3820 
3821 	/* Release and free the Rx/Tx resources */
3822 	free_dma_desc_resources(priv);
3823 
3824 	/* Disable the MAC Rx/Tx */
3825 	stmmac_mac_set(priv, priv->ioaddr, false);
3826 
3827 	netif_carrier_off(dev);
3828 
3829 	stmmac_release_ptp(priv);
3830 
3831 	pm_runtime_put(priv->device);
3832 
3833 	if (priv->dma_cap.fpesel)
3834 		stmmac_fpe_stop_wq(priv);
3835 
3836 	return 0;
3837 }
3838 
3839 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3840 			       struct stmmac_tx_queue *tx_q)
3841 {
3842 	u16 tag = 0x0, inner_tag = 0x0;
3843 	u32 inner_type = 0x0;
3844 	struct dma_desc *p;
3845 
3846 	if (!priv->dma_cap.vlins)
3847 		return false;
3848 	if (!skb_vlan_tag_present(skb))
3849 		return false;
3850 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3851 		inner_tag = skb_vlan_tag_get(skb);
3852 		inner_type = STMMAC_VLAN_INSERT;
3853 	}
3854 
3855 	tag = skb_vlan_tag_get(skb);
3856 
3857 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3858 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3859 	else
3860 		p = &tx_q->dma_tx[tx_q->cur_tx];
3861 
3862 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3863 		return false;
3864 
3865 	stmmac_set_tx_owner(priv, p);
3866 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3867 	return true;
3868 }
3869 
3870 /**
3871  *  stmmac_tso_allocator - fill TX descriptors with a TSO payload
3872  *  @priv: driver private structure
3873  *  @des: buffer start address
3874  *  @total_len: total length to fill in descriptors
3875  *  @last_segment: condition for the last descriptor
3876  *  @queue: TX queue index
3877  *  Description:
3878  *  This function fills the current descriptor and requests new descriptors
3879  *  according to the buffer length to fill.
3880  */
3881 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3882 				 int total_len, bool last_segment, u32 queue)
3883 {
3884 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3885 	struct dma_desc *desc;
3886 	u32 buff_size;
3887 	int tmp_len;
3888 
3889 	tmp_len = total_len;
3890 
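	/* Split the payload into TSO_MAX_BUFF_SIZE chunks, one descriptor per
	 * chunk; the Last Segment bit is only set on the final chunk of the
	 * last segment.
	 */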
3891 	while (tmp_len > 0) {
3892 		dma_addr_t curr_addr;
3893 
3894 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3895 						priv->dma_tx_size);
3896 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3897 
3898 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3899 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3900 		else
3901 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3902 
3903 		curr_addr = des + (total_len - tmp_len);
3904 		if (priv->dma_cap.addr64 <= 32)
3905 			desc->des0 = cpu_to_le32(curr_addr);
3906 		else
3907 			stmmac_set_desc_addr(priv, desc, curr_addr);
3908 
3909 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3910 			    TSO_MAX_BUFF_SIZE : tmp_len;
3911 
3912 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3913 				0, 1,
3914 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3915 				0, 0);
3916 
3917 		tmp_len -= TSO_MAX_BUFF_SIZE;
3918 	}
3919 }
3920 
3921 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
3922 {
3923 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3924 	int desc_size;
3925 
3926 	if (likely(priv->extend_desc))
3927 		desc_size = sizeof(struct dma_extended_desc);
3928 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3929 		desc_size = sizeof(struct dma_edesc);
3930 	else
3931 		desc_size = sizeof(struct dma_desc);
3932 
3933 	/* The own bit must be the last setting done when preparing the
3934 	 * descriptor, and then a barrier is needed to make sure that
3935 	 * all is coherent before handing the descriptor over to the DMA engine.
3936 	 */
3937 	wmb();
3938 
3939 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3940 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3941 }
3942 
3943 /**
3944  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3945  *  @skb : the socket buffer
3946  *  @dev : device pointer
3947  *  Description: this is the transmit function that is called on TSO frames
3948  *  (support available on GMAC4 and newer chips).
3949  *  The diagram below shows the ring programming in the case of TSO frames:
3950  *
3951  *  First Descriptor
3952  *   --------
3953  *   | DES0 |---> buffer1 = L2/L3/L4 header
3954  *   | DES1 |---> TCP Payload (can continue on next descr...)
3955  *   | DES2 |---> buffer 1 and 2 len
3956  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3957  *   --------
3958  *	|
3959  *     ...
3960  *	|
3961  *   --------
3962  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3963  *   | DES1 | --|
3964  *   | DES2 | --> buffer 1 and 2 len
3965  *   | DES3 |
3966  *   --------
3967  *
3968  * The MSS is fixed while TSO is enabled, so the TDES3 context field only
3969  * needs to be reprogrammed when the MSS value changes.
3969  */
3970 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3971 {
3972 	struct dma_desc *desc, *first, *mss_desc = NULL;
3973 	struct stmmac_priv *priv = netdev_priv(dev);
3974 	int nfrags = skb_shinfo(skb)->nr_frags;
3975 	u32 queue = skb_get_queue_mapping(skb);
3976 	unsigned int first_entry, tx_packets;
3977 	int tmp_pay_len = 0, first_tx;
3978 	struct stmmac_tx_queue *tx_q;
3979 	bool has_vlan, set_ic;
3980 	u8 proto_hdr_len, hdr;
3981 	u32 pay_len, mss;
3982 	dma_addr_t des;
3983 	int i;
3984 
3985 	tx_q = &priv->tx_queue[queue];
3986 	first_tx = tx_q->cur_tx;
3987 
3988 	/* Compute header lengths */
3989 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3990 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3991 		hdr = sizeof(struct udphdr);
3992 	} else {
3993 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3994 		hdr = tcp_hdrlen(skb);
3995 	}
3996 
3997 	/* Descriptor availability checked against the threshold should be safe enough */
3998 	if (unlikely(stmmac_tx_avail(priv, queue) <
3999 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4000 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4001 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4002 								queue));
4003 			/* This is a hard error, log it. */
4004 			netdev_err(priv->dev,
4005 				   "%s: Tx Ring full when queue awake\n",
4006 				   __func__);
4007 		}
4008 		return NETDEV_TX_BUSY;
4009 	}
4010 
4011 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4012 
4013 	mss = skb_shinfo(skb)->gso_size;
4014 
4015 	/* set new MSS value if needed */
4016 	if (mss != tx_q->mss) {
4017 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4018 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4019 		else
4020 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4021 
4022 		stmmac_set_mss(priv, mss_desc, mss);
4023 		tx_q->mss = mss;
4024 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4025 						priv->dma_tx_size);
4026 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4027 	}
4028 
4029 	if (netif_msg_tx_queued(priv)) {
4030 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4031 			__func__, hdr, proto_hdr_len, pay_len, mss);
4032 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4033 			skb->data_len);
4034 	}
4035 
4036 	/* Check if VLAN can be inserted by HW */
4037 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4038 
4039 	first_entry = tx_q->cur_tx;
4040 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4041 
4042 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4043 		desc = &tx_q->dma_entx[first_entry].basic;
4044 	else
4045 		desc = &tx_q->dma_tx[first_entry];
4046 	first = desc;
4047 
4048 	if (has_vlan)
4049 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4050 
4051 	/* first descriptor: fill Headers on Buf1 */
4052 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4053 			     DMA_TO_DEVICE);
4054 	if (dma_mapping_error(priv->device, des))
4055 		goto dma_map_err;
4056 
4057 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4058 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4059 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4060 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4061 
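	/* With descriptor addresses of 32 bits or less, the first descriptor
	 * carries the headers in buffer 1 and the start of the payload in
	 * buffer 2; with wider addresses the payload is placed entirely in
	 * the descriptors that follow.
	 */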
4062 	if (priv->dma_cap.addr64 <= 32) {
4063 		first->des0 = cpu_to_le32(des);
4064 
4065 		/* Fill start of payload in buff2 of first descriptor */
4066 		if (pay_len)
4067 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4068 
4069 		/* If needed take extra descriptors to fill the remaining payload */
4070 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4071 	} else {
4072 		stmmac_set_desc_addr(priv, first, des);
4073 		tmp_pay_len = pay_len;
4074 		des += proto_hdr_len;
4075 		pay_len = 0;
4076 	}
4077 
4078 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4079 
4080 	/* Prepare fragments */
4081 	for (i = 0; i < nfrags; i++) {
4082 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4083 
4084 		des = skb_frag_dma_map(priv->device, frag, 0,
4085 				       skb_frag_size(frag),
4086 				       DMA_TO_DEVICE);
4087 		if (dma_mapping_error(priv->device, des))
4088 			goto dma_map_err;
4089 
4090 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4091 				     (i == nfrags - 1), queue);
4092 
4093 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4094 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4095 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4096 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4097 	}
4098 
4099 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4100 
4101 	/* Only the last descriptor gets to point to the skb. */
4102 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4103 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4104 
4105 	/* Manage tx mitigation */
4106 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4107 	tx_q->tx_count_frames += tx_packets;
4108 
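	/* Decide whether this frame should raise a TX-complete interrupt:
	 * always for HW-timestamped frames, otherwise only when the
	 * tx_coal_frames threshold for this queue has been crossed.
	 */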
4109 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4110 		set_ic = true;
4111 	else if (!priv->tx_coal_frames[queue])
4112 		set_ic = false;
4113 	else if (tx_packets > priv->tx_coal_frames[queue])
4114 		set_ic = true;
4115 	else if ((tx_q->tx_count_frames %
4116 		  priv->tx_coal_frames[queue]) < tx_packets)
4117 		set_ic = true;
4118 	else
4119 		set_ic = false;
4120 
4121 	if (set_ic) {
4122 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4123 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4124 		else
4125 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4126 
4127 		tx_q->tx_count_frames = 0;
4128 		stmmac_set_tx_ic(priv, desc);
4129 		priv->xstats.tx_set_ic_bit++;
4130 	}
4131 
4132 	/* We've used all descriptors we need for this skb, however,
4133 	 * advance cur_tx so that it references a fresh descriptor.
4134 	 * ndo_start_xmit will fill this descriptor the next time it's
4135 	 * called and stmmac_tx_clean may clean up to this descriptor.
4136 	 */
4137 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
4138 
4139 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4140 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4141 			  __func__);
4142 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4143 	}
4144 
4145 	dev->stats.tx_bytes += skb->len;
4146 	priv->xstats.tx_tso_frames++;
4147 	priv->xstats.tx_tso_nfrags += nfrags;
4148 
4149 	if (priv->sarc_type)
4150 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4151 
4152 	skb_tx_timestamp(skb);
4153 
4154 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4155 		     priv->hwts_tx_en)) {
4156 		/* declare that device is doing timestamping */
4157 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4158 		stmmac_enable_tx_timestamp(priv, first);
4159 	}
4160 
4161 	/* Complete the first descriptor before granting the DMA */
4162 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4163 			proto_hdr_len,
4164 			pay_len,
4165 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4166 			hdr / 4, (skb->len - proto_hdr_len));
4167 
4168 	/* If context desc is used to change MSS */
4169 	if (mss_desc) {
4170 		/* Make sure that the first descriptor has been completely
4171 		 * written, including its own bit. This is because the MSS
4172 		 * descriptor actually precedes the first descriptor, so its
4173 		 * own bit must be the last thing written.
4174 		 */
4175 		dma_wmb();
4176 		stmmac_set_tx_owner(priv, mss_desc);
4177 	}
4178 
4179 	if (netif_msg_pktdata(priv)) {
4180 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4181 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4182 			tx_q->cur_tx, first, nfrags);
4183 		pr_info(">>> frame to be transmitted: ");
4184 		print_pkt(skb->data, skb_headlen(skb));
4185 	}
4186 
4187 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4188 
4189 	stmmac_flush_tx_descriptors(priv, queue);
4190 	stmmac_tx_timer_arm(priv, queue);
4191 
4192 	return NETDEV_TX_OK;
4193 
4194 dma_map_err:
4195 	dev_err(priv->device, "Tx dma map failed\n");
4196 	dev_kfree_skb(skb);
4197 	priv->dev->stats.tx_dropped++;
4198 	return NETDEV_TX_OK;
4199 }
4200 
4201 /**
4202  *  stmmac_xmit - Tx entry point of the driver
4203  *  @skb : the socket buffer
4204  *  @dev : device pointer
4205  *  Description : this is the tx entry point of the driver.
4206  *  It programs the chain or the ring and supports oversized frames
4207  *  and SG feature.
4208  */
4209 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4210 {
4211 	unsigned int first_entry, tx_packets, enh_desc;
4212 	struct stmmac_priv *priv = netdev_priv(dev);
4213 	unsigned int nopaged_len = skb_headlen(skb);
4214 	int i, csum_insertion = 0, is_jumbo = 0;
4215 	u32 queue = skb_get_queue_mapping(skb);
4216 	int nfrags = skb_shinfo(skb)->nr_frags;
4217 	int gso = skb_shinfo(skb)->gso_type;
4218 	struct dma_edesc *tbs_desc = NULL;
4219 	struct dma_desc *desc, *first;
4220 	struct stmmac_tx_queue *tx_q;
4221 	bool has_vlan, set_ic;
4222 	int entry, first_tx;
4223 	dma_addr_t des;
4224 
4225 	tx_q = &priv->tx_queue[queue];
4226 	first_tx = tx_q->cur_tx;
4227 
4228 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4229 		stmmac_disable_eee_mode(priv);
4230 
4231 	/* Manage oversized TCP frames for GMAC4 device */
4232 	if (skb_is_gso(skb) && priv->tso) {
4233 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4234 			return stmmac_tso_xmit(skb, dev);
4235 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4236 			return stmmac_tso_xmit(skb, dev);
4237 	}
4238 
4239 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4240 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4241 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4242 								queue));
4243 			/* This is a hard error, log it. */
4244 			netdev_err(priv->dev,
4245 				   "%s: Tx Ring full when queue awake\n",
4246 				   __func__);
4247 		}
4248 		return NETDEV_TX_BUSY;
4249 	}
4250 
4251 	/* Check if VLAN can be inserted by HW */
4252 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4253 
4254 	entry = tx_q->cur_tx;
4255 	first_entry = entry;
4256 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4257 
4258 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4259 
4260 	if (likely(priv->extend_desc))
4261 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4262 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4263 		desc = &tx_q->dma_entx[entry].basic;
4264 	else
4265 		desc = tx_q->dma_tx + entry;
4266 
4267 	first = desc;
4268 
4269 	if (has_vlan)
4270 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4271 
4272 	enh_desc = priv->plat->enh_desc;
4273 	/* Program the descriptors according to the size of the frame */
4274 	if (enh_desc)
4275 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4276 
4277 	if (unlikely(is_jumbo)) {
4278 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4279 		if (unlikely(entry < 0) && (entry != -EINVAL))
4280 			goto dma_map_err;
4281 	}
4282 
4283 	for (i = 0; i < nfrags; i++) {
4284 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4285 		int len = skb_frag_size(frag);
4286 		bool last_segment = (i == (nfrags - 1));
4287 
4288 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4289 		WARN_ON(tx_q->tx_skbuff[entry]);
4290 
4291 		if (likely(priv->extend_desc))
4292 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4293 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4294 			desc = &tx_q->dma_entx[entry].basic;
4295 		else
4296 			desc = tx_q->dma_tx + entry;
4297 
4298 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4299 				       DMA_TO_DEVICE);
4300 		if (dma_mapping_error(priv->device, des))
4301 			goto dma_map_err; /* should reuse desc w/o issues */
4302 
4303 		tx_q->tx_skbuff_dma[entry].buf = des;
4304 
4305 		stmmac_set_desc_addr(priv, desc, des);
4306 
4307 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4308 		tx_q->tx_skbuff_dma[entry].len = len;
4309 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4310 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4311 
4312 		/* Prepare the descriptor and set the own bit too */
4313 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4314 				priv->mode, 1, last_segment, skb->len);
4315 	}
4316 
4317 	/* Only the last descriptor gets to point to the skb. */
4318 	tx_q->tx_skbuff[entry] = skb;
4319 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4320 
4321 	/* According to the coalesce parameter, the IC bit for the latest
4322 	 * segment is reset and the timer is re-started to clean the tx status.
4323 	 * This approach takes care of the fragments: desc is the first
4324 	 * element in the non-SG case.
4325 	 */
4326 	tx_packets = (entry + 1) - first_tx;
4327 	tx_q->tx_count_frames += tx_packets;
4328 
4329 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4330 		set_ic = true;
4331 	else if (!priv->tx_coal_frames[queue])
4332 		set_ic = false;
4333 	else if (tx_packets > priv->tx_coal_frames[queue])
4334 		set_ic = true;
4335 	else if ((tx_q->tx_count_frames %
4336 		  priv->tx_coal_frames[queue]) < tx_packets)
4337 		set_ic = true;
4338 	else
4339 		set_ic = false;
4340 
4341 	if (set_ic) {
4342 		if (likely(priv->extend_desc))
4343 			desc = &tx_q->dma_etx[entry].basic;
4344 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4345 			desc = &tx_q->dma_entx[entry].basic;
4346 		else
4347 			desc = &tx_q->dma_tx[entry];
4348 
4349 		tx_q->tx_count_frames = 0;
4350 		stmmac_set_tx_ic(priv, desc);
4351 		priv->xstats.tx_set_ic_bit++;
4352 	}
4353 
4354 	/* We've used all descriptors we need for this skb, however,
4355 	 * advance cur_tx so that it references a fresh descriptor.
4356 	 * ndo_start_xmit will fill this descriptor the next time it's
4357 	 * called and stmmac_tx_clean may clean up to this descriptor.
4358 	 */
4359 	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4360 	tx_q->cur_tx = entry;
4361 
4362 	if (netif_msg_pktdata(priv)) {
4363 		netdev_dbg(priv->dev,
4364 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4365 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4366 			   entry, first, nfrags);
4367 
4368 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4369 		print_pkt(skb->data, skb->len);
4370 	}
4371 
4372 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4373 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4374 			  __func__);
4375 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4376 	}
4377 
4378 	dev->stats.tx_bytes += skb->len;
4379 
4380 	if (priv->sarc_type)
4381 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4382 
4383 	skb_tx_timestamp(skb);
4384 
4385 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4386 	 * problems because all the descriptors are actually ready to be
4387 	 * passed to the DMA engine.
4388 	 */
4389 	if (likely(!is_jumbo)) {
4390 		bool last_segment = (nfrags == 0);
4391 
4392 		des = dma_map_single(priv->device, skb->data,
4393 				     nopaged_len, DMA_TO_DEVICE);
4394 		if (dma_mapping_error(priv->device, des))
4395 			goto dma_map_err;
4396 
4397 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4398 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4399 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4400 
4401 		stmmac_set_desc_addr(priv, first, des);
4402 
4403 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4404 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4405 
4406 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4407 			     priv->hwts_tx_en)) {
4408 			/* declare that device is doing timestamping */
4409 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4410 			stmmac_enable_tx_timestamp(priv, first);
4411 		}
4412 
4413 		/* Prepare the first descriptor setting the OWN bit too */
4414 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4415 				csum_insertion, priv->mode, 0, last_segment,
4416 				skb->len);
4417 	}
4418 
4419 	if (tx_q->tbs & STMMAC_TBS_EN) {
4420 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4421 
4422 		tbs_desc = &tx_q->dma_entx[first_entry];
4423 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4424 	}
4425 
4426 	stmmac_set_tx_owner(priv, first);
4427 
4428 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4429 
4430 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4431 
4432 	stmmac_flush_tx_descriptors(priv, queue);
4433 	stmmac_tx_timer_arm(priv, queue);
4434 
4435 	return NETDEV_TX_OK;
4436 
4437 dma_map_err:
4438 	netdev_err(priv->dev, "Tx DMA map failed\n");
4439 	dev_kfree_skb(skb);
4440 	priv->dev->stats.tx_dropped++;
4441 	return NETDEV_TX_OK;
4442 }
4443 
4444 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4445 {
4446 	struct vlan_ethhdr *veth;
4447 	__be16 vlan_proto;
4448 	u16 vlanid;
4449 
4450 	veth = (struct vlan_ethhdr *)skb->data;
4451 	vlan_proto = veth->h_vlan_proto;
4452 
4453 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4454 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4455 	    (vlan_proto == htons(ETH_P_8021AD) &&
4456 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4457 		/* pop the vlan tag */
4458 		vlanid = ntohs(veth->h_vlan_TCI);
4459 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4460 		skb_pull(skb, VLAN_HLEN);
4461 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4462 	}
4463 }
4464 
4465 /**
4466  * stmmac_rx_refill - refill the used RX buffers
4467  * @priv: driver private structure
4468  * @queue: RX queue index
4469  * Description: this reallocates the buffers consumed by the zero-copy
4470  * reception process.
4471  */
4472 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4473 {
4474 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4475 	int dirty = stmmac_rx_dirty(priv, queue);
4476 	unsigned int entry = rx_q->dirty_rx;
4477 
4478 	while (dirty-- > 0) {
4479 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4480 		struct dma_desc *p;
4481 		bool use_rx_wd;
4482 
4483 		if (priv->extend_desc)
4484 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4485 		else
4486 			p = rx_q->dma_rx + entry;
4487 
4488 		if (!buf->page) {
4489 			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
4490 			if (!buf->page)
4491 				break;
4492 		}
4493 
4494 		if (priv->sph && !buf->sec_page) {
4495 			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
4496 			if (!buf->sec_page)
4497 				break;
4498 
4499 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4500 		}
4501 
4502 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4503 
4504 		stmmac_set_desc_addr(priv, p, buf->addr);
4505 		if (priv->sph)
4506 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4507 		else
4508 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4509 		stmmac_refill_desc3(priv, rx_q, p);
4510 
4511 		rx_q->rx_count_frames++;
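		/* When the RX interrupt watchdog (RIWT) is in use, the
		 * per-descriptor completion interrupt is suppressed and
		 * completion is signalled through the watchdog/frame-count
		 * coalescing instead.
		 */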
4512 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4513 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4514 			rx_q->rx_count_frames = 0;
4515 
4516 		use_rx_wd = !priv->rx_coal_frames[queue];
4517 		use_rx_wd |= rx_q->rx_count_frames > 0;
4518 		if (!priv->use_riwt)
4519 			use_rx_wd = false;
4520 
4521 		dma_wmb();
4522 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4523 
4524 		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4525 	}
4526 	rx_q->dirty_rx = entry;
4527 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4528 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4529 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4530 }
4531 
4532 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4533 				       struct dma_desc *p,
4534 				       int status, unsigned int len)
4535 {
4536 	unsigned int plen = 0, hlen = 0;
4537 	int coe = priv->hw->rx_csum;
4538 
4539 	/* Not first descriptor, buffer is always zero */
4540 	if (priv->sph && len)
4541 		return 0;
4542 
4543 	/* First descriptor, get split header length */
4544 	stmmac_get_rx_header_len(priv, p, &hlen);
4545 	if (priv->sph && hlen) {
4546 		priv->xstats.rx_split_hdr_pkt_n++;
4547 		return hlen;
4548 	}
4549 
4550 	/* First descriptor, not last descriptor and not split header */
4551 	if (status & rx_not_ls)
4552 		return priv->dma_buf_sz;
4553 
4554 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4555 
4556 	/* First descriptor and last descriptor and not split header */
4557 	return min_t(unsigned int, priv->dma_buf_sz, plen);
4558 }
4559 
4560 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4561 				       struct dma_desc *p,
4562 				       int status, unsigned int len)
4563 {
4564 	int coe = priv->hw->rx_csum;
4565 	unsigned int plen = 0;
4566 
4567 	/* Not split header, buffer is not available */
4568 	if (!priv->sph)
4569 		return 0;
4570 
4571 	/* Not last descriptor */
4572 	if (status & rx_not_ls)
4573 		return priv->dma_buf_sz;
4574 
4575 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4576 
4577 	/* Last descriptor */
4578 	return plen - len;
4579 }
4580 
4581 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4582 				struct xdp_frame *xdpf, bool dma_map)
4583 {
4584 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4585 	unsigned int entry = tx_q->cur_tx;
4586 	struct dma_desc *tx_desc;
4587 	dma_addr_t dma_addr;
4588 	bool set_ic;
4589 
4590 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4591 		return STMMAC_XDP_CONSUMED;
4592 
4593 	if (likely(priv->extend_desc))
4594 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4595 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4596 		tx_desc = &tx_q->dma_entx[entry].basic;
4597 	else
4598 		tx_desc = tx_q->dma_tx + entry;
4599 
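	/* Frames coming from ndo_xdp_xmit (dma_map == true) need a fresh DMA
	 * mapping; XDP_TX frames already live in the RX page pool, so only a
	 * DMA sync for the device is required.
	 */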
4600 	if (dma_map) {
4601 		dma_addr = dma_map_single(priv->device, xdpf->data,
4602 					  xdpf->len, DMA_TO_DEVICE);
4603 		if (dma_mapping_error(priv->device, dma_addr))
4604 			return STMMAC_XDP_CONSUMED;
4605 
4606 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4607 	} else {
4608 		struct page *page = virt_to_page(xdpf->data);
4609 
4610 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4611 			   xdpf->headroom;
4612 		dma_sync_single_for_device(priv->device, dma_addr,
4613 					   xdpf->len, DMA_BIDIRECTIONAL);
4614 
4615 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4616 	}
4617 
4618 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4619 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4620 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4621 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4622 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4623 
4624 	tx_q->xdpf[entry] = xdpf;
4625 
4626 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4627 
4628 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4629 			       true, priv->mode, true, true,
4630 			       xdpf->len);
4631 
4632 	tx_q->tx_count_frames++;
4633 
4634 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4635 		set_ic = true;
4636 	else
4637 		set_ic = false;
4638 
4639 	if (set_ic) {
4640 		tx_q->tx_count_frames = 0;
4641 		stmmac_set_tx_ic(priv, tx_desc);
4642 		priv->xstats.tx_set_ic_bit++;
4643 	}
4644 
4645 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4646 
4647 	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4648 	tx_q->cur_tx = entry;
4649 
4650 	return STMMAC_XDP_TX;
4651 }
4652 
4653 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4654 				   int cpu)
4655 {
4656 	int index = cpu;
4657 
4658 	if (unlikely(index < 0))
4659 		index = 0;
4660 
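	/* Wrap the CPU id onto the available TX queues; this is effectively
	 * index % tx_queues_to_use without a division.
	 */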
4661 	while (index >= priv->plat->tx_queues_to_use)
4662 		index -= priv->plat->tx_queues_to_use;
4663 
4664 	return index;
4665 }
4666 
4667 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4668 				struct xdp_buff *xdp)
4669 {
4670 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4671 	int cpu = smp_processor_id();
4672 	struct netdev_queue *nq;
4673 	int queue;
4674 	int res;
4675 
4676 	if (unlikely(!xdpf))
4677 		return STMMAC_XDP_CONSUMED;
4678 
4679 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4680 	nq = netdev_get_tx_queue(priv->dev, queue);
4681 
4682 	__netif_tx_lock(nq, cpu);
4683 	/* Avoids TX time-out as we are sharing with slow path */
4684 	txq_trans_cond_update(nq);
4685 
4686 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4687 	if (res == STMMAC_XDP_TX)
4688 		stmmac_flush_tx_descriptors(priv, queue);
4689 
4690 	__netif_tx_unlock(nq);
4691 
4692 	return res;
4693 }
4694 
4695 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4696 				 struct bpf_prog *prog,
4697 				 struct xdp_buff *xdp)
4698 {
4699 	u32 act;
4700 	int res;
4701 
4702 	act = bpf_prog_run_xdp(prog, xdp);
4703 	switch (act) {
4704 	case XDP_PASS:
4705 		res = STMMAC_XDP_PASS;
4706 		break;
4707 	case XDP_TX:
4708 		res = stmmac_xdp_xmit_back(priv, xdp);
4709 		break;
4710 	case XDP_REDIRECT:
4711 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4712 			res = STMMAC_XDP_CONSUMED;
4713 		else
4714 			res = STMMAC_XDP_REDIRECT;
4715 		break;
4716 	default:
4717 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4718 		fallthrough;
4719 	case XDP_ABORTED:
4720 		trace_xdp_exception(priv->dev, prog, act);
4721 		fallthrough;
4722 	case XDP_DROP:
4723 		res = STMMAC_XDP_CONSUMED;
4724 		break;
4725 	}
4726 
4727 	return res;
4728 }
4729 
4730 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4731 					   struct xdp_buff *xdp)
4732 {
4733 	struct bpf_prog *prog;
4734 	int res;
4735 
4736 	prog = READ_ONCE(priv->xdp_prog);
4737 	if (!prog) {
4738 		res = STMMAC_XDP_PASS;
4739 		goto out;
4740 	}
4741 
4742 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4743 out:
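	/* The XDP verdict is encoded as an ERR_PTR so that callers can tell
	 * it apart from a successfully built skb.
	 */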
4744 	return ERR_PTR(-res);
4745 }
4746 
4747 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4748 				   int xdp_status)
4749 {
4750 	int cpu = smp_processor_id();
4751 	int queue;
4752 
4753 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4754 
4755 	if (xdp_status & STMMAC_XDP_TX)
4756 		stmmac_tx_timer_arm(priv, queue);
4757 
4758 	if (xdp_status & STMMAC_XDP_REDIRECT)
4759 		xdp_do_flush();
4760 }
4761 
4762 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4763 					       struct xdp_buff *xdp)
4764 {
4765 	unsigned int metasize = xdp->data - xdp->data_meta;
4766 	unsigned int datasize = xdp->data_end - xdp->data;
4767 	struct sk_buff *skb;
4768 
4769 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4770 			       xdp->data_end - xdp->data_hard_start,
4771 			       GFP_ATOMIC | __GFP_NOWARN);
4772 	if (unlikely(!skb))
4773 		return NULL;
4774 
4775 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4776 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4777 	if (metasize)
4778 		skb_metadata_set(skb, metasize);
4779 
4780 	return skb;
4781 }
4782 
4783 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4784 				   struct dma_desc *p, struct dma_desc *np,
4785 				   struct xdp_buff *xdp)
4786 {
4787 	struct stmmac_channel *ch = &priv->channel[queue];
4788 	unsigned int len = xdp->data_end - xdp->data;
4789 	enum pkt_hash_types hash_type;
4790 	int coe = priv->hw->rx_csum;
4791 	struct sk_buff *skb;
4792 	u32 hash;
4793 
4794 	skb = stmmac_construct_skb_zc(ch, xdp);
4795 	if (!skb) {
4796 		priv->dev->stats.rx_dropped++;
4797 		return;
4798 	}
4799 
4800 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4801 	stmmac_rx_vlan(priv->dev, skb);
4802 	skb->protocol = eth_type_trans(skb, priv->dev);
4803 
4804 	if (unlikely(!coe))
4805 		skb_checksum_none_assert(skb);
4806 	else
4807 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4808 
4809 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4810 		skb_set_hash(skb, hash, hash_type);
4811 
4812 	skb_record_rx_queue(skb, queue);
4813 	napi_gro_receive(&ch->rxtx_napi, skb);
4814 
4815 	priv->dev->stats.rx_packets++;
4816 	priv->dev->stats.rx_bytes += len;
4817 }
4818 
4819 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4820 {
4821 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4822 	unsigned int entry = rx_q->dirty_rx;
4823 	struct dma_desc *rx_desc = NULL;
4824 	bool ret = true;
4825 
4826 	budget = min(budget, stmmac_rx_dirty(priv, queue));
4827 
4828 	while (budget-- > 0 && entry != rx_q->cur_rx) {
4829 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4830 		dma_addr_t dma_addr;
4831 		bool use_rx_wd;
4832 
4833 		if (!buf->xdp) {
4834 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4835 			if (!buf->xdp) {
4836 				ret = false;
4837 				break;
4838 			}
4839 		}
4840 
4841 		if (priv->extend_desc)
4842 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4843 		else
4844 			rx_desc = rx_q->dma_rx + entry;
4845 
4846 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4847 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4848 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4849 		stmmac_refill_desc3(priv, rx_q, rx_desc);
4850 
4851 		rx_q->rx_count_frames++;
4852 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4853 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4854 			rx_q->rx_count_frames = 0;
4855 
4856 		use_rx_wd = !priv->rx_coal_frames[queue];
4857 		use_rx_wd |= rx_q->rx_count_frames > 0;
4858 		if (!priv->use_riwt)
4859 			use_rx_wd = false;
4860 
4861 		dma_wmb();
4862 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4863 
4864 		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4865 	}
4866 
4867 	if (rx_desc) {
4868 		rx_q->dirty_rx = entry;
4869 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4870 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
4871 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4872 	}
4873 
4874 	return ret;
4875 }
4876 
4877 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4878 {
4879 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4880 	unsigned int count = 0, error = 0, len = 0;
4881 	int dirty = stmmac_rx_dirty(priv, queue);
4882 	unsigned int next_entry = rx_q->cur_rx;
4883 	unsigned int desc_size;
4884 	struct bpf_prog *prog;
4885 	bool failure = false;
4886 	int xdp_status = 0;
4887 	int status = 0;
4888 
4889 	if (netif_msg_rx_status(priv)) {
4890 		void *rx_head;
4891 
4892 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4893 		if (priv->extend_desc) {
4894 			rx_head = (void *)rx_q->dma_erx;
4895 			desc_size = sizeof(struct dma_extended_desc);
4896 		} else {
4897 			rx_head = (void *)rx_q->dma_rx;
4898 			desc_size = sizeof(struct dma_desc);
4899 		}
4900 
4901 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
4902 				    rx_q->dma_rx_phy, desc_size);
4903 	}
4904 	while (count < limit) {
4905 		struct stmmac_rx_buffer *buf;
4906 		unsigned int buf1_len = 0;
4907 		struct dma_desc *np, *p;
4908 		int entry;
4909 		int res;
4910 
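		/* Resume from the state saved when the previous NAPI poll
		 * ended in the middle of a multi-descriptor frame.
		 */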
4911 		if (!count && rx_q->state_saved) {
4912 			error = rx_q->state.error;
4913 			len = rx_q->state.len;
4914 		} else {
4915 			rx_q->state_saved = false;
4916 			error = 0;
4917 			len = 0;
4918 		}
4919 
4920 		if (count >= limit)
4921 			break;
4922 
4923 read_again:
4924 		buf1_len = 0;
4925 		entry = next_entry;
4926 		buf = &rx_q->buf_pool[entry];
4927 
4928 		if (dirty >= STMMAC_RX_FILL_BATCH) {
4929 			failure = failure ||
4930 				  !stmmac_rx_refill_zc(priv, queue, dirty);
4931 			dirty = 0;
4932 		}
4933 
4934 		if (priv->extend_desc)
4935 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4936 		else
4937 			p = rx_q->dma_rx + entry;
4938 
4939 		/* read the status of the incoming frame */
4940 		status = stmmac_rx_status(priv, &priv->dev->stats,
4941 					  &priv->xstats, p);
4942 		/* if the descriptor is still owned by the DMA, stop here; otherwise go ahead */
4943 		if (unlikely(status & dma_own))
4944 			break;
4945 
4946 		/* Prefetch the next RX descriptor */
4947 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
4948 						priv->dma_rx_size);
4949 		next_entry = rx_q->cur_rx;
4950 
4951 		if (priv->extend_desc)
4952 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
4953 		else
4954 			np = rx_q->dma_rx + next_entry;
4955 
4956 		prefetch(np);
4957 
4958 		/* Ensure a valid XSK buffer before proceeding */
4959 		if (!buf->xdp)
4960 			break;
4961 
4962 		if (priv->extend_desc)
4963 			stmmac_rx_extended_status(priv, &priv->dev->stats,
4964 						  &priv->xstats,
4965 						  rx_q->dma_erx + entry);
4966 		if (unlikely(status == discard_frame)) {
4967 			xsk_buff_free(buf->xdp);
4968 			buf->xdp = NULL;
4969 			dirty++;
4970 			error = 1;
4971 			if (!priv->hwts_rx_en)
4972 				priv->dev->stats.rx_errors++;
4973 		}
4974 
4975 		if (unlikely(error && (status & rx_not_ls)))
4976 			goto read_again;
4977 		if (unlikely(error)) {
4978 			count++;
4979 			continue;
4980 		}
4981 
4982 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
4983 		if (likely(status & rx_not_ls)) {
4984 			xsk_buff_free(buf->xdp);
4985 			buf->xdp = NULL;
4986 			dirty++;
4987 			count++;
4988 			goto read_again;
4989 		}
4990 
4991 		/* XDP ZC frames only support the primary buffer for now */
4992 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
4993 		len += buf1_len;
4994 
4995 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
4996 		 * Type frames (LLC/LLC-SNAP)
4997 		 *
4998 		 * llc_snap is never checked in GMAC >= 4, so this ACS
4999 		 * feature is always disabled and packets need to be
5000 		 * stripped manually.
5001 		 */
5002 		if (likely(!(status & rx_not_ls)) &&
5003 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5004 		     unlikely(status != llc_snap))) {
5005 			buf1_len -= ETH_FCS_LEN;
5006 			len -= ETH_FCS_LEN;
5007 		}
5008 
5009 		/* RX buffer is good and fits into an XSK pool buffer */
5010 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5011 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5012 
5013 		prog = READ_ONCE(priv->xdp_prog);
5014 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5015 
5016 		switch (res) {
5017 		case STMMAC_XDP_PASS:
5018 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5019 			xsk_buff_free(buf->xdp);
5020 			break;
5021 		case STMMAC_XDP_CONSUMED:
5022 			xsk_buff_free(buf->xdp);
5023 			priv->dev->stats.rx_dropped++;
5024 			break;
5025 		case STMMAC_XDP_TX:
5026 		case STMMAC_XDP_REDIRECT:
5027 			xdp_status |= res;
5028 			break;
5029 		}
5030 
5031 		buf->xdp = NULL;
5032 		dirty++;
5033 		count++;
5034 	}
5035 
5036 	if (status & rx_not_ls) {
5037 		rx_q->state_saved = true;
5038 		rx_q->state.error = error;
5039 		rx_q->state.len = len;
5040 	}
5041 
5042 	stmmac_finalize_xdp_rx(priv, xdp_status);
5043 
5044 	priv->xstats.rx_pkt_n += count;
5045 	priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5046 
5047 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5048 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5049 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5050 		else
5051 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5052 
5053 		return (int)count;
5054 	}
5055 
5056 	return failure ? limit : (int)count;
5057 }
5058 
5059 /**
5060  * stmmac_rx - manage the receive process
5061  * @priv: driver private structure
5062  * @limit: napi budget
5063  * @queue: RX queue index.
5064  * Description: this is the function called by the napi poll method.
5065  * It gets all the frames inside the ring.
5066  */
5067 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5068 {
5069 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5070 	struct stmmac_channel *ch = &priv->channel[queue];
5071 	unsigned int count = 0, error = 0, len = 0;
5072 	int status = 0, coe = priv->hw->rx_csum;
5073 	unsigned int next_entry = rx_q->cur_rx;
5074 	enum dma_data_direction dma_dir;
5075 	unsigned int desc_size;
5076 	struct sk_buff *skb = NULL;
5077 	struct xdp_buff xdp;
5078 	int xdp_status = 0;
5079 	int buf_sz;
5080 
5081 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
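	/* The xdp_buff frame size must cover the whole receive buffer,
	 * rounded up to a multiple of the page size.
	 */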
5082 	buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5083 
5084 	if (netif_msg_rx_status(priv)) {
5085 		void *rx_head;
5086 
5087 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5088 		if (priv->extend_desc) {
5089 			rx_head = (void *)rx_q->dma_erx;
5090 			desc_size = sizeof(struct dma_extended_desc);
5091 		} else {
5092 			rx_head = (void *)rx_q->dma_rx;
5093 			desc_size = sizeof(struct dma_desc);
5094 		}
5095 
5096 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
5097 				    rx_q->dma_rx_phy, desc_size);
5098 	}
5099 	while (count < limit) {
5100 		unsigned int buf1_len = 0, buf2_len = 0;
5101 		enum pkt_hash_types hash_type;
5102 		struct stmmac_rx_buffer *buf;
5103 		struct dma_desc *np, *p;
5104 		int entry;
5105 		u32 hash;
5106 
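		/* If the previous NAPI poll ended in the middle of a frame,
		 * resume from the saved state (partial skb, error and length).
		 */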
5107 		if (!count && rx_q->state_saved) {
5108 			skb = rx_q->state.skb;
5109 			error = rx_q->state.error;
5110 			len = rx_q->state.len;
5111 		} else {
5112 			rx_q->state_saved = false;
5113 			skb = NULL;
5114 			error = 0;
5115 			len = 0;
5116 		}
5117 
5118 		if (count >= limit)
5119 			break;
5120 
5121 read_again:
5122 		buf1_len = 0;
5123 		buf2_len = 0;
5124 		entry = next_entry;
5125 		buf = &rx_q->buf_pool[entry];
5126 
5127 		if (priv->extend_desc)
5128 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5129 		else
5130 			p = rx_q->dma_rx + entry;
5131 
5132 		/* read the status of the incoming frame */
5133 		status = stmmac_rx_status(priv, &priv->dev->stats,
5134 				&priv->xstats, p);
5135 		/* if the descriptor is still owned by the DMA, stop here; otherwise go ahead */
5136 		if (unlikely(status & dma_own))
5137 			break;
5138 
5139 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5140 						priv->dma_rx_size);
5141 		next_entry = rx_q->cur_rx;
5142 
5143 		if (priv->extend_desc)
5144 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5145 		else
5146 			np = rx_q->dma_rx + next_entry;
5147 
5148 		prefetch(np);
5149 
5150 		if (priv->extend_desc)
5151 			stmmac_rx_extended_status(priv, &priv->dev->stats,
5152 					&priv->xstats, rx_q->dma_erx + entry);
5153 		if (unlikely(status == discard_frame)) {
5154 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5155 			buf->page = NULL;
5156 			error = 1;
5157 			if (!priv->hwts_rx_en)
5158 				priv->dev->stats.rx_errors++;
5159 		}
5160 
5161 		if (unlikely(error && (status & rx_not_ls)))
5162 			goto read_again;
5163 		if (unlikely(error)) {
5164 			dev_kfree_skb(skb);
5165 			skb = NULL;
5166 			count++;
5167 			continue;
5168 		}
5169 
5170 		/* Buffer is good. Go on. */
5171 
5172 		prefetch(page_address(buf->page) + buf->page_offset);
5173 		if (buf->sec_page)
5174 			prefetch(page_address(buf->sec_page));
5175 
5176 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5177 		len += buf1_len;
5178 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5179 		len += buf2_len;
5180 
5181 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5182 		 * Type frames (LLC/LLC-SNAP)
5183 		 *
5184 		 * llc_snap is never checked in GMAC >= 4, so this ACS
5185 		 * feature is always disabled and packets need to be
5186 		 * stripped manually.
5187 		 */
5188 		if (likely(!(status & rx_not_ls)) &&
5189 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5190 		     unlikely(status != llc_snap))) {
5191 			if (buf2_len) {
5192 				buf2_len -= ETH_FCS_LEN;
5193 				len -= ETH_FCS_LEN;
5194 			} else if (buf1_len) {
5195 				buf1_len -= ETH_FCS_LEN;
5196 				len -= ETH_FCS_LEN;
5197 			}
5198 		}
5199 
5200 		if (!skb) {
5201 			unsigned int pre_len, sync_len;
5202 
5203 			dma_sync_single_for_cpu(priv->device, buf->addr,
5204 						buf1_len, dma_dir);
5205 
5206 			xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5207 			xdp_prepare_buff(&xdp, page_address(buf->page),
5208 					 buf->page_offset, buf1_len, false);
5209 
5210 			pre_len = xdp.data_end - xdp.data_hard_start -
5211 				  buf->page_offset;
5212 			skb = stmmac_xdp_run_prog(priv, &xdp);
5213 			/* Due to xdp_adjust_tail: the DMA sync for_device
5214 			 * must cover the maximum length the CPU touched
5215 			 */
5216 			sync_len = xdp.data_end - xdp.data_hard_start -
5217 				   buf->page_offset;
5218 			sync_len = max(sync_len, pre_len);
5219 
5220 			/* For any verdict other than XDP_PASS */
5221 			if (IS_ERR(skb)) {
5222 				unsigned int xdp_res = -PTR_ERR(skb);
5223 
5224 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5225 					page_pool_put_page(rx_q->page_pool,
5226 							   virt_to_head_page(xdp.data),
5227 							   sync_len, true);
5228 					buf->page = NULL;
5229 					priv->dev->stats.rx_dropped++;
5230 
5231 					/* Clear skb, as it only carried the
5232 					 * verdict set by the XDP program.
5233 					 */
5234 					skb = NULL;
5235 
5236 					if (unlikely((status & rx_not_ls)))
5237 						goto read_again;
5238 
5239 					count++;
5240 					continue;
5241 				} else if (xdp_res & (STMMAC_XDP_TX |
5242 						      STMMAC_XDP_REDIRECT)) {
5243 					xdp_status |= xdp_res;
5244 					buf->page = NULL;
5245 					skb = NULL;
5246 					count++;
5247 					continue;
5248 				}
5249 			}
5250 		}
5251 
5252 		if (!skb) {
5253 			/* XDP program may expand or reduce tail */
5254 			buf1_len = xdp.data_end - xdp.data;
5255 
5256 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5257 			if (!skb) {
5258 				priv->dev->stats.rx_dropped++;
5259 				count++;
5260 				goto drain_data;
5261 			}
5262 
5263 			/* XDP program may adjust header */
5264 			skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5265 			skb_put(skb, buf1_len);
5266 
5267 			/* Data payload copied into SKB, page ready for recycle */
5268 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5269 			buf->page = NULL;
5270 		} else if (buf1_len) {
5271 			dma_sync_single_for_cpu(priv->device, buf->addr,
5272 						buf1_len, dma_dir);
5273 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5274 					buf->page, buf->page_offset, buf1_len,
5275 					priv->dma_buf_sz);
5276 
5277 			/* Data payload appended into SKB */
5278 			page_pool_release_page(rx_q->page_pool, buf->page);
5279 			buf->page = NULL;
5280 		}
5281 
5282 		if (buf2_len) {
5283 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5284 						buf2_len, dma_dir);
5285 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5286 					buf->sec_page, 0, buf2_len,
5287 					priv->dma_buf_sz);
5288 
5289 			/* Data payload appended into SKB */
5290 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
5291 			buf->sec_page = NULL;
5292 		}
5293 
5294 drain_data:
5295 		if (likely(status & rx_not_ls))
5296 			goto read_again;
5297 		if (!skb)
5298 			continue;
5299 
5300 		/* Got entire packet into SKB. Finish it. */
5301 
5302 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5303 		stmmac_rx_vlan(priv->dev, skb);
5304 		skb->protocol = eth_type_trans(skb, priv->dev);
5305 
5306 		if (unlikely(!coe))
5307 			skb_checksum_none_assert(skb);
5308 		else
5309 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5310 
5311 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5312 			skb_set_hash(skb, hash, hash_type);
5313 
5314 		skb_record_rx_queue(skb, queue);
5315 		napi_gro_receive(&ch->rx_napi, skb);
5316 		skb = NULL;
5317 
5318 		priv->dev->stats.rx_packets++;
5319 		priv->dev->stats.rx_bytes += len;
5320 		count++;
5321 	}
5322 
5323 	if (status & rx_not_ls || skb) {
5324 		rx_q->state_saved = true;
5325 		rx_q->state.skb = skb;
5326 		rx_q->state.error = error;
5327 		rx_q->state.len = len;
5328 	}
5329 
5330 	stmmac_finalize_xdp_rx(priv, xdp_status);
5331 
5332 	stmmac_rx_refill(priv, queue);
5333 
5334 	priv->xstats.rx_pkt_n += count;
5335 	priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5336 
5337 	return count;
5338 }
5339 
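/* RX NAPI poll callback: process up to @budget received frames on the
 * channel; once less work than the budget is done, complete NAPI and
 * re-enable the RX DMA interrupt for this channel.
 */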
5340 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5341 {
5342 	struct stmmac_channel *ch =
5343 		container_of(napi, struct stmmac_channel, rx_napi);
5344 	struct stmmac_priv *priv = ch->priv_data;
5345 	u32 chan = ch->index;
5346 	int work_done;
5347 
5348 	priv->xstats.napi_poll++;
5349 
5350 	work_done = stmmac_rx(priv, budget, chan);
5351 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5352 		unsigned long flags;
5353 
5354 		spin_lock_irqsave(&ch->lock, flags);
5355 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5356 		spin_unlock_irqrestore(&ch->lock, flags);
5357 	}
5358 
5359 	return work_done;
5360 }
5361 
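/* TX NAPI poll callback: clean completed TX descriptors on the channel;
 * once less work than the budget is done, complete NAPI and re-enable the
 * TX DMA interrupt for this channel.
 */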
5362 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5363 {
5364 	struct stmmac_channel *ch =
5365 		container_of(napi, struct stmmac_channel, tx_napi);
5366 	struct stmmac_priv *priv = ch->priv_data;
5367 	u32 chan = ch->index;
5368 	int work_done;
5369 
5370 	priv->xstats.napi_poll++;
5371 
5372 	work_done = stmmac_tx_clean(priv, budget, chan);
5373 	work_done = min(work_done, budget);
5374 
5375 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5376 		unsigned long flags;
5377 
5378 		spin_lock_irqsave(&ch->lock, flags);
5379 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5380 		spin_unlock_irqrestore(&ch->lock, flags);
5381 	}
5382 
5383 	return work_done;
5384 }
5385 
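/* Combined RX/TX NAPI poll callback used for XDP zero-copy channels: clean
 * TX descriptors and process zero-copy RX; both RX and TX DMA interrupts
 * are re-enabled only once all work is complete.
 */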
5386 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5387 {
5388 	struct stmmac_channel *ch =
5389 		container_of(napi, struct stmmac_channel, rxtx_napi);
5390 	struct stmmac_priv *priv = ch->priv_data;
5391 	int rx_done, tx_done, rxtx_done;
5392 	u32 chan = ch->index;
5393 
5394 	priv->xstats.napi_poll++;
5395 
5396 	tx_done = stmmac_tx_clean(priv, budget, chan);
5397 	tx_done = min(tx_done, budget);
5398 
5399 	rx_done = stmmac_rx_zc(priv, budget, chan);
5400 
5401 	rxtx_done = max(tx_done, rx_done);
5402 
5403 	/* If either TX or RX work is not complete, return budget
5404 	 * and keep polling
5405 	 */
5406 	if (rxtx_done >= budget)
5407 		return budget;
5408 
5409 	/* all work done, exit the polling mode */
5410 	if (napi_complete_done(napi, rxtx_done)) {
5411 		unsigned long flags;
5412 
5413 		spin_lock_irqsave(&ch->lock, flags);
5414 		/* Both RX and TX work are complete,
5415 		 * so enable both RX & TX IRQs.
5416 		 */
5417 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5418 		spin_unlock_irqrestore(&ch->lock, flags);
5419 	}
5420 
5421 	return min(rxtx_done, budget - 1);
5422 }
5423 
5424 /**
5425  *  stmmac_tx_timeout
5426  *  @dev : Pointer to net device structure
5427  *  @txqueue: the index of the hanging transmit queue
5428  *  Description: this function is called when a packet transmission fails to
5429  *   complete within a reasonable time. The driver will mark the error in the
5430  *   netdev structure and arrange for the device to be reset to a sane state
5431  *   in order to transmit a new packet.
5432  */
5433 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5434 {
5435 	struct stmmac_priv *priv = netdev_priv(dev);
5436 
5437 	stmmac_global_err(priv);
5438 }
5439 
5440 /**
5441  *  stmmac_set_rx_mode - entry point for multicast addressing
5442  *  @dev : pointer to the device structure
5443  *  Description:
5444  *  This function is a driver entry point which gets called by the kernel
5445  *  whenever multicast addresses must be enabled/disabled.
5446  *  Return value:
5447  *  void.
5448  */
5449 static void stmmac_set_rx_mode(struct net_device *dev)
5450 {
5451 	struct stmmac_priv *priv = netdev_priv(dev);
5452 
5453 	stmmac_set_filter(priv, priv->hw, dev);
5454 }
5455 
5456 /**
5457  *  stmmac_change_mtu - entry point to change MTU size for the device.
5458  *  @dev : device pointer.
5459  *  @new_mtu : the new MTU size for the device.
5460  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5461  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5462  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5463  *  Return value:
5464  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5465  *  file on failure.
5466  */
5467 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5468 {
5469 	struct stmmac_priv *priv = netdev_priv(dev);
5470 	int txfifosz = priv->plat->tx_fifo_size;
5471 	const int mtu = new_mtu;
5472 
5473 	if (txfifosz == 0)
5474 		txfifosz = priv->dma_cap.tx_fifo_size;
5475 
5476 	txfifosz /= priv->plat->tx_queues_to_use;
5477 
5478 	if (netif_running(dev)) {
5479 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
5480 		return -EBUSY;
5481 	}
5482 
5483 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5484 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5485 		return -EINVAL;
5486 	}
5487 
5488 	new_mtu = STMMAC_ALIGN(new_mtu);
5489 
5490 	/* If the condition is true, the FIFO is too small or the MTU too large */
5491 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5492 		return -EINVAL;
5493 
5494 	dev->mtu = mtu;
5495 
5496 	netdev_update_features(dev);
5497 
5498 	return 0;
5499 }
5500 
5501 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5502 					     netdev_features_t features)
5503 {
5504 	struct stmmac_priv *priv = netdev_priv(dev);
5505 
5506 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5507 		features &= ~NETIF_F_RXCSUM;
5508 
5509 	if (!priv->plat->tx_coe)
5510 		features &= ~NETIF_F_CSUM_MASK;
5511 
5512 	/* Some GMAC devices have buggy Jumbo frame support that
5513 	 * requires the TX COE to be disabled for oversized frames
5514 	 * (due to limited buffer sizes). In this case we disable
5515 	 * the TX csum insertion in the TDES and do not use SF.
5516 	 */
5517 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5518 		features &= ~NETIF_F_CSUM_MASK;
5519 
5520 	/* Disable tso if asked by ethtool */
5521 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5522 		if (features & NETIF_F_TSO)
5523 			priv->tso = true;
5524 		else
5525 			priv->tso = false;
5526 	}
5527 
5528 	return features;
5529 }
5530 
5531 static int stmmac_set_features(struct net_device *netdev,
5532 			       netdev_features_t features)
5533 {
5534 	struct stmmac_priv *priv = netdev_priv(netdev);
5535 
5536 	/* Keep the COE Type in case checksum offload is supported */
5537 	if (features & NETIF_F_RXCSUM)
5538 		priv->hw->rx_csum = priv->plat->rx_coe;
5539 	else
5540 		priv->hw->rx_csum = 0;
5541 	/* No check needed because rx_coe has been set before and it will be
5542 	 * fixed in case of issue.
5543 	 */
5544 	stmmac_rx_ipc(priv, priv->hw);
5545 
5546 	if (priv->sph_cap) {
5547 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5548 		u32 chan;
5549 
5550 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5551 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5552 	}
5553 
5554 	return 0;
5555 }
5556 
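/* Track the Frame Preemption (FPE) handshake state based on verify/response
 * mPacket events and, when progress is possible, schedule the FPE workqueue
 * task to continue the handshake with the link partner.
 */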
5557 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5558 {
5559 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5560 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5561 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5562 	bool *hs_enable = &fpe_cfg->hs_enable;
5563 
5564 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5565 		return;
5566 
5567 	/* If LP has sent verify mPacket, LP is FPE capable */
5568 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5569 		if (*lp_state < FPE_STATE_CAPABLE)
5570 			*lp_state = FPE_STATE_CAPABLE;
5571 
5572 		/* If the user has requested FPE enable, respond quickly */
5573 		if (*hs_enable)
5574 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5575 						MPACKET_RESPONSE);
5576 	}
5577 
5578 	/* If Local has sent verify mPacket, Local is FPE capable */
5579 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5580 		if (*lo_state < FPE_STATE_CAPABLE)
5581 			*lo_state = FPE_STATE_CAPABLE;
5582 	}
5583 
5584 	/* If LP has sent response mPacket, LP is entering FPE ON */
5585 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5586 		*lp_state = FPE_STATE_ENTERING_ON;
5587 
5588 	/* If Local has sent response mPacket, Local is entering FPE ON */
5589 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5590 		*lo_state = FPE_STATE_ENTERING_ON;
5591 
5592 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5593 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5594 	    priv->fpe_wq) {
5595 		queue_work(priv->fpe_wq, &priv->fpe_task);
5596 	}
5597 }
5598 
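/* Handle the interrupt sources shared by all IRQ lines: wake-up events,
 * EST and FPE status, GMAC/XGMAC core and MTL queue interrupts, PCS link
 * changes and timestamp interrupts.
 */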
5599 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5600 {
5601 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5602 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5603 	u32 queues_count;
5604 	u32 queue;
5605 	bool xmac;
5606 
5607 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5608 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5609 
5610 	if (priv->irq_wake)
5611 		pm_wakeup_event(priv->device, 0);
5612 
5613 	if (priv->dma_cap.estsel)
5614 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5615 				      &priv->xstats, tx_cnt);
5616 
5617 	if (priv->dma_cap.fpesel) {
5618 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5619 						   priv->dev);
5620 
5621 		stmmac_fpe_event_status(priv, status);
5622 	}
5623 
5624 	/* To handle GMAC own interrupts */
5625 	/* To handle the GMAC's own interrupts */
5626 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5627 
5628 		if (unlikely(status)) {
5629 			/* For LPI we need to save the tx status */
5630 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5631 				priv->tx_path_in_lpi_mode = true;
5632 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5633 				priv->tx_path_in_lpi_mode = false;
5634 		}
5635 
5636 		for (queue = 0; queue < queues_count; queue++) {
5637 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5638 							    queue);
5639 		}
5640 
5641 		/* PCS link status */
5642 		if (priv->hw->pcs) {
5643 			if (priv->xstats.pcs_link)
5644 				netif_carrier_on(priv->dev);
5645 			else
5646 				netif_carrier_off(priv->dev);
5647 		}
5648 
5649 		stmmac_timestamp_interrupt(priv, priv);
5650 	}
5651 }
5652 
5653 /**
5654  *  stmmac_interrupt - main ISR
5655  *  @irq: interrupt number.
5656  *  @dev_id: to pass the net device pointer.
5657  *  Description: this is the main driver interrupt service routine.
5658  *  It can call:
5659  *  o DMA service routine (to manage incoming frame reception and transmission
5660  *    status)
5661  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5662  *    interrupts.
5663  */
5664 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5665 {
5666 	struct net_device *dev = (struct net_device *)dev_id;
5667 	struct stmmac_priv *priv = netdev_priv(dev);
5668 
5669 	/* Check if adapter is up */
5670 	if (test_bit(STMMAC_DOWN, &priv->state))
5671 		return IRQ_HANDLED;
5672 
5673 	/* Check if a fatal error happened */
5674 	if (stmmac_safety_feat_interrupt(priv))
5675 		return IRQ_HANDLED;
5676 
5677 	/* To handle Common interrupts */
5678 	stmmac_common_interrupt(priv);
5679 
5680 	/* To handle DMA interrupts */
5681 	stmmac_dma_interrupt(priv);
5682 
5683 	return IRQ_HANDLED;
5684 }
5685 
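/* MAC interrupt handler, used when a dedicated MAC IRQ line is available;
 * it only dispatches the common (non-DMA) interrupt sources.
 */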
5686 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5687 {
5688 	struct net_device *dev = (struct net_device *)dev_id;
5689 	struct stmmac_priv *priv = netdev_priv(dev);
5690 
5691 	if (unlikely(!dev)) {
5692 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5693 		return IRQ_NONE;
5694 	}
5695 
5696 	/* Check if adapter is up */
5697 	if (test_bit(STMMAC_DOWN, &priv->state))
5698 		return IRQ_HANDLED;
5699 
5700 	/* To handle Common interrupts */
5701 	stmmac_common_interrupt(priv);
5702 
5703 	return IRQ_HANDLED;
5704 }
5705 
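/* Safety feature interrupt handler: check for and report fatal safety
 * errors signalled on the dedicated safety IRQ line.
 */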
5706 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5707 {
5708 	struct net_device *dev = (struct net_device *)dev_id;
5709 	struct stmmac_priv *priv = netdev_priv(dev);
5710 
5711 	if (unlikely(!dev)) {
5712 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5713 		return IRQ_NONE;
5714 	}
5715 
5716 	/* Check if adapter is up */
5717 	if (test_bit(STMMAC_DOWN, &priv->state))
5718 		return IRQ_HANDLED;
5719 
5720 	/* Check if a fatal error happened */
5721 	stmmac_safety_feat_interrupt(priv);
5722 
5723 	return IRQ_HANDLED;
5724 }
5725 
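/* Per-channel TX MSI interrupt handler: schedule TX NAPI and, on TX errors,
 * either bump the DMA threshold or trigger TX error recovery.
 */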
5726 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5727 {
5728 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5729 	int chan = tx_q->queue_index;
5730 	struct stmmac_priv *priv;
5731 	int status;
5732 
5733 	priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
5734 
5735 	if (unlikely(!data)) {
5736 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5737 		return IRQ_NONE;
5738 	}
5739 
5740 	/* Check if adapter is up */
5741 	if (test_bit(STMMAC_DOWN, &priv->state))
5742 		return IRQ_HANDLED;
5743 
5744 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5745 
5746 	if (unlikely(status & tx_hard_error_bump_tc)) {
5747 		/* Try to bump up the dma threshold on this failure */
5748 		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
5749 		    tc <= 256) {
5750 			tc += 64;
5751 			if (priv->plat->force_thresh_dma_mode)
5752 				stmmac_set_dma_operation_mode(priv,
5753 							      tc,
5754 							      tc,
5755 							      chan);
5756 			else
5757 				stmmac_set_dma_operation_mode(priv,
5758 							      tc,
5759 							      SF_DMA_MODE,
5760 							      chan);
5761 			priv->xstats.threshold = tc;
5762 		}
5763 	} else if (unlikely(status == tx_hard_error)) {
5764 		stmmac_tx_err(priv, chan);
5765 	}
5766 
5767 	return IRQ_HANDLED;
5768 }
5769 
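/* Per-channel RX MSI interrupt handler: schedule RX NAPI for the channel. */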
5770 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5771 {
5772 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5773 	int chan = rx_q->queue_index;
5774 	struct stmmac_priv *priv;
5775 
5776 	priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
5777 
5778 	if (unlikely(!data)) {
5779 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5780 		return IRQ_NONE;
5781 	}
5782 
5783 	/* Check if adapter is up */
5784 	if (test_bit(STMMAC_DOWN, &priv->state))
5785 		return IRQ_HANDLED;
5786 
5787 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
5788 
5789 	return IRQ_HANDLED;
5790 }
5791 
5792 #ifdef CONFIG_NET_POLL_CONTROLLER
5793 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5794  * to allow network I/O with interrupts disabled.
5795  */
5796 static void stmmac_poll_controller(struct net_device *dev)
5797 {
5798 	struct stmmac_priv *priv = netdev_priv(dev);
5799 	int i;
5800 
5801 	/* If adapter is down, do nothing */
5802 	if (test_bit(STMMAC_DOWN, &priv->state))
5803 		return;
5804 
5805 	if (priv->plat->multi_msi_en) {
5806 		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5807 			stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
5808 
5809 		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5810 			stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
5811 	} else {
5812 		disable_irq(dev->irq);
5813 		stmmac_interrupt(dev->irq, dev);
5814 		enable_irq(dev->irq);
5815 	}
5816 }
5817 #endif
5818 
5819 /**
5820  *  stmmac_ioctl - Entry point for the Ioctl
5821  *  @dev: Device pointer.
5822  *  @rq: An IOCTL-specific structure that can contain a pointer to
5823  *  a proprietary structure used to pass information to the driver.
5824  *  @cmd: IOCTL command
5825  *  Description:
5826  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
5827  */
5828 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5829 {
5830 	struct stmmac_priv *priv = netdev_priv(dev);
5831 	int ret = -EOPNOTSUPP;
5832 
5833 	if (!netif_running(dev))
5834 		return -EINVAL;
5835 
5836 	switch (cmd) {
5837 	case SIOCGMIIPHY:
5838 	case SIOCGMIIREG:
5839 	case SIOCSMIIREG:
5840 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5841 		break;
5842 	case SIOCSHWTSTAMP:
5843 		ret = stmmac_hwtstamp_set(dev, rq);
5844 		break;
5845 	case SIOCGHWTSTAMP:
5846 		ret = stmmac_hwtstamp_get(dev, rq);
5847 		break;
5848 	default:
5849 		break;
5850 	}
5851 
5852 	return ret;
5853 }
5854 
5855 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5856 				    void *cb_priv)
5857 {
5858 	struct stmmac_priv *priv = cb_priv;
5859 	int ret = -EOPNOTSUPP;
5860 
5861 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5862 		return ret;
5863 
5864 	__stmmac_disable_all_queues(priv);
5865 
5866 	switch (type) {
5867 	case TC_SETUP_CLSU32:
5868 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5869 		break;
5870 	case TC_SETUP_CLSFLOWER:
5871 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
5872 		break;
5873 	default:
5874 		break;
5875 	}
5876 
5877 	stmmac_enable_all_queues(priv);
5878 	return ret;
5879 }
5880 
5881 static LIST_HEAD(stmmac_block_cb_list);
5882 
5883 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5884 			   void *type_data)
5885 {
5886 	struct stmmac_priv *priv = netdev_priv(ndev);
5887 
5888 	switch (type) {
5889 	case TC_SETUP_BLOCK:
5890 		return flow_block_cb_setup_simple(type_data,
5891 						  &stmmac_block_cb_list,
5892 						  stmmac_setup_tc_block_cb,
5893 						  priv, priv, true);
5894 	case TC_SETUP_QDISC_CBS:
5895 		return stmmac_tc_setup_cbs(priv, priv, type_data);
5896 	case TC_SETUP_QDISC_TAPRIO:
5897 		return stmmac_tc_setup_taprio(priv, priv, type_data);
5898 	case TC_SETUP_QDISC_ETF:
5899 		return stmmac_tc_setup_etf(priv, priv, type_data);
5900 	default:
5901 		return -EOPNOTSUPP;
5902 	}
5903 }
5904 
5905 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
5906 			       struct net_device *sb_dev)
5907 {
5908 	int gso = skb_shinfo(skb)->gso_type;
5909 
5910 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
5911 		/*
5912 		 * There is no way to determine the number of TSO/USO
5913 		 * capable Queues. Let's always use Queue 0
5914 		 * because if TSO/USO is supported then at least this
5915 		 * one will be capable.
5916 		 */
5917 		return 0;
5918 	}
5919 
5920 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
5921 }
5922 
5923 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
5924 {
5925 	struct stmmac_priv *priv = netdev_priv(ndev);
5926 	int ret = 0;
5927 
5928 	ret = pm_runtime_get_sync(priv->device);
5929 	if (ret < 0) {
5930 		pm_runtime_put_noidle(priv->device);
5931 		return ret;
5932 	}
5933 
5934 	ret = eth_mac_addr(ndev, addr);
5935 	if (ret)
5936 		goto set_mac_error;
5937 
5938 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
5939 
5940 set_mac_error:
5941 	pm_runtime_put(priv->device);
5942 
5943 	return ret;
5944 }
5945 
5946 #ifdef CONFIG_DEBUG_FS
5947 static struct dentry *stmmac_fs_dir;
5948 
5949 static void sysfs_display_ring(void *head, int size, int extend_desc,
5950 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
5951 {
5952 	int i;
5953 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
5954 	struct dma_desc *p = (struct dma_desc *)head;
5955 	dma_addr_t dma_addr;
5956 
5957 	for (i = 0; i < size; i++) {
5958 		if (extend_desc) {
5959 			dma_addr = dma_phy_addr + i * sizeof(*ep);
5960 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5961 				   i, &dma_addr,
5962 				   le32_to_cpu(ep->basic.des0),
5963 				   le32_to_cpu(ep->basic.des1),
5964 				   le32_to_cpu(ep->basic.des2),
5965 				   le32_to_cpu(ep->basic.des3));
5966 			ep++;
5967 		} else {
5968 			dma_addr = dma_phy_addr + i * sizeof(*p);
5969 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5970 				   i, &dma_addr,
5971 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
5972 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
5973 			p++;
5974 		}
5975 		seq_printf(seq, "\n");
5976 	}
5977 }
5978 
5979 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
5980 {
5981 	struct net_device *dev = seq->private;
5982 	struct stmmac_priv *priv = netdev_priv(dev);
5983 	u32 rx_count = priv->plat->rx_queues_to_use;
5984 	u32 tx_count = priv->plat->tx_queues_to_use;
5985 	u32 queue;
5986 
5987 	if ((dev->flags & IFF_UP) == 0)
5988 		return 0;
5989 
5990 	for (queue = 0; queue < rx_count; queue++) {
5991 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5992 
5993 		seq_printf(seq, "RX Queue %d:\n", queue);
5994 
5995 		if (priv->extend_desc) {
5996 			seq_printf(seq, "Extended descriptor ring:\n");
5997 			sysfs_display_ring((void *)rx_q->dma_erx,
5998 					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
5999 		} else {
6000 			seq_printf(seq, "Descriptor ring:\n");
6001 			sysfs_display_ring((void *)rx_q->dma_rx,
6002 					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6003 		}
6004 	}
6005 
6006 	for (queue = 0; queue < tx_count; queue++) {
6007 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6008 
6009 		seq_printf(seq, "TX Queue %d:\n", queue);
6010 
6011 		if (priv->extend_desc) {
6012 			seq_printf(seq, "Extended descriptor ring:\n");
6013 			sysfs_display_ring((void *)tx_q->dma_etx,
6014 					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6015 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6016 			seq_printf(seq, "Descriptor ring:\n");
6017 			sysfs_display_ring((void *)tx_q->dma_tx,
6018 					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6019 		}
6020 	}
6021 
6022 	return 0;
6023 }
6024 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6025 
6026 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6027 {
6028 	struct net_device *dev = seq->private;
6029 	struct stmmac_priv *priv = netdev_priv(dev);
6030 
6031 	if (!priv->hw_cap_support) {
6032 		seq_printf(seq, "DMA HW features not supported\n");
6033 		return 0;
6034 	}
6035 
6036 	seq_printf(seq, "==============================\n");
6037 	seq_printf(seq, "\tDMA HW features\n");
6038 	seq_printf(seq, "==============================\n");
6039 
6040 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6041 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6042 	seq_printf(seq, "\t1000 Mbps: %s\n",
6043 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6044 	seq_printf(seq, "\tHalf duplex: %s\n",
6045 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6046 	seq_printf(seq, "\tHash Filter: %s\n",
6047 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
6048 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6049 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
6050 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6051 		   (priv->dma_cap.pcs) ? "Y" : "N");
6052 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6053 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6054 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6055 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6056 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6057 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6058 	seq_printf(seq, "\tRMON module: %s\n",
6059 		   (priv->dma_cap.rmon) ? "Y" : "N");
6060 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6061 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6062 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6063 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6064 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6065 		   (priv->dma_cap.eee) ? "Y" : "N");
6066 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6067 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6068 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6069 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6070 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6071 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6072 	} else {
6073 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6074 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6075 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6076 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6077 	}
6078 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6079 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6080 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6081 		   priv->dma_cap.number_rx_channel);
6082 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6083 		   priv->dma_cap.number_tx_channel);
6084 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6085 		   priv->dma_cap.number_rx_queues);
6086 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6087 		   priv->dma_cap.number_tx_queues);
6088 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6089 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6090 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6091 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6092 	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6093 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6094 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6095 		   priv->dma_cap.pps_out_num);
6096 	seq_printf(seq, "\tSafety Features: %s\n",
6097 		   priv->dma_cap.asp ? "Y" : "N");
6098 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6099 		   priv->dma_cap.frpsel ? "Y" : "N");
6100 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6101 		   priv->dma_cap.addr64);
6102 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6103 		   priv->dma_cap.rssen ? "Y" : "N");
6104 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6105 		   priv->dma_cap.vlhash ? "Y" : "N");
6106 	seq_printf(seq, "\tSplit Header: %s\n",
6107 		   priv->dma_cap.sphen ? "Y" : "N");
6108 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6109 		   priv->dma_cap.vlins ? "Y" : "N");
6110 	seq_printf(seq, "\tDouble VLAN: %s\n",
6111 		   priv->dma_cap.dvlan ? "Y" : "N");
6112 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6113 		   priv->dma_cap.l3l4fnum);
6114 	seq_printf(seq, "\tARP Offloading: %s\n",
6115 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6116 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6117 		   priv->dma_cap.estsel ? "Y" : "N");
6118 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6119 		   priv->dma_cap.fpesel ? "Y" : "N");
6120 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6121 		   priv->dma_cap.tbssel ? "Y" : "N");
6122 	return 0;
6123 }
6124 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6125 
6126 /* Use network device events to rename debugfs file entries.
6127  */
6128 static int stmmac_device_event(struct notifier_block *unused,
6129 			       unsigned long event, void *ptr)
6130 {
6131 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6132 	struct stmmac_priv *priv = netdev_priv(dev);
6133 
6134 	if (dev->netdev_ops != &stmmac_netdev_ops)
6135 		goto done;
6136 
6137 	switch (event) {
6138 	case NETDEV_CHANGENAME:
6139 		if (priv->dbgfs_dir)
6140 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6141 							 priv->dbgfs_dir,
6142 							 stmmac_fs_dir,
6143 							 dev->name);
6144 		break;
6145 	}
6146 done:
6147 	return NOTIFY_DONE;
6148 }
6149 
6150 static struct notifier_block stmmac_notifier = {
6151 	.notifier_call = stmmac_device_event,
6152 };
6153 
6154 static void stmmac_init_fs(struct net_device *dev)
6155 {
6156 	struct stmmac_priv *priv = netdev_priv(dev);
6157 
6158 	rtnl_lock();
6159 
6160 	/* Create per netdev entries */
6161 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6162 
6163 	/* Entry to report DMA RX/TX rings */
6164 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6165 			    &stmmac_rings_status_fops);
6166 
6167 	/* Entry to report the DMA HW features */
6168 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6169 			    &stmmac_dma_cap_fops);
6170 
6171 	rtnl_unlock();
6172 }
6173 
6174 static void stmmac_exit_fs(struct net_device *dev)
6175 {
6176 	struct stmmac_priv *priv = netdev_priv(dev);
6177 
6178 	debugfs_remove_recursive(priv->dbgfs_dir);
6179 }
6180 #endif /* CONFIG_DEBUG_FS */
6181 
6182 static u32 stmmac_vid_crc32_le(__le16 vid_le)
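/* Bit-reflected CRC-32 (polynomial 0xedb88320) over the 12 VID bits of a
 * little-endian VLAN tag, used to index the VLAN hash filter.
 */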
6183 {
6184 	unsigned char *data = (unsigned char *)&vid_le;
6185 	unsigned char data_byte = 0;
6186 	u32 crc = ~0x0;
6187 	u32 temp = 0;
6188 	int i, bits;
6189 
6190 	bits = get_bitmask_order(VLAN_VID_MASK);
6191 	for (i = 0; i < bits; i++) {
6192 		if ((i % 8) == 0)
6193 			data_byte = data[i / 8];
6194 
6195 		temp = ((crc & 1) ^ data_byte) & 1;
6196 		crc >>= 1;
6197 		data_byte >>= 1;
6198 
6199 		if (temp)
6200 			crc ^= 0xedb88320;
6201 	}
6202 
6203 	return crc;
6204 }
6205 
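/* Recompute the VLAN filter from the set of active VLAN IDs and program it
 * into the MAC: a CRC-based hash filter when the HW supports VLAN hash
 * filtering, otherwise a single perfect-match entry (besides VID 0).
 */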
6206 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6207 {
6208 	u32 crc, hash = 0;
6209 	__le16 pmatch = 0;
6210 	int count = 0;
6211 	u16 vid = 0;
6212 
6213 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6214 		__le16 vid_le = cpu_to_le16(vid);
6215 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6216 		hash |= (1 << crc);
6217 		count++;
6218 	}
6219 
6220 	if (!priv->dma_cap.vlhash) {
6221 		if (count > 2) /* VID = 0 always passes filter */
6222 			return -EOPNOTSUPP;
6223 
6224 		pmatch = cpu_to_le16(vid);
6225 		hash = 0;
6226 	}
6227 
6228 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6229 }
6230 
6231 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6232 {
6233 	struct stmmac_priv *priv = netdev_priv(ndev);
6234 	bool is_double = false;
6235 	int ret;
6236 
6237 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6238 		is_double = true;
6239 
6240 	set_bit(vid, priv->active_vlans);
6241 	ret = stmmac_vlan_update(priv, is_double);
6242 	if (ret) {
6243 		clear_bit(vid, priv->active_vlans);
6244 		return ret;
6245 	}
6246 
6247 	if (priv->hw->num_vlan) {
6248 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6249 		if (ret)
6250 			return ret;
6251 	}
6252 
6253 	return 0;
6254 }
6255 
6256 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6257 {
6258 	struct stmmac_priv *priv = netdev_priv(ndev);
6259 	bool is_double = false;
6260 	int ret;
6261 
6262 	ret = pm_runtime_get_sync(priv->device);
6263 	if (ret < 0) {
6264 		pm_runtime_put_noidle(priv->device);
6265 		return ret;
6266 	}
6267 
6268 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6269 		is_double = true;
6270 
6271 	clear_bit(vid, priv->active_vlans);
6272 
6273 	if (priv->hw->num_vlan) {
6274 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6275 		if (ret)
6276 			goto del_vlan_error;
6277 	}
6278 
6279 	ret = stmmac_vlan_update(priv, is_double);
6280 
6281 del_vlan_error:
6282 	pm_runtime_put(priv->device);
6283 
6284 	return ret;
6285 }
6286 
6287 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6288 {
6289 	struct stmmac_priv *priv = netdev_priv(dev);
6290 
6291 	switch (bpf->command) {
6292 	case XDP_SETUP_PROG:
6293 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6294 	case XDP_SETUP_XSK_POOL:
6295 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6296 					     bpf->xsk.queue_id);
6297 	default:
6298 		return -EOPNOTSUPP;
6299 	}
6300 }
6301 
6302 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6303 			   struct xdp_frame **frames, u32 flags)
6304 {
6305 	struct stmmac_priv *priv = netdev_priv(dev);
6306 	int cpu = smp_processor_id();
6307 	struct netdev_queue *nq;
6308 	int i, nxmit = 0;
6309 	int queue;
6310 
6311 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6312 		return -ENETDOWN;
6313 
6314 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6315 		return -EINVAL;
6316 
6317 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6318 	nq = netdev_get_tx_queue(priv->dev, queue);
6319 
6320 	__netif_tx_lock(nq, cpu);
6321 	/* Avoids TX time-out as we are sharing with slow path */
6322 	txq_trans_cond_update(nq);
6323 
6324 	for (i = 0; i < num_frames; i++) {
6325 		int res;
6326 
6327 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6328 		if (res == STMMAC_XDP_CONSUMED)
6329 			break;
6330 
6331 		nxmit++;
6332 	}
6333 
6334 	if (flags & XDP_XMIT_FLUSH) {
6335 		stmmac_flush_tx_descriptors(priv, queue);
6336 		stmmac_tx_timer_arm(priv, queue);
6337 	}
6338 
6339 	__netif_tx_unlock(nq);
6340 
6341 	return nxmit;
6342 }
6343 
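/* Quiesce a single RX queue at runtime (e.g. when an XSK buffer pool is
 * attached or detached): mask its DMA interrupt, stop RX DMA and free its
 * descriptor resources. stmmac_enable_rx_queue() does the reverse.
 */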
6344 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6345 {
6346 	struct stmmac_channel *ch = &priv->channel[queue];
6347 	unsigned long flags;
6348 
6349 	spin_lock_irqsave(&ch->lock, flags);
6350 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6351 	spin_unlock_irqrestore(&ch->lock, flags);
6352 
6353 	stmmac_stop_rx_dma(priv, queue);
6354 	__free_dma_rx_desc_resources(priv, queue);
6355 }
6356 
6357 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6358 {
6359 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
6360 	struct stmmac_channel *ch = &priv->channel[queue];
6361 	unsigned long flags;
6362 	u32 buf_size;
6363 	int ret;
6364 
6365 	ret = __alloc_dma_rx_desc_resources(priv, queue);
6366 	if (ret) {
6367 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6368 		return;
6369 	}
6370 
6371 	ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
6372 	if (ret) {
6373 		__free_dma_rx_desc_resources(priv, queue);
6374 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6375 		return;
6376 	}
6377 
6378 	stmmac_clear_rx_descriptors(priv, queue);
6379 
6380 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6381 			    rx_q->dma_rx_phy, rx_q->queue_index);
6382 
6383 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6384 			     sizeof(struct dma_desc));
6385 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6386 			       rx_q->rx_tail_addr, rx_q->queue_index);
6387 
6388 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6389 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6390 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6391 				      buf_size,
6392 				      rx_q->queue_index);
6393 	} else {
6394 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6395 				      priv->dma_buf_sz,
6396 				      rx_q->queue_index);
6397 	}
6398 
6399 	stmmac_start_rx_dma(priv, queue);
6400 
6401 	spin_lock_irqsave(&ch->lock, flags);
6402 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6403 	spin_unlock_irqrestore(&ch->lock, flags);
6404 }
6405 
6406 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6407 {
6408 	struct stmmac_channel *ch = &priv->channel[queue];
6409 	unsigned long flags;
6410 
6411 	spin_lock_irqsave(&ch->lock, flags);
6412 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6413 	spin_unlock_irqrestore(&ch->lock, flags);
6414 
6415 	stmmac_stop_tx_dma(priv, queue);
6416 	__free_dma_tx_desc_resources(priv, queue);
6417 }
6418 
6419 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6420 {
6421 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6422 	struct stmmac_channel *ch = &priv->channel[queue];
6423 	unsigned long flags;
6424 	int ret;
6425 
6426 	ret = __alloc_dma_tx_desc_resources(priv, queue);
6427 	if (ret) {
6428 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6429 		return;
6430 	}
6431 
6432 	ret = __init_dma_tx_desc_rings(priv, queue);
6433 	if (ret) {
6434 		__free_dma_tx_desc_resources(priv, queue);
6435 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6436 		return;
6437 	}
6438 
6439 	stmmac_clear_tx_descriptors(priv, queue);
6440 
6441 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6442 			    tx_q->dma_tx_phy, tx_q->queue_index);
6443 
6444 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6445 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6446 
6447 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6448 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6449 			       tx_q->tx_tail_addr, tx_q->queue_index);
6450 
6451 	stmmac_start_tx_dma(priv, queue);
6452 
6453 	spin_lock_irqsave(&ch->lock, flags);
6454 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6455 	spin_unlock_irqrestore(&ch->lock, flags);
6456 }
6457 
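/* Tear down the whole datapath (NAPI, TX timers, IRQs, DMA channels,
 * descriptor resources and the MAC) before an XDP setup change; the
 * counterpart of stmmac_xdp_open().
 */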
6458 void stmmac_xdp_release(struct net_device *dev)
6459 {
6460 	struct stmmac_priv *priv = netdev_priv(dev);
6461 	u32 chan;
6462 
6463 	/* Disable NAPI process */
6464 	stmmac_disable_all_queues(priv);
6465 
6466 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6467 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
6468 
6469 	/* Free the IRQ lines */
6470 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6471 
6472 	/* Stop TX/RX DMA channels */
6473 	stmmac_stop_all_dma(priv);
6474 
6475 	/* Release and free the Rx/Tx resources */
6476 	free_dma_desc_resources(priv);
6477 
6478 	/* Disable the MAC Rx/Tx */
6479 	stmmac_mac_set(priv, priv->ioaddr, false);
6480 
6481 	/* set trans_start so we don't get spurious
6482 	 * watchdogs during reset
6483 	 */
6484 	netif_trans_update(dev);
6485 	netif_carrier_off(dev);
6486 }
6487 
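/* Bring the datapath back up after an XDP setup change: re-allocate and
 * re-initialize the descriptor rings, reconfigure the DMA channels and
 * restart the MAC, IRQs and NAPI.
 */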
6488 int stmmac_xdp_open(struct net_device *dev)
6489 {
6490 	struct stmmac_priv *priv = netdev_priv(dev);
6491 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6492 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6493 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6494 	struct stmmac_rx_queue *rx_q;
6495 	struct stmmac_tx_queue *tx_q;
6496 	u32 buf_size;
6497 	bool sph_en;
6498 	u32 chan;
6499 	int ret;
6500 
6501 	ret = alloc_dma_desc_resources(priv);
6502 	if (ret < 0) {
6503 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6504 			   __func__);
6505 		goto dma_desc_error;
6506 	}
6507 
6508 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
6509 	if (ret < 0) {
6510 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6511 			   __func__);
6512 		goto init_error;
6513 	}
6514 
6515 	/* DMA CSR Channel configuration */
6516 	for (chan = 0; chan < dma_csr_ch; chan++)
6517 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6518 
6519 	/* Adjust Split header */
6520 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6521 
6522 	/* DMA RX Channel Configuration */
6523 	for (chan = 0; chan < rx_cnt; chan++) {
6524 		rx_q = &priv->rx_queue[chan];
6525 
6526 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6527 				    rx_q->dma_rx_phy, chan);
6528 
6529 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6530 				     (rx_q->buf_alloc_num *
6531 				      sizeof(struct dma_desc));
6532 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6533 				       rx_q->rx_tail_addr, chan);
6534 
6535 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6536 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6537 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6538 					      buf_size,
6539 					      rx_q->queue_index);
6540 		} else {
6541 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6542 					      priv->dma_buf_sz,
6543 					      rx_q->queue_index);
6544 		}
6545 
6546 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6547 	}
6548 
6549 	/* DMA TX Channel Configuration */
6550 	for (chan = 0; chan < tx_cnt; chan++) {
6551 		tx_q = &priv->tx_queue[chan];
6552 
6553 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6554 				    tx_q->dma_tx_phy, chan);
6555 
6556 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6557 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6558 				       tx_q->tx_tail_addr, chan);
6559 
6560 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6561 		tx_q->txtimer.function = stmmac_tx_timer;
6562 	}
6563 
6564 	/* Enable the MAC Rx/Tx */
6565 	stmmac_mac_set(priv, priv->ioaddr, true);
6566 
6567 	/* Start Rx & Tx DMA Channels */
6568 	stmmac_start_all_dma(priv);
6569 
6570 	ret = stmmac_request_irq(dev);
6571 	if (ret)
6572 		goto irq_error;
6573 
6574 	/* Enable NAPI process */
6575 	stmmac_enable_all_queues(priv);
6576 	netif_carrier_on(dev);
6577 	netif_tx_start_all_queues(dev);
6578 
6579 	return 0;
6580 
6581 irq_error:
6582 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6583 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
6584 
6585 	stmmac_hw_teardown(dev);
6586 init_error:
6587 	free_dma_desc_resources(priv);
6588 dma_desc_error:
6589 	return ret;
6590 }
6591 
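/* ndo_xsk_wakeup callback: kick the combined RX/TX NAPI of the channel
 * bound to an XSK buffer pool so that pending zero-copy work is processed.
 */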
6592 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6593 {
6594 	struct stmmac_priv *priv = netdev_priv(dev);
6595 	struct stmmac_rx_queue *rx_q;
6596 	struct stmmac_tx_queue *tx_q;
6597 	struct stmmac_channel *ch;
6598 
6599 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6600 	    !netif_carrier_ok(priv->dev))
6601 		return -ENETDOWN;
6602 
6603 	if (!stmmac_xdp_is_enabled(priv))
6604 		return -ENXIO;
6605 
6606 	if (queue >= priv->plat->rx_queues_to_use ||
6607 	    queue >= priv->plat->tx_queues_to_use)
6608 		return -EINVAL;
6609 
6610 	rx_q = &priv->rx_queue[queue];
6611 	tx_q = &priv->tx_queue[queue];
6612 	ch = &priv->channel[queue];
6613 
6614 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6615 		return -ENXIO;
6616 
6617 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6618 		/* EQoS does not have per-DMA channel SW interrupt,
6619 		 * so we schedule RX NAPI straight away.
6620 		 */
6621 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6622 			__napi_schedule(&ch->rxtx_napi);
6623 	}
6624 
6625 	return 0;
6626 }
6627 
6628 static const struct net_device_ops stmmac_netdev_ops = {
6629 	.ndo_open = stmmac_open,
6630 	.ndo_start_xmit = stmmac_xmit,
6631 	.ndo_stop = stmmac_release,
6632 	.ndo_change_mtu = stmmac_change_mtu,
6633 	.ndo_fix_features = stmmac_fix_features,
6634 	.ndo_set_features = stmmac_set_features,
6635 	.ndo_set_rx_mode = stmmac_set_rx_mode,
6636 	.ndo_tx_timeout = stmmac_tx_timeout,
6637 	.ndo_eth_ioctl = stmmac_ioctl,
6638 	.ndo_setup_tc = stmmac_setup_tc,
6639 	.ndo_select_queue = stmmac_select_queue,
6640 #ifdef CONFIG_NET_POLL_CONTROLLER
6641 	.ndo_poll_controller = stmmac_poll_controller,
6642 #endif
6643 	.ndo_set_mac_address = stmmac_set_mac_address,
6644 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6645 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6646 	.ndo_bpf = stmmac_bpf,
6647 	.ndo_xdp_xmit = stmmac_xdp_xmit,
6648 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
6649 };
6650 
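/* Service task helper: if a reset has been requested (e.g. after a fatal
 * error), recover the adapter by closing and re-opening the interface under
 * the rtnl lock.
 */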
6651 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6652 {
6653 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6654 		return;
6655 	if (test_bit(STMMAC_DOWN, &priv->state))
6656 		return;
6657 
6658 	netdev_err(priv->dev, "Reset adapter.\n");
6659 
6660 	rtnl_lock();
6661 	netif_trans_update(priv->dev);
6662 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6663 		usleep_range(1000, 2000);
6664 
6665 	set_bit(STMMAC_DOWN, &priv->state);
6666 	dev_close(priv->dev);
6667 	dev_open(priv->dev, NULL);
6668 	clear_bit(STMMAC_DOWN, &priv->state);
6669 	clear_bit(STMMAC_RESETING, &priv->state);
6670 	rtnl_unlock();
6671 }
6672 
6673 static void stmmac_service_task(struct work_struct *work)
6674 {
6675 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6676 			service_task);
6677 
6678 	stmmac_reset_subtask(priv);
6679 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6680 }
6681 
6682 /**
6683  *  stmmac_hw_init - Init the MAC device
6684  *  @priv: driver private structure
6685  *  Description: this function is to configure the MAC device according to
6686  *  some platform parameters or the HW capability register. It prepares the
6687  *  driver to use either ring or chain modes and to setup either enhanced or
6688  *  normal descriptors.
6689  */
6690 static int stmmac_hw_init(struct stmmac_priv *priv)
6691 {
6692 	int ret;
6693 
6694 	/* dwmac-sun8i only works in chain mode */
6695 	if (priv->plat->has_sun8i)
6696 		chain_mode = 1;
6697 	priv->chain_mode = chain_mode;
6698 
6699 	/* Initialize HW Interface */
6700 	ret = stmmac_hwif_init(priv);
6701 	if (ret)
6702 		return ret;
6703 
6704 	/* Get the HW capability (only on GMAC cores newer than 3.50a) */
6705 	priv->hw_cap_support = stmmac_get_hw_features(priv);
6706 	if (priv->hw_cap_support) {
6707 		dev_info(priv->device, "DMA HW capability register supported\n");
6708 
6709 		/* We can override some gmac/dma configuration fields: e.g.
6710 		 * enh_desc, tx_coe (e.g. that are passed through the
6711 		 * platform) with the values from the HW capability
6712 		 * register (if supported).
6713 		 */
6714 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
6715 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6716 				!priv->plat->use_phy_wol;
6717 		priv->hw->pmt = priv->plat->pmt;
6718 		if (priv->dma_cap.hash_tb_sz) {
6719 			priv->hw->multicast_filter_bins =
6720 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
6721 			priv->hw->mcast_bits_log2 =
6722 					ilog2(priv->hw->multicast_filter_bins);
6723 		}
6724 
6725 		/* TXCOE doesn't work in thresh DMA mode */
6726 		if (priv->plat->force_thresh_dma_mode)
6727 			priv->plat->tx_coe = 0;
6728 		else
6729 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
6730 
6731 		/* In case of GMAC4 rx_coe is from HW cap register. */
6732 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
6733 
6734 		if (priv->dma_cap.rx_coe_type2)
6735 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6736 		else if (priv->dma_cap.rx_coe_type1)
6737 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6738 
6739 	} else {
6740 		dev_info(priv->device, "No HW DMA feature register supported\n");
6741 	}
6742 
6743 	if (priv->plat->rx_coe) {
6744 		priv->hw->rx_csum = priv->plat->rx_coe;
6745 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6746 		if (priv->synopsys_id < DWMAC_CORE_4_00)
6747 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6748 	}
6749 	if (priv->plat->tx_coe)
6750 		dev_info(priv->device, "TX Checksum insertion supported\n");
6751 
6752 	if (priv->plat->pmt) {
6753 		dev_info(priv->device, "Wake-Up On Lan supported\n");
6754 		device_set_wakeup_capable(priv->device, 1);
6755 	}
6756 
6757 	if (priv->dma_cap.tsoen)
6758 		dev_info(priv->device, "TSO supported\n");
6759 
6760 	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6761 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6762 
6763 	/* Run HW quirks, if any */
6764 	if (priv->hwif_quirks) {
6765 		ret = priv->hwif_quirks(priv);
6766 		if (ret)
6767 			return ret;
6768 	}
6769 
6770 	/* Rx Watchdog is available in the COREs newer than the 3.40.
6771 	 * In some cases, for example on buggy HW, this feature
6772 	 * has to be disabled; this can be done by passing the
6773 	 * riwt_off field from the platform.
6774 	 */
6775 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6776 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6777 		priv->use_riwt = 1;
6778 		dev_info(priv->device,
6779 			 "Enable RX Mitigation via HW Watchdog Timer\n");
6780 	}
6781 
6782 	return 0;
6783 }
6784 
6785 static void stmmac_napi_add(struct net_device *dev)
6786 {
6787 	struct stmmac_priv *priv = netdev_priv(dev);
6788 	u32 queue, maxq;
6789 
6790 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6791 
6792 	for (queue = 0; queue < maxq; queue++) {
6793 		struct stmmac_channel *ch = &priv->channel[queue];
6794 
6795 		ch->priv_data = priv;
6796 		ch->index = queue;
6797 		spin_lock_init(&ch->lock);
6798 
6799 		if (queue < priv->plat->rx_queues_to_use) {
6800 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
6801 				       NAPI_POLL_WEIGHT);
6802 		}
6803 		if (queue < priv->plat->tx_queues_to_use) {
6804 			netif_tx_napi_add(dev, &ch->tx_napi,
6805 					  stmmac_napi_poll_tx,
6806 					  NAPI_POLL_WEIGHT);
6807 		}
6808 		if (queue < priv->plat->rx_queues_to_use &&
6809 		    queue < priv->plat->tx_queues_to_use) {
6810 			netif_napi_add(dev, &ch->rxtx_napi,
6811 				       stmmac_napi_poll_rxtx,
6812 				       NAPI_POLL_WEIGHT);
6813 		}
6814 	}
6815 }
6816 
6817 static void stmmac_napi_del(struct net_device *dev)
6818 {
6819 	struct stmmac_priv *priv = netdev_priv(dev);
6820 	u32 queue, maxq;
6821 
6822 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6823 
6824 	for (queue = 0; queue < maxq; queue++) {
6825 		struct stmmac_channel *ch = &priv->channel[queue];
6826 
6827 		if (queue < priv->plat->rx_queues_to_use)
6828 			netif_napi_del(&ch->rx_napi);
6829 		if (queue < priv->plat->tx_queues_to_use)
6830 			netif_napi_del(&ch->tx_napi);
6831 		if (queue < priv->plat->rx_queues_to_use &&
6832 		    queue < priv->plat->tx_queues_to_use) {
6833 			netif_napi_del(&ch->rxtx_napi);
6834 		}
6835 	}
6836 }
6837 
6838 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6839 {
6840 	struct stmmac_priv *priv = netdev_priv(dev);
6841 	int ret = 0;
6842 
6843 	if (netif_running(dev))
6844 		stmmac_release(dev);
6845 
6846 	stmmac_napi_del(dev);
6847 
6848 	priv->plat->rx_queues_to_use = rx_cnt;
6849 	priv->plat->tx_queues_to_use = tx_cnt;
6850 
6851 	stmmac_napi_add(dev);
6852 
6853 	if (netif_running(dev))
6854 		ret = stmmac_open(dev);
6855 
6856 	return ret;
6857 }
6858 
6859 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6860 {
6861 	struct stmmac_priv *priv = netdev_priv(dev);
6862 	int ret = 0;
6863 
6864 	if (netif_running(dev))
6865 		stmmac_release(dev);
6866 
6867 	priv->dma_rx_size = rx_size;
6868 	priv->dma_tx_size = tx_size;
6869 
6870 	if (netif_running(dev))
6871 		ret = stmmac_open(dev);
6872 
6873 	return ret;
6874 }
6875 
6876 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
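/* FPE link-partner handshake task: keep sending verify mPackets until both
 * the local and link-partner stations are ready, then configure frame
 * preemption in the HW and mark both stations as ON.
 */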
6877 static void stmmac_fpe_lp_task(struct work_struct *work)
6878 {
6879 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6880 						fpe_task);
6881 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
6882 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
6883 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
6884 	bool *hs_enable = &fpe_cfg->hs_enable;
6885 	bool *enable = &fpe_cfg->enable;
6886 	int retries = 20;
6887 
6888 	while (retries-- > 0) {
6889 		/* Bail out immediately if FPE handshake is OFF */
6890 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
6891 			break;
6892 
6893 		if (*lo_state == FPE_STATE_ENTERING_ON &&
6894 		    *lp_state == FPE_STATE_ENTERING_ON) {
6895 			stmmac_fpe_configure(priv, priv->ioaddr,
6896 					     priv->plat->tx_queues_to_use,
6897 					     priv->plat->rx_queues_to_use,
6898 					     *enable);
6899 
6900 			netdev_info(priv->dev, "configured FPE\n");
6901 
6902 			*lo_state = FPE_STATE_ON;
6903 			*lp_state = FPE_STATE_ON;
6904 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
6905 			break;
6906 		}
6907 
6908 		if ((*lo_state == FPE_STATE_CAPABLE ||
6909 		     *lo_state == FPE_STATE_ENTERING_ON) &&
6910 		     *lp_state != FPE_STATE_ON) {
6911 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
6912 				    *lo_state, *lp_state);
6913 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6914 						MPACKET_VERIFY);
6915 		}
6916 		/* Sleep then retry */
6917 		msleep(500);
6918 	}
6919 
6920 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
6921 }
6922 
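/* Start or stop the FPE verification handshake with the link partner,
 * depending on @enable.
 */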
6923 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
6924 {
6925 	if (priv->plat->fpe_cfg->hs_enable != enable) {
6926 		if (enable) {
6927 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6928 						MPACKET_VERIFY);
6929 		} else {
6930 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
6931 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
6932 		}
6933 
6934 		priv->plat->fpe_cfg->hs_enable = enable;
6935 	}
6936 }
6937 
6938 /**
6939  * stmmac_dvr_probe
6940  * @device: device pointer
6941  * @plat_dat: platform data pointer
6942  * @res: stmmac resource pointer
6943  * Description: this is the main probe function used to
6944  * call the alloc_etherdev, allocate the priv structure.
6945  * Return:
6946  * returns 0 on success, otherwise errno.
6947  */
6948 int stmmac_dvr_probe(struct device *device,
6949 		     struct plat_stmmacenet_data *plat_dat,
6950 		     struct stmmac_resources *res)
6951 {
6952 	struct net_device *ndev = NULL;
6953 	struct stmmac_priv *priv;
6954 	u32 rxq;
6955 	int i, ret = 0;
6956 
6957 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
6958 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
6959 	if (!ndev)
6960 		return -ENOMEM;
6961 
6962 	SET_NETDEV_DEV(ndev, device);
6963 
6964 	priv = netdev_priv(ndev);
6965 	priv->device = device;
6966 	priv->dev = ndev;
6967 
6968 	stmmac_set_ethtool_ops(ndev);
6969 	priv->pause = pause;
6970 	priv->plat = plat_dat;
6971 	priv->ioaddr = res->addr;
6972 	priv->dev->base_addr = (unsigned long)res->addr;
6973 	priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
6974 
6975 	priv->dev->irq = res->irq;
6976 	priv->wol_irq = res->wol_irq;
6977 	priv->lpi_irq = res->lpi_irq;
6978 	priv->sfty_ce_irq = res->sfty_ce_irq;
6979 	priv->sfty_ue_irq = res->sfty_ue_irq;
6980 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
6981 		priv->rx_irq[i] = res->rx_irq[i];
6982 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
6983 		priv->tx_irq[i] = res->tx_irq[i];
6984 
6985 	if (!is_zero_ether_addr(res->mac))
6986 		eth_hw_addr_set(priv->dev, res->mac);
6987 
6988 	dev_set_drvdata(device, priv->dev);
6989 
6990 	/* Verify driver arguments */
6991 	stmmac_verify_args();
6992 
6993 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
6994 	if (!priv->af_xdp_zc_qps)
6995 		return -ENOMEM;
6996 
6997 	/* Allocate workqueue */
6998 	priv->wq = create_singlethread_workqueue("stmmac_wq");
6999 	if (!priv->wq) {
7000 		dev_err(priv->device, "failed to create workqueue\n");
7001 		return -ENOMEM;
7002 	}
7003 
7004 	INIT_WORK(&priv->service_task, stmmac_service_task);
7005 
7006 	/* Initialize Link Partner FPE workqueue */
7007 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7008 
7009 	/* Override with kernel parameters if supplied XXX CRS XXX
7010 	 * this needs to have multiple instances
7011 	 */
7012 	if ((phyaddr >= 0) && (phyaddr <= 31))
7013 		priv->plat->phy_addr = phyaddr;
7014 
7015 	if (priv->plat->stmmac_rst) {
7016 		ret = reset_control_assert(priv->plat->stmmac_rst);
7017 		reset_control_deassert(priv->plat->stmmac_rst);
7018 		/* Some reset controllers have only reset callback instead of
7019 		 * assert + deassert callbacks pair.
7020 		 */
7021 		if (ret == -ENOTSUPP)
7022 			reset_control_reset(priv->plat->stmmac_rst);
7023 	}
7024 
7025 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7026 	if (ret == -ENOTSUPP)
7027 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7028 			ERR_PTR(ret));
7029 
7030 	/* Init MAC and get the capabilities */
7031 	ret = stmmac_hw_init(priv);
7032 	if (ret)
7033 		goto error_hw_init;
7034 
7035 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7036 	 */
7037 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7038 		priv->plat->dma_cfg->dche = false;
7039 
7040 	stmmac_check_ether_addr(priv);
7041 
7042 	ndev->netdev_ops = &stmmac_netdev_ops;
7043 
7044 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7045 			    NETIF_F_RXCSUM;
7046 
7047 	ret = stmmac_tc_init(priv, priv);
7048 	if (!ret) {
7049 		ndev->hw_features |= NETIF_F_HW_TC;
7050 	}
7051 
7052 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
7053 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7054 		if (priv->plat->has_gmac4)
7055 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7056 		priv->tso = true;
7057 		dev_info(priv->device, "TSO feature enabled\n");
7058 	}
7059 
7060 	if (priv->dma_cap.sphen) {
7061 		ndev->hw_features |= NETIF_F_GRO;
7062 		priv->sph_cap = true;
7063 		priv->sph = priv->sph_cap;
7064 		dev_info(priv->device, "SPH feature enabled\n");
7065 	}
7066 
7067 	/* The current IP register MAC_HW_Feature1[ADDR64] only defines
7068 	 * 32/40/64 bit widths, but some SoCs support other widths: the
7069 	 * i.MX8MP, for example, supports 34 bits, which maps to 40 bits in MAC_HW_Feature1[ADDR64].
7070 	 * So overwrite dma_cap.addr64 according to the real HW design.
7071 	 */
7072 	if (priv->plat->addr64)
7073 		priv->dma_cap.addr64 = priv->plat->addr64;
7074 
7075 	if (priv->dma_cap.addr64) {
7076 		ret = dma_set_mask_and_coherent(device,
7077 				DMA_BIT_MASK(priv->dma_cap.addr64));
7078 		if (!ret) {
7079 			dev_info(priv->device, "Using %d bits DMA width\n",
7080 				 priv->dma_cap.addr64);
7081 
7082 			/*
7083 			 * If more than 32 bits can be addressed, make sure to
7084 			 * enable enhanced addressing mode.
7085 			 */
7086 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7087 				priv->plat->dma_cfg->eame = true;
7088 		} else {
7089 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7090 			if (ret) {
7091 				dev_err(priv->device, "Failed to set DMA Mask\n");
7092 				goto error_hw_init;
7093 			}
7094 
7095 			priv->dma_cap.addr64 = 32;
7096 		}
7097 	}
7098 
7099 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7100 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7101 #ifdef STMMAC_VLAN_TAG_USED
7102 	/* Both mac100 and gmac support receive VLAN tag detection */
7103 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7104 	if (priv->dma_cap.vlhash) {
7105 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7106 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7107 	}
7108 	if (priv->dma_cap.vlins) {
7109 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7110 		if (priv->dma_cap.dvlan)
7111 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7112 	}
7113 #endif
7114 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7115 
7116 	/* Initialize RSS */
7117 	rxq = priv->plat->rx_queues_to_use;
7118 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7119 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7120 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7121 
7122 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7123 		ndev->features |= NETIF_F_RXHASH;
7124 
7125 	/* MTU range: 46 - hw-specific max */
7126 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7127 	if (priv->plat->has_xgmac)
7128 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7129 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7130 		ndev->max_mtu = JUMBO_LEN;
7131 	else
7132 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7133 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu, nor
7134 	 * if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7135 	 */
7136 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7137 	    (priv->plat->maxmtu >= ndev->min_mtu))
7138 		ndev->max_mtu = priv->plat->maxmtu;
7139 	else if (priv->plat->maxmtu < ndev->min_mtu)
7140 		dev_warn(priv->device,
7141 			 "%s: warning: maxmtu having invalid value (%d)\n",
7142 			 __func__, priv->plat->maxmtu);
7143 
7144 	if (flow_ctrl)
7145 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7146 
7147 	/* Setup channels NAPI */
7148 	stmmac_napi_add(ndev);
7149 
7150 	mutex_init(&priv->lock);
7151 
7152 	/* If a specific clk_csr value is passed from the platform, the
7153 	 * CSR Clock Range selection cannot be changed at run-time and
7154 	 * is fixed. Otherwise, the driver will try to set the MDC
7155 	 * clock dynamically according to the actual csr clock
7156 	 * input.
7157 	 */
7158 	if (priv->plat->clk_csr >= 0)
7159 		priv->clk_csr = priv->plat->clk_csr;
7160 	else
7161 		stmmac_clk_csr_set(priv);
7162 
7163 	stmmac_check_pcs_mode(priv);
7164 
7165 	pm_runtime_get_noresume(device);
7166 	pm_runtime_set_active(device);
7167 	pm_runtime_enable(device);
7168 
7169 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7170 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7171 		/* MDIO bus Registration */
7172 		ret = stmmac_mdio_register(ndev);
7173 		if (ret < 0) {
7174 			dev_err(priv->device,
7175 				"%s: MDIO bus (id: %d) registration failed",
7176 				__func__, priv->plat->bus_id);
7177 			goto error_mdio_register;
7178 		}
7179 	}
7180 
7181 	if (priv->plat->speed_mode_2500)
7182 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7183 
7184 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7185 		ret = stmmac_xpcs_setup(priv->mii);
7186 		if (ret)
7187 			goto error_xpcs_setup;
7188 	}
7189 
7190 	ret = stmmac_phy_setup(priv);
7191 	if (ret) {
7192 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7193 		goto error_phy_setup;
7194 	}
7195 
7196 	ret = register_netdev(ndev);
7197 	if (ret) {
7198 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7199 			__func__, ret);
7200 		goto error_netdev_register;
7201 	}
7202 
7203 	if (priv->plat->serdes_powerup) {
7204 		ret = priv->plat->serdes_powerup(ndev,
7205 						 priv->plat->bsp_priv);
7206 
7207 		if (ret < 0)
7208 			goto error_serdes_powerup;
7209 	}
7210 
7211 #ifdef CONFIG_DEBUG_FS
7212 	stmmac_init_fs(ndev);
7213 #endif
7214 
7215 	if (priv->plat->dump_debug_regs)
7216 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7217 
7218 	/* Let pm_runtime_put() disable the clocks.
7219 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7220 	 */
7221 	pm_runtime_put(device);
7222 
7223 	return ret;
7224 
7225 error_serdes_powerup:
7226 	unregister_netdev(ndev);
7227 error_netdev_register:
7228 	phylink_destroy(priv->phylink);
7229 error_xpcs_setup:
7230 error_phy_setup:
7231 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7232 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7233 		stmmac_mdio_unregister(ndev);
7234 error_mdio_register:
7235 	stmmac_napi_del(ndev);
7236 error_hw_init:
7237 	destroy_workqueue(priv->wq);
7238 	bitmap_free(priv->af_xdp_zc_qps);
7239 
7240 	return ret;
7241 }
7242 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
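
/*
 * Example (editorial, not part of the driver): a minimal sketch of how a
 * platform glue driver typically feeds stmmac_dvr_probe().  The "foo" names
 * are hypothetical and the devicetree helpers come from stmmac_platform.c;
 * real glue code usually adds clock and PHY-mode handling of its own.
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat;
 *		struct stmmac_resources stmmac_res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *		if (ret)
 *			return ret;
 *
 *		plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
 *		if (IS_ERR(plat_dat))
 *			return PTR_ERR(plat_dat);
 *
 *		ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *		if (ret)
 *			stmmac_remove_config_dt(pdev, plat_dat);
 *
 *		return ret;
 *	}
 */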
7243 
7244 /**
7245  * stmmac_dvr_remove
7246  * @dev: device pointer
7247  * Description: this function resets the TX/RX processes, disables the MAC
7248  * RX/TX, changes the link status and releases the DMA descriptor rings.
7249  */
7250 int stmmac_dvr_remove(struct device *dev)
7251 {
7252 	struct net_device *ndev = dev_get_drvdata(dev);
7253 	struct stmmac_priv *priv = netdev_priv(ndev);
7254 
7255 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7256 
7257 	stmmac_stop_all_dma(priv);
7258 	stmmac_mac_set(priv, priv->ioaddr, false);
7259 	netif_carrier_off(ndev);
7260 	unregister_netdev(ndev);
7261 
7262 	/* Serdes power down needs to happen after the VLAN filter
7263 	 * is deleted, which is triggered by unregister_netdev().
7264 	 */
7265 	if (priv->plat->serdes_powerdown)
7266 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7267 
7268 #ifdef CONFIG_DEBUG_FS
7269 	stmmac_exit_fs(ndev);
7270 #endif
7271 	phylink_destroy(priv->phylink);
7272 	if (priv->plat->stmmac_rst)
7273 		reset_control_assert(priv->plat->stmmac_rst);
7274 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7275 	pm_runtime_put(dev);
7276 	pm_runtime_disable(dev);
7277 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7278 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7279 		stmmac_mdio_unregister(ndev);
7280 	destroy_workqueue(priv->wq);
7281 	mutex_destroy(&priv->lock);
7282 	bitmap_free(priv->af_xdp_zc_qps);
7283 
7284 	return 0;
7285 }
7286 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
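
/*
 * Example (editorial, not part of the driver): the matching remove callback
 * in a glue driver is expected to undo the probe above, typically through
 * the stmmac_pltfr_remove() wrapper which ends up in stmmac_dvr_remove().
 * The "foo" name is hypothetical.
 *
 *	static int foo_dwmac_remove(struct platform_device *pdev)
 *	{
 *		return stmmac_pltfr_remove(pdev);
 *	}
 */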
7287 
7288 /**
7289  * stmmac_suspend - suspend callback
7290  * @dev: device pointer
7291  * Description: this function suspends the device. It is called by the
7292  * platform driver to stop the network queue, program the PMT register
7293  * (for WoL) and clean up and release the driver resources.
7294  */
7295 int stmmac_suspend(struct device *dev)
7296 {
7297 	struct net_device *ndev = dev_get_drvdata(dev);
7298 	struct stmmac_priv *priv = netdev_priv(ndev);
7299 	u32 chan;
7300 
7301 	if (!ndev || !netif_running(ndev))
7302 		return 0;
7303 
7304 	mutex_lock(&priv->lock);
7305 
7306 	netif_device_detach(ndev);
7307 
7308 	stmmac_disable_all_queues(priv);
7309 
7310 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7311 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
7312 
7313 	if (priv->eee_enabled) {
7314 		priv->tx_path_in_lpi_mode = false;
7315 		del_timer_sync(&priv->eee_ctrl_timer);
7316 	}
7317 
7318 	/* Stop TX/RX DMA */
7319 	stmmac_stop_all_dma(priv);
7320 
7321 	if (priv->plat->serdes_powerdown)
7322 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7323 
7324 	/* Enable Power down mode by programming the PMT regs */
7325 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7326 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7327 		priv->irq_wake = 1;
7328 	} else {
7329 		stmmac_mac_set(priv, priv->ioaddr, false);
7330 		pinctrl_pm_select_sleep_state(priv->device);
7331 	}
7332 
7333 	mutex_unlock(&priv->lock);
7334 
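	/* Tell phylink whether the MAC handles Wake-on-LAN (mac_wol): if so the
	 * link is kept up for wake-up packets, otherwise the PHY is suspended
	 * normally (optionally at a reduced speed).
	 */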
7335 	rtnl_lock();
7336 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7337 		phylink_suspend(priv->phylink, true);
7338 	} else {
7339 		if (device_may_wakeup(priv->device))
7340 			phylink_speed_down(priv->phylink, false);
7341 		phylink_suspend(priv->phylink, false);
7342 	}
7343 	rtnl_unlock();
7344 
7345 	if (priv->dma_cap.fpesel) {
7346 		/* Disable FPE */
7347 		stmmac_fpe_configure(priv, priv->ioaddr,
7348 				     priv->plat->tx_queues_to_use,
7349 				     priv->plat->rx_queues_to_use, false);
7350 
7351 		stmmac_fpe_handshake(priv, false);
7352 		stmmac_fpe_stop_wq(priv);
7353 	}
7354 
7355 	priv->speed = SPEED_UNKNOWN;
7356 	return 0;
7357 }
7358 EXPORT_SYMBOL_GPL(stmmac_suspend);
7359 
7360 /**
7361  * stmmac_reset_queues_param - reset queue parameters
7362  * @priv: device pointer
7363  */
7364 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7365 {
7366 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7367 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7368 	u32 queue;
7369 
7370 	for (queue = 0; queue < rx_cnt; queue++) {
7371 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
7372 
7373 		rx_q->cur_rx = 0;
7374 		rx_q->dirty_rx = 0;
7375 	}
7376 
7377 	for (queue = 0; queue < tx_cnt; queue++) {
7378 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
7379 
7380 		tx_q->cur_tx = 0;
7381 		tx_q->dirty_tx = 0;
7382 		tx_q->mss = 0;
7383 
7384 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7385 	}
7386 }
7387 
7388 /**
7389  * stmmac_resume - resume callback
7390  * @dev: device pointer
7391  * Description: on resume this function is invoked to set up the DMA and
7392  * CORE in a usable state.
7393  */
7394 int stmmac_resume(struct device *dev)
7395 {
7396 	struct net_device *ndev = dev_get_drvdata(dev);
7397 	struct stmmac_priv *priv = netdev_priv(ndev);
7398 	int ret;
7399 
7400 	if (!netif_running(ndev))
7401 		return 0;
7402 
7403 	/* The Power Down bit in the PMT register is cleared
7404 	 * automatically as soon as a magic packet or a Wake-up frame
7405 	 * is received. Anyway, it's better to manually clear
7406 	 * this bit because it can generate problems while resuming
7407 	 * from another device (e.g. serial console).
7408 	 */
7409 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7410 		mutex_lock(&priv->lock);
7411 		stmmac_pmt(priv, priv->hw, 0);
7412 		mutex_unlock(&priv->lock);
7413 		priv->irq_wake = 0;
7414 	} else {
7415 		pinctrl_pm_select_default_state(priv->device);
7416 		/* reset the phy so that it's ready */
7417 		if (priv->mii)
7418 			stmmac_mdio_reset(priv->mii);
7419 	}
7420 
7421 	if (priv->plat->serdes_powerup) {
7422 		ret = priv->plat->serdes_powerup(ndev,
7423 						 priv->plat->bsp_priv);
7424 
7425 		if (ret < 0)
7426 			return ret;
7427 	}
7428 
7429 	rtnl_lock();
7430 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7431 		phylink_resume(priv->phylink);
7432 	} else {
7433 		phylink_resume(priv->phylink);
7434 		if (device_may_wakeup(priv->device))
7435 			phylink_speed_up(priv->phylink);
7436 	}
7437 	rtnl_unlock();
7438 
7439 	rtnl_lock();
7440 	mutex_lock(&priv->lock);
7441 
7442 	stmmac_reset_queues_param(priv);
7443 
7444 	stmmac_free_tx_skbufs(priv);
7445 	stmmac_clear_descriptors(priv);
7446 
7447 	stmmac_hw_setup(ndev, false);
7448 	stmmac_init_coalesce(priv);
7449 	stmmac_set_rx_mode(ndev);
7450 
7451 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7452 
7453 	stmmac_enable_all_queues(priv);
7454 
7455 	mutex_unlock(&priv->lock);
7456 	rtnl_unlock();
7457 
7458 	netif_device_attach(ndev);
7459 
7460 	return 0;
7461 }
7462 EXPORT_SYMBOL_GPL(stmmac_resume);
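
/*
 * Example (editorial, not part of the driver): stmmac_suspend() and
 * stmmac_resume() already match the dev_pm_ops callback prototypes, so a
 * glue driver with no extra clock handling of its own could wire them up
 * directly; most platform glue instead uses the stmmac_pltfr_pm_ops wrapper
 * from stmmac_platform.c.  The "foo" names are hypothetical.
 *
 *	static SIMPLE_DEV_PM_OPS(foo_dwmac_pm_ops, stmmac_suspend, stmmac_resume);
 *
 *	static struct platform_driver foo_dwmac_driver = {
 *		.probe	= foo_dwmac_probe,
 *		.remove	= foo_dwmac_remove,
 *		.driver	= {
 *			.name	= "foo-dwmac",
 *			.pm	= &foo_dwmac_pm_ops,
 *		},
 *	};
 */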
7463 
7464 #ifndef MODULE
7465 static int __init stmmac_cmdline_opt(char *str)
7466 {
7467 	char *opt;
7468 
7469 	if (!str || !*str)
7470 		return -EINVAL;
7471 	while ((opt = strsep(&str, ",")) != NULL) {
7472 		if (!strncmp(opt, "debug:", 6)) {
7473 			if (kstrtoint(opt + 6, 0, &debug))
7474 				goto err;
7475 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7476 			if (kstrtoint(opt + 8, 0, &phyaddr))
7477 				goto err;
7478 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7479 			if (kstrtoint(opt + 7, 0, &buf_sz))
7480 				goto err;
7481 		} else if (!strncmp(opt, "tc:", 3)) {
7482 			if (kstrtoint(opt + 3, 0, &tc))
7483 				goto err;
7484 		} else if (!strncmp(opt, "watchdog:", 9)) {
7485 			if (kstrtoint(opt + 9, 0, &watchdog))
7486 				goto err;
7487 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7488 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7489 				goto err;
7490 		} else if (!strncmp(opt, "pause:", 6)) {
7491 			if (kstrtoint(opt + 6, 0, &pause))
7492 				goto err;
7493 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7494 			if (kstrtoint(opt + 10, 0, &eee_timer))
7495 				goto err;
7496 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7497 			if (kstrtoint(opt + 11, 0, &chain_mode))
7498 				goto err;
7499 		}
7500 	}
7501 	return 0;
7502 
7503 err:
7504 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7505 	return -EINVAL;
7506 }
7507 
7508 __setup("stmmaceth=", stmmac_cmdline_opt);
7509 #endif /* MODULE */
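
/*
 * Example (editorial): when the driver is built in, the parser above accepts
 * a comma-separated list on the kernel command line, using the same integer
 * values as the corresponding module parameters, e.g.:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,pause:65535
 */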
7510 
7511 static int __init stmmac_init(void)
7512 {
7513 #ifdef CONFIG_DEBUG_FS
7514 	/* Create debugfs main directory if it doesn't exist yet */
7515 	if (!stmmac_fs_dir)
7516 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7517 	register_netdevice_notifier(&stmmac_notifier);
7518 #endif
7519 
7520 	return 0;
7521 }
7522 
7523 static void __exit stmmac_exit(void)
7524 {
7525 #ifdef CONFIG_DEBUG_FS
7526 	unregister_netdevice_notifier(&stmmac_notifier);
7527 	debugfs_remove_recursive(stmmac_fs_dir);
7528 #endif
7529 }
7530 
7531 module_init(stmmac_init)
7532 module_exit(stmmac_exit)
7533 
7534 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7535 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7536 MODULE_LICENSE("GPL");
7537