xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 778e73d2411abc8f3a2d60dbf038acaec218792e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
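/* STMMAC_ALIGN() rounds up first to the cache line size and then to a 16-byte
 * boundary; e.g. with 64-byte cache lines, STMMAC_ALIGN(1500) = 1536.
 */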
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allows the user to force use of the chain mode instead of the ring
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
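
/* Example (sketch, assuming the core is built as the "stmmac" module): the
 * parameters above can be set at load time, e.g.
 *	modprobe stmmac chain_mode=1 eee_timer=2000
 * or on the kernel command line when built in, e.g. stmmac.eee_timer=2000.
 */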
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Otherwise the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* The platform-provided default clk_csr is assumed valid for all
308 	 * cases except the ones handled below.
309 	 * For values higher than the IEEE 802.3 specified frequency we
310 	 * cannot estimate the proper divider, as the frequency of
311 	 * clk_csr_i is not known, so we do not change the default
312 	 * divider.
313 	 */
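	/* For example, a 75 MHz csr clock falls in the 60-100 MHz range below;
	 * per the databook that range selects an MDC divider of 42, giving
	 * MDC ~= 1.78 MHz, below the 2.5 MHz MDIO limit.
	 */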
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
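	/* Ring space: one descriptor is always kept unused so that
	 * cur_tx == dirty_tx unambiguously means "empty". For example, with
	 * dma_tx_size = 512, cur_tx = 10 and dirty_tx = 5,
	 * avail = 512 - 10 + 5 - 1 = 506.
	 */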
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function is to verify and enter in LPI mode in case of
407  * EEE.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function exits and disables EEE when the LPI state
433  * is active. It is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and we are not already in LPI state,
452  *  then the MAC transmitter can be moved to LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts
468  *  the related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* When using PCS we cannot deal with the phy registers at this stage,
475 	 * so we do not support extra features like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
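	/* GMAC4+ cores can enter LPI via the HW LPI entry timer when the
	 * requested tx_lpi_timer fits in the entry timer field (STMMAC_ET_MAX);
	 * otherwise fall back to the SW eee_ctrl_timer.
	 */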
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function reads the timestamp from the descriptor, performs some
534  * sanity checks and passes it to the stack.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function will read received packet's timestamp from the descriptor
577  * and pass it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL-specific structure that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing (TX)
614  *  and incoming (RX) packet timestamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate -ve integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* do not time stamp any incoming packet */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.1AS, any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.1AS, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.1AS, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
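
/* Typical userspace usage (sketch, not part of this driver): the settings
 * handled above are driven via the SIOCSHWTSTAMP ioctl, e.g.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */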
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL-specific structure that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * It will be rerun after resuming from suspend, in which case the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* Calculate the default addend value. The formula is:
858 	 * addend = (2^32) / freq_div_ratio
859 	 * where freq_div_ratio = 1e9 ns / sec_inc, i.e. the number of
860 	 * sub-second increments needed per second.
861 	 */
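	/* Worked example (assuming clk_ptp_rate = 50 MHz and a sub-second
	 * increment of 40 ns): freq_div_ratio = 1e9 / 40 = 25,000,000 and
	 * addend = (25,000,000 << 32) / 50,000,000 = 0x80000000.
	 */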
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968 					MPACKET_VERIFY);
969 	} else {
970 		*lo_state = FPE_STATE_OFF;
971 		*lp_state = FPE_STATE_OFF;
972 	}
973 }
974 
975 static void stmmac_mac_link_down(struct phylink_config *config,
976 				 unsigned int mode, phy_interface_t interface)
977 {
978 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
979 
980 	stmmac_mac_set(priv, priv->ioaddr, false);
981 	priv->eee_active = false;
982 	priv->tx_lpi_enabled = false;
983 	priv->eee_enabled = stmmac_eee_init(priv);
984 	stmmac_set_eee_pls(priv, priv->hw, false);
985 
986 	if (priv->dma_cap.fpesel)
987 		stmmac_fpe_link_state_handle(priv, false);
988 }
989 
990 static void stmmac_mac_link_up(struct phylink_config *config,
991 			       struct phy_device *phy,
992 			       unsigned int mode, phy_interface_t interface,
993 			       int speed, int duplex,
994 			       bool tx_pause, bool rx_pause)
995 {
996 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997 	u32 old_ctrl, ctrl;
998 
999 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000 	    priv->plat->serdes_powerup)
1001 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002 
1003 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1005 
1006 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1007 		switch (speed) {
1008 		case SPEED_10000:
1009 			ctrl |= priv->hw->link.xgmii.speed10000;
1010 			break;
1011 		case SPEED_5000:
1012 			ctrl |= priv->hw->link.xgmii.speed5000;
1013 			break;
1014 		case SPEED_2500:
1015 			ctrl |= priv->hw->link.xgmii.speed2500;
1016 			break;
1017 		default:
1018 			return;
1019 		}
1020 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1021 		switch (speed) {
1022 		case SPEED_100000:
1023 			ctrl |= priv->hw->link.xlgmii.speed100000;
1024 			break;
1025 		case SPEED_50000:
1026 			ctrl |= priv->hw->link.xlgmii.speed50000;
1027 			break;
1028 		case SPEED_40000:
1029 			ctrl |= priv->hw->link.xlgmii.speed40000;
1030 			break;
1031 		case SPEED_25000:
1032 			ctrl |= priv->hw->link.xlgmii.speed25000;
1033 			break;
1034 		case SPEED_10000:
1035 			ctrl |= priv->hw->link.xgmii.speed10000;
1036 			break;
1037 		case SPEED_2500:
1038 			ctrl |= priv->hw->link.speed2500;
1039 			break;
1040 		case SPEED_1000:
1041 			ctrl |= priv->hw->link.speed1000;
1042 			break;
1043 		default:
1044 			return;
1045 		}
1046 	} else {
1047 		switch (speed) {
1048 		case SPEED_2500:
1049 			ctrl |= priv->hw->link.speed2500;
1050 			break;
1051 		case SPEED_1000:
1052 			ctrl |= priv->hw->link.speed1000;
1053 			break;
1054 		case SPEED_100:
1055 			ctrl |= priv->hw->link.speed100;
1056 			break;
1057 		case SPEED_10:
1058 			ctrl |= priv->hw->link.speed10;
1059 			break;
1060 		default:
1061 			return;
1062 		}
1063 	}
1064 
1065 	priv->speed = speed;
1066 
1067 	if (priv->plat->fix_mac_speed)
1068 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1069 
1070 	if (!duplex)
1071 		ctrl &= ~priv->hw->link.duplex;
1072 	else
1073 		ctrl |= priv->hw->link.duplex;
1074 
1075 	/* Flow Control operation */
1076 	if (rx_pause && tx_pause)
1077 		priv->flow_ctrl = FLOW_AUTO;
1078 	else if (rx_pause && !tx_pause)
1079 		priv->flow_ctrl = FLOW_RX;
1080 	else if (!rx_pause && tx_pause)
1081 		priv->flow_ctrl = FLOW_TX;
1082 	else
1083 		priv->flow_ctrl = FLOW_OFF;
1084 
1085 	stmmac_mac_flow_ctrl(priv, duplex);
1086 
1087 	if (ctrl != old_ctrl)
1088 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089 
1090 	stmmac_mac_set(priv, priv->ioaddr, true);
1091 	if (phy && priv->dma_cap.eee) {
1092 		priv->eee_active =
1093 			phy_init_eee(phy, !(priv->plat->flags &
1094 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1095 		priv->eee_enabled = stmmac_eee_init(priv);
1096 		priv->tx_lpi_enabled = priv->eee_enabled;
1097 		stmmac_set_eee_pls(priv, priv->hw, true);
1098 	}
1099 
1100 	if (priv->dma_cap.fpesel)
1101 		stmmac_fpe_link_state_handle(priv, true);
1102 
1103 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1104 		stmmac_hwtstamp_correct_latency(priv, priv);
1105 }
1106 
1107 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1108 	.mac_select_pcs = stmmac_mac_select_pcs,
1109 	.mac_config = stmmac_mac_config,
1110 	.mac_link_down = stmmac_mac_link_down,
1111 	.mac_link_up = stmmac_mac_link_up,
1112 };
1113 
1114 /**
1115  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116  * @priv: driver private structure
1117  * Description: this is to verify if the HW supports the PCS.
1118  * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1119  * configured for the TBI, RTBI, or SGMII PHY interface.
1120  */
1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122 {
1123 	int interface = priv->plat->mac_interface;
1124 
1125 	if (priv->dma_cap.pcs) {
1126 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_RGMII;
1132 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134 			priv->hw->pcs = STMMAC_PCS_SGMII;
1135 		}
1136 	}
1137 }
1138 
1139 /**
1140  * stmmac_init_phy - PHY initialization
1141  * @dev: net device structure
1142  * Description: it initializes the driver's PHY state, and attaches the PHY
1143  * to the MAC driver.
1144  *  Return value:
1145  *  0 on success
1146  */
1147 static int stmmac_init_phy(struct net_device *dev)
1148 {
1149 	struct stmmac_priv *priv = netdev_priv(dev);
1150 	struct fwnode_handle *phy_fwnode;
1151 	struct fwnode_handle *fwnode;
1152 	int ret;
1153 
1154 	if (!phylink_expects_phy(priv->phylink))
1155 		return 0;
1156 
1157 	fwnode = priv->plat->port_node;
1158 	if (!fwnode)
1159 		fwnode = dev_fwnode(priv->device);
1160 
1161 	if (fwnode)
1162 		phy_fwnode = fwnode_get_phy_node(fwnode);
1163 	else
1164 		phy_fwnode = NULL;
1165 
1166 	/* Some DT bindings do not set up the PHY handle. Let's try to
1167 	 * parse it manually.
1168 	 */
1169 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170 		int addr = priv->plat->phy_addr;
1171 		struct phy_device *phydev;
1172 
1173 		if (addr < 0) {
1174 			netdev_err(priv->dev, "no phy found\n");
1175 			return -ENODEV;
1176 		}
1177 
1178 		phydev = mdiobus_get_phy(priv->mii, addr);
1179 		if (!phydev) {
1180 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181 			return -ENODEV;
1182 		}
1183 
1184 		ret = phylink_connect_phy(priv->phylink, phydev);
1185 	} else {
1186 		fwnode_handle_put(phy_fwnode);
1187 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 	}
1189 
1190 	if (!priv->plat->pmt) {
1191 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192 
1193 		phylink_ethtool_get_wol(priv->phylink, &wol);
1194 		device_set_wakeup_capable(priv->device, !!wol.supported);
1195 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 	}
1197 
1198 	return ret;
1199 }
1200 
1201 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1202 {
1203 	/* Half-duplex can only work with a single TX queue */
1204 	if (priv->plat->tx_queues_to_use > 1)
1205 		priv->phylink_config.mac_capabilities &=
1206 			~(MAC_10HD | MAC_100HD | MAC_1000HD);
1207 	else
1208 		priv->phylink_config.mac_capabilities |=
1209 			(MAC_10HD | MAC_100HD | MAC_1000HD);
1210 }
1211 
1212 static int stmmac_phy_setup(struct stmmac_priv *priv)
1213 {
1214 	struct stmmac_mdio_bus_data *mdio_bus_data;
1215 	int mode = priv->plat->phy_interface;
1216 	struct fwnode_handle *fwnode;
1217 	struct phylink *phylink;
1218 	int max_speed;
1219 
1220 	priv->phylink_config.dev = &priv->dev->dev;
1221 	priv->phylink_config.type = PHYLINK_NETDEV;
1222 	priv->phylink_config.mac_managed_pm = true;
1223 
1224 	mdio_bus_data = priv->plat->mdio_bus_data;
1225 	if (mdio_bus_data)
1226 		priv->phylink_config.ovr_an_inband =
1227 			mdio_bus_data->xpcs_an_inband;
1228 
1229 	/* Set the platform/firmware specified interface mode. Note, phylink
1230 	 * deals with the PHY interface mode, not the MAC interface mode.
1231 	 */
1232 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1233 
1234 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1235 	if (priv->hw->xpcs)
1236 		xpcs_get_interfaces(priv->hw->xpcs,
1237 				    priv->phylink_config.supported_interfaces);
1238 
1239 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1240 						MAC_10FD | MAC_100FD |
1241 						MAC_1000FD;
1242 
1243 	stmmac_set_half_duplex(priv);
1244 
1245 	/* Get the MAC specific capabilities */
1246 	stmmac_mac_phylink_get_caps(priv);
1247 
1248 	max_speed = priv->plat->max_speed;
1249 	if (max_speed)
1250 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1251 
1252 	fwnode = priv->plat->port_node;
1253 	if (!fwnode)
1254 		fwnode = dev_fwnode(priv->device);
1255 
1256 	phylink = phylink_create(&priv->phylink_config, fwnode,
1257 				 mode, &stmmac_phylink_mac_ops);
1258 	if (IS_ERR(phylink))
1259 		return PTR_ERR(phylink);
1260 
1261 	priv->phylink = phylink;
1262 	return 0;
1263 }
1264 
1265 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1266 				    struct stmmac_dma_conf *dma_conf)
1267 {
1268 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1269 	unsigned int desc_size;
1270 	void *head_rx;
1271 	u32 queue;
1272 
1273 	/* Display RX rings */
1274 	for (queue = 0; queue < rx_cnt; queue++) {
1275 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1276 
1277 		pr_info("\tRX Queue %u rings\n", queue);
1278 
1279 		if (priv->extend_desc) {
1280 			head_rx = (void *)rx_q->dma_erx;
1281 			desc_size = sizeof(struct dma_extended_desc);
1282 		} else {
1283 			head_rx = (void *)rx_q->dma_rx;
1284 			desc_size = sizeof(struct dma_desc);
1285 		}
1286 
1287 		/* Display RX ring */
1288 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1289 				    rx_q->dma_rx_phy, desc_size);
1290 	}
1291 }
1292 
1293 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1294 				    struct stmmac_dma_conf *dma_conf)
1295 {
1296 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1297 	unsigned int desc_size;
1298 	void *head_tx;
1299 	u32 queue;
1300 
1301 	/* Display TX rings */
1302 	for (queue = 0; queue < tx_cnt; queue++) {
1303 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1304 
1305 		pr_info("\tTX Queue %d rings\n", queue);
1306 
1307 		if (priv->extend_desc) {
1308 			head_tx = (void *)tx_q->dma_etx;
1309 			desc_size = sizeof(struct dma_extended_desc);
1310 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1311 			head_tx = (void *)tx_q->dma_entx;
1312 			desc_size = sizeof(struct dma_edesc);
1313 		} else {
1314 			head_tx = (void *)tx_q->dma_tx;
1315 			desc_size = sizeof(struct dma_desc);
1316 		}
1317 
1318 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1319 				    tx_q->dma_tx_phy, desc_size);
1320 	}
1321 }
1322 
1323 static void stmmac_display_rings(struct stmmac_priv *priv,
1324 				 struct stmmac_dma_conf *dma_conf)
1325 {
1326 	/* Display RX ring */
1327 	stmmac_display_rx_rings(priv, dma_conf);
1328 
1329 	/* Display TX ring */
1330 	stmmac_display_tx_rings(priv, dma_conf);
1331 }
1332 
1333 static int stmmac_set_bfsize(int mtu, int bufsize)
1334 {
1335 	int ret = bufsize;
1336 
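	/* For example, an MTU of 3000 is below BUF_SIZE_4KiB but at least
	 * BUF_SIZE_2KiB, so a 4 KiB DMA buffer is selected.
	 */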
1337 	if (mtu >= BUF_SIZE_8KiB)
1338 		ret = BUF_SIZE_16KiB;
1339 	else if (mtu >= BUF_SIZE_4KiB)
1340 		ret = BUF_SIZE_8KiB;
1341 	else if (mtu >= BUF_SIZE_2KiB)
1342 		ret = BUF_SIZE_4KiB;
1343 	else if (mtu > DEFAULT_BUFSIZE)
1344 		ret = BUF_SIZE_2KiB;
1345 	else
1346 		ret = DEFAULT_BUFSIZE;
1347 
1348 	return ret;
1349 }
1350 
1351 /**
1352  * stmmac_clear_rx_descriptors - clear RX descriptors
1353  * @priv: driver private structure
1354  * @dma_conf: structure to take the dma data
1355  * @queue: RX queue index
1356  * Description: this function is called to clear the RX descriptors
1357  * whether basic or extended descriptors are in use.
1358  */
1359 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1360 					struct stmmac_dma_conf *dma_conf,
1361 					u32 queue)
1362 {
1363 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1364 	int i;
1365 
1366 	/* Clear the RX descriptors */
1367 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1368 		if (priv->extend_desc)
1369 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1370 					priv->use_riwt, priv->mode,
1371 					(i == dma_conf->dma_rx_size - 1),
1372 					dma_conf->dma_buf_sz);
1373 		else
1374 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1375 					priv->use_riwt, priv->mode,
1376 					(i == dma_conf->dma_rx_size - 1),
1377 					dma_conf->dma_buf_sz);
1378 }
1379 
1380 /**
1381  * stmmac_clear_tx_descriptors - clear tx descriptors
1382  * @priv: driver private structure
1383  * @dma_conf: structure to take the dma data
1384  * @queue: TX queue index.
1385  * Description: this function is called to clear the TX descriptors
1386  * whether basic or extended descriptors are in use.
1387  */
1388 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1389 					struct stmmac_dma_conf *dma_conf,
1390 					u32 queue)
1391 {
1392 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1393 	int i;
1394 
1395 	/* Clear the TX descriptors */
1396 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1397 		int last = (i == (dma_conf->dma_tx_size - 1));
1398 		struct dma_desc *p;
1399 
1400 		if (priv->extend_desc)
1401 			p = &tx_q->dma_etx[i].basic;
1402 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1403 			p = &tx_q->dma_entx[i].basic;
1404 		else
1405 			p = &tx_q->dma_tx[i];
1406 
1407 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1408 	}
1409 }
1410 
1411 /**
1412  * stmmac_clear_descriptors - clear descriptors
1413  * @priv: driver private structure
1414  * @dma_conf: structure to take the dma data
1415  * Description: this function is called to clear the TX and RX descriptors
1416  * whether basic or extended descriptors are in use.
1417  */
1418 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1419 				     struct stmmac_dma_conf *dma_conf)
1420 {
1421 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1422 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1423 	u32 queue;
1424 
1425 	/* Clear the RX descriptors */
1426 	for (queue = 0; queue < rx_queue_cnt; queue++)
1427 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1428 
1429 	/* Clear the TX descriptors */
1430 	for (queue = 0; queue < tx_queue_cnt; queue++)
1431 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1432 }
1433 
1434 /**
1435  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1436  * @priv: driver private structure
1437  * @dma_conf: structure to take the dma data
1438  * @p: descriptor pointer
1439  * @i: descriptor index
1440  * @flags: gfp flag
1441  * @queue: RX queue index
1442  * Description: this function is called to allocate a receive buffer, perform
1443  * the DMA mapping and initialize the descriptor.
1444  */
1445 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1446 				  struct stmmac_dma_conf *dma_conf,
1447 				  struct dma_desc *p,
1448 				  int i, gfp_t flags, u32 queue)
1449 {
1450 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1451 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1452 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1453 
1454 	if (priv->dma_cap.host_dma_width <= 32)
1455 		gfp |= GFP_DMA32;
1456 
1457 	if (!buf->page) {
1458 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1459 		if (!buf->page)
1460 			return -ENOMEM;
1461 		buf->page_offset = stmmac_rx_offset(priv);
1462 	}
1463 
1464 	if (priv->sph && !buf->sec_page) {
1465 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1466 		if (!buf->sec_page)
1467 			return -ENOMEM;
1468 
1469 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1470 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1471 	} else {
1472 		buf->sec_page = NULL;
1473 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1474 	}
1475 
1476 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1477 
1478 	stmmac_set_desc_addr(priv, p, buf->addr);
1479 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1480 		stmmac_init_desc3(priv, p);
1481 
1482 	return 0;
1483 }
1484 
1485 /**
1486  * stmmac_free_rx_buffer - free RX dma buffers
1487  * @priv: private structure
1488  * @rx_q: RX queue
1489  * @i: buffer index.
1490  */
1491 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1492 				  struct stmmac_rx_queue *rx_q,
1493 				  int i)
1494 {
1495 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1496 
1497 	if (buf->page)
1498 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1499 	buf->page = NULL;
1500 
1501 	if (buf->sec_page)
1502 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1503 	buf->sec_page = NULL;
1504 }
1505 
1506 /**
1507  * stmmac_free_tx_buffer - free TX dma buffers
1508  * @priv: private structure
1509  * @dma_conf: structure to take the dma data
1510  * @queue: TX queue index
1511  * @i: buffer index.
1512  */
1513 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1514 				  struct stmmac_dma_conf *dma_conf,
1515 				  u32 queue, int i)
1516 {
1517 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1518 
1519 	if (tx_q->tx_skbuff_dma[i].buf &&
1520 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1521 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1522 			dma_unmap_page(priv->device,
1523 				       tx_q->tx_skbuff_dma[i].buf,
1524 				       tx_q->tx_skbuff_dma[i].len,
1525 				       DMA_TO_DEVICE);
1526 		else
1527 			dma_unmap_single(priv->device,
1528 					 tx_q->tx_skbuff_dma[i].buf,
1529 					 tx_q->tx_skbuff_dma[i].len,
1530 					 DMA_TO_DEVICE);
1531 	}
1532 
1533 	if (tx_q->xdpf[i] &&
1534 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1535 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1536 		xdp_return_frame(tx_q->xdpf[i]);
1537 		tx_q->xdpf[i] = NULL;
1538 	}
1539 
1540 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1541 		tx_q->xsk_frames_done++;
1542 
1543 	if (tx_q->tx_skbuff[i] &&
1544 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1545 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1546 		tx_q->tx_skbuff[i] = NULL;
1547 	}
1548 
1549 	tx_q->tx_skbuff_dma[i].buf = 0;
1550 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1551 }
1552 
1553 /**
1554  * dma_free_rx_skbufs - free RX dma buffers
1555  * @priv: private structure
1556  * @dma_conf: structure to take the dma data
1557  * @queue: RX queue index
1558  */
1559 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1560 			       struct stmmac_dma_conf *dma_conf,
1561 			       u32 queue)
1562 {
1563 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564 	int i;
1565 
1566 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1567 		stmmac_free_rx_buffer(priv, rx_q, i);
1568 }
1569 
1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1571 				   struct stmmac_dma_conf *dma_conf,
1572 				   u32 queue, gfp_t flags)
1573 {
1574 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1575 	int i;
1576 
1577 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1578 		struct dma_desc *p;
1579 		int ret;
1580 
1581 		if (priv->extend_desc)
1582 			p = &((rx_q->dma_erx + i)->basic);
1583 		else
1584 			p = rx_q->dma_rx + i;
1585 
1586 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1587 					     queue);
1588 		if (ret)
1589 			return ret;
1590 
1591 		rx_q->buf_alloc_num++;
1592 	}
1593 
1594 	return 0;
1595 }
1596 
1597 /**
1598  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1599  * @priv: private structure
1600  * @dma_conf: structure to take the dma data
1601  * @queue: RX queue index
1602  */
1603 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1604 				struct stmmac_dma_conf *dma_conf,
1605 				u32 queue)
1606 {
1607 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1608 	int i;
1609 
1610 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1611 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1612 
1613 		if (!buf->xdp)
1614 			continue;
1615 
1616 		xsk_buff_free(buf->xdp);
1617 		buf->xdp = NULL;
1618 	}
1619 }
1620 
1621 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1622 				      struct stmmac_dma_conf *dma_conf,
1623 				      u32 queue)
1624 {
1625 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1626 	int i;
1627 
1628 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1629 	 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1630 	 * use this macro to make sure there are no size violations.
1631 	 */
1632 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1633 
1634 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1635 		struct stmmac_rx_buffer *buf;
1636 		dma_addr_t dma_addr;
1637 		struct dma_desc *p;
1638 
1639 		if (priv->extend_desc)
1640 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1641 		else
1642 			p = rx_q->dma_rx + i;
1643 
1644 		buf = &rx_q->buf_pool[i];
1645 
1646 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1647 		if (!buf->xdp)
1648 			return -ENOMEM;
1649 
1650 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1651 		stmmac_set_desc_addr(priv, p, dma_addr);
1652 		rx_q->buf_alloc_num++;
1653 	}
1654 
1655 	return 0;
1656 }
1657 
1658 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1659 {
1660 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1661 		return NULL;
1662 
1663 	return xsk_get_pool_from_qid(priv->dev, queue);
1664 }
1665 
1666 /**
1667  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1668  * @priv: driver private structure
1669  * @dma_conf: structure to take the dma data
1670  * @queue: RX queue index
1671  * @flags: gfp flag.
1672  * Description: this function initializes the DMA RX descriptors
1673  * and allocates the socket buffers. It supports the chained and ring
1674  * modes.
1675  */
1676 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1677 				    struct stmmac_dma_conf *dma_conf,
1678 				    u32 queue, gfp_t flags)
1679 {
1680 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1681 	int ret;
1682 
1683 	netif_dbg(priv, probe, priv->dev,
1684 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1685 		  (u32)rx_q->dma_rx_phy);
1686 
1687 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1688 
1689 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1690 
1691 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1692 
1693 	if (rx_q->xsk_pool) {
1694 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1695 						   MEM_TYPE_XSK_BUFF_POOL,
1696 						   NULL));
1697 		netdev_info(priv->dev,
1698 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1699 			    rx_q->queue_index);
1700 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1701 	} else {
1702 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1703 						   MEM_TYPE_PAGE_POOL,
1704 						   rx_q->page_pool));
1705 		netdev_info(priv->dev,
1706 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1707 			    rx_q->queue_index);
1708 	}
1709 
1710 	if (rx_q->xsk_pool) {
1711 		/* RX XDP ZC buffer pool may not be populated, e.g.
1712 		 * xdpsock TX-only.
1713 		 */
1714 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1715 	} else {
1716 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1717 		if (ret < 0)
1718 			return -ENOMEM;
1719 	}
1720 
1721 	/* Setup the chained descriptor addresses */
1722 	if (priv->mode == STMMAC_CHAIN_MODE) {
1723 		if (priv->extend_desc)
1724 			stmmac_mode_init(priv, rx_q->dma_erx,
1725 					 rx_q->dma_rx_phy,
1726 					 dma_conf->dma_rx_size, 1);
1727 		else
1728 			stmmac_mode_init(priv, rx_q->dma_rx,
1729 					 rx_q->dma_rx_phy,
1730 					 dma_conf->dma_rx_size, 0);
1731 	}
1732 
1733 	return 0;
1734 }
1735 
1736 static int init_dma_rx_desc_rings(struct net_device *dev,
1737 				  struct stmmac_dma_conf *dma_conf,
1738 				  gfp_t flags)
1739 {
1740 	struct stmmac_priv *priv = netdev_priv(dev);
1741 	u32 rx_count = priv->plat->rx_queues_to_use;
1742 	int queue;
1743 	int ret;
1744 
1745 	/* RX INITIALIZATION */
1746 	netif_dbg(priv, probe, priv->dev,
1747 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1748 
1749 	for (queue = 0; queue < rx_count; queue++) {
1750 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1751 		if (ret)
1752 			goto err_init_rx_buffers;
1753 	}
1754 
1755 	return 0;
1756 
1757 err_init_rx_buffers:
1758 	while (queue >= 0) {
1759 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1760 
1761 		if (rx_q->xsk_pool)
1762 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1763 		else
1764 			dma_free_rx_skbufs(priv, dma_conf, queue);
1765 
1766 		rx_q->buf_alloc_num = 0;
1767 		rx_q->xsk_pool = NULL;
1768 
1769 		queue--;
1770 	}
1771 
1772 	return ret;
1773 }
1774 
1775 /**
1776  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1777  * @priv: driver private structure
1778  * @dma_conf: structure to take the dma data
1779  * @queue: TX queue index
1780  * Description: this function initializes the DMA TX descriptors
1781  * and clears the per-descriptor TX buffer bookkeeping. It supports the
1782  * chained and ring modes.
1783  */
1784 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1785 				    struct stmmac_dma_conf *dma_conf,
1786 				    u32 queue)
1787 {
1788 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1789 	int i;
1790 
1791 	netif_dbg(priv, probe, priv->dev,
1792 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1793 		  (u32)tx_q->dma_tx_phy);
1794 
1795 	/* Setup the chained descriptor addresses */
1796 	if (priv->mode == STMMAC_CHAIN_MODE) {
1797 		if (priv->extend_desc)
1798 			stmmac_mode_init(priv, tx_q->dma_etx,
1799 					 tx_q->dma_tx_phy,
1800 					 dma_conf->dma_tx_size, 1);
1801 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1802 			stmmac_mode_init(priv, tx_q->dma_tx,
1803 					 tx_q->dma_tx_phy,
1804 					 dma_conf->dma_tx_size, 0);
1805 	}
1806 
1807 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1808 
1809 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1810 		struct dma_desc *p;
1811 
1812 		if (priv->extend_desc)
1813 			p = &((tx_q->dma_etx + i)->basic);
1814 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1815 			p = &((tx_q->dma_entx + i)->basic);
1816 		else
1817 			p = tx_q->dma_tx + i;
1818 
1819 		stmmac_clear_desc(priv, p);
1820 
1821 		tx_q->tx_skbuff_dma[i].buf = 0;
1822 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1823 		tx_q->tx_skbuff_dma[i].len = 0;
1824 		tx_q->tx_skbuff_dma[i].last_segment = false;
1825 		tx_q->tx_skbuff[i] = NULL;
1826 	}
1827 
1828 	return 0;
1829 }
1830 
1831 static int init_dma_tx_desc_rings(struct net_device *dev,
1832 				  struct stmmac_dma_conf *dma_conf)
1833 {
1834 	struct stmmac_priv *priv = netdev_priv(dev);
1835 	u32 tx_queue_cnt;
1836 	u32 queue;
1837 
1838 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1839 
1840 	for (queue = 0; queue < tx_queue_cnt; queue++)
1841 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1842 
1843 	return 0;
1844 }
1845 
1846 /**
1847  * init_dma_desc_rings - init the RX/TX descriptor rings
1848  * @dev: net device structure
1849  * @dma_conf: structure to take the dma data
1850  * @flags: gfp flag.
1851  * Description: this function initializes the DMA RX/TX descriptors
1852  * and allocates the socket buffers. It supports the chained and ring
1853  * modes.
1854  */
1855 static int init_dma_desc_rings(struct net_device *dev,
1856 			       struct stmmac_dma_conf *dma_conf,
1857 			       gfp_t flags)
1858 {
1859 	struct stmmac_priv *priv = netdev_priv(dev);
1860 	int ret;
1861 
1862 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1863 	if (ret)
1864 		return ret;
1865 
1866 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1867 
1868 	stmmac_clear_descriptors(priv, dma_conf);
1869 
1870 	if (netif_msg_hw(priv))
1871 		stmmac_display_rings(priv, dma_conf);
1872 
1873 	return ret;
1874 }
1875 
1876 /**
1877  * dma_free_tx_skbufs - free TX dma buffers
1878  * @priv: private structure
1879  * @dma_conf: structure to take the dma data
1880  * @queue: TX queue index
1881  */
1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1883 			       struct stmmac_dma_conf *dma_conf,
1884 			       u32 queue)
1885 {
1886 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1887 	int i;
1888 
1889 	tx_q->xsk_frames_done = 0;
1890 
1891 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1892 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1893 
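	/* Report any XSK frames completed during the free back to the pool
	 * before dropping the pool reference.
	 */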
1894 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1895 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1896 		tx_q->xsk_frames_done = 0;
1897 		tx_q->xsk_pool = NULL;
1898 	}
1899 }
1900 
1901 /**
1902  * stmmac_free_tx_skbufs - free TX skb buffers
1903  * @priv: private structure
1904  */
1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1906 {
1907 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1908 	u32 queue;
1909 
1910 	for (queue = 0; queue < tx_queue_cnt; queue++)
1911 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1912 }
1913 
1914 /**
1915  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1916  * @priv: private structure
1917  * @dma_conf: structure to take the dma data
1918  * @queue: RX queue index
1919  */
1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1921 					 struct stmmac_dma_conf *dma_conf,
1922 					 u32 queue)
1923 {
1924 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1925 
1926 	/* Release the DMA RX socket buffers */
1927 	if (rx_q->xsk_pool)
1928 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1929 	else
1930 		dma_free_rx_skbufs(priv, dma_conf, queue);
1931 
1932 	rx_q->buf_alloc_num = 0;
1933 	rx_q->xsk_pool = NULL;
1934 
1935 	/* Free DMA regions of consistent memory previously allocated */
1936 	if (!priv->extend_desc)
1937 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1938 				  sizeof(struct dma_desc),
1939 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1940 	else
1941 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1942 				  sizeof(struct dma_extended_desc),
1943 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1944 
1945 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1946 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1947 
1948 	kfree(rx_q->buf_pool);
1949 	if (rx_q->page_pool)
1950 		page_pool_destroy(rx_q->page_pool);
1951 }
1952 
1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1954 				       struct stmmac_dma_conf *dma_conf)
1955 {
1956 	u32 rx_count = priv->plat->rx_queues_to_use;
1957 	u32 queue;
1958 
1959 	/* Free RX queue resources */
1960 	for (queue = 0; queue < rx_count; queue++)
1961 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1962 }
1963 
1964 /**
1965  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1966  * @priv: private structure
1967  * @dma_conf: structure to take the dma data
1968  * @queue: TX queue index
1969  */
1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1971 					 struct stmmac_dma_conf *dma_conf,
1972 					 u32 queue)
1973 {
1974 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1975 	size_t size;
1976 	void *addr;
1977 
1978 	/* Release the DMA TX socket buffers */
1979 	dma_free_tx_skbufs(priv, dma_conf, queue);
1980 
1981 	if (priv->extend_desc) {
1982 		size = sizeof(struct dma_extended_desc);
1983 		addr = tx_q->dma_etx;
1984 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1985 		size = sizeof(struct dma_edesc);
1986 		addr = tx_q->dma_entx;
1987 	} else {
1988 		size = sizeof(struct dma_desc);
1989 		addr = tx_q->dma_tx;
1990 	}
1991 
1992 	size *= dma_conf->dma_tx_size;
1993 
1994 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1995 
1996 	kfree(tx_q->tx_skbuff_dma);
1997 	kfree(tx_q->tx_skbuff);
1998 }
1999 
2000 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2001 				       struct stmmac_dma_conf *dma_conf)
2002 {
2003 	u32 tx_count = priv->plat->tx_queues_to_use;
2004 	u32 queue;
2005 
2006 	/* Free TX queue resources */
2007 	for (queue = 0; queue < tx_count; queue++)
2008 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2009 }
2010 
2011 /**
2012  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2013  * @priv: private structure
2014  * @dma_conf: structure to take the dma data
2015  * @queue: RX queue index
2016  * Description: according to which descriptor can be used (extended or basic)
2017  * this function allocates the resources for the RX path: the RX
2018  * descriptor ring, the page pool and the buffer bookkeeping needed to
2019  * allow the zero-copy mechanism.
2020  */
2021 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2022 					 struct stmmac_dma_conf *dma_conf,
2023 					 u32 queue)
2024 {
2025 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2026 	struct stmmac_channel *ch = &priv->channel[queue];
2027 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2028 	struct page_pool_params pp_params = { 0 };
2029 	unsigned int num_pages;
2030 	unsigned int napi_id;
2031 	int ret;
2032 
2033 	rx_q->queue_index = queue;
2034 	rx_q->priv_data = priv;
2035 
2036 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2037 	pp_params.pool_size = dma_conf->dma_rx_size;
2038 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2039 	pp_params.order = ilog2(num_pages);
2040 	pp_params.nid = dev_to_node(priv->device);
2041 	pp_params.dev = priv->device;
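	/* With an XDP program attached, frames may be rewritten and
	 * retransmitted, so map the pages bidirectionally.
	 */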
2042 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2043 	pp_params.offset = stmmac_rx_offset(priv);
2044 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2045 
2046 	rx_q->page_pool = page_pool_create(&pp_params);
2047 	if (IS_ERR(rx_q->page_pool)) {
2048 		ret = PTR_ERR(rx_q->page_pool);
2049 		rx_q->page_pool = NULL;
2050 		return ret;
2051 	}
2052 
2053 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2054 				 sizeof(*rx_q->buf_pool),
2055 				 GFP_KERNEL);
2056 	if (!rx_q->buf_pool)
2057 		return -ENOMEM;
2058 
2059 	if (priv->extend_desc) {
2060 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2061 						   dma_conf->dma_rx_size *
2062 						   sizeof(struct dma_extended_desc),
2063 						   &rx_q->dma_rx_phy,
2064 						   GFP_KERNEL);
2065 		if (!rx_q->dma_erx)
2066 			return -ENOMEM;
2067 
2068 	} else {
2069 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2070 						  dma_conf->dma_rx_size *
2071 						  sizeof(struct dma_desc),
2072 						  &rx_q->dma_rx_phy,
2073 						  GFP_KERNEL);
2074 		if (!rx_q->dma_rx)
2075 			return -ENOMEM;
2076 	}
2077 
2078 	if (stmmac_xdp_is_enabled(priv) &&
2079 	    test_bit(queue, priv->af_xdp_zc_qps))
2080 		napi_id = ch->rxtx_napi.napi_id;
2081 	else
2082 		napi_id = ch->rx_napi.napi_id;
2083 
2084 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2085 			       rx_q->queue_index,
2086 			       napi_id);
2087 	if (ret) {
2088 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2089 		return -EINVAL;
2090 	}
2091 
2092 	return 0;
2093 }
2094 
2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2096 				       struct stmmac_dma_conf *dma_conf)
2097 {
2098 	u32 rx_count = priv->plat->rx_queues_to_use;
2099 	u32 queue;
2100 	int ret;
2101 
2102 	/* RX queues buffers and DMA */
2103 	for (queue = 0; queue < rx_count; queue++) {
2104 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2105 		if (ret)
2106 			goto err_dma;
2107 	}
2108 
2109 	return 0;
2110 
2111 err_dma:
2112 	free_dma_rx_desc_resources(priv, dma_conf);
2113 
2114 	return ret;
2115 }
2116 
2117 /**
2118  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2119  * @priv: private structure
2120  * @dma_conf: structure to take the dma data
2121  * @queue: TX queue index
2122  * Description: according to which descriptor can be used (extended or basic)
2123  * this function allocates the resources for the TX path, i.e. the TX
2124  * descriptor ring and the per-descriptor buffer bookkeeping arrays.
2126  */
2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2128 					 struct stmmac_dma_conf *dma_conf,
2129 					 u32 queue)
2130 {
2131 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2132 	size_t size;
2133 	void *addr;
2134 
2135 	tx_q->queue_index = queue;
2136 	tx_q->priv_data = priv;
2137 
2138 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2139 				      sizeof(*tx_q->tx_skbuff_dma),
2140 				      GFP_KERNEL);
2141 	if (!tx_q->tx_skbuff_dma)
2142 		return -ENOMEM;
2143 
2144 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2145 				  sizeof(struct sk_buff *),
2146 				  GFP_KERNEL);
2147 	if (!tx_q->tx_skbuff)
2148 		return -ENOMEM;
2149 
2150 	if (priv->extend_desc)
2151 		size = sizeof(struct dma_extended_desc);
2152 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2153 		size = sizeof(struct dma_edesc);
2154 	else
2155 		size = sizeof(struct dma_desc);
2156 
2157 	size *= dma_conf->dma_tx_size;
2158 
2159 	addr = dma_alloc_coherent(priv->device, size,
2160 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2161 	if (!addr)
2162 		return -ENOMEM;
2163 
2164 	if (priv->extend_desc)
2165 		tx_q->dma_etx = addr;
2166 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2167 		tx_q->dma_entx = addr;
2168 	else
2169 		tx_q->dma_tx = addr;
2170 
2171 	return 0;
2172 }
2173 
2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2175 				       struct stmmac_dma_conf *dma_conf)
2176 {
2177 	u32 tx_count = priv->plat->tx_queues_to_use;
2178 	u32 queue;
2179 	int ret;
2180 
2181 	/* TX queues buffers and DMA */
2182 	for (queue = 0; queue < tx_count; queue++) {
2183 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2184 		if (ret)
2185 			goto err_dma;
2186 	}
2187 
2188 	return 0;
2189 
2190 err_dma:
2191 	free_dma_tx_desc_resources(priv, dma_conf);
2192 	return ret;
2193 }
2194 
2195 /**
2196  * alloc_dma_desc_resources - alloc TX/RX resources.
2197  * @priv: private structure
2198  * @dma_conf: structure to take the dma data
2199  * Description: according to which descriptor can be used (extended or basic)
2200  * this function allocates the resources for the TX and RX paths. In case of
2201  * reception, for example, it pre-allocates the RX buffer pool in order to
2202  * allow the zero-copy mechanism.
2203  */
2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2205 				    struct stmmac_dma_conf *dma_conf)
2206 {
2207 	/* RX Allocation */
2208 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2209 
2210 	if (ret)
2211 		return ret;
2212 
2213 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2214 
2215 	return ret;
2216 }
2217 
2218 /**
2219  * free_dma_desc_resources - free dma desc resources
2220  * @priv: private structure
2221  * @dma_conf: structure to take the dma data
2222  */
2223 static void free_dma_desc_resources(struct stmmac_priv *priv,
2224 				    struct stmmac_dma_conf *dma_conf)
2225 {
2226 	/* Release the DMA TX socket buffers */
2227 	free_dma_tx_desc_resources(priv, dma_conf);
2228 
2229 	/* Release the DMA RX socket buffers later
2230 	 * to ensure all pending XDP_TX buffers are returned.
2231 	 */
2232 	free_dma_rx_desc_resources(priv, dma_conf);
2233 }
2234 
2235 /**
2236  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2237  *  @priv: driver private structure
2238  *  Description: It is used for enabling the rx queues in the MAC
2239  */
2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2241 {
2242 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2243 	int queue;
2244 	u8 mode;
2245 
2246 	for (queue = 0; queue < rx_queues_count; queue++) {
2247 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2248 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2249 	}
2250 }
2251 
2252 /**
2253  * stmmac_start_rx_dma - start RX DMA channel
2254  * @priv: driver private structure
2255  * @chan: RX channel index
2256  * Description:
2257  * This starts an RX DMA channel
2258  */
2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2260 {
2261 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2262 	stmmac_start_rx(priv, priv->ioaddr, chan);
2263 }
2264 
2265 /**
2266  * stmmac_start_tx_dma - start TX DMA channel
2267  * @priv: driver private structure
2268  * @chan: TX channel index
2269  * Description:
2270  * This starts a TX DMA channel
2271  */
2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2273 {
2274 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2275 	stmmac_start_tx(priv, priv->ioaddr, chan);
2276 }
2277 
2278 /**
2279  * stmmac_stop_rx_dma - stop RX DMA channel
2280  * @priv: driver private structure
2281  * @chan: RX channel index
2282  * Description:
2283  * This stops an RX DMA channel
2284  */
2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2286 {
2287 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2288 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2289 }
2290 
2291 /**
2292  * stmmac_stop_tx_dma - stop TX DMA channel
2293  * @priv: driver private structure
2294  * @chan: TX channel index
2295  * Description:
2296  * This stops a TX DMA channel
2297  */
2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2299 {
2300 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2301 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2302 }
2303 
2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2305 {
2306 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2309 	u32 chan;
2310 
2311 	for (chan = 0; chan < dma_csr_ch; chan++) {
2312 		struct stmmac_channel *ch = &priv->channel[chan];
2313 		unsigned long flags;
2314 
2315 		spin_lock_irqsave(&ch->lock, flags);
2316 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2317 		spin_unlock_irqrestore(&ch->lock, flags);
2318 	}
2319 }
2320 
2321 /**
2322  * stmmac_start_all_dma - start all RX and TX DMA channels
2323  * @priv: driver private structure
2324  * Description:
2325  * This starts all the RX and TX DMA channels
2326  */
2327 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2328 {
2329 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2330 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2331 	u32 chan = 0;
2332 
2333 	for (chan = 0; chan < rx_channels_count; chan++)
2334 		stmmac_start_rx_dma(priv, chan);
2335 
2336 	for (chan = 0; chan < tx_channels_count; chan++)
2337 		stmmac_start_tx_dma(priv, chan);
2338 }
2339 
2340 /**
2341  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2342  * @priv: driver private structure
2343  * Description:
2344  * This stops the RX and TX DMA channels
2345  */
2346 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2347 {
2348 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2349 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2350 	u32 chan = 0;
2351 
2352 	for (chan = 0; chan < rx_channels_count; chan++)
2353 		stmmac_stop_rx_dma(priv, chan);
2354 
2355 	for (chan = 0; chan < tx_channels_count; chan++)
2356 		stmmac_stop_tx_dma(priv, chan);
2357 }
2358 
2359 /**
2360  *  stmmac_dma_operation_mode - HW DMA operation mode
2361  *  @priv: driver private structure
2362  *  Description: it is used for configuring the DMA operation mode register in
2363  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2364  */
2365 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2366 {
2367 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2368 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2369 	int rxfifosz = priv->plat->rx_fifo_size;
2370 	int txfifosz = priv->plat->tx_fifo_size;
2371 	u32 txmode = 0;
2372 	u32 rxmode = 0;
2373 	u32 chan = 0;
2374 	u8 qmode = 0;
2375 
2376 	if (rxfifosz == 0)
2377 		rxfifosz = priv->dma_cap.rx_fifo_size;
2378 	if (txfifosz == 0)
2379 		txfifosz = priv->dma_cap.tx_fifo_size;
2380 
2381 	/* Adjust for real per queue fifo size */
2382 	rxfifosz /= rx_channels_count;
2383 	txfifosz /= tx_channels_count;
2384 
2385 	if (priv->plat->force_thresh_dma_mode) {
2386 		txmode = tc;
2387 		rxmode = tc;
2388 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2389 		/*
2390 		 * In case of GMAC, SF mode can be enabled
2391 		 * to perform the TX COE in HW. This depends on:
2392 		 * 1) TX COE being actually supported;
2393 		 * 2) there being no buggy Jumbo frame support
2394 		 *    that requires not inserting the csum in the TDES.
2395 		 */
2396 		txmode = SF_DMA_MODE;
2397 		rxmode = SF_DMA_MODE;
2398 		priv->xstats.threshold = SF_DMA_MODE;
2399 	} else {
2400 		txmode = tc;
2401 		rxmode = SF_DMA_MODE;
2402 	}
2403 
2404 	/* configure all channels */
2405 	for (chan = 0; chan < rx_channels_count; chan++) {
2406 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2407 		u32 buf_size;
2408 
2409 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2410 
2411 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2412 				rxfifosz, qmode);
2413 
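		/* Program the DMA buffer size from the XSK pool frame size when
		 * this queue runs in zero-copy mode, otherwise use the default
		 * buffer size.
		 */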
2414 		if (rx_q->xsk_pool) {
2415 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2416 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2417 					      buf_size,
2418 					      chan);
2419 		} else {
2420 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421 					      priv->dma_conf.dma_buf_sz,
2422 					      chan);
2423 		}
2424 	}
2425 
2426 	for (chan = 0; chan < tx_channels_count; chan++) {
2427 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2428 
2429 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2430 				txfifosz, qmode);
2431 	}
2432 }
2433 
2434 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2435 {
2436 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2437 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2438 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2439 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2440 	unsigned int entry = tx_q->cur_tx;
2441 	struct dma_desc *tx_desc = NULL;
2442 	struct xdp_desc xdp_desc;
2443 	bool work_done = true;
2444 	u32 tx_set_ic_bit = 0;
2445 	unsigned long flags;
2446 
2447 	/* Avoids TX time-out as we are sharing with slow path */
2448 	txq_trans_cond_update(nq);
2449 
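	/* Never submit more descriptors than there are free entries in the
	 * TX ring.
	 */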
2450 	budget = min(budget, stmmac_tx_avail(priv, queue));
2451 
2452 	while (budget-- > 0) {
2453 		dma_addr_t dma_addr;
2454 		bool set_ic;
2455 
2456 		/* We share the ring with the slow path, so stop XSK TX desc
2457 		 * submission when the available TX ring space drops below the threshold.
2458 		 */
2459 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2460 		    !netif_carrier_ok(priv->dev)) {
2461 			work_done = false;
2462 			break;
2463 		}
2464 
2465 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2466 			break;
2467 
2468 		if (likely(priv->extend_desc))
2469 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2470 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2471 			tx_desc = &tx_q->dma_entx[entry].basic;
2472 		else
2473 			tx_desc = tx_q->dma_tx + entry;
2474 
2475 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2476 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2477 
2478 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2479 
2480 		/* To return the XDP buffer to the XSK pool, we simply call
2481 		 * xsk_tx_completed(), so we don't need to fill up
2482 		 * 'buf' and 'xdpf'.
2483 		 */
2484 		tx_q->tx_skbuff_dma[entry].buf = 0;
2485 		tx_q->xdpf[entry] = NULL;
2486 
2487 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2488 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2489 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2490 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2491 
2492 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2493 
2494 		tx_q->tx_count_frames++;
2495 
2496 		if (!priv->tx_coal_frames[queue])
2497 			set_ic = false;
2498 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2499 			set_ic = true;
2500 		else
2501 			set_ic = false;
2502 
2503 		if (set_ic) {
2504 			tx_q->tx_count_frames = 0;
2505 			stmmac_set_tx_ic(priv, tx_desc);
2506 			tx_set_ic_bit++;
2507 		}
2508 
2509 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2510 				       true, priv->mode, true, true,
2511 				       xdp_desc.len);
2512 
2513 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2514 
2515 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2516 		entry = tx_q->cur_tx;
2517 	}
2518 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2519 	txq_stats->tx_set_ic_bit += tx_set_ic_bit;
2520 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2521 
2522 	if (tx_desc) {
2523 		stmmac_flush_tx_descriptors(priv, queue);
2524 		xsk_tx_release(pool);
2525 	}
2526 
2527 	/* Return true only if both of the following conditions are met:
2528 	 *  a) TX budget is still available;
2529 	 *  b) work_done is true, i.e. the XSK TX desc peek came up empty (no
2530 	 *     more pending XSK TX frames for transmission).
2531 	 */
2532 	return !!budget && work_done;
2533 }
2534 
2535 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2536 {
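	/* Only bump the threshold while operating in threshold mode and
	 * while the threshold is still below the 256 cap.
	 */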
2537 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2538 		tc += 64;
2539 
2540 		if (priv->plat->force_thresh_dma_mode)
2541 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2542 		else
2543 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2544 						      chan);
2545 
2546 		priv->xstats.threshold = tc;
2547 	}
2548 }
2549 
2550 /**
2551  * stmmac_tx_clean - to manage the transmission completion
2552  * @priv: driver private structure
2553  * @budget: napi budget limiting this function's packet handling
2554  * @queue: TX queue index
2555  * @pending_packets: signal to arm the TX coal timer
2556  * Description: it reclaims the transmit resources after transmission completes.
2557  * If some packets still need to be handled, due to TX coalescing, set
2558  * pending_packets to true to make NAPI arm the TX coal timer.
2559  */
2560 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2561 			   bool *pending_packets)
2562 {
2563 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2564 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2565 	unsigned int bytes_compl = 0, pkts_compl = 0;
2566 	unsigned int entry, xmits = 0, count = 0;
2567 	u32 tx_packets = 0, tx_errors = 0;
2568 	unsigned long flags;
2569 
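	/* TX clean runs under the netdev TX queue lock because it shares the
	 * ring state with the xmit path.
	 */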
2570 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2571 
2572 	tx_q->xsk_frames_done = 0;
2573 
2574 	entry = tx_q->dirty_tx;
2575 
2576 	/* Try to clean all TX complete frame in 1 shot */
2577 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2578 		struct xdp_frame *xdpf;
2579 		struct sk_buff *skb;
2580 		struct dma_desc *p;
2581 		int status;
2582 
2583 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2584 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2585 			xdpf = tx_q->xdpf[entry];
2586 			skb = NULL;
2587 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2588 			xdpf = NULL;
2589 			skb = tx_q->tx_skbuff[entry];
2590 		} else {
2591 			xdpf = NULL;
2592 			skb = NULL;
2593 		}
2594 
2595 		if (priv->extend_desc)
2596 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2597 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2598 			p = &tx_q->dma_entx[entry].basic;
2599 		else
2600 			p = tx_q->dma_tx + entry;
2601 
2602 		status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2603 		/* Check if the descriptor is owned by the DMA */
2604 		if (unlikely(status & tx_dma_own))
2605 			break;
2606 
2607 		count++;
2608 
2609 		/* Make sure descriptor fields are read after reading
2610 		 * the own bit.
2611 		 */
2612 		dma_rmb();
2613 
2614 		/* Just consider the last segment and ...*/
2615 		if (likely(!(status & tx_not_ls))) {
2616 			/* ... verify the status error condition */
2617 			if (unlikely(status & tx_err)) {
2618 				tx_errors++;
2619 				if (unlikely(status & tx_err_bump_tc))
2620 					stmmac_bump_dma_threshold(priv, queue);
2621 			} else {
2622 				tx_packets++;
2623 			}
2624 			if (skb)
2625 				stmmac_get_tx_hwtstamp(priv, p, skb);
2626 		}
2627 
2628 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2629 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2630 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2631 				dma_unmap_page(priv->device,
2632 					       tx_q->tx_skbuff_dma[entry].buf,
2633 					       tx_q->tx_skbuff_dma[entry].len,
2634 					       DMA_TO_DEVICE);
2635 			else
2636 				dma_unmap_single(priv->device,
2637 						 tx_q->tx_skbuff_dma[entry].buf,
2638 						 tx_q->tx_skbuff_dma[entry].len,
2639 						 DMA_TO_DEVICE);
2640 			tx_q->tx_skbuff_dma[entry].buf = 0;
2641 			tx_q->tx_skbuff_dma[entry].len = 0;
2642 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2643 		}
2644 
2645 		stmmac_clean_desc3(priv, tx_q, p);
2646 
2647 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2648 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2649 
2650 		if (xdpf &&
2651 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2652 			xdp_return_frame_rx_napi(xdpf);
2653 			tx_q->xdpf[entry] = NULL;
2654 		}
2655 
2656 		if (xdpf &&
2657 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2658 			xdp_return_frame(xdpf);
2659 			tx_q->xdpf[entry] = NULL;
2660 		}
2661 
2662 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2663 			tx_q->xsk_frames_done++;
2664 
2665 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2666 			if (likely(skb)) {
2667 				pkts_compl++;
2668 				bytes_compl += skb->len;
2669 				dev_consume_skb_any(skb);
2670 				tx_q->tx_skbuff[entry] = NULL;
2671 			}
2672 		}
2673 
2674 		stmmac_release_tx_desc(priv, p, priv->mode);
2675 
2676 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2677 	}
2678 	tx_q->dirty_tx = entry;
2679 
2680 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2681 				  pkts_compl, bytes_compl);
2682 
2683 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2684 								queue))) &&
2685 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2687 		netif_dbg(priv, tx_done, priv->dev,
2688 			  "%s: restart transmit\n", __func__);
2689 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2690 	}
2691 
2692 	if (tx_q->xsk_pool) {
2693 		bool work_done;
2694 
2695 		if (tx_q->xsk_frames_done)
2696 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2697 
2698 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2699 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2700 
2701 		/* For XSK TX, we try to send as many frames as possible.
2702 		 * If XSK work done (XSK TX desc empty and budget still
2703 		 * available), return "budget - 1" to reenable TX IRQ.
2704 		 * Else, return "budget" to make NAPI continue polling.
2705 		 */
2706 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2707 					       STMMAC_XSK_TX_BUDGET_MAX);
2708 		if (work_done)
2709 			xmits = budget - 1;
2710 		else
2711 			xmits = budget;
2712 	}
2713 
2714 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2715 	    priv->eee_sw_timer_en) {
2716 		if (stmmac_enable_eee_mode(priv))
2717 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2718 	}
2719 
2720 	/* We still have pending packets, let's call for a new scheduling */
2721 	if (tx_q->dirty_tx != tx_q->cur_tx)
2722 		*pending_packets = true;
2723 
2724 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2725 	txq_stats->tx_packets += tx_packets;
2726 	txq_stats->tx_pkt_n += tx_packets;
2727 	txq_stats->tx_clean++;
2728 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2729 
2730 	priv->xstats.tx_errors += tx_errors;
2731 
2732 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2733 
2734 	/* Combine decisions from TX clean and XSK TX */
2735 	return max(count, xmits);
2736 }
2737 
2738 /**
2739  * stmmac_tx_err - to manage the tx error
2740  * @priv: driver private structure
2741  * @chan: channel index
2742  * Description: it cleans the descriptors and restarts the transmission
2743  * in case of transmission errors.
2744  */
2745 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2746 {
2747 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2748 
2749 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2750 
2751 	stmmac_stop_tx_dma(priv, chan);
2752 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2753 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2754 	stmmac_reset_tx_queue(priv, chan);
2755 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2756 			    tx_q->dma_tx_phy, chan);
2757 	stmmac_start_tx_dma(priv, chan);
2758 
2759 	priv->xstats.tx_errors++;
2760 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2761 }
2762 
2763 /**
2764  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2765  *  @priv: driver private structure
2766  *  @txmode: TX operating mode
2767  *  @rxmode: RX operating mode
2768  *  @chan: channel index
2769  *  Description: it is used for configuring the DMA operation mode at
2770  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2771  *  mode.
2772  */
2773 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2774 					  u32 rxmode, u32 chan)
2775 {
2776 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2777 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2778 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2779 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2780 	int rxfifosz = priv->plat->rx_fifo_size;
2781 	int txfifosz = priv->plat->tx_fifo_size;
2782 
2783 	if (rxfifosz == 0)
2784 		rxfifosz = priv->dma_cap.rx_fifo_size;
2785 	if (txfifosz == 0)
2786 		txfifosz = priv->dma_cap.tx_fifo_size;
2787 
2788 	/* Adjust for real per queue fifo size */
2789 	rxfifosz /= rx_channels_count;
2790 	txfifosz /= tx_channels_count;
2791 
2792 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2793 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2794 }
2795 
2796 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2797 {
2798 	int ret;
2799 
2800 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2801 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2802 	if (ret && (ret != -EINVAL)) {
2803 		stmmac_global_err(priv);
2804 		return true;
2805 	}
2806 
2807 	return false;
2808 }
2809 
2810 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2811 {
2812 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2813 						 &priv->xstats, chan, dir);
2814 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2815 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2816 	struct stmmac_channel *ch = &priv->channel[chan];
2817 	struct napi_struct *rx_napi;
2818 	struct napi_struct *tx_napi;
2819 	unsigned long flags;
2820 
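	/* Queues running in AF_XDP zero-copy mode are serviced by the
	 * combined rx/tx NAPI instance.
	 */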
2821 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2822 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2823 
2824 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2825 		if (napi_schedule_prep(rx_napi)) {
2826 			spin_lock_irqsave(&ch->lock, flags);
2827 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2828 			spin_unlock_irqrestore(&ch->lock, flags);
2829 			__napi_schedule(rx_napi);
2830 		}
2831 	}
2832 
2833 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2834 		if (napi_schedule_prep(tx_napi)) {
2835 			spin_lock_irqsave(&ch->lock, flags);
2836 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2837 			spin_unlock_irqrestore(&ch->lock, flags);
2838 			__napi_schedule(tx_napi);
2839 		}
2840 	}
2841 
2842 	return status;
2843 }
2844 
2845 /**
2846  * stmmac_dma_interrupt - DMA ISR
2847  * @priv: driver private structure
2848  * Description: this is the DMA ISR. It is called by the main ISR.
2849  * It calls the dwmac dma routine and schedules the poll method in case
2850  * some work can be done.
2851  */
2852 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2853 {
2854 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2855 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2856 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2857 				tx_channel_count : rx_channel_count;
2858 	u32 chan;
2859 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2860 
2861 	/* Make sure we never check beyond our status buffer. */
2862 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2863 		channels_to_check = ARRAY_SIZE(status);
2864 
2865 	for (chan = 0; chan < channels_to_check; chan++)
2866 		status[chan] = stmmac_napi_check(priv, chan,
2867 						 DMA_DIR_RXTX);
2868 
2869 	for (chan = 0; chan < tx_channel_count; chan++) {
2870 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2871 			/* Try to bump up the dma threshold on this failure */
2872 			stmmac_bump_dma_threshold(priv, chan);
2873 		} else if (unlikely(status[chan] == tx_hard_error)) {
2874 			stmmac_tx_err(priv, chan);
2875 		}
2876 	}
2877 }
2878 
2879 /**
2880  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2881  * @priv: driver private structure
2882  * Description: this masks the MMC irq; the counters are in fact managed in SW.
2883  */
2884 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2885 {
2886 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2887 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2888 
2889 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2890 
2891 	if (priv->dma_cap.rmon) {
2892 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2893 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2894 	} else
2895 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2896 }
2897 
2898 /**
2899  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2900  * @priv: driver private structure
2901  * Description:
2902  *  new GMAC chip generations have a new register to indicate the
2903  *  presence of the optional feature/functions.
2904  *  This can also be used to override the value passed through the
2905  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2906  */
2907 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2908 {
2909 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2910 }
2911 
2912 /**
2913  * stmmac_check_ether_addr - check if the MAC addr is valid
2914  * @priv: driver private structure
2915  * Description:
2916  * it verifies whether the MAC address is valid; in case it is not, it
2917  * generates a random MAC address.
2918  */
2919 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2920 {
2921 	u8 addr[ETH_ALEN];
2922 
2923 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2924 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2925 		if (is_valid_ether_addr(addr))
2926 			eth_hw_addr_set(priv->dev, addr);
2927 		else
2928 			eth_hw_addr_random(priv->dev);
2929 		dev_info(priv->device, "device MAC address %pM\n",
2930 			 priv->dev->dev_addr);
2931 	}
2932 }
2933 
2934 /**
2935  * stmmac_init_dma_engine - DMA init.
2936  * @priv: driver private structure
2937  * Description:
2938  * It initializes the DMA by invoking the specific MAC/GMAC callback.
2939  * Some DMA parameters can be passed from the platform;
2940  * if they are not passed, a default is used for the MAC or GMAC.
2941  */
2942 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2943 {
2944 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2945 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2946 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2947 	struct stmmac_rx_queue *rx_q;
2948 	struct stmmac_tx_queue *tx_q;
2949 	u32 chan = 0;
2950 	int atds = 0;
2951 	int ret = 0;
2952 
2953 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2954 		dev_err(priv->device, "Invalid DMA configuration\n");
2955 		return -EINVAL;
2956 	}
2957 
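	/* The alternate (extended) descriptor size is only needed when using
	 * extended descriptors in ring mode.
	 */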
2958 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2959 		atds = 1;
2960 
2961 	ret = stmmac_reset(priv, priv->ioaddr);
2962 	if (ret) {
2963 		dev_err(priv->device, "Failed to reset the dma\n");
2964 		return ret;
2965 	}
2966 
2967 	/* DMA Configuration */
2968 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2969 
2970 	if (priv->plat->axi)
2971 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2972 
2973 	/* DMA CSR Channel configuration */
2974 	for (chan = 0; chan < dma_csr_ch; chan++) {
2975 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2976 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2977 	}
2978 
2979 	/* DMA RX Channel Configuration */
2980 	for (chan = 0; chan < rx_channels_count; chan++) {
2981 		rx_q = &priv->dma_conf.rx_queue[chan];
2982 
2983 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2984 				    rx_q->dma_rx_phy, chan);
2985 
2986 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2987 				     (rx_q->buf_alloc_num *
2988 				      sizeof(struct dma_desc));
2989 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2990 				       rx_q->rx_tail_addr, chan);
2991 	}
2992 
2993 	/* DMA TX Channel Configuration */
2994 	for (chan = 0; chan < tx_channels_count; chan++) {
2995 		tx_q = &priv->dma_conf.tx_queue[chan];
2996 
2997 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2998 				    tx_q->dma_tx_phy, chan);
2999 
3000 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3001 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3002 				       tx_q->tx_tail_addr, chan);
3003 	}
3004 
3005 	return ret;
3006 }
3007 
3008 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3009 {
3010 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3011 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3012 	struct stmmac_channel *ch;
3013 	struct napi_struct *napi;
3014 
3015 	if (!tx_coal_timer)
3016 		return;
3017 
3018 	ch = &priv->channel[tx_q->queue_index];
3019 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3020 
3021 	/* Arm the timer only if napi is not already scheduled.
3022 	 * If napi is scheduled, try to cancel any pending timer; it will be
3023 	 * armed again in the next scheduled napi run.
3024 	 */
3025 	if (unlikely(!napi_is_scheduled(napi)))
3026 		hrtimer_start(&tx_q->txtimer,
3027 			      STMMAC_COAL_TIMER(tx_coal_timer),
3028 			      HRTIMER_MODE_REL);
3029 	else
3030 		hrtimer_try_to_cancel(&tx_q->txtimer);
3031 }
3032 
3033 /**
3034  * stmmac_tx_timer - mitigation sw timer for tx.
3035  * @t: pointer to the hrtimer embedded in the TX queue
3036  * Description:
3037  * This is the timer handler that schedules NAPI to run stmmac_tx_clean.
3038  */
3039 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3040 {
3041 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3042 	struct stmmac_priv *priv = tx_q->priv_data;
3043 	struct stmmac_channel *ch;
3044 	struct napi_struct *napi;
3045 
3046 	ch = &priv->channel[tx_q->queue_index];
3047 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3048 
3049 	if (likely(napi_schedule_prep(napi))) {
3050 		unsigned long flags;
3051 
3052 		spin_lock_irqsave(&ch->lock, flags);
3053 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3054 		spin_unlock_irqrestore(&ch->lock, flags);
3055 		__napi_schedule(napi);
3056 	}
3057 
3058 	return HRTIMER_NORESTART;
3059 }
3060 
3061 /**
3062  * stmmac_init_coalesce - init mitigation options.
3063  * @priv: driver private structure
3064  * Description:
3065  * This initializes the coalesce parameters, i.e. the timer rate, the
3066  * timer handler and the default frame threshold used for setting the
3067  * interrupt-on-completion bit.
3068  */
3069 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3070 {
3071 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3072 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3073 	u32 chan;
3074 
3075 	for (chan = 0; chan < tx_channel_count; chan++) {
3076 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3077 
3078 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3079 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3080 
3081 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3082 		tx_q->txtimer.function = stmmac_tx_timer;
3083 	}
3084 
3085 	for (chan = 0; chan < rx_channel_count; chan++)
3086 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3087 }
3088 
3089 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3090 {
3091 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3092 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3093 	u32 chan;
3094 
3095 	/* set TX ring length */
3096 	for (chan = 0; chan < tx_channels_count; chan++)
3097 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3098 				       (priv->dma_conf.dma_tx_size - 1), chan);
3099 
3100 	/* set RX ring length */
3101 	for (chan = 0; chan < rx_channels_count; chan++)
3102 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3103 				       (priv->dma_conf.dma_rx_size - 1), chan);
3104 }
3105 
3106 /**
3107  *  stmmac_set_tx_queue_weight - Set TX queue weight
3108  *  @priv: driver private structure
3109  *  Description: It is used for setting the TX queue weights
3110  */
3111 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3112 {
3113 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3114 	u32 weight;
3115 	u32 queue;
3116 
3117 	for (queue = 0; queue < tx_queues_count; queue++) {
3118 		weight = priv->plat->tx_queues_cfg[queue].weight;
3119 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3120 	}
3121 }
3122 
3123 /**
3124  *  stmmac_configure_cbs - Configure CBS in TX queue
3125  *  @priv: driver private structure
3126  *  Description: It is used for configuring CBS in AVB TX queues
3127  */
3128 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3129 {
3130 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3131 	u32 mode_to_use;
3132 	u32 queue;
3133 
3134 	/* queue 0 is reserved for legacy traffic */
3135 	for (queue = 1; queue < tx_queues_count; queue++) {
3136 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3137 		if (mode_to_use == MTL_QUEUE_DCB)
3138 			continue;
3139 
3140 		stmmac_config_cbs(priv, priv->hw,
3141 				priv->plat->tx_queues_cfg[queue].send_slope,
3142 				priv->plat->tx_queues_cfg[queue].idle_slope,
3143 				priv->plat->tx_queues_cfg[queue].high_credit,
3144 				priv->plat->tx_queues_cfg[queue].low_credit,
3145 				queue);
3146 	}
3147 }
3148 
3149 /**
3150  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3151  *  @priv: driver private structure
3152  *  Description: It is used for mapping RX queues to RX dma channels
3153  */
3154 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3155 {
3156 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3157 	u32 queue;
3158 	u32 chan;
3159 
3160 	for (queue = 0; queue < rx_queues_count; queue++) {
3161 		chan = priv->plat->rx_queues_cfg[queue].chan;
3162 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3163 	}
3164 }
3165 
3166 /**
3167  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3168  *  @priv: driver private structure
3169  *  Description: It is used for configuring the RX Queue Priority
3170  */
3171 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3172 {
3173 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3174 	u32 queue;
3175 	u32 prio;
3176 
3177 	for (queue = 0; queue < rx_queues_count; queue++) {
3178 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3179 			continue;
3180 
3181 		prio = priv->plat->rx_queues_cfg[queue].prio;
3182 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3183 	}
3184 }
3185 
3186 /**
3187  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3188  *  @priv: driver private structure
3189  *  Description: It is used for configuring the TX Queue Priority
3190  */
3191 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3192 {
3193 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3194 	u32 queue;
3195 	u32 prio;
3196 
3197 	for (queue = 0; queue < tx_queues_count; queue++) {
3198 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3199 			continue;
3200 
3201 		prio = priv->plat->tx_queues_cfg[queue].prio;
3202 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3203 	}
3204 }
3205 
3206 /**
3207  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3208  *  @priv: driver private structure
3209  *  Description: It is used for configuring the RX queue routing
3210  */
3211 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3212 {
3213 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3214 	u32 queue;
3215 	u8 packet;
3216 
3217 	for (queue = 0; queue < rx_queues_count; queue++) {
3218 		/* no specific packet type routing specified for the queue */
3219 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3220 			continue;
3221 
3222 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3223 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3224 	}
3225 }
3226 
3227 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3228 {
3229 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3230 		priv->rss.enable = false;
3231 		return;
3232 	}
3233 
3234 	if (priv->dev->features & NETIF_F_RXHASH)
3235 		priv->rss.enable = true;
3236 	else
3237 		priv->rss.enable = false;
3238 
3239 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3240 			     priv->plat->rx_queues_to_use);
3241 }
3242 
3243 /**
3244  *  stmmac_mtl_configuration - Configure MTL
3245  *  @priv: driver private structure
3246  *  Description: It is used for configuring the MTL
3247  */
3248 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3249 {
3250 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3251 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3252 
3253 	if (tx_queues_count > 1)
3254 		stmmac_set_tx_queue_weight(priv);
3255 
3256 	/* Configure MTL RX algorithms */
3257 	if (rx_queues_count > 1)
3258 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3259 				priv->plat->rx_sched_algorithm);
3260 
3261 	/* Configure MTL TX algorithms */
3262 	if (tx_queues_count > 1)
3263 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3264 				priv->plat->tx_sched_algorithm);
3265 
3266 	/* Configure CBS in AVB TX queues */
3267 	if (tx_queues_count > 1)
3268 		stmmac_configure_cbs(priv);
3269 
3270 	/* Map RX MTL to DMA channels */
3271 	stmmac_rx_queue_dma_chan_map(priv);
3272 
3273 	/* Enable MAC RX Queues */
3274 	stmmac_mac_enable_rx_queues(priv);
3275 
3276 	/* Set RX priorities */
3277 	if (rx_queues_count > 1)
3278 		stmmac_mac_config_rx_queues_prio(priv);
3279 
3280 	/* Set TX priorities */
3281 	if (tx_queues_count > 1)
3282 		stmmac_mac_config_tx_queues_prio(priv);
3283 
3284 	/* Set RX routing */
3285 	if (rx_queues_count > 1)
3286 		stmmac_mac_config_rx_queues_routing(priv);
3287 
3288 	/* Receive Side Scaling */
3289 	if (rx_queues_count > 1)
3290 		stmmac_mac_config_rss(priv);
3291 }
3292 
3293 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3294 {
3295 	if (priv->dma_cap.asp) {
3296 		netdev_info(priv->dev, "Enabling Safety Features\n");
3297 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3298 					  priv->plat->safety_feat_cfg);
3299 	} else {
3300 		netdev_info(priv->dev, "No Safety Features support found\n");
3301 	}
3302 }
3303 
3304 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3305 {
3306 	char *name;
3307 
3308 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3309 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3310 
3311 	name = priv->wq_name;
3312 	sprintf(name, "%s-fpe", priv->dev->name);
3313 
3314 	priv->fpe_wq = create_singlethread_workqueue(name);
3315 	if (!priv->fpe_wq) {
3316 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3317 
3318 		return -ENOMEM;
3319 	}
3320 	netdev_info(priv->dev, "FPE workqueue start\n");
3321 
3322 	return 0;
3323 }
3324 
3325 /**
3326  * stmmac_hw_setup - setup mac in a usable state.
3327  *  @dev: pointer to the device structure.
3328  *  @ptp_register: register PTP if set
3329  *  Description:
3330  *  this is the main function to setup the HW in a usable state because the
3331  *  this is the main function to set up the HW in a usable state: the
3332  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3333  *  checksum features, timers) and the DMA is ready to start receiving
3334  *  and transmitting.
3335  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3336  *  0 on success and an appropriate negative errno value on failure.
3338 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3339 {
3340 	struct stmmac_priv *priv = netdev_priv(dev);
3341 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3342 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3343 	bool sph_en;
3344 	u32 chan;
3345 	int ret;
3346 
3347 	/* DMA initialization and SW reset */
3348 	ret = stmmac_init_dma_engine(priv);
3349 	if (ret < 0) {
3350 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3351 			   __func__);
3352 		return ret;
3353 	}
3354 
3355 	/* Copy the MAC addr into the HW  */
3356 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3357 
3358 	/* PS and related bits will be programmed according to the speed */
3359 	if (priv->hw->pcs) {
3360 		int speed = priv->plat->mac_port_sel_speed;
3361 
3362 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3363 		    (speed == SPEED_1000)) {
3364 			priv->hw->ps = speed;
3365 		} else {
3366 			dev_warn(priv->device, "invalid port speed\n");
3367 			priv->hw->ps = 0;
3368 		}
3369 	}
3370 
3371 	/* Initialize the MAC Core */
3372 	stmmac_core_init(priv, priv->hw, dev);
3373 
3374 	/* Initialize MTL*/
3375 	stmmac_mtl_configuration(priv);
3376 
3377 	/* Initialize Safety Features */
3378 	stmmac_safety_feat_configuration(priv);
3379 
3380 	ret = stmmac_rx_ipc(priv, priv->hw);
3381 	if (!ret) {
3382 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3383 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3384 		priv->hw->rx_csum = 0;
3385 	}
3386 
3387 	/* Enable the MAC Rx/Tx */
3388 	stmmac_mac_set(priv, priv->ioaddr, true);
3389 
3390 	/* Set the HW DMA mode and the COE */
3391 	stmmac_dma_operation_mode(priv);
3392 
3393 	stmmac_mmc_setup(priv);
3394 
3395 	if (ptp_register) {
3396 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3397 		if (ret < 0)
3398 			netdev_warn(priv->dev,
3399 				    "failed to enable PTP reference clock: %pe\n",
3400 				    ERR_PTR(ret));
3401 	}
3402 
3403 	ret = stmmac_init_ptp(priv);
3404 	if (ret == -EOPNOTSUPP)
3405 		netdev_info(priv->dev, "PTP not supported by HW\n");
3406 	else if (ret)
3407 		netdev_warn(priv->dev, "PTP init failed\n");
3408 	else if (ptp_register)
3409 		stmmac_ptp_register(priv);
3410 
3411 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3412 
3413 	/* Convert the timer from msec to usec */
3414 	if (!priv->tx_lpi_timer)
3415 		priv->tx_lpi_timer = eee_timer * 1000;
3416 
3417 	if (priv->use_riwt) {
3418 		u32 queue;
3419 
3420 		for (queue = 0; queue < rx_cnt; queue++) {
3421 			if (!priv->rx_riwt[queue])
3422 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3423 
3424 			stmmac_rx_watchdog(priv, priv->ioaddr,
3425 					   priv->rx_riwt[queue], queue);
3426 		}
3427 	}
3428 
3429 	if (priv->hw->pcs)
3430 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3431 
3432 	/* set TX and RX rings length */
3433 	stmmac_set_rings_length(priv);
3434 
3435 	/* Enable TSO */
3436 	if (priv->tso) {
3437 		for (chan = 0; chan < tx_cnt; chan++) {
3438 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3439 
3440 			/* TSO and TBS cannot co-exist */
3441 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3442 				continue;
3443 
3444 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3445 		}
3446 	}
3447 
3448 	/* Enable Split Header */
3449 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3450 	for (chan = 0; chan < rx_cnt; chan++)
3451 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3452 
3454 	/* VLAN Tag Insertion */
3455 	if (priv->dma_cap.vlins)
3456 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3457 
3458 	/* TBS */
3459 	for (chan = 0; chan < tx_cnt; chan++) {
3460 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3461 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3462 
3463 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3464 	}
3465 
3466 	/* Configure real RX and TX queues */
3467 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3468 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3469 
3470 	/* Start the ball rolling... */
3471 	stmmac_start_all_dma(priv);
3472 
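	/* If Frame Preemption (FPE) is supported, start its workqueue and
	 * trigger the FPE handshake when FPE is enabled in the configuration.
	 */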
3473 	if (priv->dma_cap.fpesel) {
3474 		stmmac_fpe_start_wq(priv);
3475 
3476 		if (priv->plat->fpe_cfg->enable)
3477 			stmmac_fpe_handshake(priv, true);
3478 	}
3479 
3480 	return 0;
3481 }
3482 
3483 static void stmmac_hw_teardown(struct net_device *dev)
3484 {
3485 	struct stmmac_priv *priv = netdev_priv(dev);
3486 
3487 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3488 }
3489 
3490 static void stmmac_free_irq(struct net_device *dev,
3491 			    enum request_irq_err irq_err, int irq_idx)
3492 {
3493 	struct stmmac_priv *priv = netdev_priv(dev);
3494 	int j;
3495 
3496 	switch (irq_err) {
3497 	case REQ_IRQ_ERR_ALL:
3498 		irq_idx = priv->plat->tx_queues_to_use;
3499 		fallthrough;
3500 	case REQ_IRQ_ERR_TX:
3501 		for (j = irq_idx - 1; j >= 0; j--) {
3502 			if (priv->tx_irq[j] > 0) {
3503 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3504 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3505 			}
3506 		}
3507 		irq_idx = priv->plat->rx_queues_to_use;
3508 		fallthrough;
3509 	case REQ_IRQ_ERR_RX:
3510 		for (j = irq_idx - 1; j >= 0; j--) {
3511 			if (priv->rx_irq[j] > 0) {
3512 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3513 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3514 			}
3515 		}
3516 
3517 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3518 			free_irq(priv->sfty_ue_irq, dev);
3519 		fallthrough;
3520 	case REQ_IRQ_ERR_SFTY_UE:
3521 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3522 			free_irq(priv->sfty_ce_irq, dev);
3523 		fallthrough;
3524 	case REQ_IRQ_ERR_SFTY_CE:
3525 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3526 			free_irq(priv->lpi_irq, dev);
3527 		fallthrough;
3528 	case REQ_IRQ_ERR_LPI:
3529 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3530 			free_irq(priv->wol_irq, dev);
3531 		fallthrough;
3532 	case REQ_IRQ_ERR_WOL:
3533 		free_irq(dev->irq, dev);
3534 		fallthrough;
3535 	case REQ_IRQ_ERR_MAC:
3536 	case REQ_IRQ_ERR_NO:
3537 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3538 		break;
3539 	}
3540 }
3541 
3542 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3543 {
3544 	struct stmmac_priv *priv = netdev_priv(dev);
3545 	enum request_irq_err irq_err;
3546 	cpumask_t cpu_mask;
3547 	int irq_idx = 0;
3548 	char *int_name;
3549 	int ret;
3550 	int i;
3551 
3552 	/* For common interrupt */
3553 	int_name = priv->int_name_mac;
3554 	sprintf(int_name, "%s:%s", dev->name, "mac");
3555 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3556 			  0, int_name, dev);
3557 	if (unlikely(ret < 0)) {
3558 		netdev_err(priv->dev,
3559 			   "%s: alloc mac MSI %d (error: %d)\n",
3560 			   __func__, dev->irq, ret);
3561 		irq_err = REQ_IRQ_ERR_MAC;
3562 		goto irq_error;
3563 	}
3564 
3565 	/* Request the Wake IRQ when a separate interrupt
3566 	 * line is used for WoL
3567 	 */
3568 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3569 		int_name = priv->int_name_wol;
3570 		sprintf(int_name, "%s:%s", dev->name, "wol");
3571 		ret = request_irq(priv->wol_irq,
3572 				  stmmac_mac_interrupt,
3573 				  0, int_name, dev);
3574 		if (unlikely(ret < 0)) {
3575 			netdev_err(priv->dev,
3576 				   "%s: alloc wol MSI %d (error: %d)\n",
3577 				   __func__, priv->wol_irq, ret);
3578 			irq_err = REQ_IRQ_ERR_WOL;
3579 			goto irq_error;
3580 		}
3581 	}
3582 
3583 	/* Request the LPI IRQ when a separate interrupt
3584 	 * line is used for LPI
3585 	 */
3586 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3587 		int_name = priv->int_name_lpi;
3588 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3589 		ret = request_irq(priv->lpi_irq,
3590 				  stmmac_mac_interrupt,
3591 				  0, int_name, dev);
3592 		if (unlikely(ret < 0)) {
3593 			netdev_err(priv->dev,
3594 				   "%s: alloc lpi MSI %d (error: %d)\n",
3595 				   __func__, priv->lpi_irq, ret);
3596 			irq_err = REQ_IRQ_ERR_LPI;
3597 			goto irq_error;
3598 		}
3599 	}
3600 
3601 	/* Request the Safety Feature Correctable Error line when
3602 	 * a separate interrupt line is used for it
3603 	 */
3604 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3605 		int_name = priv->int_name_sfty_ce;
3606 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3607 		ret = request_irq(priv->sfty_ce_irq,
3608 				  stmmac_safety_interrupt,
3609 				  0, int_name, dev);
3610 		if (unlikely(ret < 0)) {
3611 			netdev_err(priv->dev,
3612 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3613 				   __func__, priv->sfty_ce_irq, ret);
3614 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3615 			goto irq_error;
3616 		}
3617 	}
3618 
3619 	/* Request the Safety Feature Uncorrectable Error line when
3620 	 * a separate interrupt line is used for it
3621 	 */
3622 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3623 		int_name = priv->int_name_sfty_ue;
3624 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3625 		ret = request_irq(priv->sfty_ue_irq,
3626 				  stmmac_safety_interrupt,
3627 				  0, int_name, dev);
3628 		if (unlikely(ret < 0)) {
3629 			netdev_err(priv->dev,
3630 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3631 				   __func__, priv->sfty_ue_irq, ret);
3632 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3633 			goto irq_error;
3634 		}
3635 	}
3636 
3637 	/* Request Rx MSI irq */
3638 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3639 		if (i >= MTL_MAX_RX_QUEUES)
3640 			break;
3641 		if (priv->rx_irq[i] == 0)
3642 			continue;
3643 
3644 		int_name = priv->int_name_rx_irq[i];
3645 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3646 		ret = request_irq(priv->rx_irq[i],
3647 				  stmmac_msi_intr_rx,
3648 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3649 		if (unlikely(ret < 0)) {
3650 			netdev_err(priv->dev,
3651 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3652 				   __func__, i, priv->rx_irq[i], ret);
3653 			irq_err = REQ_IRQ_ERR_RX;
3654 			irq_idx = i;
3655 			goto irq_error;
3656 		}
3657 		cpumask_clear(&cpu_mask);
3658 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3659 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3660 	}
3661 
3662 	/* Request Tx MSI irq */
3663 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3664 		if (i >= MTL_MAX_TX_QUEUES)
3665 			break;
3666 		if (priv->tx_irq[i] == 0)
3667 			continue;
3668 
3669 		int_name = priv->int_name_tx_irq[i];
3670 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3671 		ret = request_irq(priv->tx_irq[i],
3672 				  stmmac_msi_intr_tx,
3673 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3674 		if (unlikely(ret < 0)) {
3675 			netdev_err(priv->dev,
3676 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3677 				   __func__, i, priv->tx_irq[i], ret);
3678 			irq_err = REQ_IRQ_ERR_TX;
3679 			irq_idx = i;
3680 			goto irq_error;
3681 		}
3682 		cpumask_clear(&cpu_mask);
3683 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3684 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3685 	}
3686 
3687 	return 0;
3688 
3689 irq_error:
3690 	stmmac_free_irq(dev, irq_err, irq_idx);
3691 	return ret;
3692 }
3693 
3694 static int stmmac_request_irq_single(struct net_device *dev)
3695 {
3696 	struct stmmac_priv *priv = netdev_priv(dev);
3697 	enum request_irq_err irq_err;
3698 	int ret;
3699 
3700 	ret = request_irq(dev->irq, stmmac_interrupt,
3701 			  IRQF_SHARED, dev->name, dev);
3702 	if (unlikely(ret < 0)) {
3703 		netdev_err(priv->dev,
3704 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3705 			   __func__, dev->irq, ret);
3706 		irq_err = REQ_IRQ_ERR_MAC;
3707 		goto irq_error;
3708 	}
3709 
3710 	/* Request the Wake IRQ when a separate interrupt
3711 	 * line is used for WoL
3712 	 */
3713 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3714 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3715 				  IRQF_SHARED, dev->name, dev);
3716 		if (unlikely(ret < 0)) {
3717 			netdev_err(priv->dev,
3718 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3719 				   __func__, priv->wol_irq, ret);
3720 			irq_err = REQ_IRQ_ERR_WOL;
3721 			goto irq_error;
3722 		}
3723 	}
3724 
3725 	/* Request the LPI IRQ when a separate interrupt line is used for LPI */
3726 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3727 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3728 				  IRQF_SHARED, dev->name, dev);
3729 		if (unlikely(ret < 0)) {
3730 			netdev_err(priv->dev,
3731 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3732 				   __func__, priv->lpi_irq, ret);
3733 			irq_err = REQ_IRQ_ERR_LPI;
3734 			goto irq_error;
3735 		}
3736 	}
3737 
3738 	return 0;
3739 
3740 irq_error:
3741 	stmmac_free_irq(dev, irq_err, 0);
3742 	return ret;
3743 }
3744 
3745 static int stmmac_request_irq(struct net_device *dev)
3746 {
3747 	struct stmmac_priv *priv = netdev_priv(dev);
3748 	int ret;
3749 
3750 	/* Request the IRQ lines */
3751 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3752 		ret = stmmac_request_irq_multi_msi(dev);
3753 	else
3754 		ret = stmmac_request_irq_single(dev);
3755 
3756 	return ret;
3757 }
3758 
3759 /**
3760  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3761  *  @priv: driver private structure
3762  *  @mtu: MTU to set up the DMA queues and buffers with
3763  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3764  *  Allocate the Tx/Rx DMA queues and initialize them.
3765  *  Return value:
3766  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3767  */
3768 static struct stmmac_dma_conf *
3769 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3770 {
3771 	struct stmmac_dma_conf *dma_conf;
3772 	int chan, bfsize, ret;
3773 
3774 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3775 	if (!dma_conf) {
3776 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3777 			   __func__);
3778 		return ERR_PTR(-ENOMEM);
3779 	}
3780 
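	/* Select the RX buffer size: ask the mode-specific helper whether a
	 * 16KiB buffer is needed for this MTU, otherwise derive it from the MTU.
	 */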
3781 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3782 	if (bfsize < 0)
3783 		bfsize = 0;
3784 
3785 	if (bfsize < BUF_SIZE_16KiB)
3786 		bfsize = stmmac_set_bfsize(mtu, 0);
3787 
3788 	dma_conf->dma_buf_sz = bfsize;
3789 	/* Choose the Tx/Rx ring sizes from the ones already defined
3790 	 * in the priv struct, if any
3791 	 */
3792 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3793 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3794 
3795 	if (!dma_conf->dma_tx_size)
3796 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3797 	if (!dma_conf->dma_rx_size)
3798 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3799 
3800 	/* Check TBS availability for each TX queue before descriptor allocation */
3801 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3802 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3803 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3804 
3805 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3806 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3807 	}
3808 
3809 	ret = alloc_dma_desc_resources(priv, dma_conf);
3810 	if (ret < 0) {
3811 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3812 			   __func__);
3813 		goto alloc_error;
3814 	}
3815 
3816 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3817 	if (ret < 0) {
3818 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3819 			   __func__);
3820 		goto init_error;
3821 	}
3822 
3823 	return dma_conf;
3824 
3825 init_error:
3826 	free_dma_desc_resources(priv, dma_conf);
3827 alloc_error:
3828 	kfree(dma_conf);
3829 	return ERR_PTR(ret);
3830 }
3831 
3832 /**
3833  *  __stmmac_open - open entry point of the driver
3834  *  @dev : pointer to the device structure.
3835  *  @dma_conf: structure holding the DMA configuration to use
3836  *  Description:
3837  *  This function is the open entry point of the driver.
3838  *  Return value:
3839  *  0 on success and an appropriate negative error code as defined in
3840  *  errno.h on failure.
3841  */
3842 static int __stmmac_open(struct net_device *dev,
3843 			 struct stmmac_dma_conf *dma_conf)
3844 {
3845 	struct stmmac_priv *priv = netdev_priv(dev);
3846 	int mode = priv->plat->phy_interface;
3847 	u32 chan;
3848 	int ret;
3849 
3850 	ret = pm_runtime_resume_and_get(priv->device);
3851 	if (ret < 0)
3852 		return ret;
3853 
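	/* Skip PHY init when the link is driven by a TBI/RTBI PCS, by an XPCS
	 * running C73 auto-negotiation, or by a Lynx PCS.
	 */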
3854 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3855 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3856 	    (!priv->hw->xpcs ||
3857 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3858 	    !priv->hw->lynx_pcs) {
3859 		ret = stmmac_init_phy(dev);
3860 		if (ret) {
3861 			netdev_err(priv->dev,
3862 				   "%s: Cannot attach to PHY (error: %d)\n",
3863 				   __func__, ret);
3864 			goto init_phy_error;
3865 		}
3866 	}
3867 
3868 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3869 
3870 	buf_sz = dma_conf->dma_buf_sz;
3871 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3872 
3873 	stmmac_reset_queues_param(priv);
3874 
3875 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3876 	    priv->plat->serdes_powerup) {
3877 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3878 		if (ret < 0) {
3879 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3880 				   __func__);
3881 			goto init_error;
3882 		}
3883 	}
3884 
3885 	ret = stmmac_hw_setup(dev, true);
3886 	if (ret < 0) {
3887 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3888 		goto init_error;
3889 	}
3890 
3891 	stmmac_init_coalesce(priv);
3892 
3893 	phylink_start(priv->phylink);
3894 	/* We may have called phylink_speed_down before */
3895 	phylink_speed_up(priv->phylink);
3896 
3897 	ret = stmmac_request_irq(dev);
3898 	if (ret)
3899 		goto irq_error;
3900 
3901 	stmmac_enable_all_queues(priv);
3902 	netif_tx_start_all_queues(priv->dev);
3903 	stmmac_enable_all_dma_irq(priv);
3904 
3905 	return 0;
3906 
3907 irq_error:
3908 	phylink_stop(priv->phylink);
3909 
3910 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3911 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3912 
3913 	stmmac_hw_teardown(dev);
3914 init_error:
3915 	phylink_disconnect_phy(priv->phylink);
3916 init_phy_error:
3917 	pm_runtime_put(priv->device);
3918 	return ret;
3919 }
3920 
3921 static int stmmac_open(struct net_device *dev)
3922 {
3923 	struct stmmac_priv *priv = netdev_priv(dev);
3924 	struct stmmac_dma_conf *dma_conf;
3925 	int ret;
3926 
3927 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3928 	if (IS_ERR(dma_conf))
3929 		return PTR_ERR(dma_conf);
3930 
3931 	ret = __stmmac_open(dev, dma_conf);
3932 	if (ret)
3933 		free_dma_desc_resources(priv, dma_conf);
3934 
3935 	kfree(dma_conf);
3936 	return ret;
3937 }
3938 
3939 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3940 {
3941 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3942 
3943 	if (priv->fpe_wq)
3944 		destroy_workqueue(priv->fpe_wq);
3945 
3946 	netdev_info(priv->dev, "FPE workqueue stop");
3947 }
3948 
3949 /**
3950  *  stmmac_release - close entry point of the driver
3951  *  @dev : device pointer.
3952  *  Description:
3953  *  This is the stop entry point of the driver.
3954  */
3955 static int stmmac_release(struct net_device *dev)
3956 {
3957 	struct stmmac_priv *priv = netdev_priv(dev);
3958 	u32 chan;
3959 
3960 	if (device_may_wakeup(priv->device))
3961 		phylink_speed_down(priv->phylink, false);
3962 	/* Stop and disconnect the PHY */
3963 	phylink_stop(priv->phylink);
3964 	phylink_disconnect_phy(priv->phylink);
3965 
3966 	stmmac_disable_all_queues(priv);
3967 
3968 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3969 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3970 
3971 	netif_tx_disable(dev);
3972 
3973 	/* Free the IRQ lines */
3974 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3975 
3976 	if (priv->eee_enabled) {
3977 		priv->tx_path_in_lpi_mode = false;
3978 		del_timer_sync(&priv->eee_ctrl_timer);
3979 	}
3980 
3981 	/* Stop TX/RX DMA and clear the descriptors */
3982 	stmmac_stop_all_dma(priv);
3983 
3984 	/* Release and free the Rx/Tx resources */
3985 	free_dma_desc_resources(priv, &priv->dma_conf);
3986 
3987 	/* Disable the MAC Rx/Tx */
3988 	stmmac_mac_set(priv, priv->ioaddr, false);
3989 
3990 	/* Power down the SerDes if one is present */
3991 	if (priv->plat->serdes_powerdown)
3992 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3993 
3994 	netif_carrier_off(dev);
3995 
3996 	stmmac_release_ptp(priv);
3997 
3998 	pm_runtime_put(priv->device);
3999 
4000 	if (priv->dma_cap.fpesel)
4001 		stmmac_fpe_stop_wq(priv);
4002 
4003 	return 0;
4004 }
4005 
4006 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4007 			       struct stmmac_tx_queue *tx_q)
4008 {
4009 	u16 tag = 0x0, inner_tag = 0x0;
4010 	u32 inner_type = 0x0;
4011 	struct dma_desc *p;
4012 
4013 	if (!priv->dma_cap.vlins)
4014 		return false;
4015 	if (!skb_vlan_tag_present(skb))
4016 		return false;
4017 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4018 		inner_tag = skb_vlan_tag_get(skb);
4019 		inner_type = STMMAC_VLAN_INSERT;
4020 	}
4021 
4022 	tag = skb_vlan_tag_get(skb);
4023 
4024 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4025 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4026 	else
4027 		p = &tx_q->dma_tx[tx_q->cur_tx];
4028 
4029 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4030 		return false;
4031 
4032 	stmmac_set_tx_owner(priv, p);
4033 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4034 	return true;
4035 }
4036 
4037 /**
4038  *  stmmac_tso_allocator - fill TX descriptors with the TSO payload
4039  *  @priv: driver private structure
4040  *  @des: buffer start address
4041  *  @total_len: total length to fill in descriptors
4042  *  @last_segment: condition for the last descriptor
4043  *  @queue: TX queue index
4044  *  Description:
4045  *  This function fills descriptors and requests new descriptors according
4046  *  to the buffer length to fill
4047  */
4048 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4049 				 int total_len, bool last_segment, u32 queue)
4050 {
4051 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4052 	struct dma_desc *desc;
4053 	u32 buff_size;
4054 	int tmp_len;
4055 
4056 	tmp_len = total_len;
4057 
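	/* Fill as many descriptors as needed; each one covers at most
	 * TSO_MAX_BUFF_SIZE bytes of the payload.
	 */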
4058 	while (tmp_len > 0) {
4059 		dma_addr_t curr_addr;
4060 
4061 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4062 						priv->dma_conf.dma_tx_size);
4063 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4064 
4065 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4066 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4067 		else
4068 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4069 
4070 		curr_addr = des + (total_len - tmp_len);
4071 		if (priv->dma_cap.addr64 <= 32)
4072 			desc->des0 = cpu_to_le32(curr_addr);
4073 		else
4074 			stmmac_set_desc_addr(priv, desc, curr_addr);
4075 
4076 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4077 			    TSO_MAX_BUFF_SIZE : tmp_len;
4078 
4079 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4080 				0, 1,
4081 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4082 				0, 0);
4083 
4084 		tmp_len -= TSO_MAX_BUFF_SIZE;
4085 	}
4086 }
4087 
4088 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4089 {
4090 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4091 	int desc_size;
4092 
4093 	if (likely(priv->extend_desc))
4094 		desc_size = sizeof(struct dma_extended_desc);
4095 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4096 		desc_size = sizeof(struct dma_edesc);
4097 	else
4098 		desc_size = sizeof(struct dma_desc);
4099 
4100 	/* The own bit must be the last setting done when preparing the
4101 	 * descriptor; a barrier is then needed to make sure that
4102 	 * everything is coherent before granting the DMA engine.
4103 	 */
4104 	wmb();
4105 
4106 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4107 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4108 }
4109 
4110 /**
4111  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4112  *  @skb : the socket buffer
4113  *  @dev : device pointer
4114  *  Description: this is the transmit function that is called on TSO frames
4115  *  (support available on GMAC4 and newer chips).
4116  *  The diagram below shows the ring programming in the case of TSO frames:
4117  *
4118  *  First Descriptor
4119  *   --------
4120  *   | DES0 |---> buffer1 = L2/L3/L4 header
4121  *   | DES1 |---> TCP Payload (can continue on next descr...)
4122  *   | DES2 |---> buffer 1 and 2 len
4123  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4124  *   --------
4125  *	|
4126  *     ...
4127  *	|
4128  *   --------
4129  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4130  *   | DES1 | --|
4131  *   | DES2 | --> buffer 1 and 2 len
4132  *   | DES3 |
4133  *   --------
4134  *
4135  * The MSS is fixed while TSO is enabled, so the TDES3 context field only
4136  * needs to be programmed when the MSS value changes.
4136  */
4137 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4138 {
4139 	struct dma_desc *desc, *first, *mss_desc = NULL;
4140 	struct stmmac_priv *priv = netdev_priv(dev);
4141 	int nfrags = skb_shinfo(skb)->nr_frags;
4142 	u32 queue = skb_get_queue_mapping(skb);
4143 	unsigned int first_entry, tx_packets;
4144 	struct stmmac_txq_stats *txq_stats;
4145 	int tmp_pay_len = 0, first_tx;
4146 	struct stmmac_tx_queue *tx_q;
4147 	bool has_vlan, set_ic;
4148 	u8 proto_hdr_len, hdr;
4149 	unsigned long flags;
4150 	u32 pay_len, mss;
4151 	dma_addr_t des;
4152 	int i;
4153 
4154 	tx_q = &priv->dma_conf.tx_queue[queue];
4155 	txq_stats = &priv->xstats.txq_stats[queue];
4156 	first_tx = tx_q->cur_tx;
4157 
4158 	/* Compute header lengths */
4159 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4160 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4161 		hdr = sizeof(struct udphdr);
4162 	} else {
4163 		proto_hdr_len = skb_tcp_all_headers(skb);
4164 		hdr = tcp_hdrlen(skb);
4165 	}
4166 
4167 	/* Desc availability based on the threshold should be safe enough */
4168 	if (unlikely(stmmac_tx_avail(priv, queue) <
4169 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4170 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4171 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4172 								queue));
4173 			/* This is a hard error, log it. */
4174 			netdev_err(priv->dev,
4175 				   "%s: Tx Ring full when queue awake\n",
4176 				   __func__);
4177 		}
4178 		return NETDEV_TX_BUSY;
4179 	}
4180 
4181 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4182 
4183 	mss = skb_shinfo(skb)->gso_size;
4184 
4185 	/* set new MSS value if needed */
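	/* The new MSS is programmed through a context descriptor, which takes
	 * one ring entry ahead of the first data descriptor.
	 */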
4186 	if (mss != tx_q->mss) {
4187 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4188 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4189 		else
4190 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4191 
4192 		stmmac_set_mss(priv, mss_desc, mss);
4193 		tx_q->mss = mss;
4194 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4195 						priv->dma_conf.dma_tx_size);
4196 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4197 	}
4198 
4199 	if (netif_msg_tx_queued(priv)) {
4200 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4201 			__func__, hdr, proto_hdr_len, pay_len, mss);
4202 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4203 			skb->data_len);
4204 	}
4205 
4206 	/* Check if VLAN can be inserted by HW */
4207 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4208 
4209 	first_entry = tx_q->cur_tx;
4210 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4211 
4212 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4213 		desc = &tx_q->dma_entx[first_entry].basic;
4214 	else
4215 		desc = &tx_q->dma_tx[first_entry];
4216 	first = desc;
4217 
4218 	if (has_vlan)
4219 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4220 
4221 	/* first descriptor: fill Headers on Buf1 */
4222 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4223 			     DMA_TO_DEVICE);
4224 	if (dma_mapping_error(priv->device, des))
4225 		goto dma_map_err;
4226 
4227 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4228 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4229 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4230 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4231 
4232 	if (priv->dma_cap.addr64 <= 32) {
4233 		first->des0 = cpu_to_le32(des);
4234 
4235 		/* Fill start of payload in buff2 of first descriptor */
4236 		if (pay_len)
4237 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4238 
4239 		/* If needed take extra descriptors to fill the remaining payload */
4240 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4241 	} else {
4242 		stmmac_set_desc_addr(priv, first, des);
4243 		tmp_pay_len = pay_len;
4244 		des += proto_hdr_len;
4245 		pay_len = 0;
4246 	}
4247 
4248 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4249 
4250 	/* Prepare fragments */
4251 	for (i = 0; i < nfrags; i++) {
4252 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4253 
4254 		des = skb_frag_dma_map(priv->device, frag, 0,
4255 				       skb_frag_size(frag),
4256 				       DMA_TO_DEVICE);
4257 		if (dma_mapping_error(priv->device, des))
4258 			goto dma_map_err;
4259 
4260 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4261 				     (i == nfrags - 1), queue);
4262 
4263 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4264 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4265 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4266 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4267 	}
4268 
4269 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4270 
4271 	/* Only the last descriptor gets to point to the skb. */
4272 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4273 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4274 
4275 	/* Manage tx mitigation */
4276 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4277 	tx_q->tx_count_frames += tx_packets;
4278 
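	/* Set the IC (Interrupt on Completion) bit if the frame is being
	 * timestamped or roughly once every tx_coal_frames packets; otherwise
	 * rely on the TX coalesce timer to trigger the cleanup.
	 */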
4279 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4280 		set_ic = true;
4281 	else if (!priv->tx_coal_frames[queue])
4282 		set_ic = false;
4283 	else if (tx_packets > priv->tx_coal_frames[queue])
4284 		set_ic = true;
4285 	else if ((tx_q->tx_count_frames %
4286 		  priv->tx_coal_frames[queue]) < tx_packets)
4287 		set_ic = true;
4288 	else
4289 		set_ic = false;
4290 
4291 	if (set_ic) {
4292 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4293 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4294 		else
4295 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4296 
4297 		tx_q->tx_count_frames = 0;
4298 		stmmac_set_tx_ic(priv, desc);
4299 	}
4300 
4301 	/* We've used all descriptors we need for this skb, however,
4302 	 * advance cur_tx so that it references a fresh descriptor.
4303 	 * ndo_start_xmit will fill this descriptor the next time it's
4304 	 * called and stmmac_tx_clean may clean up to this descriptor.
4305 	 */
4306 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4307 
4308 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4309 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4310 			  __func__);
4311 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4312 	}
4313 
4314 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4315 	txq_stats->tx_bytes += skb->len;
4316 	txq_stats->tx_tso_frames++;
4317 	txq_stats->tx_tso_nfrags += nfrags;
4318 	if (set_ic)
4319 		txq_stats->tx_set_ic_bit++;
4320 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4321 
4322 	if (priv->sarc_type)
4323 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4324 
4325 	skb_tx_timestamp(skb);
4326 
4327 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4328 		     priv->hwts_tx_en)) {
4329 		/* declare that device is doing timestamping */
4330 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4331 		stmmac_enable_tx_timestamp(priv, first);
4332 	}
4333 
4334 	/* Complete the first descriptor before granting the DMA */
4335 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4336 			proto_hdr_len,
4337 			pay_len,
4338 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4339 			hdr / 4, (skb->len - proto_hdr_len));
4340 
4341 	/* If context desc is used to change MSS */
4342 	if (mss_desc) {
4343 		/* Make sure that the first descriptor has been completely
4344 		 * written, including its own bit. This is because the MSS
4345 		 * descriptor sits before the first descriptor, so we need to
4346 		 * make sure that its own bit is the last thing written.
4347 		 */
4348 		dma_wmb();
4349 		stmmac_set_tx_owner(priv, mss_desc);
4350 	}
4351 
4352 	if (netif_msg_pktdata(priv)) {
4353 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4354 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4355 			tx_q->cur_tx, first, nfrags);
4356 		pr_info(">>> frame to be transmitted: ");
4357 		print_pkt(skb->data, skb_headlen(skb));
4358 	}
4359 
4360 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4361 
4362 	stmmac_flush_tx_descriptors(priv, queue);
4363 	stmmac_tx_timer_arm(priv, queue);
4364 
4365 	return NETDEV_TX_OK;
4366 
4367 dma_map_err:
4368 	dev_err(priv->device, "Tx dma map failed\n");
4369 	dev_kfree_skb(skb);
4370 	priv->xstats.tx_dropped++;
4371 	return NETDEV_TX_OK;
4372 }
4373 
4374 /**
4375  *  stmmac_xmit - Tx entry point of the driver
4376  *  @skb : the socket buffer
4377  *  @dev : device pointer
4378  *  Description : this is the tx entry point of the driver.
4379  *  It programs the chain or the ring and supports oversized frames
4380  *  and SG feature.
4381  */
4382 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4383 {
4384 	unsigned int first_entry, tx_packets, enh_desc;
4385 	struct stmmac_priv *priv = netdev_priv(dev);
4386 	unsigned int nopaged_len = skb_headlen(skb);
4387 	int i, csum_insertion = 0, is_jumbo = 0;
4388 	u32 queue = skb_get_queue_mapping(skb);
4389 	int nfrags = skb_shinfo(skb)->nr_frags;
4390 	int gso = skb_shinfo(skb)->gso_type;
4391 	struct stmmac_txq_stats *txq_stats;
4392 	struct dma_edesc *tbs_desc = NULL;
4393 	struct dma_desc *desc, *first;
4394 	struct stmmac_tx_queue *tx_q;
4395 	bool has_vlan, set_ic;
4396 	int entry, first_tx;
4397 	unsigned long flags;
4398 	dma_addr_t des;
4399 
4400 	tx_q = &priv->dma_conf.tx_queue[queue];
4401 	txq_stats = &priv->xstats.txq_stats[queue];
4402 	first_tx = tx_q->cur_tx;
4403 
4404 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4405 		stmmac_disable_eee_mode(priv);
4406 
4407 	/* Manage oversized TCP/UDP frames for GMAC4 devices */
4408 	if (skb_is_gso(skb) && priv->tso) {
4409 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4410 			return stmmac_tso_xmit(skb, dev);
4411 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4412 			return stmmac_tso_xmit(skb, dev);
4413 	}
4414 
4415 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4416 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4417 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4418 								queue));
4419 			/* This is a hard error, log it. */
4420 			netdev_err(priv->dev,
4421 				   "%s: Tx Ring full when queue awake\n",
4422 				   __func__);
4423 		}
4424 		return NETDEV_TX_BUSY;
4425 	}
4426 
4427 	/* Check if VLAN can be inserted by HW */
4428 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4429 
4430 	entry = tx_q->cur_tx;
4431 	first_entry = entry;
4432 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4433 
4434 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4435 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4436 	 * queues. In that case, checksum offloading for those queues that don't
4437 	 * support tx coe needs to fall back to software checksum calculation.
4438 	 */
4439 	if (csum_insertion &&
4440 	    priv->plat->tx_queues_cfg[queue].coe_unsupported) {
4441 		if (unlikely(skb_checksum_help(skb)))
4442 			goto dma_map_err;
4443 		csum_insertion = !csum_insertion;
4444 	}
4445 
4446 	if (likely(priv->extend_desc))
4447 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4448 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4449 		desc = &tx_q->dma_entx[entry].basic;
4450 	else
4451 		desc = tx_q->dma_tx + entry;
4452 
4453 	first = desc;
4454 
4455 	if (has_vlan)
4456 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4457 
4458 	enh_desc = priv->plat->enh_desc;
4459 	/* Program the descriptors according to the size of the frame */
4460 	if (enh_desc)
4461 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4462 
4463 	if (unlikely(is_jumbo)) {
4464 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4465 		if (unlikely(entry < 0) && (entry != -EINVAL))
4466 			goto dma_map_err;
4467 	}
4468 
4469 	for (i = 0; i < nfrags; i++) {
4470 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4471 		int len = skb_frag_size(frag);
4472 		bool last_segment = (i == (nfrags - 1));
4473 
4474 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4475 		WARN_ON(tx_q->tx_skbuff[entry]);
4476 
4477 		if (likely(priv->extend_desc))
4478 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4479 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4480 			desc = &tx_q->dma_entx[entry].basic;
4481 		else
4482 			desc = tx_q->dma_tx + entry;
4483 
4484 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4485 				       DMA_TO_DEVICE);
4486 		if (dma_mapping_error(priv->device, des))
4487 			goto dma_map_err; /* should reuse desc w/o issues */
4488 
4489 		tx_q->tx_skbuff_dma[entry].buf = des;
4490 
4491 		stmmac_set_desc_addr(priv, desc, des);
4492 
4493 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4494 		tx_q->tx_skbuff_dma[entry].len = len;
4495 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4496 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4497 
4498 		/* Prepare the descriptor and set the own bit too */
4499 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4500 				priv->mode, 1, last_segment, skb->len);
4501 	}
4502 
4503 	/* Only the last descriptor gets to point to the skb. */
4504 	tx_q->tx_skbuff[entry] = skb;
4505 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4506 
4507 	/* According to the coalesce parameter the IC bit for the latest
4508 	 * segment is reset and the timer re-started to clean the tx status.
4509 	 * This approach takes care of the fragments: desc is the first
4510 	 * element in case of no SG.
4511 	 */
4512 	tx_packets = (entry + 1) - first_tx;
4513 	tx_q->tx_count_frames += tx_packets;
4514 
4515 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4516 		set_ic = true;
4517 	else if (!priv->tx_coal_frames[queue])
4518 		set_ic = false;
4519 	else if (tx_packets > priv->tx_coal_frames[queue])
4520 		set_ic = true;
4521 	else if ((tx_q->tx_count_frames %
4522 		  priv->tx_coal_frames[queue]) < tx_packets)
4523 		set_ic = true;
4524 	else
4525 		set_ic = false;
4526 
4527 	if (set_ic) {
4528 		if (likely(priv->extend_desc))
4529 			desc = &tx_q->dma_etx[entry].basic;
4530 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4531 			desc = &tx_q->dma_entx[entry].basic;
4532 		else
4533 			desc = &tx_q->dma_tx[entry];
4534 
4535 		tx_q->tx_count_frames = 0;
4536 		stmmac_set_tx_ic(priv, desc);
4537 	}
4538 
4539 	/* We've used all descriptors we need for this skb, however,
4540 	 * advance cur_tx so that it references a fresh descriptor.
4541 	 * ndo_start_xmit will fill this descriptor the next time it's
4542 	 * called and stmmac_tx_clean may clean up to this descriptor.
4543 	 */
4544 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4545 	tx_q->cur_tx = entry;
4546 
4547 	if (netif_msg_pktdata(priv)) {
4548 		netdev_dbg(priv->dev,
4549 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4550 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4551 			   entry, first, nfrags);
4552 
4553 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4554 		print_pkt(skb->data, skb->len);
4555 	}
4556 
4557 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4558 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4559 			  __func__);
4560 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4561 	}
4562 
4563 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4564 	txq_stats->tx_bytes += skb->len;
4565 	if (set_ic)
4566 		txq_stats->tx_set_ic_bit++;
4567 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4568 
4569 	if (priv->sarc_type)
4570 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4571 
4572 	skb_tx_timestamp(skb);
4573 
4574 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4575 	 * problems because all the descriptors are actually ready to be
4576 	 * passed to the DMA engine.
4577 	 */
4578 	if (likely(!is_jumbo)) {
4579 		bool last_segment = (nfrags == 0);
4580 
4581 		des = dma_map_single(priv->device, skb->data,
4582 				     nopaged_len, DMA_TO_DEVICE);
4583 		if (dma_mapping_error(priv->device, des))
4584 			goto dma_map_err;
4585 
4586 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4587 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4588 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4589 
4590 		stmmac_set_desc_addr(priv, first, des);
4591 
4592 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4593 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4594 
4595 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4596 			     priv->hwts_tx_en)) {
4597 			/* declare that device is doing timestamping */
4598 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4599 			stmmac_enable_tx_timestamp(priv, first);
4600 		}
4601 
4602 		/* Prepare the first descriptor setting the OWN bit too */
4603 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4604 				csum_insertion, priv->mode, 0, last_segment,
4605 				skb->len);
4606 	}
4607 
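	/* With TBS enabled, program the frame's launch time taken from
	 * skb->tstamp into the enhanced TX descriptor.
	 */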
4608 	if (tx_q->tbs & STMMAC_TBS_EN) {
4609 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4610 
4611 		tbs_desc = &tx_q->dma_entx[first_entry];
4612 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4613 	}
4614 
4615 	stmmac_set_tx_owner(priv, first);
4616 
4617 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4618 
4619 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4620 
4621 	stmmac_flush_tx_descriptors(priv, queue);
4622 	stmmac_tx_timer_arm(priv, queue);
4623 
4624 	return NETDEV_TX_OK;
4625 
4626 dma_map_err:
4627 	netdev_err(priv->dev, "Tx DMA map failed\n");
4628 	dev_kfree_skb(skb);
4629 	priv->xstats.tx_dropped++;
4630 	return NETDEV_TX_OK;
4631 }
4632 
4633 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4634 {
4635 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4636 	__be16 vlan_proto = veth->h_vlan_proto;
4637 	u16 vlanid;
4638 
4639 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4640 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4641 	    (vlan_proto == htons(ETH_P_8021AD) &&
4642 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4643 		/* pop the vlan tag */
4644 		vlanid = ntohs(veth->h_vlan_TCI);
4645 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4646 		skb_pull(skb, VLAN_HLEN);
4647 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4648 	}
4649 }
4650 
4651 /**
4652  * stmmac_rx_refill - refill the used preallocated RX buffers
4653  * @priv: driver private structure
4654  * @queue: RX queue index
4655  * Description: this is to reallocate the RX buffers used by the reception
4656  * process, which is based on zero-copy.
4657  */
4658 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4659 {
4660 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4661 	int dirty = stmmac_rx_dirty(priv, queue);
4662 	unsigned int entry = rx_q->dirty_rx;
4663 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4664 
4665 	if (priv->dma_cap.host_dma_width <= 32)
4666 		gfp |= GFP_DMA32;
4667 
4668 	while (dirty-- > 0) {
4669 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4670 		struct dma_desc *p;
4671 		bool use_rx_wd;
4672 
4673 		if (priv->extend_desc)
4674 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4675 		else
4676 			p = rx_q->dma_rx + entry;
4677 
4678 		if (!buf->page) {
4679 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4680 			if (!buf->page)
4681 				break;
4682 		}
4683 
4684 		if (priv->sph && !buf->sec_page) {
4685 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4686 			if (!buf->sec_page)
4687 				break;
4688 
4689 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4690 		}
4691 
4692 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4693 
4694 		stmmac_set_desc_addr(priv, p, buf->addr);
4695 		if (priv->sph)
4696 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4697 		else
4698 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4699 		stmmac_refill_desc3(priv, rx_q, p);
4700 
4701 		rx_q->rx_count_frames++;
4702 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4703 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4704 			rx_q->rx_count_frames = 0;
4705 
4706 		use_rx_wd = !priv->rx_coal_frames[queue];
4707 		use_rx_wd |= rx_q->rx_count_frames > 0;
4708 		if (!priv->use_riwt)
4709 			use_rx_wd = false;
4710 
4711 		dma_wmb();
4712 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4713 
4714 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4715 	}
4716 	rx_q->dirty_rx = entry;
4717 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4718 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4719 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4720 }
4721 
4722 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4723 				       struct dma_desc *p,
4724 				       int status, unsigned int len)
4725 {
4726 	unsigned int plen = 0, hlen = 0;
4727 	int coe = priv->hw->rx_csum;
4728 
4729 	/* Not first descriptor, buffer is always zero */
4730 	if (priv->sph && len)
4731 		return 0;
4732 
4733 	/* First descriptor, get split header length */
4734 	stmmac_get_rx_header_len(priv, p, &hlen);
4735 	if (priv->sph && hlen) {
4736 		priv->xstats.rx_split_hdr_pkt_n++;
4737 		return hlen;
4738 	}
4739 
4740 	/* First descriptor, not last descriptor and not split header */
4741 	if (status & rx_not_ls)
4742 		return priv->dma_conf.dma_buf_sz;
4743 
4744 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4745 
4746 	/* First descriptor and last descriptor and not split header */
4747 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4748 }
4749 
4750 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4751 				       struct dma_desc *p,
4752 				       int status, unsigned int len)
4753 {
4754 	int coe = priv->hw->rx_csum;
4755 	unsigned int plen = 0;
4756 
4757 	/* Not split header, buffer is not available */
4758 	if (!priv->sph)
4759 		return 0;
4760 
4761 	/* Not last descriptor */
4762 	if (status & rx_not_ls)
4763 		return priv->dma_conf.dma_buf_sz;
4764 
4765 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4766 
4767 	/* Last descriptor */
4768 	return plen - len;
4769 }
4770 
4771 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4772 				struct xdp_frame *xdpf, bool dma_map)
4773 {
4774 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4775 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4776 	unsigned int entry = tx_q->cur_tx;
4777 	struct dma_desc *tx_desc;
4778 	dma_addr_t dma_addr;
4779 	bool set_ic;
4780 
4781 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4782 		return STMMAC_XDP_CONSUMED;
4783 
4784 	if (likely(priv->extend_desc))
4785 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4786 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4787 		tx_desc = &tx_q->dma_entx[entry].basic;
4788 	else
4789 		tx_desc = tx_q->dma_tx + entry;
4790 
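	/* Frames coming from ndo_xdp_xmit (dma_map == true) must be DMA-mapped
	 * here, while XDP_TX frames already live in our page pool and only
	 * need a DMA sync.
	 */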
4791 	if (dma_map) {
4792 		dma_addr = dma_map_single(priv->device, xdpf->data,
4793 					  xdpf->len, DMA_TO_DEVICE);
4794 		if (dma_mapping_error(priv->device, dma_addr))
4795 			return STMMAC_XDP_CONSUMED;
4796 
4797 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4798 	} else {
4799 		struct page *page = virt_to_page(xdpf->data);
4800 
4801 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4802 			   xdpf->headroom;
4803 		dma_sync_single_for_device(priv->device, dma_addr,
4804 					   xdpf->len, DMA_BIDIRECTIONAL);
4805 
4806 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4807 	}
4808 
4809 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4810 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4811 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4812 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4813 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4814 
4815 	tx_q->xdpf[entry] = xdpf;
4816 
4817 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4818 
4819 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4820 			       true, priv->mode, true, true,
4821 			       xdpf->len);
4822 
4823 	tx_q->tx_count_frames++;
4824 
4825 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4826 		set_ic = true;
4827 	else
4828 		set_ic = false;
4829 
4830 	if (set_ic) {
4831 		unsigned long flags;
4832 		tx_q->tx_count_frames = 0;
4833 		stmmac_set_tx_ic(priv, tx_desc);
4834 		flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4835 		txq_stats->tx_set_ic_bit++;
4836 		u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4837 	}
4838 
4839 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4840 
4841 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4842 	tx_q->cur_tx = entry;
4843 
4844 	return STMMAC_XDP_TX;
4845 }
4846 
4847 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4848 				   int cpu)
4849 {
4850 	int index = cpu;
4851 
4852 	if (unlikely(index < 0))
4853 		index = 0;
4854 
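	/* Wrap the CPU id onto the available TX queues
	 * (index modulo tx_queues_to_use).
	 */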
4855 	while (index >= priv->plat->tx_queues_to_use)
4856 		index -= priv->plat->tx_queues_to_use;
4857 
4858 	return index;
4859 }
4860 
4861 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4862 				struct xdp_buff *xdp)
4863 {
4864 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4865 	int cpu = smp_processor_id();
4866 	struct netdev_queue *nq;
4867 	int queue;
4868 	int res;
4869 
4870 	if (unlikely(!xdpf))
4871 		return STMMAC_XDP_CONSUMED;
4872 
4873 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4874 	nq = netdev_get_tx_queue(priv->dev, queue);
4875 
4876 	__netif_tx_lock(nq, cpu);
4877 	/* Avoid TX time-out as we are sharing with the slow path */
4878 	txq_trans_cond_update(nq);
4879 
4880 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4881 	if (res == STMMAC_XDP_TX)
4882 		stmmac_flush_tx_descriptors(priv, queue);
4883 
4884 	__netif_tx_unlock(nq);
4885 
4886 	return res;
4887 }
4888 
4889 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4890 				 struct bpf_prog *prog,
4891 				 struct xdp_buff *xdp)
4892 {
4893 	u32 act;
4894 	int res;
4895 
4896 	act = bpf_prog_run_xdp(prog, xdp);
4897 	switch (act) {
4898 	case XDP_PASS:
4899 		res = STMMAC_XDP_PASS;
4900 		break;
4901 	case XDP_TX:
4902 		res = stmmac_xdp_xmit_back(priv, xdp);
4903 		break;
4904 	case XDP_REDIRECT:
4905 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4906 			res = STMMAC_XDP_CONSUMED;
4907 		else
4908 			res = STMMAC_XDP_REDIRECT;
4909 		break;
4910 	default:
4911 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4912 		fallthrough;
4913 	case XDP_ABORTED:
4914 		trace_xdp_exception(priv->dev, prog, act);
4915 		fallthrough;
4916 	case XDP_DROP:
4917 		res = STMMAC_XDP_CONSUMED;
4918 		break;
4919 	}
4920 
4921 	return res;
4922 }
4923 
4924 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4925 					   struct xdp_buff *xdp)
4926 {
4927 	struct bpf_prog *prog;
4928 	int res;
4929 
4930 	prog = READ_ONCE(priv->xdp_prog);
4931 	if (!prog) {
4932 		res = STMMAC_XDP_PASS;
4933 		goto out;
4934 	}
4935 
4936 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4937 out:
4938 	return ERR_PTR(-res);
4939 }
4940 
4941 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4942 				   int xdp_status)
4943 {
4944 	int cpu = smp_processor_id();
4945 	int queue;
4946 
4947 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4948 
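	/* Arm the TX coalesce timer if frames were queued via XDP_TX and
	 * flush any frames redirected with xdp_do_redirect().
	 */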
4949 	if (xdp_status & STMMAC_XDP_TX)
4950 		stmmac_tx_timer_arm(priv, queue);
4951 
4952 	if (xdp_status & STMMAC_XDP_REDIRECT)
4953 		xdp_do_flush();
4954 }
4955 
4956 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4957 					       struct xdp_buff *xdp)
4958 {
4959 	unsigned int metasize = xdp->data - xdp->data_meta;
4960 	unsigned int datasize = xdp->data_end - xdp->data;
4961 	struct sk_buff *skb;
4962 
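	/* XSK buffers belong to the pool, so build a new skb and copy the
	 * payload (and metadata) out of the XDP buffer.
	 */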
4963 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4964 			       xdp->data_end - xdp->data_hard_start,
4965 			       GFP_ATOMIC | __GFP_NOWARN);
4966 	if (unlikely(!skb))
4967 		return NULL;
4968 
4969 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4970 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4971 	if (metasize)
4972 		skb_metadata_set(skb, metasize);
4973 
4974 	return skb;
4975 }
4976 
4977 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4978 				   struct dma_desc *p, struct dma_desc *np,
4979 				   struct xdp_buff *xdp)
4980 {
4981 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4982 	struct stmmac_channel *ch = &priv->channel[queue];
4983 	unsigned int len = xdp->data_end - xdp->data;
4984 	enum pkt_hash_types hash_type;
4985 	int coe = priv->hw->rx_csum;
4986 	unsigned long flags;
4987 	struct sk_buff *skb;
4988 	u32 hash;
4989 
4990 	skb = stmmac_construct_skb_zc(ch, xdp);
4991 	if (!skb) {
4992 		priv->xstats.rx_dropped++;
4993 		return;
4994 	}
4995 
4996 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4997 	stmmac_rx_vlan(priv->dev, skb);
4998 	skb->protocol = eth_type_trans(skb, priv->dev);
4999 
5000 	if (unlikely(!coe))
5001 		skb_checksum_none_assert(skb);
5002 	else
5003 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5004 
5005 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5006 		skb_set_hash(skb, hash, hash_type);
5007 
5008 	skb_record_rx_queue(skb, queue);
5009 	napi_gro_receive(&ch->rxtx_napi, skb);
5010 
5011 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5012 	rxq_stats->rx_pkt_n++;
5013 	rxq_stats->rx_bytes += len;
5014 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5015 }
5016 
5017 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5018 {
5019 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5020 	unsigned int entry = rx_q->dirty_rx;
5021 	struct dma_desc *rx_desc = NULL;
5022 	bool ret = true;
5023 
5024 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5025 
5026 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5027 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5028 		dma_addr_t dma_addr;
5029 		bool use_rx_wd;
5030 
5031 		if (!buf->xdp) {
5032 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5033 			if (!buf->xdp) {
5034 				ret = false;
5035 				break;
5036 			}
5037 		}
5038 
5039 		if (priv->extend_desc)
5040 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5041 		else
5042 			rx_desc = rx_q->dma_rx + entry;
5043 
5044 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5045 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5046 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5047 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5048 
5049 		rx_q->rx_count_frames++;
5050 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5051 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5052 			rx_q->rx_count_frames = 0;
5053 
5054 		use_rx_wd = !priv->rx_coal_frames[queue];
5055 		use_rx_wd |= rx_q->rx_count_frames > 0;
5056 		if (!priv->use_riwt)
5057 			use_rx_wd = false;
5058 
5059 		dma_wmb();
5060 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5061 
5062 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5063 	}
5064 
5065 	if (rx_desc) {
5066 		rx_q->dirty_rx = entry;
5067 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5068 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5069 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5070 	}
5071 
5072 	return ret;
5073 }
5074 
5075 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5076 {
5077 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5078 	 * to represent incoming packet, whereas cb field in the same structure
5079 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5080 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5081 	 */
5082 	return (struct stmmac_xdp_buff *)xdp;
5083 }
5084 
5085 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5086 {
5087 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5088 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5089 	unsigned int count = 0, error = 0, len = 0;
5090 	int dirty = stmmac_rx_dirty(priv, queue);
5091 	unsigned int next_entry = rx_q->cur_rx;
5092 	u32 rx_errors = 0, rx_dropped = 0;
5093 	unsigned int desc_size;
5094 	struct bpf_prog *prog;
5095 	bool failure = false;
5096 	unsigned long flags;
5097 	int xdp_status = 0;
5098 	int status = 0;
5099 
5100 	if (netif_msg_rx_status(priv)) {
5101 		void *rx_head;
5102 
5103 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5104 		if (priv->extend_desc) {
5105 			rx_head = (void *)rx_q->dma_erx;
5106 			desc_size = sizeof(struct dma_extended_desc);
5107 		} else {
5108 			rx_head = (void *)rx_q->dma_rx;
5109 			desc_size = sizeof(struct dma_desc);
5110 		}
5111 
5112 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5113 				    rx_q->dma_rx_phy, desc_size);
5114 	}
5115 	while (count < limit) {
5116 		struct stmmac_rx_buffer *buf;
5117 		struct stmmac_xdp_buff *ctx;
5118 		unsigned int buf1_len = 0;
5119 		struct dma_desc *np, *p;
5120 		int entry;
5121 		int res;
5122 
5123 		if (!count && rx_q->state_saved) {
5124 			error = rx_q->state.error;
5125 			len = rx_q->state.len;
5126 		} else {
5127 			rx_q->state_saved = false;
5128 			error = 0;
5129 			len = 0;
5130 		}
5131 
5132 		if (count >= limit)
5133 			break;
5134 
5135 read_again:
5136 		buf1_len = 0;
5137 		entry = next_entry;
5138 		buf = &rx_q->buf_pool[entry];
5139 
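		/* Refill the RX ring once at least STMMAC_RX_FILL_BATCH
		 * buffers have been consumed.
		 */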
5140 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5141 			failure = failure ||
5142 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5143 			dirty = 0;
5144 		}
5145 
5146 		if (priv->extend_desc)
5147 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5148 		else
5149 			p = rx_q->dma_rx + entry;
5150 
5151 		/* read the status of the incoming frame */
5152 		status = stmmac_rx_status(priv, &priv->xstats, p);
5153 		/* check if managed by the DMA otherwise go ahead */
5154 		if (unlikely(status & dma_own))
5155 			break;
5156 
5157 		/* Prefetch the next RX descriptor */
5158 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5159 						priv->dma_conf.dma_rx_size);
5160 		next_entry = rx_q->cur_rx;
5161 
5162 		if (priv->extend_desc)
5163 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5164 		else
5165 			np = rx_q->dma_rx + next_entry;
5166 
5167 		prefetch(np);
5168 
5169 		/* Ensure a valid XSK buffer before proceeding */
5170 		if (!buf->xdp)
5171 			break;
5172 
5173 		if (priv->extend_desc)
5174 			stmmac_rx_extended_status(priv, &priv->xstats,
5175 						  rx_q->dma_erx + entry);
5176 		if (unlikely(status == discard_frame)) {
5177 			xsk_buff_free(buf->xdp);
5178 			buf->xdp = NULL;
5179 			dirty++;
5180 			error = 1;
5181 			if (!priv->hwts_rx_en)
5182 				rx_errors++;
5183 		}
5184 
5185 		if (unlikely(error && (status & rx_not_ls)))
5186 			goto read_again;
5187 		if (unlikely(error)) {
5188 			count++;
5189 			continue;
5190 		}
5191 
5192 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5193 		if (likely(status & rx_not_ls)) {
5194 			xsk_buff_free(buf->xdp);
5195 			buf->xdp = NULL;
5196 			dirty++;
5197 			count++;
5198 			goto read_again;
5199 		}
5200 
5201 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5202 		ctx->priv = priv;
5203 		ctx->desc = p;
5204 		ctx->ndesc = np;
5205 
5206 		/* XDP ZC frames only support primary buffers for now */
5207 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5208 		len += buf1_len;
5209 
5210 		/* ACS is disabled; strip manually. */
5211 		if (likely(!(status & rx_not_ls))) {
5212 			buf1_len -= ETH_FCS_LEN;
5213 			len -= ETH_FCS_LEN;
5214 		}
5215 
5216 		/* RX buffer is good and fits into an XSK pool buffer */
5217 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5218 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5219 
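		/* Run the attached XDP program (if any) on the zero-copy
		 * buffer; the verdict is handled below: pass to the stack,
		 * drop, or defer TX/REDIRECT completion to the final flush.
		 */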
5220 		prog = READ_ONCE(priv->xdp_prog);
5221 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5222 
5223 		switch (res) {
5224 		case STMMAC_XDP_PASS:
5225 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5226 			xsk_buff_free(buf->xdp);
5227 			break;
5228 		case STMMAC_XDP_CONSUMED:
5229 			xsk_buff_free(buf->xdp);
5230 			rx_dropped++;
5231 			break;
5232 		case STMMAC_XDP_TX:
5233 		case STMMAC_XDP_REDIRECT:
5234 			xdp_status |= res;
5235 			break;
5236 		}
5237 
5238 		buf->xdp = NULL;
5239 		dirty++;
5240 		count++;
5241 	}
5242 
5243 	if (status & rx_not_ls) {
5244 		rx_q->state_saved = true;
5245 		rx_q->state.error = error;
5246 		rx_q->state.len = len;
5247 	}
5248 
5249 	stmmac_finalize_xdp_rx(priv, xdp_status);
5250 
5251 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5252 	rxq_stats->rx_pkt_n += count;
5253 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5254 
5255 	priv->xstats.rx_dropped += rx_dropped;
5256 	priv->xstats.rx_errors += rx_errors;
5257 
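	/* When need_wakeup is in use, report the real work done so NAPI can
	 * complete and rely on the wakeup flag; otherwise return the full
	 * limit after a refill failure so polling continues and the refill
	 * is retried.
	 */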
5258 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5259 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5260 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5261 		else
5262 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5263 
5264 		return (int)count;
5265 	}
5266 
5267 	return failure ? limit : (int)count;
5268 }
5269 
5270 /**
5271  * stmmac_rx - manage the receive process
5272  * @priv: driver private structure
5273  * @limit: NAPI budget
5274  * @queue: RX queue index.
5275  * Description: this is the function called by the NAPI poll method.
5276  * It gets all the frames inside the ring.
5277  */
5278 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5279 {
5280 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5281 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5282 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5283 	struct stmmac_channel *ch = &priv->channel[queue];
5284 	unsigned int count = 0, error = 0, len = 0;
5285 	int status = 0, coe = priv->hw->rx_csum;
5286 	unsigned int next_entry = rx_q->cur_rx;
5287 	enum dma_data_direction dma_dir;
5288 	unsigned int desc_size;
5289 	struct sk_buff *skb = NULL;
5290 	struct stmmac_xdp_buff ctx;
5291 	unsigned long flags;
5292 	int xdp_status = 0;
5293 	int buf_sz;
5294 
5295 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5296 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
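	/* Cap the per-poll budget to one less than the RX ring size so a
	 * single poll can never consume the entire ring.
	 */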
5297 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5298 
5299 	if (netif_msg_rx_status(priv)) {
5300 		void *rx_head;
5301 
5302 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5303 		if (priv->extend_desc) {
5304 			rx_head = (void *)rx_q->dma_erx;
5305 			desc_size = sizeof(struct dma_extended_desc);
5306 		} else {
5307 			rx_head = (void *)rx_q->dma_rx;
5308 			desc_size = sizeof(struct dma_desc);
5309 		}
5310 
5311 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5312 				    rx_q->dma_rx_phy, desc_size);
5313 	}
5314 	while (count < limit) {
5315 		unsigned int buf1_len = 0, buf2_len = 0;
5316 		enum pkt_hash_types hash_type;
5317 		struct stmmac_rx_buffer *buf;
5318 		struct dma_desc *np, *p;
5319 		int entry;
5320 		u32 hash;
5321 
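		/* A frame may span several NAPI polls: on the first iteration
		 * restore the skb, error and length state saved by the
		 * previous poll, otherwise start from a clean state.
		 */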
5322 		if (!count && rx_q->state_saved) {
5323 			skb = rx_q->state.skb;
5324 			error = rx_q->state.error;
5325 			len = rx_q->state.len;
5326 		} else {
5327 			rx_q->state_saved = false;
5328 			skb = NULL;
5329 			error = 0;
5330 			len = 0;
5331 		}
5332 
5333 read_again:
5334 		if (count >= limit)
5335 			break;
5336 
5337 		buf1_len = 0;
5338 		buf2_len = 0;
5339 		entry = next_entry;
5340 		buf = &rx_q->buf_pool[entry];
5341 
5342 		if (priv->extend_desc)
5343 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5344 		else
5345 			p = rx_q->dma_rx + entry;
5346 
5347 		/* read the status of the incoming frame */
5348 		status = stmmac_rx_status(priv, &priv->xstats, p);
5349 		/* check if still owned by the DMA, otherwise go ahead */
5350 		if (unlikely(status & dma_own))
5351 			break;
5352 
5353 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5354 						priv->dma_conf.dma_rx_size);
5355 		next_entry = rx_q->cur_rx;
5356 
5357 		if (priv->extend_desc)
5358 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5359 		else
5360 			np = rx_q->dma_rx + next_entry;
5361 
5362 		prefetch(np);
5363 
5364 		if (priv->extend_desc)
5365 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5366 		if (unlikely(status == discard_frame)) {
5367 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5368 			buf->page = NULL;
5369 			error = 1;
5370 			if (!priv->hwts_rx_en)
5371 				rx_errors++;
5372 		}
5373 
5374 		if (unlikely(error && (status & rx_not_ls)))
5375 			goto read_again;
5376 		if (unlikely(error)) {
5377 			dev_kfree_skb(skb);
5378 			skb = NULL;
5379 			count++;
5380 			continue;
5381 		}
5382 
5383 		/* Buffer is good. Go on. */
5384 
5385 		prefetch(page_address(buf->page) + buf->page_offset);
5386 		if (buf->sec_page)
5387 			prefetch(page_address(buf->sec_page));
5388 
5389 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5390 		len += buf1_len;
5391 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5392 		len += buf2_len;
5393 
5394 		/* ACS is disabled; strip manually. */
5395 		if (likely(!(status & rx_not_ls))) {
5396 			if (buf2_len) {
5397 				buf2_len -= ETH_FCS_LEN;
5398 				len -= ETH_FCS_LEN;
5399 			} else if (buf1_len) {
5400 				buf1_len -= ETH_FCS_LEN;
5401 				len -= ETH_FCS_LEN;
5402 			}
5403 		}
5404 
5405 		if (!skb) {
5406 			unsigned int pre_len, sync_len;
5407 
5408 			dma_sync_single_for_cpu(priv->device, buf->addr,
5409 						buf1_len, dma_dir);
5410 
5411 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5412 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5413 					 buf->page_offset, buf1_len, true);
5414 
5415 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5416 				  buf->page_offset;
5417 
5418 			ctx.priv = priv;
5419 			ctx.desc = p;
5420 			ctx.ndesc = np;
5421 
5422 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5423 			/* Due to xdp_adjust_tail: the DMA sync for_device must
5424 			 * cover the maximum length the CPU touched.
5425 			 */
5426 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5427 				   buf->page_offset;
5428 			sync_len = max(sync_len, pre_len);
5429 
5430 			/* For non-XDP_PASS verdicts */
5431 			if (IS_ERR(skb)) {
5432 				unsigned int xdp_res = -PTR_ERR(skb);
5433 
5434 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5435 					page_pool_put_page(rx_q->page_pool,
5436 							   virt_to_head_page(ctx.xdp.data),
5437 							   sync_len, true);
5438 					buf->page = NULL;
5439 					rx_dropped++;
5440 
5441 					/* Clear skb, as it holds the encoded
5442 					 * XDP verdict rather than a real buffer.
5443 					 */
5444 					skb = NULL;
5445 
5446 					if (unlikely((status & rx_not_ls)))
5447 						goto read_again;
5448 
5449 					count++;
5450 					continue;
5451 				} else if (xdp_res & (STMMAC_XDP_TX |
5452 						      STMMAC_XDP_REDIRECT)) {
5453 					xdp_status |= xdp_res;
5454 					buf->page = NULL;
5455 					skb = NULL;
5456 					count++;
5457 					continue;
5458 				}
5459 			}
5460 		}
5461 
5462 		if (!skb) {
5463 			/* XDP program may expand or reduce tail */
5464 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5465 
5466 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5467 			if (!skb) {
5468 				rx_dropped++;
5469 				count++;
5470 				goto drain_data;
5471 			}
5472 
5473 			/* XDP program may adjust header */
5474 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5475 			skb_put(skb, buf1_len);
5476 
5477 			/* Data payload copied into SKB, page ready for recycle */
5478 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5479 			buf->page = NULL;
5480 		} else if (buf1_len) {
5481 			dma_sync_single_for_cpu(priv->device, buf->addr,
5482 						buf1_len, dma_dir);
5483 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5484 					buf->page, buf->page_offset, buf1_len,
5485 					priv->dma_conf.dma_buf_sz);
5486 
5487 			/* Data payload appended into SKB */
5488 			skb_mark_for_recycle(skb);
5489 			buf->page = NULL;
5490 		}
5491 
5492 		if (buf2_len) {
5493 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5494 						buf2_len, dma_dir);
5495 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5496 					buf->sec_page, 0, buf2_len,
5497 					priv->dma_conf.dma_buf_sz);
5498 
5499 			/* Data payload appended into SKB */
5500 			skb_mark_for_recycle(skb);
5501 			buf->sec_page = NULL;
5502 		}
5503 
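		/* If the frame spans more descriptors, go back and read them;
		 * this also drains frames whose skb allocation failed. With
		 * no skb to deliver, move on to the next frame.
		 */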
5504 drain_data:
5505 		if (likely(status & rx_not_ls))
5506 			goto read_again;
5507 		if (!skb)
5508 			continue;
5509 
5510 		/* Got entire packet into SKB. Finish it. */
5511 
5512 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5513 		stmmac_rx_vlan(priv->dev, skb);
5514 		skb->protocol = eth_type_trans(skb, priv->dev);
5515 
5516 		if (unlikely(!coe))
5517 			skb_checksum_none_assert(skb);
5518 		else
5519 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5520 
5521 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5522 			skb_set_hash(skb, hash, hash_type);
5523 
5524 		skb_record_rx_queue(skb, queue);
5525 		napi_gro_receive(&ch->rx_napi, skb);
5526 		skb = NULL;
5527 
5528 		rx_packets++;
5529 		rx_bytes += len;
5530 		count++;
5531 	}
5532 
5533 	if (status & rx_not_ls || skb) {
5534 		rx_q->state_saved = true;
5535 		rx_q->state.skb = skb;
5536 		rx_q->state.error = error;
5537 		rx_q->state.len = len;
5538 	}
5539 
5540 	stmmac_finalize_xdp_rx(priv, xdp_status);
5541 
5542 	stmmac_rx_refill(priv, queue);
5543 
5544 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5545 	rxq_stats->rx_packets += rx_packets;
5546 	rxq_stats->rx_bytes += rx_bytes;
5547 	rxq_stats->rx_pkt_n += count;
5548 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5549 
5550 	priv->xstats.rx_dropped += rx_dropped;
5551 	priv->xstats.rx_errors += rx_errors;
5552 
5553 	return count;
5554 }
5555 
5556 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5557 {
5558 	struct stmmac_channel *ch =
5559 		container_of(napi, struct stmmac_channel, rx_napi);
5560 	struct stmmac_priv *priv = ch->priv_data;
5561 	struct stmmac_rxq_stats *rxq_stats;
5562 	u32 chan = ch->index;
5563 	unsigned long flags;
5564 	int work_done;
5565 
5566 	rxq_stats = &priv->xstats.rxq_stats[chan];
5567 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5568 	rxq_stats->napi_poll++;
5569 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5570 
5571 	work_done = stmmac_rx(priv, budget, chan);
5572 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5573 		unsigned long flags;
5574 
5575 		spin_lock_irqsave(&ch->lock, flags);
5576 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5577 		spin_unlock_irqrestore(&ch->lock, flags);
5578 	}
5579 
5580 	return work_done;
5581 }
5582 
5583 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5584 {
5585 	struct stmmac_channel *ch =
5586 		container_of(napi, struct stmmac_channel, tx_napi);
5587 	struct stmmac_priv *priv = ch->priv_data;
5588 	struct stmmac_txq_stats *txq_stats;
5589 	bool pending_packets = false;
5590 	u32 chan = ch->index;
5591 	unsigned long flags;
5592 	int work_done;
5593 
5594 	txq_stats = &priv->xstats.txq_stats[chan];
5595 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5596 	txq_stats->napi_poll++;
5597 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5598 
5599 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5600 	work_done = min(work_done, budget);
5601 
5602 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5603 		unsigned long flags;
5604 
5605 		spin_lock_irqsave(&ch->lock, flags);
5606 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5607 		spin_unlock_irqrestore(&ch->lock, flags);
5608 	}
5609 
5610 	/* TX still has packets to handle, check if we need to arm the tx timer */
5611 	if (pending_packets)
5612 		stmmac_tx_timer_arm(priv, chan);
5613 
5614 	return work_done;
5615 }
5616 
5617 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5618 {
5619 	struct stmmac_channel *ch =
5620 		container_of(napi, struct stmmac_channel, rxtx_napi);
5621 	struct stmmac_priv *priv = ch->priv_data;
5622 	bool tx_pending_packets = false;
5623 	int rx_done, tx_done, rxtx_done;
5624 	struct stmmac_rxq_stats *rxq_stats;
5625 	struct stmmac_txq_stats *txq_stats;
5626 	u32 chan = ch->index;
5627 	unsigned long flags;
5628 
5629 	rxq_stats = &priv->xstats.rxq_stats[chan];
5630 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5631 	rxq_stats->napi_poll++;
5632 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5633 
5634 	txq_stats = &priv->xstats.txq_stats[chan];
5635 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5636 	txq_stats->napi_poll++;
5637 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5638 
5639 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5640 	tx_done = min(tx_done, budget);
5641 
5642 	rx_done = stmmac_rx_zc(priv, budget, chan);
5643 
5644 	rxtx_done = max(tx_done, rx_done);
5645 
5646 	/* If either TX or RX work is not complete, return budget
5647 	 * and keep polling.
5648 	 */
5649 	if (rxtx_done >= budget)
5650 		return budget;
5651 
5652 	/* all work done, exit the polling mode */
5653 	if (napi_complete_done(napi, rxtx_done)) {
5654 		unsigned long flags;
5655 
5656 		spin_lock_irqsave(&ch->lock, flags);
5657 		/* Both RX and TX work are complete,
5658 		 * so enable both RX & TX IRQs.
5659 		 */
5660 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5661 		spin_unlock_irqrestore(&ch->lock, flags);
5662 	}
5663 
5664 	/* TX still has packets to handle, check if we need to arm the tx timer */
5665 	if (tx_pending_packets)
5666 		stmmac_tx_timer_arm(priv, chan);
5667 
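	/* napi_complete_done() may already have re-enabled the DMA IRQs, so
	 * never report the full budget back to the NAPI core from here.
	 */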
5668 	return min(rxtx_done, budget - 1);
5669 }
5670 
5671 /**
5672  *  stmmac_tx_timeout
5673  *  @dev : Pointer to net device structure
5674  *  @txqueue: the index of the hanging transmit queue
5675  *  Description: this function is called when a packet transmission fails to
5676  *   complete within a reasonable time. The driver will mark the error in the
5677  *   netdev structure and arrange for the device to be reset to a sane state
5678  *   in order to transmit a new packet.
5679  */
5680 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5681 {
5682 	struct stmmac_priv *priv = netdev_priv(dev);
5683 
5684 	stmmac_global_err(priv);
5685 }
5686 
5687 /**
5688  *  stmmac_set_rx_mode - entry point for multicast addressing
5689  *  @dev : pointer to the device structure
5690  *  Description:
5691  *  This function is a driver entry point which gets called by the kernel
5692  *  whenever multicast addresses must be enabled/disabled.
5693  *  Return value:
5694  *  void.
5695  */
5696 static void stmmac_set_rx_mode(struct net_device *dev)
5697 {
5698 	struct stmmac_priv *priv = netdev_priv(dev);
5699 
5700 	stmmac_set_filter(priv, priv->hw, dev);
5701 }
5702 
5703 /**
5704  *  stmmac_change_mtu - entry point to change MTU size for the device.
5705  *  @dev : device pointer.
5706  *  @new_mtu : the new MTU size for the device.
5707  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5708  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5709  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5710  *  Return value:
5711  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5712  *  file on failure.
5713  */
5714 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5715 {
5716 	struct stmmac_priv *priv = netdev_priv(dev);
5717 	int txfifosz = priv->plat->tx_fifo_size;
5718 	struct stmmac_dma_conf *dma_conf;
5719 	const int mtu = new_mtu;
5720 	int ret;
5721 
5722 	if (txfifosz == 0)
5723 		txfifosz = priv->dma_cap.tx_fifo_size;
5724 
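	/* The TX FIFO is shared between all TX queues; use the per-queue
	 * share for the MTU check below.
	 */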
5725 	txfifosz /= priv->plat->tx_queues_to_use;
5726 
5727 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5728 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5729 		return -EINVAL;
5730 	}
5731 
5732 	new_mtu = STMMAC_ALIGN(new_mtu);
5733 
5734 	/* If this condition is true, the FIFO is too small or the MTU is too large */
5735 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5736 		return -EINVAL;
5737 
5738 	if (netif_running(dev)) {
5739 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5740 		/* Try to allocate the new DMA conf with the new mtu */
5741 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5742 		if (IS_ERR(dma_conf)) {
5743 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5744 				   mtu);
5745 			return PTR_ERR(dma_conf);
5746 		}
5747 
5748 		stmmac_release(dev);
5749 
5750 		ret = __stmmac_open(dev, dma_conf);
5751 		if (ret) {
5752 			free_dma_desc_resources(priv, dma_conf);
5753 			kfree(dma_conf);
5754 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5755 			return ret;
5756 		}
5757 
5758 		kfree(dma_conf);
5759 
5760 		stmmac_set_rx_mode(dev);
5761 	}
5762 
5763 	dev->mtu = mtu;
5764 	netdev_update_features(dev);
5765 
5766 	return 0;
5767 }
5768 
5769 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5770 					     netdev_features_t features)
5771 {
5772 	struct stmmac_priv *priv = netdev_priv(dev);
5773 
5774 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5775 		features &= ~NETIF_F_RXCSUM;
5776 
5777 	if (!priv->plat->tx_coe)
5778 		features &= ~NETIF_F_CSUM_MASK;
5779 
5780 	/* Some GMAC devices have buggy Jumbo frame support that
5781 	 * requires Tx COE to be disabled for oversized frames
5782 	 * (due to limited buffer sizes). In this case we disable
5783 	 * the TX csum insertion in the TDES and do not use SF.
5784 	 */
5785 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5786 		features &= ~NETIF_F_CSUM_MASK;
5787 
5788 	/* Disable TSO if requested via ethtool */
5789 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5790 		if (features & NETIF_F_TSO)
5791 			priv->tso = true;
5792 		else
5793 			priv->tso = false;
5794 	}
5795 
5796 	return features;
5797 }
5798 
5799 static int stmmac_set_features(struct net_device *netdev,
5800 			       netdev_features_t features)
5801 {
5802 	struct stmmac_priv *priv = netdev_priv(netdev);
5803 
5804 	/* Keep the COE type if checksum offloading is supported */
5805 	if (features & NETIF_F_RXCSUM)
5806 		priv->hw->rx_csum = priv->plat->rx_coe;
5807 	else
5808 		priv->hw->rx_csum = 0;
5809 	/* No check needed: rx_coe has already been set and will be
5810 	 * corrected if there is an issue.
5811 	 */
5812 	stmmac_rx_ipc(priv, priv->hw);
5813 
5814 	if (priv->sph_cap) {
5815 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5816 		u32 chan;
5817 
5818 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5819 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5820 	}
5821 
5822 	return 0;
5823 }
5824 
5825 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5826 {
5827 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5828 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5829 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5830 	bool *hs_enable = &fpe_cfg->hs_enable;
5831 
5832 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5833 		return;
5834 
5835 	/* If LP has sent verify mPacket, LP is FPE capable */
5836 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5837 		if (*lp_state < FPE_STATE_CAPABLE)
5838 			*lp_state = FPE_STATE_CAPABLE;
5839 
5840 		/* If the user has requested FPE enable, respond quickly */
5841 		if (*hs_enable)
5842 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5843 						fpe_cfg,
5844 						MPACKET_RESPONSE);
5845 	}
5846 
5847 	/* If Local has sent verify mPacket, Local is FPE capable */
5848 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5849 		if (*lo_state < FPE_STATE_CAPABLE)
5850 			*lo_state = FPE_STATE_CAPABLE;
5851 	}
5852 
5853 	/* If LP has sent response mPacket, LP is entering FPE ON */
5854 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5855 		*lp_state = FPE_STATE_ENTERING_ON;
5856 
5857 	/* If Local has sent response mPacket, Local is entering FPE ON */
5858 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5859 		*lo_state = FPE_STATE_ENTERING_ON;
5860 
5861 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5862 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5863 	    priv->fpe_wq) {
5864 		queue_work(priv->fpe_wq, &priv->fpe_task);
5865 	}
5866 }
5867 
5868 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5869 {
5870 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5871 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5872 	u32 queues_count;
5873 	u32 queue;
5874 	bool xmac;
5875 
5876 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5877 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5878 
5879 	if (priv->irq_wake)
5880 		pm_wakeup_event(priv->device, 0);
5881 
5882 	if (priv->dma_cap.estsel)
5883 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5884 				      &priv->xstats, tx_cnt);
5885 
5886 	if (priv->dma_cap.fpesel) {
5887 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5888 						   priv->dev);
5889 
5890 		stmmac_fpe_event_status(priv, status);
5891 	}
5892 
5893 	/* To handle GMAC own interrupts */
5894 	if ((priv->plat->has_gmac) || xmac) {
5895 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5896 
5897 		if (unlikely(status)) {
5898 			/* For LPI we need to save the tx status */
5899 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5900 				priv->tx_path_in_lpi_mode = true;
5901 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5902 				priv->tx_path_in_lpi_mode = false;
5903 		}
5904 
5905 		for (queue = 0; queue < queues_count; queue++) {
5906 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5907 							    queue);
5908 		}
5909 
5910 		/* PCS link status */
5911 		if (priv->hw->pcs &&
5912 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5913 			if (priv->xstats.pcs_link)
5914 				netif_carrier_on(priv->dev);
5915 			else
5916 				netif_carrier_off(priv->dev);
5917 		}
5918 
5919 		stmmac_timestamp_interrupt(priv, priv);
5920 	}
5921 }
5922 
5923 /**
5924  *  stmmac_interrupt - main ISR
5925  *  @irq: interrupt number.
5926  *  @dev_id: to pass the net device pointer.
5927  *  Description: this is the main driver interrupt service routine.
5928  *  It can call:
5929  *  o DMA service routine (to manage incoming frame reception and transmission
5930  *    status)
5931  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5932  *    interrupts.
5933  */
5934 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5935 {
5936 	struct net_device *dev = (struct net_device *)dev_id;
5937 	struct stmmac_priv *priv = netdev_priv(dev);
5938 
5939 	/* Check if adapter is up */
5940 	if (test_bit(STMMAC_DOWN, &priv->state))
5941 		return IRQ_HANDLED;
5942 
5943 	/* Check if a fatal error happened */
5944 	if (stmmac_safety_feat_interrupt(priv))
5945 		return IRQ_HANDLED;
5946 
5947 	/* To handle Common interrupts */
5948 	stmmac_common_interrupt(priv);
5949 
5950 	/* To handle DMA interrupts */
5951 	stmmac_dma_interrupt(priv);
5952 
5953 	return IRQ_HANDLED;
5954 }
5955 
5956 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5957 {
5958 	struct net_device *dev = (struct net_device *)dev_id;
5959 	struct stmmac_priv *priv = netdev_priv(dev);
5960 
5961 	if (unlikely(!dev)) {
5962 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5963 		return IRQ_NONE;
5964 	}
5965 
5966 	/* Check if adapter is up */
5967 	if (test_bit(STMMAC_DOWN, &priv->state))
5968 		return IRQ_HANDLED;
5969 
5970 	/* To handle Common interrupts */
5971 	stmmac_common_interrupt(priv);
5972 
5973 	return IRQ_HANDLED;
5974 }
5975 
5976 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5977 {
5978 	struct net_device *dev = (struct net_device *)dev_id;
5979 	struct stmmac_priv *priv = netdev_priv(dev);
5980 
5981 	if (unlikely(!dev)) {
5982 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5983 		return IRQ_NONE;
5984 	}
5985 
5986 	/* Check if adapter is up */
5987 	if (test_bit(STMMAC_DOWN, &priv->state))
5988 		return IRQ_HANDLED;
5989 
5990 	/* Check if a fatal error happened */
5991 	stmmac_safety_feat_interrupt(priv);
5992 
5993 	return IRQ_HANDLED;
5994 }
5995 
5996 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5997 {
5998 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5999 	struct stmmac_dma_conf *dma_conf;
6000 	int chan = tx_q->queue_index;
6001 	struct stmmac_priv *priv;
6002 	int status;
6003 
6004 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6005 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6006 
6007 	if (unlikely(!data)) {
6008 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6009 		return IRQ_NONE;
6010 	}
6011 
6012 	/* Check if adapter is up */
6013 	if (test_bit(STMMAC_DOWN, &priv->state))
6014 		return IRQ_HANDLED;
6015 
6016 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6017 
6018 	if (unlikely(status & tx_hard_error_bump_tc)) {
6019 		/* Try to bump up the dma threshold on this failure */
6020 		stmmac_bump_dma_threshold(priv, chan);
6021 	} else if (unlikely(status == tx_hard_error)) {
6022 		stmmac_tx_err(priv, chan);
6023 	}
6024 
6025 	return IRQ_HANDLED;
6026 }
6027 
6028 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6029 {
6030 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6031 	struct stmmac_dma_conf *dma_conf;
6032 	int chan = rx_q->queue_index;
6033 	struct stmmac_priv *priv;
6034 
6035 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6036 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6037 
6038 	if (unlikely(!data)) {
6039 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6040 		return IRQ_NONE;
6041 	}
6042 
6043 	/* Check if adapter is up */
6044 	if (test_bit(STMMAC_DOWN, &priv->state))
6045 		return IRQ_HANDLED;
6046 
6047 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6048 
6049 	return IRQ_HANDLED;
6050 }
6051 
6052 /**
6053  *  stmmac_ioctl - Entry point for the Ioctl
6054  *  @dev: Device pointer.
6055  *  @rq: An IOCTL-specific structure that can contain a pointer to
6056  *  a proprietary structure used to pass information to the driver.
6057  *  @cmd: IOCTL command
6058  *  Description:
6059  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6060  */
6061 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6062 {
6063 	struct stmmac_priv *priv = netdev_priv(dev);
6064 	int ret = -EOPNOTSUPP;
6065 
6066 	if (!netif_running(dev))
6067 		return -EINVAL;
6068 
6069 	switch (cmd) {
6070 	case SIOCGMIIPHY:
6071 	case SIOCGMIIREG:
6072 	case SIOCSMIIREG:
6073 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6074 		break;
6075 	case SIOCSHWTSTAMP:
6076 		ret = stmmac_hwtstamp_set(dev, rq);
6077 		break;
6078 	case SIOCGHWTSTAMP:
6079 		ret = stmmac_hwtstamp_get(dev, rq);
6080 		break;
6081 	default:
6082 		break;
6083 	}
6084 
6085 	return ret;
6086 }
6087 
6088 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6089 				    void *cb_priv)
6090 {
6091 	struct stmmac_priv *priv = cb_priv;
6092 	int ret = -EOPNOTSUPP;
6093 
6094 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6095 		return ret;
6096 
6097 	__stmmac_disable_all_queues(priv);
6098 
6099 	switch (type) {
6100 	case TC_SETUP_CLSU32:
6101 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6102 		break;
6103 	case TC_SETUP_CLSFLOWER:
6104 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6105 		break;
6106 	default:
6107 		break;
6108 	}
6109 
6110 	stmmac_enable_all_queues(priv);
6111 	return ret;
6112 }
6113 
6114 static LIST_HEAD(stmmac_block_cb_list);
6115 
6116 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6117 			   void *type_data)
6118 {
6119 	struct stmmac_priv *priv = netdev_priv(ndev);
6120 
6121 	switch (type) {
6122 	case TC_QUERY_CAPS:
6123 		return stmmac_tc_query_caps(priv, priv, type_data);
6124 	case TC_SETUP_BLOCK:
6125 		return flow_block_cb_setup_simple(type_data,
6126 						  &stmmac_block_cb_list,
6127 						  stmmac_setup_tc_block_cb,
6128 						  priv, priv, true);
6129 	case TC_SETUP_QDISC_CBS:
6130 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6131 	case TC_SETUP_QDISC_TAPRIO:
6132 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6133 	case TC_SETUP_QDISC_ETF:
6134 		return stmmac_tc_setup_etf(priv, priv, type_data);
6135 	default:
6136 		return -EOPNOTSUPP;
6137 	}
6138 }
6139 
6140 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6141 			       struct net_device *sb_dev)
6142 {
6143 	int gso = skb_shinfo(skb)->gso_type;
6144 
6145 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6146 		/*
6147 		 * There is no way to determine the number of TSO/USO
6148 		 * capable Queues. Let's always use Queue 0
6149 		 * because if TSO/USO is supported then at least this
6150 		 * one will be capable.
6151 		 */
6152 		return 0;
6153 	}
6154 
6155 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6156 }
6157 
6158 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6159 {
6160 	struct stmmac_priv *priv = netdev_priv(ndev);
6161 	int ret = 0;
6162 
6163 	ret = pm_runtime_resume_and_get(priv->device);
6164 	if (ret < 0)
6165 		return ret;
6166 
6167 	ret = eth_mac_addr(ndev, addr);
6168 	if (ret)
6169 		goto set_mac_error;
6170 
6171 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6172 
6173 set_mac_error:
6174 	pm_runtime_put(priv->device);
6175 
6176 	return ret;
6177 }
6178 
6179 #ifdef CONFIG_DEBUG_FS
6180 static struct dentry *stmmac_fs_dir;
6181 
6182 static void sysfs_display_ring(void *head, int size, int extend_desc,
6183 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6184 {
6185 	int i;
6186 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6187 	struct dma_desc *p = (struct dma_desc *)head;
6188 	dma_addr_t dma_addr;
6189 
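	/* Dump each descriptor as its four little-endian words together with
	 * its DMA address.
	 */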
6190 	for (i = 0; i < size; i++) {
6191 		if (extend_desc) {
6192 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6193 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6194 				   i, &dma_addr,
6195 				   le32_to_cpu(ep->basic.des0),
6196 				   le32_to_cpu(ep->basic.des1),
6197 				   le32_to_cpu(ep->basic.des2),
6198 				   le32_to_cpu(ep->basic.des3));
6199 			ep++;
6200 		} else {
6201 			dma_addr = dma_phy_addr + i * sizeof(*p);
6202 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6203 				   i, &dma_addr,
6204 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6205 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6206 			p++;
6207 		}
6208 		seq_printf(seq, "\n");
6209 	}
6210 }
6211 
6212 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6213 {
6214 	struct net_device *dev = seq->private;
6215 	struct stmmac_priv *priv = netdev_priv(dev);
6216 	u32 rx_count = priv->plat->rx_queues_to_use;
6217 	u32 tx_count = priv->plat->tx_queues_to_use;
6218 	u32 queue;
6219 
6220 	if ((dev->flags & IFF_UP) == 0)
6221 		return 0;
6222 
6223 	for (queue = 0; queue < rx_count; queue++) {
6224 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6225 
6226 		seq_printf(seq, "RX Queue %d:\n", queue);
6227 
6228 		if (priv->extend_desc) {
6229 			seq_printf(seq, "Extended descriptor ring:\n");
6230 			sysfs_display_ring((void *)rx_q->dma_erx,
6231 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6232 		} else {
6233 			seq_printf(seq, "Descriptor ring:\n");
6234 			sysfs_display_ring((void *)rx_q->dma_rx,
6235 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6236 		}
6237 	}
6238 
6239 	for (queue = 0; queue < tx_count; queue++) {
6240 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6241 
6242 		seq_printf(seq, "TX Queue %d:\n", queue);
6243 
6244 		if (priv->extend_desc) {
6245 			seq_printf(seq, "Extended descriptor ring:\n");
6246 			sysfs_display_ring((void *)tx_q->dma_etx,
6247 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6248 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6249 			seq_printf(seq, "Descriptor ring:\n");
6250 			sysfs_display_ring((void *)tx_q->dma_tx,
6251 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6252 		}
6253 	}
6254 
6255 	return 0;
6256 }
6257 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6258 
6259 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6260 {
6261 	static const char * const dwxgmac_timestamp_source[] = {
6262 		"None",
6263 		"Internal",
6264 		"External",
6265 		"Both",
6266 	};
6267 	static const char * const dwxgmac_safety_feature_desc[] = {
6268 		"No",
6269 		"All Safety Features with ECC and Parity",
6270 		"All Safety Features without ECC or Parity",
6271 		"All Safety Features with Parity Only",
6272 		"ECC Only",
6273 		"UNDEFINED",
6274 		"UNDEFINED",
6275 		"UNDEFINED",
6276 	};
6277 	struct net_device *dev = seq->private;
6278 	struct stmmac_priv *priv = netdev_priv(dev);
6279 
6280 	if (!priv->hw_cap_support) {
6281 		seq_printf(seq, "DMA HW features not supported\n");
6282 		return 0;
6283 	}
6284 
6285 	seq_printf(seq, "==============================\n");
6286 	seq_printf(seq, "\tDMA HW features\n");
6287 	seq_printf(seq, "==============================\n");
6288 
6289 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6290 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6291 	seq_printf(seq, "\t1000 Mbps: %s\n",
6292 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6293 	seq_printf(seq, "\tHalf duplex: %s\n",
6294 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6295 	if (priv->plat->has_xgmac) {
6296 		seq_printf(seq,
6297 			   "\tNumber of Additional MAC address registers: %d\n",
6298 			   priv->dma_cap.multi_addr);
6299 	} else {
6300 		seq_printf(seq, "\tHash Filter: %s\n",
6301 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6302 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6303 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6304 	}
6305 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6306 		   (priv->dma_cap.pcs) ? "Y" : "N");
6307 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6308 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6309 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6310 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6311 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6312 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6313 	seq_printf(seq, "\tRMON module: %s\n",
6314 		   (priv->dma_cap.rmon) ? "Y" : "N");
6315 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6316 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6317 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6318 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6319 	if (priv->plat->has_xgmac)
6320 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6321 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6322 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6323 		   (priv->dma_cap.eee) ? "Y" : "N");
6324 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6325 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6326 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6327 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6328 	    priv->plat->has_xgmac) {
6329 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6330 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6331 	} else {
6332 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6333 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6334 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6335 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6336 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6337 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6338 	}
6339 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6340 		   priv->dma_cap.number_rx_channel);
6341 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6342 		   priv->dma_cap.number_tx_channel);
6343 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6344 		   priv->dma_cap.number_rx_queues);
6345 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6346 		   priv->dma_cap.number_tx_queues);
6347 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6348 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6349 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6350 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6351 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6352 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6353 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6354 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6355 		   priv->dma_cap.pps_out_num);
6356 	seq_printf(seq, "\tSafety Features: %s\n",
6357 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6358 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6359 		   priv->dma_cap.frpsel ? "Y" : "N");
6360 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6361 		   priv->dma_cap.host_dma_width);
6362 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6363 		   priv->dma_cap.rssen ? "Y" : "N");
6364 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6365 		   priv->dma_cap.vlhash ? "Y" : "N");
6366 	seq_printf(seq, "\tSplit Header: %s\n",
6367 		   priv->dma_cap.sphen ? "Y" : "N");
6368 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6369 		   priv->dma_cap.vlins ? "Y" : "N");
6370 	seq_printf(seq, "\tDouble VLAN: %s\n",
6371 		   priv->dma_cap.dvlan ? "Y" : "N");
6372 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6373 		   priv->dma_cap.l3l4fnum);
6374 	seq_printf(seq, "\tARP Offloading: %s\n",
6375 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6376 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6377 		   priv->dma_cap.estsel ? "Y" : "N");
6378 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6379 		   priv->dma_cap.fpesel ? "Y" : "N");
6380 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6381 		   priv->dma_cap.tbssel ? "Y" : "N");
6382 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6383 		   priv->dma_cap.tbs_ch_num);
6384 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6385 		   priv->dma_cap.sgfsel ? "Y" : "N");
6386 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6387 		   BIT(priv->dma_cap.ttsfd) >> 1);
6388 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6389 		   priv->dma_cap.numtc);
6390 	seq_printf(seq, "\tDCB Feature: %s\n",
6391 		   priv->dma_cap.dcben ? "Y" : "N");
6392 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6393 		   priv->dma_cap.advthword ? "Y" : "N");
6394 	seq_printf(seq, "\tPTP Offload: %s\n",
6395 		   priv->dma_cap.ptoen ? "Y" : "N");
6396 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6397 		   priv->dma_cap.osten ? "Y" : "N");
6398 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6399 		   priv->dma_cap.pfcen ? "Y" : "N");
6400 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6401 		   BIT(priv->dma_cap.frpes) << 6);
6402 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6403 		   BIT(priv->dma_cap.frpbs) << 6);
6404 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6405 		   priv->dma_cap.frppipe_num);
6406 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6407 		   priv->dma_cap.nrvf_num ?
6408 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6409 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6410 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6411 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6412 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6413 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6414 		   priv->dma_cap.cbtisel ? "Y" : "N");
6415 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6416 		   priv->dma_cap.aux_snapshot_n);
6417 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6418 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6419 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6420 		   priv->dma_cap.edma ? "Y" : "N");
6421 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6422 		   priv->dma_cap.ediffc ? "Y" : "N");
6423 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6424 		   priv->dma_cap.vxn ? "Y" : "N");
6425 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6426 		   priv->dma_cap.dbgmem ? "Y" : "N");
6427 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6428 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6429 	return 0;
6430 }
6431 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6432 
6433 /* Use network device events to rename debugfs file entries.
6434  */
6435 static int stmmac_device_event(struct notifier_block *unused,
6436 			       unsigned long event, void *ptr)
6437 {
6438 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6439 	struct stmmac_priv *priv = netdev_priv(dev);
6440 
6441 	if (dev->netdev_ops != &stmmac_netdev_ops)
6442 		goto done;
6443 
6444 	switch (event) {
6445 	case NETDEV_CHANGENAME:
6446 		if (priv->dbgfs_dir)
6447 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6448 							 priv->dbgfs_dir,
6449 							 stmmac_fs_dir,
6450 							 dev->name);
6451 		break;
6452 	}
6453 done:
6454 	return NOTIFY_DONE;
6455 }
6456 
6457 static struct notifier_block stmmac_notifier = {
6458 	.notifier_call = stmmac_device_event,
6459 };
6460 
6461 static void stmmac_init_fs(struct net_device *dev)
6462 {
6463 	struct stmmac_priv *priv = netdev_priv(dev);
6464 
6465 	rtnl_lock();
6466 
6467 	/* Create per netdev entries */
6468 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6469 
6470 	/* Entry to report DMA RX/TX rings */
6471 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6472 			    &stmmac_rings_status_fops);
6473 
6474 	/* Entry to report the DMA HW features */
6475 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6476 			    &stmmac_dma_cap_fops);
6477 
6478 	rtnl_unlock();
6479 }
6480 
6481 static void stmmac_exit_fs(struct net_device *dev)
6482 {
6483 	struct stmmac_priv *priv = netdev_priv(dev);
6484 
6485 	debugfs_remove_recursive(priv->dbgfs_dir);
6486 }
6487 #endif /* CONFIG_DEBUG_FS */
6488 
6489 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6490 {
6491 	unsigned char *data = (unsigned char *)&vid_le;
6492 	unsigned char data_byte = 0;
6493 	u32 crc = ~0x0;
6494 	u32 temp = 0;
6495 	int i, bits;
6496 
6497 	bits = get_bitmask_order(VLAN_VID_MASK);
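	/* Bitwise CRC-32 (reflected polynomial 0xedb88320) over the 12 VID
	 * bits, LSB first; the caller inverts and bit-reverses the result
	 * and keeps the upper bits as the hash table index.
	 */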
6498 	for (i = 0; i < bits; i++) {
6499 		if ((i % 8) == 0)
6500 			data_byte = data[i / 8];
6501 
6502 		temp = ((crc & 1) ^ data_byte) & 1;
6503 		crc >>= 1;
6504 		data_byte >>= 1;
6505 
6506 		if (temp)
6507 			crc ^= 0xedb88320;
6508 	}
6509 
6510 	return crc;
6511 }
6512 
6513 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6514 {
6515 	u32 crc, hash = 0;
6516 	__le16 pmatch = 0;
6517 	int count = 0;
6518 	u16 vid = 0;
6519 
6520 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6521 		__le16 vid_le = cpu_to_le16(vid);
6522 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6523 		hash |= (1 << crc);
6524 		count++;
6525 	}
6526 
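	/* Without VLAN hash filtering, fall back to a single perfect-match
	 * entry; more than one VID (besides VID 0, which always passes the
	 * filter) cannot be supported.
	 */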
6527 	if (!priv->dma_cap.vlhash) {
6528 		if (count > 2) /* VID = 0 always passes filter */
6529 			return -EOPNOTSUPP;
6530 
6531 		pmatch = cpu_to_le16(vid);
6532 		hash = 0;
6533 	}
6534 
6535 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6536 }
6537 
6538 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6539 {
6540 	struct stmmac_priv *priv = netdev_priv(ndev);
6541 	bool is_double = false;
6542 	int ret;
6543 
6544 	ret = pm_runtime_resume_and_get(priv->device);
6545 	if (ret < 0)
6546 		return ret;
6547 
6548 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6549 		is_double = true;
6550 
6551 	set_bit(vid, priv->active_vlans);
6552 	ret = stmmac_vlan_update(priv, is_double);
6553 	if (ret) {
6554 		clear_bit(vid, priv->active_vlans);
6555 		goto err_pm_put;
6556 	}
6557 
6558 	if (priv->hw->num_vlan) {
6559 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6560 		if (ret)
6561 			goto err_pm_put;
6562 	}
6563 err_pm_put:
6564 	pm_runtime_put(priv->device);
6565 
6566 	return ret;
6567 }
6568 
6569 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6570 {
6571 	struct stmmac_priv *priv = netdev_priv(ndev);
6572 	bool is_double = false;
6573 	int ret;
6574 
6575 	ret = pm_runtime_resume_and_get(priv->device);
6576 	if (ret < 0)
6577 		return ret;
6578 
6579 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6580 		is_double = true;
6581 
6582 	clear_bit(vid, priv->active_vlans);
6583 
6584 	if (priv->hw->num_vlan) {
6585 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6586 		if (ret)
6587 			goto del_vlan_error;
6588 	}
6589 
6590 	ret = stmmac_vlan_update(priv, is_double);
6591 
6592 del_vlan_error:
6593 	pm_runtime_put(priv->device);
6594 
6595 	return ret;
6596 }
6597 
6598 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6599 {
6600 	struct stmmac_priv *priv = netdev_priv(dev);
6601 
6602 	switch (bpf->command) {
6603 	case XDP_SETUP_PROG:
6604 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6605 	case XDP_SETUP_XSK_POOL:
6606 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6607 					     bpf->xsk.queue_id);
6608 	default:
6609 		return -EOPNOTSUPP;
6610 	}
6611 }
6612 
6613 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6614 			   struct xdp_frame **frames, u32 flags)
6615 {
6616 	struct stmmac_priv *priv = netdev_priv(dev);
6617 	int cpu = smp_processor_id();
6618 	struct netdev_queue *nq;
6619 	int i, nxmit = 0;
6620 	int queue;
6621 
6622 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6623 		return -ENETDOWN;
6624 
6625 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6626 		return -EINVAL;
6627 
6628 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6629 	nq = netdev_get_tx_queue(priv->dev, queue);
6630 
6631 	__netif_tx_lock(nq, cpu);
6632 	/* Avoid a TX time-out, as we are sharing the queue with the slow path */
6633 	txq_trans_cond_update(nq);
6634 
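	/* Queue frames until the TX ring runs out of resources; the number
	 * actually queued is returned so the XDP core can free the rest.
	 */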
6635 	for (i = 0; i < num_frames; i++) {
6636 		int res;
6637 
6638 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6639 		if (res == STMMAC_XDP_CONSUMED)
6640 			break;
6641 
6642 		nxmit++;
6643 	}
6644 
6645 	if (flags & XDP_XMIT_FLUSH) {
6646 		stmmac_flush_tx_descriptors(priv, queue);
6647 		stmmac_tx_timer_arm(priv, queue);
6648 	}
6649 
6650 	__netif_tx_unlock(nq);
6651 
6652 	return nxmit;
6653 }
6654 
6655 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6656 {
6657 	struct stmmac_channel *ch = &priv->channel[queue];
6658 	unsigned long flags;
6659 
6660 	spin_lock_irqsave(&ch->lock, flags);
6661 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6662 	spin_unlock_irqrestore(&ch->lock, flags);
6663 
6664 	stmmac_stop_rx_dma(priv, queue);
6665 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6666 }
6667 
6668 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6669 {
6670 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6671 	struct stmmac_channel *ch = &priv->channel[queue];
6672 	unsigned long flags;
6673 	u32 buf_size;
6674 	int ret;
6675 
6676 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6677 	if (ret) {
6678 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6679 		return;
6680 	}
6681 
6682 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6683 	if (ret) {
6684 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6685 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6686 		return;
6687 	}
6688 
6689 	stmmac_reset_rx_queue(priv, queue);
6690 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6691 
6692 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6693 			    rx_q->dma_rx_phy, rx_q->queue_index);
6694 
6695 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6696 			     sizeof(struct dma_desc));
6697 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6698 			       rx_q->rx_tail_addr, rx_q->queue_index);
6699 
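	/* Program the RX DMA buffer size: use the XSK pool frame size when a
	 * pool is attached, otherwise the default DMA buffer size.
	 */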
6700 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6701 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6702 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6703 				      buf_size,
6704 				      rx_q->queue_index);
6705 	} else {
6706 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6707 				      priv->dma_conf.dma_buf_sz,
6708 				      rx_q->queue_index);
6709 	}
6710 
6711 	stmmac_start_rx_dma(priv, queue);
6712 
6713 	spin_lock_irqsave(&ch->lock, flags);
6714 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6715 	spin_unlock_irqrestore(&ch->lock, flags);
6716 }
6717 
6718 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6719 {
6720 	struct stmmac_channel *ch = &priv->channel[queue];
6721 	unsigned long flags;
6722 
6723 	spin_lock_irqsave(&ch->lock, flags);
6724 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6725 	spin_unlock_irqrestore(&ch->lock, flags);
6726 
6727 	stmmac_stop_tx_dma(priv, queue);
6728 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6729 }
6730 
6731 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6732 {
6733 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6734 	struct stmmac_channel *ch = &priv->channel[queue];
6735 	unsigned long flags;
6736 	int ret;
6737 
6738 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6739 	if (ret) {
6740 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6741 		return;
6742 	}
6743 
6744 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6745 	if (ret) {
6746 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6747 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6748 		return;
6749 	}
6750 
6751 	stmmac_reset_tx_queue(priv, queue);
6752 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6753 
6754 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6755 			    tx_q->dma_tx_phy, tx_q->queue_index);
6756 
6757 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6758 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6759 
6760 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6761 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6762 			       tx_q->tx_tail_addr, tx_q->queue_index);
6763 
6764 	stmmac_start_tx_dma(priv, queue);
6765 
6766 	spin_lock_irqsave(&ch->lock, flags);
6767 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6768 	spin_unlock_irqrestore(&ch->lock, flags);
6769 }
6770 
6771 void stmmac_xdp_release(struct net_device *dev)
6772 {
6773 	struct stmmac_priv *priv = netdev_priv(dev);
6774 	u32 chan;
6775 
6776 	/* Ensure tx function is not running */
6777 	netif_tx_disable(dev);
6778 
6779 	/* Disable NAPI process */
6780 	stmmac_disable_all_queues(priv);
6781 
6782 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6783 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6784 
6785 	/* Free the IRQ lines */
6786 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6787 
6788 	/* Stop TX/RX DMA channels */
6789 	stmmac_stop_all_dma(priv);
6790 
6791 	/* Release and free the Rx/Tx resources */
6792 	free_dma_desc_resources(priv, &priv->dma_conf);
6793 
6794 	/* Disable the MAC Rx/Tx */
6795 	stmmac_mac_set(priv, priv->ioaddr, false);
6796 
6797 	/* set trans_start so we don't get spurious
6798 	 * watchdogs during reset
6799 	 */
6800 	netif_trans_update(dev);
6801 	netif_carrier_off(dev);
6802 }
6803 
6804 int stmmac_xdp_open(struct net_device *dev)
6805 {
6806 	struct stmmac_priv *priv = netdev_priv(dev);
6807 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6808 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6809 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6810 	struct stmmac_rx_queue *rx_q;
6811 	struct stmmac_tx_queue *tx_q;
6812 	u32 buf_size;
6813 	bool sph_en;
6814 	u32 chan;
6815 	int ret;
6816 
6817 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6818 	if (ret < 0) {
6819 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6820 			   __func__);
6821 		goto dma_desc_error;
6822 	}
6823 
6824 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6825 	if (ret < 0) {
6826 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6827 			   __func__);
6828 		goto init_error;
6829 	}
6830 
6831 	stmmac_reset_queues_param(priv);
6832 
6833 	/* DMA CSR Channel configuration */
6834 	for (chan = 0; chan < dma_csr_ch; chan++) {
6835 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6836 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6837 	}
6838 
6839 	/* Adjust Split header */
6840 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6841 
6842 	/* DMA RX Channel Configuration */
6843 	for (chan = 0; chan < rx_cnt; chan++) {
6844 		rx_q = &priv->dma_conf.rx_queue[chan];
6845 
6846 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6847 				    rx_q->dma_rx_phy, chan);
6848 
6849 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6850 				     (rx_q->buf_alloc_num *
6851 				      sizeof(struct dma_desc));
6852 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6853 				       rx_q->rx_tail_addr, chan);
6854 
6855 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6856 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6857 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6858 					      buf_size,
6859 					      rx_q->queue_index);
6860 		} else {
6861 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6862 					      priv->dma_conf.dma_buf_sz,
6863 					      rx_q->queue_index);
6864 		}
6865 
6866 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6867 	}
6868 
6869 	/* DMA TX Channel Configuration */
6870 	for (chan = 0; chan < tx_cnt; chan++) {
6871 		tx_q = &priv->dma_conf.tx_queue[chan];
6872 
6873 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6874 				    tx_q->dma_tx_phy, chan);
6875 
6876 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6877 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6878 				       tx_q->tx_tail_addr, chan);
6879 
6880 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6881 		tx_q->txtimer.function = stmmac_tx_timer;
6882 	}
6883 
6884 	/* Enable the MAC Rx/Tx */
6885 	stmmac_mac_set(priv, priv->ioaddr, true);
6886 
6887 	/* Start Rx & Tx DMA Channels */
6888 	stmmac_start_all_dma(priv);
6889 
6890 	ret = stmmac_request_irq(dev);
6891 	if (ret)
6892 		goto irq_error;
6893 
6894 	/* Enable NAPI process */
6895 	stmmac_enable_all_queues(priv);
6896 	netif_carrier_on(dev);
6897 	netif_tx_start_all_queues(dev);
6898 	stmmac_enable_all_dma_irq(priv);
6899 
6900 	return 0;
6901 
6902 irq_error:
6903 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6904 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6905 
6906 	stmmac_hw_teardown(dev);
6907 init_error:
6908 	free_dma_desc_resources(priv, &priv->dma_conf);
6909 dma_desc_error:
6910 	return ret;
6911 }
6912 
6913 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6914 {
6915 	struct stmmac_priv *priv = netdev_priv(dev);
6916 	struct stmmac_rx_queue *rx_q;
6917 	struct stmmac_tx_queue *tx_q;
6918 	struct stmmac_channel *ch;
6919 
6920 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6921 	    !netif_carrier_ok(priv->dev))
6922 		return -ENETDOWN;
6923 
6924 	if (!stmmac_xdp_is_enabled(priv))
6925 		return -EINVAL;
6926 
6927 	if (queue >= priv->plat->rx_queues_to_use ||
6928 	    queue >= priv->plat->tx_queues_to_use)
6929 		return -EINVAL;
6930 
6931 	rx_q = &priv->dma_conf.rx_queue[queue];
6932 	tx_q = &priv->dma_conf.tx_queue[queue];
6933 	ch = &priv->channel[queue];
6934 
6935 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6936 		return -EINVAL;
6937 
6938 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6939 		/* EQoS does not have per-DMA channel SW interrupt,
6940 		 * so we schedule the RX NAPI straight away.
6941 		 */
6942 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6943 			__napi_schedule(&ch->rxtx_napi);
6944 	}
6945 
6946 	return 0;
6947 }
6948 
6949 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6950 {
6951 	struct stmmac_priv *priv = netdev_priv(dev);
6952 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6953 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6954 	unsigned int start;
6955 	int q;
6956 
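	/* Snapshot the per-queue counters under the u64_stats seqcount so
	 * that 64-bit reads are consistent on 32-bit platforms.
	 */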
6957 	for (q = 0; q < tx_cnt; q++) {
6958 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6959 		u64 tx_packets;
6960 		u64 tx_bytes;
6961 
6962 		do {
6963 			start = u64_stats_fetch_begin(&txq_stats->syncp);
6964 			tx_packets = txq_stats->tx_packets;
6965 			tx_bytes   = txq_stats->tx_bytes;
6966 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
6967 
6968 		stats->tx_packets += tx_packets;
6969 		stats->tx_bytes += tx_bytes;
6970 	}
6971 
6972 	for (q = 0; q < rx_cnt; q++) {
6973 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6974 		u64 rx_packets;
6975 		u64 rx_bytes;
6976 
6977 		do {
6978 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
6979 			rx_packets = rxq_stats->rx_packets;
6980 			rx_bytes   = rxq_stats->rx_bytes;
6981 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
6982 
6983 		stats->rx_packets += rx_packets;
6984 		stats->rx_bytes += rx_bytes;
6985 	}
6986 
6987 	stats->rx_dropped = priv->xstats.rx_dropped;
6988 	stats->rx_errors = priv->xstats.rx_errors;
6989 	stats->tx_dropped = priv->xstats.tx_dropped;
6990 	stats->tx_errors = priv->xstats.tx_errors;
6991 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6992 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6993 	stats->rx_length_errors = priv->xstats.rx_length;
6994 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6995 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6996 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6997 }
6998 
6999 static const struct net_device_ops stmmac_netdev_ops = {
7000 	.ndo_open = stmmac_open,
7001 	.ndo_start_xmit = stmmac_xmit,
7002 	.ndo_stop = stmmac_release,
7003 	.ndo_change_mtu = stmmac_change_mtu,
7004 	.ndo_fix_features = stmmac_fix_features,
7005 	.ndo_set_features = stmmac_set_features,
7006 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7007 	.ndo_tx_timeout = stmmac_tx_timeout,
7008 	.ndo_eth_ioctl = stmmac_ioctl,
7009 	.ndo_get_stats64 = stmmac_get_stats64,
7010 	.ndo_setup_tc = stmmac_setup_tc,
7011 	.ndo_select_queue = stmmac_select_queue,
7012 	.ndo_set_mac_address = stmmac_set_mac_address,
7013 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7014 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7015 	.ndo_bpf = stmmac_bpf,
7016 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7017 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7018 };
7019 
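/**
 * stmmac_reset_subtask - restart the interface after a requested reset
 * @priv: driver private structure
 * Description: if STMMAC_RESET_REQUESTED is set and the interface is not
 * already down, close and re-open the device under the RTNL lock in order
 * to recover from a fatal error.
 */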
7020 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7021 {
7022 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7023 		return;
7024 	if (test_bit(STMMAC_DOWN, &priv->state))
7025 		return;
7026 
7027 	netdev_err(priv->dev, "Reset adapter.\n");
7028 
7029 	rtnl_lock();
7030 	netif_trans_update(priv->dev);
7031 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7032 		usleep_range(1000, 2000);
7033 
7034 	set_bit(STMMAC_DOWN, &priv->state);
7035 	dev_close(priv->dev);
7036 	dev_open(priv->dev, NULL);
7037 	clear_bit(STMMAC_DOWN, &priv->state);
7038 	clear_bit(STMMAC_RESETING, &priv->state);
7039 	rtnl_unlock();
7040 }
7041 
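/**
 * stmmac_service_task - deferred work handler
 * @work: work_struct embedded in the driver private structure
 * Description: run the reset subtask and clear STMMAC_SERVICE_SCHED so the
 * service task can be scheduled again.
 */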
7042 static void stmmac_service_task(struct work_struct *work)
7043 {
7044 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7045 			service_task);
7046 
7047 	stmmac_reset_subtask(priv);
7048 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7049 }
7050 
7051 /**
7052  *  stmmac_hw_init - Init the MAC device
7053  *  @priv: driver private structure
7054  *  Description: this function is to configure the MAC device according to
7055  *  some platform parameters or the HW capability register. It prepares the
7056  *  driver to use either ring or chain modes and to setup either enhanced or
7057  *  normal descriptors.
7058  */
7059 static int stmmac_hw_init(struct stmmac_priv *priv)
7060 {
7061 	int ret;
7062 
7063 	/* dwmac-sun8i only works in chain mode */
7064 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7065 		chain_mode = 1;
7066 	priv->chain_mode = chain_mode;
7067 
7068 	/* Initialize HW Interface */
7069 	ret = stmmac_hwif_init(priv);
7070 	if (ret)
7071 		return ret;
7072 
7073 	/* Get the HW capability (GMAC cores newer than 3.50a) */
7074 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7075 	if (priv->hw_cap_support) {
7076 		dev_info(priv->device, "DMA HW capability register supported\n");
7077 
7078 		/* We can override some gmac/dma configuration fields that
7079 		 * are passed through the platform (e.g. enh_desc, tx_coe)
7080 		 * with the values from the HW capability register
7081 		 * (if supported).
7082 		 */
7083 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7084 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7085 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7086 		priv->hw->pmt = priv->plat->pmt;
7087 		if (priv->dma_cap.hash_tb_sz) {
7088 			priv->hw->multicast_filter_bins =
7089 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7090 			priv->hw->mcast_bits_log2 =
7091 					ilog2(priv->hw->multicast_filter_bins);
7092 		}
7093 
7094 		/* TXCOE doesn't work in thresh DMA mode */
7095 		if (priv->plat->force_thresh_dma_mode)
7096 			priv->plat->tx_coe = 0;
7097 		else
7098 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7099 
7100 		/* In case of GMAC4 rx_coe is from HW cap register. */
7101 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7102 
7103 		if (priv->dma_cap.rx_coe_type2)
7104 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7105 		else if (priv->dma_cap.rx_coe_type1)
7106 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7107 
7108 	} else {
7109 		dev_info(priv->device, "No HW DMA feature register supported\n");
7110 	}
7111 
7112 	if (priv->plat->rx_coe) {
7113 		priv->hw->rx_csum = priv->plat->rx_coe;
7114 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7115 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7116 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7117 	}
7118 	if (priv->plat->tx_coe)
7119 		dev_info(priv->device, "TX Checksum insertion supported\n");
7120 
7121 	if (priv->plat->pmt) {
7122 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7123 		device_set_wakeup_capable(priv->device, 1);
7124 	}
7125 
7126 	if (priv->dma_cap.tsoen)
7127 		dev_info(priv->device, "TSO supported\n");
7128 
7129 	priv->hw->vlan_fail_q_en =
7130 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7131 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7132 
7133 	/* Run HW quirks, if any */
7134 	if (priv->hwif_quirks) {
7135 		ret = priv->hwif_quirks(priv);
7136 		if (ret)
7137 			return ret;
7138 	}
7139 
7140 	/* Rx Watchdog is available in cores newer than 3.40. In some
7141 	 * cases, for example on buggy HW, this feature has to be
7142 	 * disabled; this can be done by passing the riwt_off field
7143 	 * from the platform.
7144 	 */
7145 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7146 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7147 		priv->use_riwt = 1;
7148 		dev_info(priv->device,
7149 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7150 	}
7151 
7152 	return 0;
7153 }
7154 
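/**
 * stmmac_napi_add - register the NAPI contexts for every channel
 * @dev: net device structure
 * Description: for each channel, register an RX NAPI and a TX NAPI where
 * the corresponding queues exist and, when the channel has both an RX and
 * a TX queue, a combined RX/TX NAPI used by the XSK zero-copy path.
 */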
7155 static void stmmac_napi_add(struct net_device *dev)
7156 {
7157 	struct stmmac_priv *priv = netdev_priv(dev);
7158 	u32 queue, maxq;
7159 
7160 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7161 
7162 	for (queue = 0; queue < maxq; queue++) {
7163 		struct stmmac_channel *ch = &priv->channel[queue];
7164 
7165 		ch->priv_data = priv;
7166 		ch->index = queue;
7167 		spin_lock_init(&ch->lock);
7168 
7169 		if (queue < priv->plat->rx_queues_to_use) {
7170 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7171 		}
7172 		if (queue < priv->plat->tx_queues_to_use) {
7173 			netif_napi_add_tx(dev, &ch->tx_napi,
7174 					  stmmac_napi_poll_tx);
7175 		}
7176 		if (queue < priv->plat->rx_queues_to_use &&
7177 		    queue < priv->plat->tx_queues_to_use) {
7178 			netif_napi_add(dev, &ch->rxtx_napi,
7179 				       stmmac_napi_poll_rxtx);
7180 		}
7181 	}
7182 }
7183 
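/**
 * stmmac_napi_del - unregister the NAPI contexts added by stmmac_napi_add()
 * @dev: net device structure
 */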
7184 static void stmmac_napi_del(struct net_device *dev)
7185 {
7186 	struct stmmac_priv *priv = netdev_priv(dev);
7187 	u32 queue, maxq;
7188 
7189 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7190 
7191 	for (queue = 0; queue < maxq; queue++) {
7192 		struct stmmac_channel *ch = &priv->channel[queue];
7193 
7194 		if (queue < priv->plat->rx_queues_to_use)
7195 			netif_napi_del(&ch->rx_napi);
7196 		if (queue < priv->plat->tx_queues_to_use)
7197 			netif_napi_del(&ch->tx_napi);
7198 		if (queue < priv->plat->rx_queues_to_use &&
7199 		    queue < priv->plat->tx_queues_to_use) {
7200 			netif_napi_del(&ch->rxtx_napi);
7201 		}
7202 	}
7203 }
7204 
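/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: net device structure
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: stop the interface if it is running, re-register the NAPI
 * contexts for the new counts, refresh the default RSS indirection table
 * (unless it was configured by the user) and restart the interface.
 */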
7205 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7206 {
7207 	struct stmmac_priv *priv = netdev_priv(dev);
7208 	int ret = 0, i;
7209 
7210 	if (netif_running(dev))
7211 		stmmac_release(dev);
7212 
7213 	stmmac_napi_del(dev);
7214 
7215 	priv->plat->rx_queues_to_use = rx_cnt;
7216 	priv->plat->tx_queues_to_use = tx_cnt;
7217 	if (!netif_is_rxfh_configured(dev))
7218 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7219 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7220 									rx_cnt);
7221 
7222 	stmmac_set_half_duplex(priv);
7223 	stmmac_napi_add(dev);
7224 
7225 	if (netif_running(dev))
7226 		ret = stmmac_open(dev);
7227 
7228 	return ret;
7229 }
7230 
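/**
 * stmmac_reinit_ringparam - change the RX/TX descriptor ring sizes
 * @dev: net device structure
 * @rx_size: new number of RX descriptors per queue
 * @tx_size: new number of TX descriptors per queue
 * Description: stop the interface if it is running, update the DMA
 * configuration and restart it so the new ring sizes take effect.
 */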
7231 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7232 {
7233 	struct stmmac_priv *priv = netdev_priv(dev);
7234 	int ret = 0;
7235 
7236 	if (netif_running(dev))
7237 		stmmac_release(dev);
7238 
7239 	priv->dma_conf.dma_rx_size = rx_size;
7240 	priv->dma_conf.dma_tx_size = tx_size;
7241 
7242 	if (netif_running(dev))
7243 		ret = stmmac_open(dev);
7244 
7245 	return ret;
7246 }
7247 
7248 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
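/**
 * stmmac_fpe_lp_task - Frame Preemption link-partner handshake work
 * @work: work_struct embedded in the driver private structure
 * Description: poll the local/link-partner FPE states for up to 20
 * iterations, 500 ms apart. While the local side is capable or entering
 * ON and the link partner has not reached ON, keep sending verify
 * mPackets; once both sides report ENTERING_ON, program FPE in hardware
 * and mark both stations ON.
 */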
7249 static void stmmac_fpe_lp_task(struct work_struct *work)
7250 {
7251 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7252 						fpe_task);
7253 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7254 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7255 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7256 	bool *hs_enable = &fpe_cfg->hs_enable;
7257 	bool *enable = &fpe_cfg->enable;
7258 	int retries = 20;
7259 
7260 	while (retries-- > 0) {
7261 		/* Bail out immediately if FPE handshake is OFF */
7262 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7263 			break;
7264 
7265 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7266 		    *lp_state == FPE_STATE_ENTERING_ON) {
7267 			stmmac_fpe_configure(priv, priv->ioaddr,
7268 					     fpe_cfg,
7269 					     priv->plat->tx_queues_to_use,
7270 					     priv->plat->rx_queues_to_use,
7271 					     *enable);
7272 
7273 			netdev_info(priv->dev, "configured FPE\n");
7274 
7275 			*lo_state = FPE_STATE_ON;
7276 			*lp_state = FPE_STATE_ON;
7277 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7278 			break;
7279 		}
7280 
7281 		if ((*lo_state == FPE_STATE_CAPABLE ||
7282 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7283 		     *lp_state != FPE_STATE_ON) {
7284 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7285 				    *lo_state, *lp_state);
7286 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7287 						fpe_cfg,
7288 						MPACKET_VERIFY);
7289 		}
7290 		/* Sleep then retry */
7291 		msleep(500);
7292 	}
7293 
7294 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7295 }
7296 
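/**
 * stmmac_fpe_handshake - start or stop the FPE verification handshake
 * @priv: driver private structure
 * @enable: true to start the handshake, false to tear it down
 * Description: if the handshake state changes, either send a verify
 * mPacket to the link partner (enable) or reset the local and
 * link-partner FPE states to OFF (disable), then record the new state.
 */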
7297 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7298 {
7299 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7300 		if (enable) {
7301 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7302 						priv->plat->fpe_cfg,
7303 						MPACKET_VERIFY);
7304 		} else {
7305 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7306 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7307 		}
7308 
7309 		priv->plat->fpe_cfg->hs_enable = enable;
7310 	}
7311 }
7312 
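/**
 * stmmac_xdp_rx_timestamp - XDP metadata hook for RX hardware timestamps
 * @_ctx: XDP metadata context (wraps the stmmac_xdp_buff)
 * @timestamp: where to store the RX hardware timestamp
 * Description: extract the RX timestamp from the descriptor (the context
 * descriptor on GMAC4/XGMAC), apply the CDC error adjustment and return 0,
 * or -ENODATA if RX timestamping is disabled or no timestamp is available.
 */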
7313 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7314 {
7315 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7316 	struct dma_desc *desc_contains_ts = ctx->desc;
7317 	struct stmmac_priv *priv = ctx->priv;
7318 	struct dma_desc *ndesc = ctx->ndesc;
7319 	struct dma_desc *desc = ctx->desc;
7320 	u64 ns = 0;
7321 
7322 	if (!priv->hwts_rx_en)
7323 		return -ENODATA;
7324 
7325 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7326 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7327 		desc_contains_ts = ndesc;
7328 
7329 	/* Check if timestamp is available */
7330 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7331 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7332 		ns -= priv->plat->cdc_error_adj;
7333 		*timestamp = ns_to_ktime(ns);
7334 		return 0;
7335 	}
7336 
7337 	return -ENODATA;
7338 }
7339 
7340 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7341 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7342 };
7343 
7344 /**
7345  * stmmac_dvr_probe
7346  * @device: device pointer
7347  * @plat_dat: platform data pointer
7348  * @res: stmmac resource pointer
7349  * Description: this is the main probe function; it allocates the
7350  * net_device (alloc_etherdev) and the driver private structure.
7351  * Return:
7352  * 0 on success, otherwise a negative errno.
7353  */
7354 int stmmac_dvr_probe(struct device *device,
7355 		     struct plat_stmmacenet_data *plat_dat,
7356 		     struct stmmac_resources *res)
7357 {
7358 	struct net_device *ndev = NULL;
7359 	struct stmmac_priv *priv;
7360 	u32 rxq;
7361 	int i, ret = 0;
7362 
7363 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7364 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7365 	if (!ndev)
7366 		return -ENOMEM;
7367 
7368 	SET_NETDEV_DEV(ndev, device);
7369 
7370 	priv = netdev_priv(ndev);
7371 	priv->device = device;
7372 	priv->dev = ndev;
7373 
7374 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7375 		u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
7376 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7377 		u64_stats_init(&priv->xstats.txq_stats[i].syncp);
7378 
7379 	stmmac_set_ethtool_ops(ndev);
7380 	priv->pause = pause;
7381 	priv->plat = plat_dat;
7382 	priv->ioaddr = res->addr;
7383 	priv->dev->base_addr = (unsigned long)res->addr;
7384 	priv->plat->dma_cfg->multi_msi_en =
7385 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7386 
7387 	priv->dev->irq = res->irq;
7388 	priv->wol_irq = res->wol_irq;
7389 	priv->lpi_irq = res->lpi_irq;
7390 	priv->sfty_ce_irq = res->sfty_ce_irq;
7391 	priv->sfty_ue_irq = res->sfty_ue_irq;
7392 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7393 		priv->rx_irq[i] = res->rx_irq[i];
7394 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7395 		priv->tx_irq[i] = res->tx_irq[i];
7396 
7397 	if (!is_zero_ether_addr(res->mac))
7398 		eth_hw_addr_set(priv->dev, res->mac);
7399 
7400 	dev_set_drvdata(device, priv->dev);
7401 
7402 	/* Verify driver arguments */
7403 	stmmac_verify_args();
7404 
7405 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7406 	if (!priv->af_xdp_zc_qps)
7407 		return -ENOMEM;
7408 
7409 	/* Allocate workqueue */
7410 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7411 	if (!priv->wq) {
7412 		dev_err(priv->device, "failed to create workqueue\n");
7413 		ret = -ENOMEM;
7414 		goto error_wq_init;
7415 	}
7416 
7417 	INIT_WORK(&priv->service_task, stmmac_service_task);
7418 
7419 	/* Initialize Link Partner FPE workqueue */
7420 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7421 
7422 	/* Override with kernel parameters if supplied XXX CRS XXX
7423 	 * this needs to have multiple instances
7424 	 */
7425 	if ((phyaddr >= 0) && (phyaddr <= 31))
7426 		priv->plat->phy_addr = phyaddr;
7427 
7428 	if (priv->plat->stmmac_rst) {
7429 		ret = reset_control_assert(priv->plat->stmmac_rst);
7430 		reset_control_deassert(priv->plat->stmmac_rst);
7431 		/* Some reset controllers provide only a reset callback
7432 		 * instead of an assert + deassert callback pair.
7433 		 */
7434 		if (ret == -ENOTSUPP)
7435 			reset_control_reset(priv->plat->stmmac_rst);
7436 	}
7437 
7438 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7439 	if (ret == -ENOTSUPP)
7440 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7441 			ERR_PTR(ret));
7442 
7443 	/* Init MAC and get the capabilities */
7444 	ret = stmmac_hw_init(priv);
7445 	if (ret)
7446 		goto error_hw_init;
7447 
7448 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7449 	 */
7450 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7451 		priv->plat->dma_cfg->dche = false;
7452 
7453 	stmmac_check_ether_addr(priv);
7454 
7455 	ndev->netdev_ops = &stmmac_netdev_ops;
7456 
7457 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7458 
7459 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7460 			    NETIF_F_RXCSUM;
7461 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7462 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7463 
7464 	ret = stmmac_tc_init(priv, priv);
7465 	if (!ret) {
7466 		ndev->hw_features |= NETIF_F_HW_TC;
7467 	}
7468 
7469 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7470 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7471 		if (priv->plat->has_gmac4)
7472 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7473 		priv->tso = true;
7474 		dev_info(priv->device, "TSO feature enabled\n");
7475 	}
7476 
7477 	if (priv->dma_cap.sphen &&
7478 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7479 		ndev->hw_features |= NETIF_F_GRO;
7480 		priv->sph_cap = true;
7481 		priv->sph = priv->sph_cap;
7482 		dev_info(priv->device, "SPH feature enabled\n");
7483 	}
7484 
7485 	/* Ideally our host DMA address width is the same as for the
7486 	 * device. However, it may differ and then we have to use our
7487 	 * host DMA width for allocation and the device DMA width for
7488 	 * register handling.
7489 	 */
7490 	if (priv->plat->host_dma_width)
7491 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7492 	else
7493 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7494 
7495 	if (priv->dma_cap.host_dma_width) {
7496 		ret = dma_set_mask_and_coherent(device,
7497 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7498 		if (!ret) {
7499 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7500 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7501 
7502 			/*
7503 			 * If more than 32 bits can be addressed, make sure to
7504 			 * enable enhanced addressing mode.
7505 			 */
7506 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7507 				priv->plat->dma_cfg->eame = true;
7508 		} else {
7509 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7510 			if (ret) {
7511 				dev_err(priv->device, "Failed to set DMA Mask\n");
7512 				goto error_hw_init;
7513 			}
7514 
7515 			priv->dma_cap.host_dma_width = 32;
7516 		}
7517 	}
7518 
7519 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7520 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7521 #ifdef STMMAC_VLAN_TAG_USED
7522 	/* Both mac100 and gmac support receive VLAN tag detection */
7523 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7524 	if (priv->dma_cap.vlhash) {
7525 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7526 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7527 	}
7528 	if (priv->dma_cap.vlins) {
7529 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7530 		if (priv->dma_cap.dvlan)
7531 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7532 	}
7533 #endif
7534 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7535 
7536 	priv->xstats.threshold = tc;
7537 
7538 	/* Initialize RSS */
7539 	rxq = priv->plat->rx_queues_to_use;
7540 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7541 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7542 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7543 
7544 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7545 		ndev->features |= NETIF_F_RXHASH;
7546 
7547 	ndev->vlan_features |= ndev->features;
7548 	/* TSO doesn't work on VLANs yet */
7549 	ndev->vlan_features &= ~NETIF_F_TSO;
7550 
7551 	/* MTU range: 46 - hw-specific max */
7552 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7553 	if (priv->plat->has_xgmac)
7554 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7555 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7556 		ndev->max_mtu = JUMBO_LEN;
7557 	else
7558 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7559 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7560 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7561 	 */
7562 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7563 	    (priv->plat->maxmtu >= ndev->min_mtu))
7564 		ndev->max_mtu = priv->plat->maxmtu;
7565 	else if (priv->plat->maxmtu < ndev->min_mtu)
7566 		dev_warn(priv->device,
7567 			 "%s: warning: maxmtu having invalid value (%d)\n",
7568 			 __func__, priv->plat->maxmtu);
7569 
7570 	if (flow_ctrl)
7571 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7572 
7573 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7574 
7575 	/* Setup channels NAPI */
7576 	stmmac_napi_add(ndev);
7577 
7578 	mutex_init(&priv->lock);
7579 
7580 	/* If a specific clk_csr value is passed from the platform,
7581 	 * the CSR Clock Range selection cannot be changed at
7582 	 * run-time and is fixed. Otherwise, the driver tries to
7583 	 * set the MDC clock dynamically according to the actual
7584 	 * CSR clock input.
7585 	 */
7586 	if (priv->plat->clk_csr >= 0)
7587 		priv->clk_csr = priv->plat->clk_csr;
7588 	else
7589 		stmmac_clk_csr_set(priv);
7590 
7591 	stmmac_check_pcs_mode(priv);
7592 
7593 	pm_runtime_get_noresume(device);
7594 	pm_runtime_set_active(device);
7595 	if (!pm_runtime_enabled(device))
7596 		pm_runtime_enable(device);
7597 
7598 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7599 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7600 		/* MDIO bus Registration */
7601 		ret = stmmac_mdio_register(ndev);
7602 		if (ret < 0) {
7603 			dev_err_probe(priv->device, ret,
7604 				      "%s: MDIO bus (id: %d) registration failed\n",
7605 				      __func__, priv->plat->bus_id);
7606 			goto error_mdio_register;
7607 		}
7608 	}
7609 
7610 	if (priv->plat->speed_mode_2500)
7611 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7612 
7613 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7614 		ret = stmmac_xpcs_setup(priv->mii);
7615 		if (ret)
7616 			goto error_xpcs_setup;
7617 	}
7618 
7619 	ret = stmmac_phy_setup(priv);
7620 	if (ret) {
7621 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7622 		goto error_phy_setup;
7623 	}
7624 
7625 	ret = register_netdev(ndev);
7626 	if (ret) {
7627 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7628 			__func__, ret);
7629 		goto error_netdev_register;
7630 	}
7631 
7632 #ifdef CONFIG_DEBUG_FS
7633 	stmmac_init_fs(ndev);
7634 #endif
7635 
7636 	if (priv->plat->dump_debug_regs)
7637 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7638 
7639 	/* Let pm_runtime_put() disable the clocks.
7640 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7641 	 */
7642 	pm_runtime_put(device);
7643 
7644 	return ret;
7645 
7646 error_netdev_register:
7647 	phylink_destroy(priv->phylink);
7648 error_xpcs_setup:
7649 error_phy_setup:
7650 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7651 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7652 		stmmac_mdio_unregister(ndev);
7653 error_mdio_register:
7654 	stmmac_napi_del(ndev);
7655 error_hw_init:
7656 	destroy_workqueue(priv->wq);
7657 error_wq_init:
7658 	bitmap_free(priv->af_xdp_zc_qps);
7659 
7660 	return ret;
7661 }
7662 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7663 
7664 /**
7665  * stmmac_dvr_remove
7666  * @dev: device pointer
7667  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7668  * changes the link status and releases the DMA descriptor rings.
7669  */
7670 void stmmac_dvr_remove(struct device *dev)
7671 {
7672 	struct net_device *ndev = dev_get_drvdata(dev);
7673 	struct stmmac_priv *priv = netdev_priv(ndev);
7674 
7675 	netdev_info(priv->dev, "%s: removing driver", __func__);
7676 
7677 	pm_runtime_get_sync(dev);
7678 
7679 	stmmac_stop_all_dma(priv);
7680 	stmmac_mac_set(priv, priv->ioaddr, false);
7681 	netif_carrier_off(ndev);
7682 	unregister_netdev(ndev);
7683 
7684 #ifdef CONFIG_DEBUG_FS
7685 	stmmac_exit_fs(ndev);
7686 #endif
7687 	phylink_destroy(priv->phylink);
7688 	if (priv->plat->stmmac_rst)
7689 		reset_control_assert(priv->plat->stmmac_rst);
7690 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7691 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7692 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7693 		stmmac_mdio_unregister(ndev);
7694 	destroy_workqueue(priv->wq);
7695 	mutex_destroy(&priv->lock);
7696 	bitmap_free(priv->af_xdp_zc_qps);
7697 
7698 	pm_runtime_disable(dev);
7699 	pm_runtime_put_noidle(dev);
7700 }
7701 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7702 
7703 /**
7704  * stmmac_suspend - suspend callback
7705  * @dev: device pointer
7706  * Description: this is the function to suspend the device and it is called
7707  * by the platform driver to stop the network queue, release the resources,
7708  * program the PMT register (for WoL), clean and release driver resources.
7709  */
7710 int stmmac_suspend(struct device *dev)
7711 {
7712 	struct net_device *ndev = dev_get_drvdata(dev);
7713 	struct stmmac_priv *priv = netdev_priv(ndev);
7714 	u32 chan;
7715 
7716 	if (!ndev || !netif_running(ndev))
7717 		return 0;
7718 
7719 	mutex_lock(&priv->lock);
7720 
7721 	netif_device_detach(ndev);
7722 
7723 	stmmac_disable_all_queues(priv);
7724 
7725 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7726 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7727 
7728 	if (priv->eee_enabled) {
7729 		priv->tx_path_in_lpi_mode = false;
7730 		del_timer_sync(&priv->eee_ctrl_timer);
7731 	}
7732 
7733 	/* Stop TX/RX DMA */
7734 	stmmac_stop_all_dma(priv);
7735 
7736 	if (priv->plat->serdes_powerdown)
7737 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7738 
7739 	/* Enable Power down mode by programming the PMT regs */
7740 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7741 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7742 		priv->irq_wake = 1;
7743 	} else {
7744 		stmmac_mac_set(priv, priv->ioaddr, false);
7745 		pinctrl_pm_select_sleep_state(priv->device);
7746 	}
7747 
7748 	mutex_unlock(&priv->lock);
7749 
7750 	rtnl_lock();
7751 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7752 		phylink_suspend(priv->phylink, true);
7753 	} else {
7754 		if (device_may_wakeup(priv->device))
7755 			phylink_speed_down(priv->phylink, false);
7756 		phylink_suspend(priv->phylink, false);
7757 	}
7758 	rtnl_unlock();
7759 
7760 	if (priv->dma_cap.fpesel) {
7761 		/* Disable FPE */
7762 		stmmac_fpe_configure(priv, priv->ioaddr,
7763 				     priv->plat->fpe_cfg,
7764 				     priv->plat->tx_queues_to_use,
7765 				     priv->plat->rx_queues_to_use, false);
7766 
7767 		stmmac_fpe_handshake(priv, false);
7768 		stmmac_fpe_stop_wq(priv);
7769 	}
7770 
7771 	priv->speed = SPEED_UNKNOWN;
7772 	return 0;
7773 }
7774 EXPORT_SYMBOL_GPL(stmmac_suspend);
7775 
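/* Reset the software RX ring indices for the given queue */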
7776 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7777 {
7778 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7779 
7780 	rx_q->cur_rx = 0;
7781 	rx_q->dirty_rx = 0;
7782 }
7783 
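/* Reset the software TX ring state (indices, MSS) and the BQL counters
 * for the given queue.
 */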
7784 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7785 {
7786 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7787 
7788 	tx_q->cur_tx = 0;
7789 	tx_q->dirty_tx = 0;
7790 	tx_q->mss = 0;
7791 
7792 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7793 }
7794 
7795 /**
7796  * stmmac_reset_queues_param - reset queue parameters
7797  * @priv: device pointer
7798  */
7799 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7800 {
7801 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7802 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7803 	u32 queue;
7804 
7805 	for (queue = 0; queue < rx_cnt; queue++)
7806 		stmmac_reset_rx_queue(priv, queue);
7807 
7808 	for (queue = 0; queue < tx_cnt; queue++)
7809 		stmmac_reset_tx_queue(priv, queue);
7810 }
7811 
7812 /**
7813  * stmmac_resume - resume callback
7814  * @dev: device pointer
7815  * Description: on resume, this function is invoked to set up the DMA and CORE
7816  * in a usable state.
7817  */
7818 int stmmac_resume(struct device *dev)
7819 {
7820 	struct net_device *ndev = dev_get_drvdata(dev);
7821 	struct stmmac_priv *priv = netdev_priv(ndev);
7822 	int ret;
7823 
7824 	if (!netif_running(ndev))
7825 		return 0;
7826 
7827 	/* The Power Down bit in the PMT register is cleared
7828 	 * automatically as soon as a magic packet or a Wake-up frame
7829 	 * is received. Anyway, it's better to manually clear
7830 	 * this bit because it can generate problems while resuming
7831 	 * from other devices (e.g. a serial console).
7832 	 */
7833 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7834 		mutex_lock(&priv->lock);
7835 		stmmac_pmt(priv, priv->hw, 0);
7836 		mutex_unlock(&priv->lock);
7837 		priv->irq_wake = 0;
7838 	} else {
7839 		pinctrl_pm_select_default_state(priv->device);
7840 		/* reset the phy so that it's ready */
7841 		if (priv->mii)
7842 			stmmac_mdio_reset(priv->mii);
7843 	}
7844 
7845 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7846 	    priv->plat->serdes_powerup) {
7847 		ret = priv->plat->serdes_powerup(ndev,
7848 						 priv->plat->bsp_priv);
7849 
7850 		if (ret < 0)
7851 			return ret;
7852 	}
7853 
7854 	rtnl_lock();
7855 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7856 		phylink_resume(priv->phylink);
7857 	} else {
7858 		phylink_resume(priv->phylink);
7859 		if (device_may_wakeup(priv->device))
7860 			phylink_speed_up(priv->phylink);
7861 	}
7862 	rtnl_unlock();
7863 
7864 	rtnl_lock();
7865 	mutex_lock(&priv->lock);
7866 
7867 	stmmac_reset_queues_param(priv);
7868 
7869 	stmmac_free_tx_skbufs(priv);
7870 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7871 
7872 	stmmac_hw_setup(ndev, false);
7873 	stmmac_init_coalesce(priv);
7874 	stmmac_set_rx_mode(ndev);
7875 
7876 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7877 
7878 	stmmac_enable_all_queues(priv);
7879 	stmmac_enable_all_dma_irq(priv);
7880 
7881 	mutex_unlock(&priv->lock);
7882 	rtnl_unlock();
7883 
7884 	netif_device_attach(ndev);
7885 
7886 	return 0;
7887 }
7888 EXPORT_SYMBOL_GPL(stmmac_resume);
7889 
7890 #ifndef MODULE
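/**
 * stmmac_cmdline_opt - parse the "stmmaceth=" kernel command line option
 * @str: comma separated list of key:value pairs, e.g.
 *	 "stmmaceth=debug:16,watchdog:4000" (illustrative values)
 * Description: built-in only; each recognised key overrides the matching
 * module parameter default.
 */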
7891 static int __init stmmac_cmdline_opt(char *str)
7892 {
7893 	char *opt;
7894 
7895 	if (!str || !*str)
7896 		return 1;
7897 	while ((opt = strsep(&str, ",")) != NULL) {
7898 		if (!strncmp(opt, "debug:", 6)) {
7899 			if (kstrtoint(opt + 6, 0, &debug))
7900 				goto err;
7901 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7902 			if (kstrtoint(opt + 8, 0, &phyaddr))
7903 				goto err;
7904 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7905 			if (kstrtoint(opt + 7, 0, &buf_sz))
7906 				goto err;
7907 		} else if (!strncmp(opt, "tc:", 3)) {
7908 			if (kstrtoint(opt + 3, 0, &tc))
7909 				goto err;
7910 		} else if (!strncmp(opt, "watchdog:", 9)) {
7911 			if (kstrtoint(opt + 9, 0, &watchdog))
7912 				goto err;
7913 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7914 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7915 				goto err;
7916 		} else if (!strncmp(opt, "pause:", 6)) {
7917 			if (kstrtoint(opt + 6, 0, &pause))
7918 				goto err;
7919 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7920 			if (kstrtoint(opt + 10, 0, &eee_timer))
7921 				goto err;
7922 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7923 			if (kstrtoint(opt + 11, 0, &chain_mode))
7924 				goto err;
7925 		}
7926 	}
7927 	return 1;
7928 
7929 err:
7930 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7931 	return 1;
7932 }
7933 
7934 __setup("stmmaceth=", stmmac_cmdline_opt);
7935 #endif /* MODULE */
7936 
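/* Module init/exit only set up the shared debugfs directory and the
 * netdevice notifier; the devices themselves are registered through
 * stmmac_dvr_probe() by the bus-specific glue drivers.
 */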
7937 static int __init stmmac_init(void)
7938 {
7939 #ifdef CONFIG_DEBUG_FS
7940 	/* Create debugfs main directory if it doesn't exist yet */
7941 	if (!stmmac_fs_dir)
7942 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7943 	register_netdevice_notifier(&stmmac_notifier);
7944 #endif
7945 
7946 	return 0;
7947 }
7948 
7949 static void __exit stmmac_exit(void)
7950 {
7951 #ifdef CONFIG_DEBUG_FS
7952 	unregister_netdevice_notifier(&stmmac_notifier);
7953 	debugfs_remove_recursive(stmmac_fs_dir);
7954 #endif
7955 }
7956 
7957 module_init(stmmac_init)
7958 module_exit(stmmac_exit)
7959 
7960 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7961 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7962 MODULE_LICENSE("GPL");
7963