xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 4e73826089ce899357580bbf6e0afe4e6f9900b7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
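/* Quick illustration of STMMAC_ALIGN (values are only illustrative): with
 * SMP_CACHE_BYTES = 64, STMMAC_ALIGN(1500) first rounds 1500 up to the cache
 * line (ALIGN(1500, 64) = 1536) and then to 16 bytes (ALIGN(1536, 16) = 1536,
 * already aligned); the outer ALIGN only matters on configurations where the
 * cache line is smaller than 16 bytes.
 */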
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allow the user to force the use of chain mode instead of ring mode.
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
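/* The module parameters above can be set at load time or on the kernel
 * command line, e.g. "stmmac.eee_timer=2000" or "stmmac.chain_mode=1"
 * (assuming the usual "stmmac" module name); the 0644 ones can also be
 * changed at runtime through /sys/module/stmmac/parameters/.
 */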
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Otherwise, the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* The platform-provided default clk_csr is assumed to be valid in
308 	 * all cases except the ones mentioned below.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we cannot estimate the proper divider because the frequency
311 	 * of clk_csr_i is not known, so we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
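/* For example, a 75 MHz csr clock falls in the 60-100 MHz range and selects
 * STMMAC_CSR_60_100M; the sun8i and XGMAC blocks above use their own range
 * tables instead of this generic mapping.
 */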
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
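/* A worked example of the index arithmetic above (hypothetical ring state):
 * with dma_tx_size = 512, cur_tx = 510 and dirty_tx = 5, dirty_tx is not
 * greater than cur_tx, so avail = 512 - 510 + 5 - 1 = 6. The "- 1" keeps one
 * descriptor unused as a gap so a full ring is never confused with an empty
 * one. stmmac_rx_dirty() below applies the same wrap-around logic to count
 * descriptors waiting to be refilled.
 */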
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter LPI mode
405  * @priv: driver private structure
406  * Description: this function verifies that all TX queues are idle and, if
407  * so, enters LPI mode when EEE is enabled.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have finished their work */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function exits LPI mode and disables EEE. It is
433  * called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and we are not already in the LPI state,
452  *  then the MAC transmitter can be moved to the LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts the
468  *  related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Using PCS we cannot deal with the phy registers at this stage
475 	 * so we do not support extra features like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function reads the timestamp from the descriptor and passes it to the
534  * stack. It also performs some sanity checks.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
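/* The timestamp handed to the stack via skb_tstamp_tx() above reaches user
 * space through the socket error queue. A minimal, illustrative sketch of the
 * receiving side using the generic SO_TIMESTAMPING API is shown below (see
 * Documentation/networking/timestamping.rst); the socket fd and helper name
 * are placeholders, not part of this driver.
 */
#if 0	/* user-space illustration only, not compiled with the driver */
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void read_tx_hw_tstamp(int fd)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
	char ctrl[512], data[256];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = ctrl, .msg_controllen = sizeof(ctrl) };
	struct cmsghdr *cmsg;

	/* Request hardware TX timestamps on this socket */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));

	/* ...transmit a packet on fd, then drain its error queue... */
	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping *tss = (void *)CMSG_DATA(cmsg);

			/* tss->ts[2] holds the raw hardware timestamp */
			(void)tss;
		}
	}
}
#endif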
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function reads the received packet's timestamp from the descriptor
577  * and passes it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL-specific structure that can contain a pointer to a
611  *  proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing (TX)
614  *  and incoming (RX) packet timestamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate -ve integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.1AS, any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.1AS, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.1AS, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
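/* For reference, the handler above is normally reached from user space via
 * the SIOCSHWTSTAMP ioctl. A minimal sketch of such a request is shown below;
 * the UDP socket and the hypothetical helper name are illustration only.
 */
#if 0	/* user-space illustration only, not compiled with the driver */
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

static int request_hw_tstamp(const char *ifname)
{
	struct hwtstamp_config cfg = { 0 };
	struct ifreq ifr = { 0 };
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	cfg.tx_type = HWTSTAMP_TX_ON;			/* timestamp all TX packets */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* PTPv2 event packets on RX */
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* On success the driver writes back the rx_filter it actually applied */
	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return ret;
}
#endif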
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL-specific structure that can contain a pointer to a
808  *  proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtains the current hardware timestamping settings as
811  *  requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * It is rerun after resuming from suspend, in which case the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* calculate the default addend value:
858 	 * the formula is:
859 	 * addend = (2^32) / freq_div_ratio;
860 	 * where freq_div_ratio = 1e9 ns / sec_inc
861 	 */
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
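	/* Worked example with illustrative numbers: for sec_inc = 20 ns and
	 * clk_ptp_rate = 100 MHz, freq_div_ratio = 1e9 / 20 = 50e6 and
	 * addend = 2^32 * 50e6 / 100e6 = 0x80000000, so the 32-bit accumulator
	 * in the fine-correction logic overflows every second PTP clock cycle
	 * and the system time advances by sec_inc (20 ns) on each overflow.
	 */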
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968 					MPACKET_VERIFY);
969 	} else {
970 		*lo_state = FPE_STATE_OFF;
971 		*lp_state = FPE_STATE_OFF;
972 	}
973 }
974 
975 static void stmmac_mac_link_down(struct phylink_config *config,
976 				 unsigned int mode, phy_interface_t interface)
977 {
978 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
979 
980 	stmmac_mac_set(priv, priv->ioaddr, false);
981 	priv->eee_active = false;
982 	priv->tx_lpi_enabled = false;
983 	priv->eee_enabled = stmmac_eee_init(priv);
984 	stmmac_set_eee_pls(priv, priv->hw, false);
985 
986 	if (priv->dma_cap.fpesel)
987 		stmmac_fpe_link_state_handle(priv, false);
988 }
989 
990 static void stmmac_mac_link_up(struct phylink_config *config,
991 			       struct phy_device *phy,
992 			       unsigned int mode, phy_interface_t interface,
993 			       int speed, int duplex,
994 			       bool tx_pause, bool rx_pause)
995 {
996 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997 	u32 old_ctrl, ctrl;
998 
999 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000 	    priv->plat->serdes_powerup)
1001 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002 
1003 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1005 
1006 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1007 		switch (speed) {
1008 		case SPEED_10000:
1009 			ctrl |= priv->hw->link.xgmii.speed10000;
1010 			break;
1011 		case SPEED_5000:
1012 			ctrl |= priv->hw->link.xgmii.speed5000;
1013 			break;
1014 		case SPEED_2500:
1015 			ctrl |= priv->hw->link.xgmii.speed2500;
1016 			break;
1017 		default:
1018 			return;
1019 		}
1020 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1021 		switch (speed) {
1022 		case SPEED_100000:
1023 			ctrl |= priv->hw->link.xlgmii.speed100000;
1024 			break;
1025 		case SPEED_50000:
1026 			ctrl |= priv->hw->link.xlgmii.speed50000;
1027 			break;
1028 		case SPEED_40000:
1029 			ctrl |= priv->hw->link.xlgmii.speed40000;
1030 			break;
1031 		case SPEED_25000:
1032 			ctrl |= priv->hw->link.xlgmii.speed25000;
1033 			break;
1034 		case SPEED_10000:
1035 			ctrl |= priv->hw->link.xgmii.speed10000;
1036 			break;
1037 		case SPEED_2500:
1038 			ctrl |= priv->hw->link.speed2500;
1039 			break;
1040 		case SPEED_1000:
1041 			ctrl |= priv->hw->link.speed1000;
1042 			break;
1043 		default:
1044 			return;
1045 		}
1046 	} else {
1047 		switch (speed) {
1048 		case SPEED_2500:
1049 			ctrl |= priv->hw->link.speed2500;
1050 			break;
1051 		case SPEED_1000:
1052 			ctrl |= priv->hw->link.speed1000;
1053 			break;
1054 		case SPEED_100:
1055 			ctrl |= priv->hw->link.speed100;
1056 			break;
1057 		case SPEED_10:
1058 			ctrl |= priv->hw->link.speed10;
1059 			break;
1060 		default:
1061 			return;
1062 		}
1063 	}
1064 
1065 	priv->speed = speed;
1066 
1067 	if (priv->plat->fix_mac_speed)
1068 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1069 
1070 	if (!duplex)
1071 		ctrl &= ~priv->hw->link.duplex;
1072 	else
1073 		ctrl |= priv->hw->link.duplex;
1074 
1075 	/* Flow Control operation */
1076 	if (rx_pause && tx_pause)
1077 		priv->flow_ctrl = FLOW_AUTO;
1078 	else if (rx_pause && !tx_pause)
1079 		priv->flow_ctrl = FLOW_RX;
1080 	else if (!rx_pause && tx_pause)
1081 		priv->flow_ctrl = FLOW_TX;
1082 	else
1083 		priv->flow_ctrl = FLOW_OFF;
1084 
1085 	stmmac_mac_flow_ctrl(priv, duplex);
1086 
1087 	if (ctrl != old_ctrl)
1088 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089 
1090 	stmmac_mac_set(priv, priv->ioaddr, true);
1091 	if (phy && priv->dma_cap.eee) {
1092 		priv->eee_active =
1093 			phy_init_eee(phy, !(priv->plat->flags &
1094 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1095 		priv->eee_enabled = stmmac_eee_init(priv);
1096 		priv->tx_lpi_enabled = priv->eee_enabled;
1097 		stmmac_set_eee_pls(priv, priv->hw, true);
1098 	}
1099 
1100 	if (priv->dma_cap.fpesel)
1101 		stmmac_fpe_link_state_handle(priv, true);
1102 
1103 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1104 		stmmac_hwtstamp_correct_latency(priv, priv);
1105 }
1106 
1107 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1108 	.mac_select_pcs = stmmac_mac_select_pcs,
1109 	.mac_config = stmmac_mac_config,
1110 	.mac_link_down = stmmac_mac_link_down,
1111 	.mac_link_up = stmmac_mac_link_up,
1112 };
1113 
1114 /**
1115  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116  * @priv: driver private structure
1117  * Description: this is to verify if the HW supports the Physical Coding
1118  * Sublayer (PCS), an interface that can be used when the MAC is
1119  * configured for the TBI, RTBI, or SGMII PHY interface.
1120  */
1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122 {
1123 	int interface = priv->plat->mac_interface;
1124 
1125 	if (priv->dma_cap.pcs) {
1126 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_RGMII;
1132 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134 			priv->hw->pcs = STMMAC_PCS_SGMII;
1135 		}
1136 	}
1137 }
1138 
1139 /**
1140  * stmmac_init_phy - PHY initialization
1141  * @dev: net device structure
1142  * Description: it initializes the driver's PHY state, and attaches the PHY
1143  * to the mac driver.
1144  *  Return value:
1145  *  0 on success
1146  */
1147 static int stmmac_init_phy(struct net_device *dev)
1148 {
1149 	struct stmmac_priv *priv = netdev_priv(dev);
1150 	struct fwnode_handle *phy_fwnode;
1151 	struct fwnode_handle *fwnode;
1152 	int ret;
1153 
1154 	if (!phylink_expects_phy(priv->phylink))
1155 		return 0;
1156 
1157 	fwnode = priv->plat->port_node;
1158 	if (!fwnode)
1159 		fwnode = dev_fwnode(priv->device);
1160 
1161 	if (fwnode)
1162 		phy_fwnode = fwnode_get_phy_node(fwnode);
1163 	else
1164 		phy_fwnode = NULL;
1165 
1166 	/* Some DT bindings do not set up the PHY handle. Let's try to
1167 	 * parse it manually
1168 	 */
1169 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170 		int addr = priv->plat->phy_addr;
1171 		struct phy_device *phydev;
1172 
1173 		if (addr < 0) {
1174 			netdev_err(priv->dev, "no phy found\n");
1175 			return -ENODEV;
1176 		}
1177 
1178 		phydev = mdiobus_get_phy(priv->mii, addr);
1179 		if (!phydev) {
1180 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181 			return -ENODEV;
1182 		}
1183 
1184 		ret = phylink_connect_phy(priv->phylink, phydev);
1185 	} else {
1186 		fwnode_handle_put(phy_fwnode);
1187 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 	}
1189 
1190 	if (!priv->plat->pmt) {
1191 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192 
1193 		phylink_ethtool_get_wol(priv->phylink, &wol);
1194 		device_set_wakeup_capable(priv->device, !!wol.supported);
1195 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 	}
1197 
1198 	return ret;
1199 }
1200 
1201 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1202 {
1203 	/* Half-duplex can only work with a single TX queue */
1204 	if (priv->plat->tx_queues_to_use > 1)
1205 		priv->phylink_config.mac_capabilities &=
1206 			~(MAC_10HD | MAC_100HD | MAC_1000HD);
1207 	else
1208 		priv->phylink_config.mac_capabilities |=
1209 			(MAC_10HD | MAC_100HD | MAC_1000HD);
1210 }
1211 
1212 static int stmmac_phy_setup(struct stmmac_priv *priv)
1213 {
1214 	struct stmmac_mdio_bus_data *mdio_bus_data;
1215 	int mode = priv->plat->phy_interface;
1216 	struct fwnode_handle *fwnode;
1217 	struct phylink *phylink;
1218 	int max_speed;
1219 
1220 	priv->phylink_config.dev = &priv->dev->dev;
1221 	priv->phylink_config.type = PHYLINK_NETDEV;
1222 	priv->phylink_config.mac_managed_pm = true;
1223 
1224 	mdio_bus_data = priv->plat->mdio_bus_data;
1225 	if (mdio_bus_data)
1226 		priv->phylink_config.ovr_an_inband =
1227 			mdio_bus_data->xpcs_an_inband;
1228 
1229 	/* Set the platform/firmware specified interface mode. Note, phylink
1230 	 * deals with the PHY interface mode, not the MAC interface mode.
1231 	 */
1232 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1233 
1234 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1235 	if (priv->hw->xpcs)
1236 		xpcs_get_interfaces(priv->hw->xpcs,
1237 				    priv->phylink_config.supported_interfaces);
1238 
1239 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1240 						MAC_10FD | MAC_100FD |
1241 						MAC_1000FD;
1242 
1243 	stmmac_set_half_duplex(priv);
1244 
1245 	/* Get the MAC specific capabilities */
1246 	stmmac_mac_phylink_get_caps(priv);
1247 
1248 	max_speed = priv->plat->max_speed;
1249 	if (max_speed)
1250 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1251 
1252 	fwnode = priv->plat->port_node;
1253 	if (!fwnode)
1254 		fwnode = dev_fwnode(priv->device);
1255 
1256 	phylink = phylink_create(&priv->phylink_config, fwnode,
1257 				 mode, &stmmac_phylink_mac_ops);
1258 	if (IS_ERR(phylink))
1259 		return PTR_ERR(phylink);
1260 
1261 	priv->phylink = phylink;
1262 	return 0;
1263 }
1264 
1265 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1266 				    struct stmmac_dma_conf *dma_conf)
1267 {
1268 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1269 	unsigned int desc_size;
1270 	void *head_rx;
1271 	u32 queue;
1272 
1273 	/* Display RX rings */
1274 	for (queue = 0; queue < rx_cnt; queue++) {
1275 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1276 
1277 		pr_info("\tRX Queue %u rings\n", queue);
1278 
1279 		if (priv->extend_desc) {
1280 			head_rx = (void *)rx_q->dma_erx;
1281 			desc_size = sizeof(struct dma_extended_desc);
1282 		} else {
1283 			head_rx = (void *)rx_q->dma_rx;
1284 			desc_size = sizeof(struct dma_desc);
1285 		}
1286 
1287 		/* Display RX ring */
1288 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1289 				    rx_q->dma_rx_phy, desc_size);
1290 	}
1291 }
1292 
1293 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1294 				    struct stmmac_dma_conf *dma_conf)
1295 {
1296 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1297 	unsigned int desc_size;
1298 	void *head_tx;
1299 	u32 queue;
1300 
1301 	/* Display TX rings */
1302 	for (queue = 0; queue < tx_cnt; queue++) {
1303 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1304 
1305 		pr_info("\tTX Queue %d rings\n", queue);
1306 
1307 		if (priv->extend_desc) {
1308 			head_tx = (void *)tx_q->dma_etx;
1309 			desc_size = sizeof(struct dma_extended_desc);
1310 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1311 			head_tx = (void *)tx_q->dma_entx;
1312 			desc_size = sizeof(struct dma_edesc);
1313 		} else {
1314 			head_tx = (void *)tx_q->dma_tx;
1315 			desc_size = sizeof(struct dma_desc);
1316 		}
1317 
1318 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1319 				    tx_q->dma_tx_phy, desc_size);
1320 	}
1321 }
1322 
1323 static void stmmac_display_rings(struct stmmac_priv *priv,
1324 				 struct stmmac_dma_conf *dma_conf)
1325 {
1326 	/* Display RX ring */
1327 	stmmac_display_rx_rings(priv, dma_conf);
1328 
1329 	/* Display TX ring */
1330 	stmmac_display_tx_rings(priv, dma_conf);
1331 }
1332 
1333 static int stmmac_set_bfsize(int mtu, int bufsize)
1334 {
1335 	int ret = bufsize;
1336 
1337 	if (mtu >= BUF_SIZE_8KiB)
1338 		ret = BUF_SIZE_16KiB;
1339 	else if (mtu >= BUF_SIZE_4KiB)
1340 		ret = BUF_SIZE_8KiB;
1341 	else if (mtu >= BUF_SIZE_2KiB)
1342 		ret = BUF_SIZE_4KiB;
1343 	else if (mtu > DEFAULT_BUFSIZE)
1344 		ret = BUF_SIZE_2KiB;
1345 	else
1346 		ret = DEFAULT_BUFSIZE;
1347 
1348 	return ret;
1349 }
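/* Illustrative mapping for the thresholds above: an MTU of 1500 keeps
 * DEFAULT_BUFSIZE, an MTU of 3000 selects BUF_SIZE_4KiB and a 9000-byte
 * jumbo MTU selects BUF_SIZE_16KiB.
 */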
1350 
1351 /**
1352  * stmmac_clear_rx_descriptors - clear RX descriptors
1353  * @priv: driver private structure
1354  * @dma_conf: structure to take the dma data
1355  * @queue: RX queue index
1356  * Description: this function is called to clear the RX descriptors
1357  * in case of both basic and extended descriptors are used.
1358  */
1359 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1360 					struct stmmac_dma_conf *dma_conf,
1361 					u32 queue)
1362 {
1363 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1364 	int i;
1365 
1366 	/* Clear the RX descriptors */
1367 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1368 		if (priv->extend_desc)
1369 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1370 					priv->use_riwt, priv->mode,
1371 					(i == dma_conf->dma_rx_size - 1),
1372 					dma_conf->dma_buf_sz);
1373 		else
1374 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1375 					priv->use_riwt, priv->mode,
1376 					(i == dma_conf->dma_rx_size - 1),
1377 					dma_conf->dma_buf_sz);
1378 }
1379 
1380 /**
1381  * stmmac_clear_tx_descriptors - clear tx descriptors
1382  * @priv: driver private structure
1383  * @dma_conf: structure to take the dma data
1384  * @queue: TX queue index.
1385  * Description: this function is called to clear the TX descriptors,
1386  * whether basic or extended descriptors are used.
1387  */
1388 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1389 					struct stmmac_dma_conf *dma_conf,
1390 					u32 queue)
1391 {
1392 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1393 	int i;
1394 
1395 	/* Clear the TX descriptors */
1396 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1397 		int last = (i == (dma_conf->dma_tx_size - 1));
1398 		struct dma_desc *p;
1399 
1400 		if (priv->extend_desc)
1401 			p = &tx_q->dma_etx[i].basic;
1402 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1403 			p = &tx_q->dma_entx[i].basic;
1404 		else
1405 			p = &tx_q->dma_tx[i];
1406 
1407 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1408 	}
1409 }
1410 
1411 /**
1412  * stmmac_clear_descriptors - clear descriptors
1413  * @priv: driver private structure
1414  * @dma_conf: structure to take the dma data
1415  * Description: this function is called to clear the TX and RX descriptors,
1416  * whether basic or extended descriptors are used.
1417  */
1418 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1419 				     struct stmmac_dma_conf *dma_conf)
1420 {
1421 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1422 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1423 	u32 queue;
1424 
1425 	/* Clear the RX descriptors */
1426 	for (queue = 0; queue < rx_queue_cnt; queue++)
1427 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1428 
1429 	/* Clear the TX descriptors */
1430 	for (queue = 0; queue < tx_queue_cnt; queue++)
1431 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1432 }
1433 
1434 /**
1435  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1436  * @priv: driver private structure
1437  * @dma_conf: structure to take the dma data
1438  * @p: descriptor pointer
1439  * @i: descriptor index
1440  * @flags: gfp flag
1441  * @queue: RX queue index
1442  * Description: this function is called to allocate a receive buffer, perform
1443  * the DMA mapping and init the descriptor.
1444  */
1445 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1446 				  struct stmmac_dma_conf *dma_conf,
1447 				  struct dma_desc *p,
1448 				  int i, gfp_t flags, u32 queue)
1449 {
1450 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1451 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1452 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1453 
1454 	if (priv->dma_cap.host_dma_width <= 32)
1455 		gfp |= GFP_DMA32;
1456 
1457 	if (!buf->page) {
1458 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1459 		if (!buf->page)
1460 			return -ENOMEM;
1461 		buf->page_offset = stmmac_rx_offset(priv);
1462 	}
1463 
1464 	if (priv->sph && !buf->sec_page) {
1465 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1466 		if (!buf->sec_page)
1467 			return -ENOMEM;
1468 
1469 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1470 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1471 	} else {
1472 		buf->sec_page = NULL;
1473 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1474 	}
1475 
1476 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1477 
1478 	stmmac_set_desc_addr(priv, p, buf->addr);
1479 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1480 		stmmac_init_desc3(priv, p);
1481 
1482 	return 0;
1483 }
1484 
1485 /**
1486  * stmmac_free_rx_buffer - free RX dma buffers
1487  * @priv: private structure
1488  * @rx_q: RX queue
1489  * @i: buffer index.
1490  */
1491 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1492 				  struct stmmac_rx_queue *rx_q,
1493 				  int i)
1494 {
1495 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1496 
1497 	if (buf->page)
1498 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1499 	buf->page = NULL;
1500 
1501 	if (buf->sec_page)
1502 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1503 	buf->sec_page = NULL;
1504 }
1505 
1506 /**
1507  * stmmac_free_tx_buffer - free TX dma buffers
1508  * @priv: private structure
1509  * @dma_conf: structure to take the dma data
1510  * @queue: TX queue index
1511  * @i: buffer index.
1512  */
1513 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1514 				  struct stmmac_dma_conf *dma_conf,
1515 				  u32 queue, int i)
1516 {
1517 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1518 
1519 	if (tx_q->tx_skbuff_dma[i].buf &&
1520 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1521 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1522 			dma_unmap_page(priv->device,
1523 				       tx_q->tx_skbuff_dma[i].buf,
1524 				       tx_q->tx_skbuff_dma[i].len,
1525 				       DMA_TO_DEVICE);
1526 		else
1527 			dma_unmap_single(priv->device,
1528 					 tx_q->tx_skbuff_dma[i].buf,
1529 					 tx_q->tx_skbuff_dma[i].len,
1530 					 DMA_TO_DEVICE);
1531 	}
1532 
1533 	if (tx_q->xdpf[i] &&
1534 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1535 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1536 		xdp_return_frame(tx_q->xdpf[i]);
1537 		tx_q->xdpf[i] = NULL;
1538 	}
1539 
1540 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1541 		tx_q->xsk_frames_done++;
1542 
1543 	if (tx_q->tx_skbuff[i] &&
1544 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1545 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1546 		tx_q->tx_skbuff[i] = NULL;
1547 	}
1548 
1549 	tx_q->tx_skbuff_dma[i].buf = 0;
1550 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1551 }
1552 
1553 /**
1554  * dma_free_rx_skbufs - free RX dma buffers
1555  * @priv: private structure
1556  * @dma_conf: structure to take the dma data
1557  * @queue: RX queue index
1558  */
1559 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1560 			       struct stmmac_dma_conf *dma_conf,
1561 			       u32 queue)
1562 {
1563 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564 	int i;
1565 
1566 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1567 		stmmac_free_rx_buffer(priv, rx_q, i);
1568 }
1569 
1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1571 				   struct stmmac_dma_conf *dma_conf,
1572 				   u32 queue, gfp_t flags)
1573 {
1574 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1575 	int i;
1576 
1577 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1578 		struct dma_desc *p;
1579 		int ret;
1580 
1581 		if (priv->extend_desc)
1582 			p = &((rx_q->dma_erx + i)->basic);
1583 		else
1584 			p = rx_q->dma_rx + i;
1585 
1586 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1587 					     queue);
1588 		if (ret)
1589 			return ret;
1590 
1591 		rx_q->buf_alloc_num++;
1592 	}
1593 
1594 	return 0;
1595 }
1596 
1597 /**
1598  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1599  * @priv: private structure
1600  * @dma_conf: structure to take the dma data
1601  * @queue: RX queue index
1602  */
1603 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1604 				struct stmmac_dma_conf *dma_conf,
1605 				u32 queue)
1606 {
1607 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1608 	int i;
1609 
1610 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1611 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1612 
1613 		if (!buf->xdp)
1614 			continue;
1615 
1616 		xsk_buff_free(buf->xdp);
1617 		buf->xdp = NULL;
1618 	}
1619 }
1620 
1621 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1622 				      struct stmmac_dma_conf *dma_conf,
1623 				      u32 queue)
1624 {
1625 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1626 	int i;
1627 
1628 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1629 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1630 	 * use this macro to make sure there are no size violations.
1631 	 */
1632 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1633 
1634 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1635 		struct stmmac_rx_buffer *buf;
1636 		dma_addr_t dma_addr;
1637 		struct dma_desc *p;
1638 
1639 		if (priv->extend_desc)
1640 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1641 		else
1642 			p = rx_q->dma_rx + i;
1643 
1644 		buf = &rx_q->buf_pool[i];
1645 
1646 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1647 		if (!buf->xdp)
1648 			return -ENOMEM;
1649 
1650 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1651 		stmmac_set_desc_addr(priv, p, dma_addr);
1652 		rx_q->buf_alloc_num++;
1653 	}
1654 
1655 	return 0;
1656 }
1657 
1658 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1659 {
1660 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1661 		return NULL;
1662 
1663 	return xsk_get_pool_from_qid(priv->dev, queue);
1664 }
1665 
1666 /**
1667  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1668  * @priv: driver private structure
1669  * @dma_conf: structure to take the dma data
1670  * @queue: RX queue index
1671  * @flags: gfp flag.
1672  * Description: this function initializes the DMA RX descriptors
1673  * and allocates the socket buffers. It supports the chained and ring
1674  * modes.
1675  */
1676 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1677 				    struct stmmac_dma_conf *dma_conf,
1678 				    u32 queue, gfp_t flags)
1679 {
1680 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1681 	int ret;
1682 
1683 	netif_dbg(priv, probe, priv->dev,
1684 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1685 		  (u32)rx_q->dma_rx_phy);
1686 
1687 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1688 
1689 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1690 
1691 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1692 
1693 	if (rx_q->xsk_pool) {
1694 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1695 						   MEM_TYPE_XSK_BUFF_POOL,
1696 						   NULL));
1697 		netdev_info(priv->dev,
1698 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1699 			    rx_q->queue_index);
1700 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1701 	} else {
1702 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1703 						   MEM_TYPE_PAGE_POOL,
1704 						   rx_q->page_pool));
1705 		netdev_info(priv->dev,
1706 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1707 			    rx_q->queue_index);
1708 	}
1709 
1710 	if (rx_q->xsk_pool) {
1711 		/* RX XDP ZC buffer pool may not be populated, e.g.
1712 		 * xdpsock TX-only.
1713 		 */
1714 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1715 	} else {
1716 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1717 		if (ret < 0)
1718 			return -ENOMEM;
1719 	}
1720 
1721 	/* Setup the chained descriptor addresses */
1722 	if (priv->mode == STMMAC_CHAIN_MODE) {
1723 		if (priv->extend_desc)
1724 			stmmac_mode_init(priv, rx_q->dma_erx,
1725 					 rx_q->dma_rx_phy,
1726 					 dma_conf->dma_rx_size, 1);
1727 		else
1728 			stmmac_mode_init(priv, rx_q->dma_rx,
1729 					 rx_q->dma_rx_phy,
1730 					 dma_conf->dma_rx_size, 0);
1731 	}
1732 
1733 	return 0;
1734 }
1735 
1736 static int init_dma_rx_desc_rings(struct net_device *dev,
1737 				  struct stmmac_dma_conf *dma_conf,
1738 				  gfp_t flags)
1739 {
1740 	struct stmmac_priv *priv = netdev_priv(dev);
1741 	u32 rx_count = priv->plat->rx_queues_to_use;
1742 	int queue;
1743 	int ret;
1744 
1745 	/* RX INITIALIZATION */
1746 	netif_dbg(priv, probe, priv->dev,
1747 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1748 
1749 	for (queue = 0; queue < rx_count; queue++) {
1750 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1751 		if (ret)
1752 			goto err_init_rx_buffers;
1753 	}
1754 
1755 	return 0;
1756 
1757 err_init_rx_buffers:
1758 	while (queue >= 0) {
1759 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1760 
1761 		if (rx_q->xsk_pool)
1762 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1763 		else
1764 			dma_free_rx_skbufs(priv, dma_conf, queue);
1765 
1766 		rx_q->buf_alloc_num = 0;
1767 		rx_q->xsk_pool = NULL;
1768 
1769 		queue--;
1770 	}
1771 
1772 	return ret;
1773 }
1774 
1775 /**
1776  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1777  * @priv: driver private structure
1778  * @dma_conf: structure to take the dma data
1779  * @queue: TX queue index
1780  * Description: this function initializes the DMA TX descriptors
1781  * and clears the per-entry TX buffer bookkeeping. It supports the chained
1782  * and ring modes.
1783  */
1784 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1785 				    struct stmmac_dma_conf *dma_conf,
1786 				    u32 queue)
1787 {
1788 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1789 	int i;
1790 
1791 	netif_dbg(priv, probe, priv->dev,
1792 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1793 		  (u32)tx_q->dma_tx_phy);
1794 
1795 	/* Setup the chained descriptor addresses */
1796 	if (priv->mode == STMMAC_CHAIN_MODE) {
1797 		if (priv->extend_desc)
1798 			stmmac_mode_init(priv, tx_q->dma_etx,
1799 					 tx_q->dma_tx_phy,
1800 					 dma_conf->dma_tx_size, 1);
1801 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1802 			stmmac_mode_init(priv, tx_q->dma_tx,
1803 					 tx_q->dma_tx_phy,
1804 					 dma_conf->dma_tx_size, 0);
1805 	}
1806 
1807 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1808 
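	/* Clear every TX descriptor and reset the per-entry buffer bookkeeping */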
1809 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1810 		struct dma_desc *p;
1811 
1812 		if (priv->extend_desc)
1813 			p = &((tx_q->dma_etx + i)->basic);
1814 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1815 			p = &((tx_q->dma_entx + i)->basic);
1816 		else
1817 			p = tx_q->dma_tx + i;
1818 
1819 		stmmac_clear_desc(priv, p);
1820 
1821 		tx_q->tx_skbuff_dma[i].buf = 0;
1822 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1823 		tx_q->tx_skbuff_dma[i].len = 0;
1824 		tx_q->tx_skbuff_dma[i].last_segment = false;
1825 		tx_q->tx_skbuff[i] = NULL;
1826 	}
1827 
1828 	return 0;
1829 }
1830 
1831 static int init_dma_tx_desc_rings(struct net_device *dev,
1832 				  struct stmmac_dma_conf *dma_conf)
1833 {
1834 	struct stmmac_priv *priv = netdev_priv(dev);
1835 	u32 tx_queue_cnt;
1836 	u32 queue;
1837 
1838 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1839 
1840 	for (queue = 0; queue < tx_queue_cnt; queue++)
1841 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1842 
1843 	return 0;
1844 }
1845 
1846 /**
1847  * init_dma_desc_rings - init the RX/TX descriptor rings
1848  * @dev: net device structure
1849  * @dma_conf: structure to take the dma data
1850  * @flags: gfp flag.
1851  * Description: this function initializes the DMA RX/TX descriptors
1852  * and allocates the socket buffers. It supports the chained and ring
1853  * modes.
1854  */
1855 static int init_dma_desc_rings(struct net_device *dev,
1856 			       struct stmmac_dma_conf *dma_conf,
1857 			       gfp_t flags)
1858 {
1859 	struct stmmac_priv *priv = netdev_priv(dev);
1860 	int ret;
1861 
1862 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1863 	if (ret)
1864 		return ret;
1865 
1866 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1867 
1868 	stmmac_clear_descriptors(priv, dma_conf);
1869 
1870 	if (netif_msg_hw(priv))
1871 		stmmac_display_rings(priv, dma_conf);
1872 
1873 	return ret;
1874 }
1875 
1876 /**
1877  * dma_free_tx_skbufs - free TX dma buffers
1878  * @priv: private structure
1879  * @dma_conf: structure to take the dma data
1880  * @queue: TX queue index
1881  */
1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1883 			       struct stmmac_dma_conf *dma_conf,
1884 			       u32 queue)
1885 {
1886 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1887 	int i;
1888 
1889 	tx_q->xsk_frames_done = 0;
1890 
1891 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1892 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1893 
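	/* Return the XSK frames counted above to the pool and drop the
	 * queue's reference to it.
	 */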
1894 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1895 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1896 		tx_q->xsk_frames_done = 0;
1897 		tx_q->xsk_pool = NULL;
1898 	}
1899 }
1900 
1901 /**
1902  * stmmac_free_tx_skbufs - free TX skb buffers
1903  * @priv: private structure
1904  */
1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1906 {
1907 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1908 	u32 queue;
1909 
1910 	for (queue = 0; queue < tx_queue_cnt; queue++)
1911 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1912 }
1913 
1914 /**
1915  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1916  * @priv: private structure
1917  * @dma_conf: structure to take the dma data
1918  * @queue: RX queue index
1919  */
1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1921 					 struct stmmac_dma_conf *dma_conf,
1922 					 u32 queue)
1923 {
1924 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1925 
1926 	/* Release the DMA RX socket buffers */
1927 	if (rx_q->xsk_pool)
1928 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1929 	else
1930 		dma_free_rx_skbufs(priv, dma_conf, queue);
1931 
1932 	rx_q->buf_alloc_num = 0;
1933 	rx_q->xsk_pool = NULL;
1934 
1935 	/* Free DMA regions of consistent memory previously allocated */
1936 	if (!priv->extend_desc)
1937 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1938 				  sizeof(struct dma_desc),
1939 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1940 	else
1941 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1942 				  sizeof(struct dma_extended_desc),
1943 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1944 
1945 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1946 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1947 
1948 	kfree(rx_q->buf_pool);
1949 	if (rx_q->page_pool)
1950 		page_pool_destroy(rx_q->page_pool);
1951 }
1952 
1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1954 				       struct stmmac_dma_conf *dma_conf)
1955 {
1956 	u32 rx_count = priv->plat->rx_queues_to_use;
1957 	u32 queue;
1958 
1959 	/* Free RX queue resources */
1960 	for (queue = 0; queue < rx_count; queue++)
1961 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1962 }
1963 
1964 /**
1965  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1966  * @priv: private structure
1967  * @dma_conf: structure to take the dma data
1968  * @queue: TX queue index
1969  */
1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1971 					 struct stmmac_dma_conf *dma_conf,
1972 					 u32 queue)
1973 {
1974 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1975 	size_t size;
1976 	void *addr;
1977 
1978 	/* Release the DMA TX socket buffers */
1979 	dma_free_tx_skbufs(priv, dma_conf, queue);
1980 
1981 	if (priv->extend_desc) {
1982 		size = sizeof(struct dma_extended_desc);
1983 		addr = tx_q->dma_etx;
1984 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1985 		size = sizeof(struct dma_edesc);
1986 		addr = tx_q->dma_entx;
1987 	} else {
1988 		size = sizeof(struct dma_desc);
1989 		addr = tx_q->dma_tx;
1990 	}
1991 
1992 	size *= dma_conf->dma_tx_size;
1993 
1994 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1995 
1996 	kfree(tx_q->tx_skbuff_dma);
1997 	kfree(tx_q->tx_skbuff);
1998 }
1999 
2000 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2001 				       struct stmmac_dma_conf *dma_conf)
2002 {
2003 	u32 tx_count = priv->plat->tx_queues_to_use;
2004 	u32 queue;
2005 
2006 	/* Free TX queue resources */
2007 	for (queue = 0; queue < tx_count; queue++)
2008 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2009 }
2010 
2011 /**
2012  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2013  * @priv: private structure
2014  * @dma_conf: structure to take the dma data
2015  * @queue: RX queue index
2016  * Description: according to which descriptor can be used (extended or basic)
2017  * this function allocates the resources for the TX and RX paths. In case of
2018  * reception, for example, it pre-allocates the RX socket buffers in order to
2019  * allow the zero-copy mechanism.
2020  */
2021 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2022 					 struct stmmac_dma_conf *dma_conf,
2023 					 u32 queue)
2024 {
2025 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2026 	struct stmmac_channel *ch = &priv->channel[queue];
2027 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2028 	struct page_pool_params pp_params = { 0 };
2029 	unsigned int num_pages;
2030 	unsigned int napi_id;
2031 	int ret;
2032 
2033 	rx_q->queue_index = queue;
2034 	rx_q->priv_data = priv;
2035 
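	/* One page-pool buffer per RX descriptor; the page order is chosen so
	 * that a single allocation covers dma_buf_sz bytes.
	 */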
2036 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2037 	pp_params.pool_size = dma_conf->dma_rx_size;
2038 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2039 	pp_params.order = ilog2(num_pages);
2040 	pp_params.nid = dev_to_node(priv->device);
2041 	pp_params.dev = priv->device;
2042 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2043 	pp_params.offset = stmmac_rx_offset(priv);
2044 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2045 
2046 	rx_q->page_pool = page_pool_create(&pp_params);
2047 	if (IS_ERR(rx_q->page_pool)) {
2048 		ret = PTR_ERR(rx_q->page_pool);
2049 		rx_q->page_pool = NULL;
2050 		return ret;
2051 	}
2052 
2053 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2054 				 sizeof(*rx_q->buf_pool),
2055 				 GFP_KERNEL);
2056 	if (!rx_q->buf_pool)
2057 		return -ENOMEM;
2058 
2059 	if (priv->extend_desc) {
2060 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2061 						   dma_conf->dma_rx_size *
2062 						   sizeof(struct dma_extended_desc),
2063 						   &rx_q->dma_rx_phy,
2064 						   GFP_KERNEL);
2065 		if (!rx_q->dma_erx)
2066 			return -ENOMEM;
2067 
2068 	} else {
2069 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2070 						  dma_conf->dma_rx_size *
2071 						  sizeof(struct dma_desc),
2072 						  &rx_q->dma_rx_phy,
2073 						  GFP_KERNEL);
2074 		if (!rx_q->dma_rx)
2075 			return -ENOMEM;
2076 	}
2077 
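	/* AF_XDP zero-copy queues are serviced by the combined rxtx NAPI, so
	 * register the XDP RxQ info against that NAPI instance.
	 */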
2078 	if (stmmac_xdp_is_enabled(priv) &&
2079 	    test_bit(queue, priv->af_xdp_zc_qps))
2080 		napi_id = ch->rxtx_napi.napi_id;
2081 	else
2082 		napi_id = ch->rx_napi.napi_id;
2083 
2084 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2085 			       rx_q->queue_index,
2086 			       napi_id);
2087 	if (ret) {
2088 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2089 		return -EINVAL;
2090 	}
2091 
2092 	return 0;
2093 }
2094 
2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2096 				       struct stmmac_dma_conf *dma_conf)
2097 {
2098 	u32 rx_count = priv->plat->rx_queues_to_use;
2099 	u32 queue;
2100 	int ret;
2101 
2102 	/* RX queues buffers and DMA */
2103 	for (queue = 0; queue < rx_count; queue++) {
2104 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2105 		if (ret)
2106 			goto err_dma;
2107 	}
2108 
2109 	return 0;
2110 
2111 err_dma:
2112 	free_dma_rx_desc_resources(priv, dma_conf);
2113 
2114 	return ret;
2115 }
2116 
2117 /**
2118  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2119  * @priv: private structure
2120  * @dma_conf: structure to take the dma data
2121  * @queue: TX queue index
2122  * Description: according to which descriptor can be used (extended or basic)
2123  * this function allocates the resources for the TX and RX paths. In case of
2124  * reception, for example, it pre-allocates the RX socket buffers in order to
2125  * allow the zero-copy mechanism.
2126  */
2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2128 					 struct stmmac_dma_conf *dma_conf,
2129 					 u32 queue)
2130 {
2131 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2132 	size_t size;
2133 	void *addr;
2134 
2135 	tx_q->queue_index = queue;
2136 	tx_q->priv_data = priv;
2137 
2138 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2139 				      sizeof(*tx_q->tx_skbuff_dma),
2140 				      GFP_KERNEL);
2141 	if (!tx_q->tx_skbuff_dma)
2142 		return -ENOMEM;
2143 
2144 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2145 				  sizeof(struct sk_buff *),
2146 				  GFP_KERNEL);
2147 	if (!tx_q->tx_skbuff)
2148 		return -ENOMEM;
2149 
2150 	if (priv->extend_desc)
2151 		size = sizeof(struct dma_extended_desc);
2152 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2153 		size = sizeof(struct dma_edesc);
2154 	else
2155 		size = sizeof(struct dma_desc);
2156 
2157 	size *= dma_conf->dma_tx_size;
2158 
2159 	addr = dma_alloc_coherent(priv->device, size,
2160 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2161 	if (!addr)
2162 		return -ENOMEM;
2163 
2164 	if (priv->extend_desc)
2165 		tx_q->dma_etx = addr;
2166 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2167 		tx_q->dma_entx = addr;
2168 	else
2169 		tx_q->dma_tx = addr;
2170 
2171 	return 0;
2172 }
2173 
2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2175 				       struct stmmac_dma_conf *dma_conf)
2176 {
2177 	u32 tx_count = priv->plat->tx_queues_to_use;
2178 	u32 queue;
2179 	int ret;
2180 
2181 	/* TX queues buffers and DMA */
2182 	for (queue = 0; queue < tx_count; queue++) {
2183 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2184 		if (ret)
2185 			goto err_dma;
2186 	}
2187 
2188 	return 0;
2189 
2190 err_dma:
2191 	free_dma_tx_desc_resources(priv, dma_conf);
2192 	return ret;
2193 }
2194 
2195 /**
2196  * alloc_dma_desc_resources - alloc TX/RX resources.
2197  * @priv: private structure
2198  * @dma_conf: structure to take the dma data
2199  * Description: according to which descriptor can be used (extended or basic)
2200  * this function allocates the resources for the TX and RX paths. In case of
2201  * reception, for example, it pre-allocates the RX socket buffers in order to
2202  * allow the zero-copy mechanism.
2203  */
2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2205 				    struct stmmac_dma_conf *dma_conf)
2206 {
2207 	/* RX Allocation */
2208 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2209 
2210 	if (ret)
2211 		return ret;
2212 
2213 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2214 
2215 	return ret;
2216 }
2217 
2218 /**
2219  * free_dma_desc_resources - free dma desc resources
2220  * @priv: private structure
2221  * @dma_conf: structure to take the dma data
2222  */
2223 static void free_dma_desc_resources(struct stmmac_priv *priv,
2224 				    struct stmmac_dma_conf *dma_conf)
2225 {
2226 	/* Release the DMA TX socket buffers */
2227 	free_dma_tx_desc_resources(priv, dma_conf);
2228 
2229 	/* Release the DMA RX socket buffers later
2230 	 * to ensure all pending XDP_TX buffers are returned.
2231 	 */
2232 	free_dma_rx_desc_resources(priv, dma_conf);
2233 }
2234 
2235 /**
2236  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2237  *  @priv: driver private structure
2238  *  Description: It is used for enabling the rx queues in the MAC
2239  */
2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2241 {
2242 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2243 	int queue;
2244 	u8 mode;
2245 
2246 	for (queue = 0; queue < rx_queues_count; queue++) {
2247 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2248 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2249 	}
2250 }
2251 
2252 /**
2253  * stmmac_start_rx_dma - start RX DMA channel
2254  * @priv: driver private structure
2255  * @chan: RX channel index
2256  * Description:
2257  * This starts an RX DMA channel
2258  */
2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2260 {
2261 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2262 	stmmac_start_rx(priv, priv->ioaddr, chan);
2263 }
2264 
2265 /**
2266  * stmmac_start_tx_dma - start TX DMA channel
2267  * @priv: driver private structure
2268  * @chan: TX channel index
2269  * Description:
2270  * This starts a TX DMA channel
2271  */
2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2273 {
2274 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2275 	stmmac_start_tx(priv, priv->ioaddr, chan);
2276 }
2277 
2278 /**
2279  * stmmac_stop_rx_dma - stop RX DMA channel
2280  * @priv: driver private structure
2281  * @chan: RX channel index
2282  * Description:
2283  * This stops an RX DMA channel
2284  */
2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2286 {
2287 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2288 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2289 }
2290 
2291 /**
2292  * stmmac_stop_tx_dma - stop TX DMA channel
2293  * @priv: driver private structure
2294  * @chan: TX channel index
2295  * Description:
2296  * This stops a TX DMA channel
2297  */
2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2299 {
2300 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2301 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2302 }
2303 
2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2305 {
2306 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2309 	u32 chan;
2310 
2311 	for (chan = 0; chan < dma_csr_ch; chan++) {
2312 		struct stmmac_channel *ch = &priv->channel[chan];
2313 		unsigned long flags;
2314 
2315 		spin_lock_irqsave(&ch->lock, flags);
2316 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2317 		spin_unlock_irqrestore(&ch->lock, flags);
2318 	}
2319 }
2320 
2321 /**
2322  * stmmac_start_all_dma - start all RX and TX DMA channels
2323  * @priv: driver private structure
2324  * Description:
2325  * This starts all the RX and TX DMA channels
2326  */
2327 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2328 {
2329 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2330 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2331 	u32 chan = 0;
2332 
2333 	for (chan = 0; chan < rx_channels_count; chan++)
2334 		stmmac_start_rx_dma(priv, chan);
2335 
2336 	for (chan = 0; chan < tx_channels_count; chan++)
2337 		stmmac_start_tx_dma(priv, chan);
2338 }
2339 
2340 /**
2341  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2342  * @priv: driver private structure
2343  * Description:
2344  * This stops the RX and TX DMA channels
2345  */
2346 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2347 {
2348 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2349 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2350 	u32 chan = 0;
2351 
2352 	for (chan = 0; chan < rx_channels_count; chan++)
2353 		stmmac_stop_rx_dma(priv, chan);
2354 
2355 	for (chan = 0; chan < tx_channels_count; chan++)
2356 		stmmac_stop_tx_dma(priv, chan);
2357 }
2358 
2359 /**
2360  *  stmmac_dma_operation_mode - HW DMA operation mode
2361  *  @priv: driver private structure
2362  *  Description: it is used for configuring the DMA operation mode register in
2363  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2364  */
2365 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2366 {
2367 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2368 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2369 	int rxfifosz = priv->plat->rx_fifo_size;
2370 	int txfifosz = priv->plat->tx_fifo_size;
2371 	u32 txmode = 0;
2372 	u32 rxmode = 0;
2373 	u32 chan = 0;
2374 	u8 qmode = 0;
2375 
2376 	if (rxfifosz == 0)
2377 		rxfifosz = priv->dma_cap.rx_fifo_size;
2378 	if (txfifosz == 0)
2379 		txfifosz = priv->dma_cap.tx_fifo_size;
2380 
2381 	/* Adjust for real per queue fifo size */
2382 	rxfifosz /= rx_channels_count;
2383 	txfifosz /= tx_channels_count;
2384 
2385 	if (priv->plat->force_thresh_dma_mode) {
2386 		txmode = tc;
2387 		rxmode = tc;
2388 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2389 		/*
2390 		 * In case of GMAC, SF mode can be enabled
2391 		 * to perform the TX COE in HW. This depends on:
2392 		 * 1) TX COE being actually supported
2393 		 * 2) there being no buggy Jumbo frame support
2394 		 *    that requires not inserting the csum in the TDES.
2395 		 */
2396 		txmode = SF_DMA_MODE;
2397 		rxmode = SF_DMA_MODE;
2398 		priv->xstats.threshold = SF_DMA_MODE;
2399 	} else {
2400 		txmode = tc;
2401 		rxmode = SF_DMA_MODE;
2402 	}
2403 
2404 	/* configure all channels */
2405 	for (chan = 0; chan < rx_channels_count; chan++) {
2406 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2407 		u32 buf_size;
2408 
2409 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2410 
2411 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2412 				rxfifosz, qmode);
2413 
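		/* The DMA buffer size follows the XSK pool frame size when a
		 * zero-copy pool is attached, otherwise the default dma_buf_sz.
		 */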
2414 		if (rx_q->xsk_pool) {
2415 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2416 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2417 					      buf_size,
2418 					      chan);
2419 		} else {
2420 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421 					      priv->dma_conf.dma_buf_sz,
2422 					      chan);
2423 		}
2424 	}
2425 
2426 	for (chan = 0; chan < tx_channels_count; chan++) {
2427 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2428 
2429 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2430 				txfifosz, qmode);
2431 	}
2432 }
2433 
2434 static void stmmac_xsk_request_timestamp(void *_priv)
2435 {
2436 	struct stmmac_metadata_request *meta_req = _priv;
2437 
2438 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2439 	*meta_req->set_ic = true;
2440 }
2441 
2442 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2443 {
2444 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2445 	struct stmmac_priv *priv = tx_compl->priv;
2446 	struct dma_desc *desc = tx_compl->desc;
2447 	bool found = false;
2448 	u64 ns = 0;
2449 
2450 	if (!priv->hwts_tx_en)
2451 		return 0;
2452 
2453 	/* check tx tstamp status */
2454 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2455 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2456 		found = true;
2457 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2458 		found = true;
2459 	}
2460 
2461 	if (found) {
2462 		ns -= priv->plat->cdc_error_adj;
2463 		return ns_to_ktime(ns);
2464 	}
2465 
2466 	return 0;
2467 }
2468 
2469 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2470 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2471 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2472 };
2473 
2474 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2475 {
2476 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2477 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2478 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2479 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2480 	unsigned int entry = tx_q->cur_tx;
2481 	struct dma_desc *tx_desc = NULL;
2482 	struct xdp_desc xdp_desc;
2483 	bool work_done = true;
2484 	u32 tx_set_ic_bit = 0;
2485 
2486 	/* Avoids TX time-out as we are sharing with slow path */
2487 	txq_trans_cond_update(nq);
2488 
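	/* Never submit more frames than there are free TX descriptors */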
2489 	budget = min(budget, stmmac_tx_avail(priv, queue));
2490 
2491 	while (budget-- > 0) {
2492 		struct stmmac_metadata_request meta_req;
2493 		struct xsk_tx_metadata *meta = NULL;
2494 		dma_addr_t dma_addr;
2495 		bool set_ic;
2496 
2497 		/* Since we share the ring with the slow path, stop XSK TX desc
2498 		 * submission when the available TX ring space drops below the threshold.
2499 		 */
2500 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2501 		    !netif_carrier_ok(priv->dev)) {
2502 			work_done = false;
2503 			break;
2504 		}
2505 
2506 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2507 			break;
2508 
2509 		if (likely(priv->extend_desc))
2510 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2511 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2512 			tx_desc = &tx_q->dma_entx[entry].basic;
2513 		else
2514 			tx_desc = tx_q->dma_tx + entry;
2515 
2516 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2517 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2518 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2519 
2520 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2521 
2522 		/* To return the XDP buffer to the XSK pool, we simply call
2523 		 * xsk_tx_completed(), so we don't need to fill up
2524 		 * 'buf' and 'xdpf'.
2525 		 */
2526 		tx_q->tx_skbuff_dma[entry].buf = 0;
2527 		tx_q->xdpf[entry] = NULL;
2528 
2529 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2530 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2531 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2532 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2533 
2534 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2535 
2536 		tx_q->tx_count_frames++;
2537 
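		/* Set the Interrupt-on-Completion bit once every tx_coal_frames
		 * descriptors (or when timestamping requests it below) to bound
		 * completion latency.
		 */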
2538 		if (!priv->tx_coal_frames[queue])
2539 			set_ic = false;
2540 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2541 			set_ic = true;
2542 		else
2543 			set_ic = false;
2544 
2545 		meta_req.priv = priv;
2546 		meta_req.tx_desc = tx_desc;
2547 		meta_req.set_ic = &set_ic;
2548 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2549 					&meta_req);
2550 		if (set_ic) {
2551 			tx_q->tx_count_frames = 0;
2552 			stmmac_set_tx_ic(priv, tx_desc);
2553 			tx_set_ic_bit++;
2554 		}
2555 
2556 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2557 				       true, priv->mode, true, true,
2558 				       xdp_desc.len);
2559 
2560 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2561 
2562 		xsk_tx_metadata_to_compl(meta,
2563 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2564 
2565 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2566 		entry = tx_q->cur_tx;
2567 	}
2568 	u64_stats_update_begin(&txq_stats->napi_syncp);
2569 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2570 	u64_stats_update_end(&txq_stats->napi_syncp);
2571 
2572 	if (tx_desc) {
2573 		stmmac_flush_tx_descriptors(priv, queue);
2574 		xsk_tx_release(pool);
2575 	}
2576 
2577 	/* Return true if both of the following conditions are met
2578 	 *  a) TX budget is still available
2579 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2580 	 *     pending XSK TX for transmission)
2581 	 */
2582 	return !!budget && work_done;
2583 }
2584 
2585 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2586 {
2587 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2588 		tc += 64;
2589 
2590 		if (priv->plat->force_thresh_dma_mode)
2591 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2592 		else
2593 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2594 						      chan);
2595 
2596 		priv->xstats.threshold = tc;
2597 	}
2598 }
2599 
2600 /**
2601  * stmmac_tx_clean - to manage the transmission completion
2602  * @priv: driver private structure
2603  * @budget: napi budget limiting this functions packet handling
2604  * @queue: TX queue index
2605  * @pending_packets: signal to arm the TX coal timer
2606  * Description: it reclaims the transmit resources after transmission completes.
2607  * If some packets still need to be handled, due to TX coalesce, set
2608  * pending_packets to true to make NAPI arm the TX coal timer.
2609  */
2610 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2611 			   bool *pending_packets)
2612 {
2613 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2614 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2615 	unsigned int bytes_compl = 0, pkts_compl = 0;
2616 	unsigned int entry, xmits = 0, count = 0;
2617 	u32 tx_packets = 0, tx_errors = 0;
2618 
2619 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2620 
2621 	tx_q->xsk_frames_done = 0;
2622 
2623 	entry = tx_q->dirty_tx;
2624 
2625 	/* Try to clean all completed TX frames in one shot */
2626 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2627 		struct xdp_frame *xdpf;
2628 		struct sk_buff *skb;
2629 		struct dma_desc *p;
2630 		int status;
2631 
2632 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2633 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2634 			xdpf = tx_q->xdpf[entry];
2635 			skb = NULL;
2636 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2637 			xdpf = NULL;
2638 			skb = tx_q->tx_skbuff[entry];
2639 		} else {
2640 			xdpf = NULL;
2641 			skb = NULL;
2642 		}
2643 
2644 		if (priv->extend_desc)
2645 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2646 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2647 			p = &tx_q->dma_entx[entry].basic;
2648 		else
2649 			p = tx_q->dma_tx + entry;
2650 
2651 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2652 		/* Check if the descriptor is owned by the DMA */
2653 		if (unlikely(status & tx_dma_own))
2654 			break;
2655 
2656 		count++;
2657 
2658 		/* Make sure descriptor fields are read after reading
2659 		 * the own bit.
2660 		 */
2661 		dma_rmb();
2662 
2663 		/* Just consider the last segment and ...*/
2664 		if (likely(!(status & tx_not_ls))) {
2665 			/* ... verify the status error condition */
2666 			if (unlikely(status & tx_err)) {
2667 				tx_errors++;
2668 				if (unlikely(status & tx_err_bump_tc))
2669 					stmmac_bump_dma_threshold(priv, queue);
2670 			} else {
2671 				tx_packets++;
2672 			}
2673 			if (skb) {
2674 				stmmac_get_tx_hwtstamp(priv, p, skb);
2675 			} else {
2676 				struct stmmac_xsk_tx_complete tx_compl = {
2677 					.priv = priv,
2678 					.desc = p,
2679 				};
2680 
2681 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2682 							 &stmmac_xsk_tx_metadata_ops,
2683 							 &tx_compl);
2684 			}
2685 		}
2686 
2687 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2688 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2689 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2690 				dma_unmap_page(priv->device,
2691 					       tx_q->tx_skbuff_dma[entry].buf,
2692 					       tx_q->tx_skbuff_dma[entry].len,
2693 					       DMA_TO_DEVICE);
2694 			else
2695 				dma_unmap_single(priv->device,
2696 						 tx_q->tx_skbuff_dma[entry].buf,
2697 						 tx_q->tx_skbuff_dma[entry].len,
2698 						 DMA_TO_DEVICE);
2699 			tx_q->tx_skbuff_dma[entry].buf = 0;
2700 			tx_q->tx_skbuff_dma[entry].len = 0;
2701 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2702 		}
2703 
2704 		stmmac_clean_desc3(priv, tx_q, p);
2705 
2706 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2707 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2708 
2709 		if (xdpf &&
2710 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2711 			xdp_return_frame_rx_napi(xdpf);
2712 			tx_q->xdpf[entry] = NULL;
2713 		}
2714 
2715 		if (xdpf &&
2716 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2717 			xdp_return_frame(xdpf);
2718 			tx_q->xdpf[entry] = NULL;
2719 		}
2720 
2721 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2722 			tx_q->xsk_frames_done++;
2723 
2724 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2725 			if (likely(skb)) {
2726 				pkts_compl++;
2727 				bytes_compl += skb->len;
2728 				dev_consume_skb_any(skb);
2729 				tx_q->tx_skbuff[entry] = NULL;
2730 			}
2731 		}
2732 
2733 		stmmac_release_tx_desc(priv, p, priv->mode);
2734 
2735 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2736 	}
2737 	tx_q->dirty_tx = entry;
2738 
2739 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2740 				  pkts_compl, bytes_compl);
2741 
2742 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2743 								queue))) &&
2744 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2745 
2746 		netif_dbg(priv, tx_done, priv->dev,
2747 			  "%s: restart transmit\n", __func__);
2748 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2749 	}
2750 
2751 	if (tx_q->xsk_pool) {
2752 		bool work_done;
2753 
2754 		if (tx_q->xsk_frames_done)
2755 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2756 
2757 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2758 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2759 
2760 		/* For XSK TX, we try to send as many frames as possible.
2761 		 * If XSK work is done (XSK TX desc empty and budget still
2762 		 * available), return "budget - 1" to re-enable the TX IRQ.
2763 		 * Else, return "budget" to make NAPI continue polling.
2764 		 */
2765 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2766 					       STMMAC_XSK_TX_BUDGET_MAX);
2767 		if (work_done)
2768 			xmits = budget - 1;
2769 		else
2770 			xmits = budget;
2771 	}
2772 
2773 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2774 	    priv->eee_sw_timer_en) {
2775 		if (stmmac_enable_eee_mode(priv))
2776 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2777 	}
2778 
2779 	/* We still have pending packets, let's call for a new scheduling */
2780 	if (tx_q->dirty_tx != tx_q->cur_tx)
2781 		*pending_packets = true;
2782 
2783 	u64_stats_update_begin(&txq_stats->napi_syncp);
2784 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2785 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2786 	u64_stats_inc(&txq_stats->napi.tx_clean);
2787 	u64_stats_update_end(&txq_stats->napi_syncp);
2788 
2789 	priv->xstats.tx_errors += tx_errors;
2790 
2791 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2792 
2793 	/* Combine decisions from TX clean and XSK TX */
2794 	return max(count, xmits);
2795 }
2796 
2797 /**
2798  * stmmac_tx_err - to manage the tx error
2799  * @priv: driver private structure
2800  * @chan: channel index
2801  * Description: it cleans the descriptors and restarts the transmission
2802  * in case of transmission errors.
2803  */
2804 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2805 {
2806 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2807 
2808 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
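	/* Stop the channel, drop all pending TX buffers, reinitialize the
	 * ring and restart the DMA to recover from the error.
	 */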
2809 
2810 	stmmac_stop_tx_dma(priv, chan);
2811 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2812 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2813 	stmmac_reset_tx_queue(priv, chan);
2814 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2815 			    tx_q->dma_tx_phy, chan);
2816 	stmmac_start_tx_dma(priv, chan);
2817 
2818 	priv->xstats.tx_errors++;
2819 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2820 }
2821 
2822 /**
2823  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2824  *  @priv: driver private structure
2825  *  @txmode: TX operating mode
2826  *  @rxmode: RX operating mode
2827  *  @chan: channel index
2828  *  Description: it is used for configuring the DMA operation mode at
2829  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2830  *  mode.
2831  */
2832 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2833 					  u32 rxmode, u32 chan)
2834 {
2835 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2836 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2837 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2838 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2839 	int rxfifosz = priv->plat->rx_fifo_size;
2840 	int txfifosz = priv->plat->tx_fifo_size;
2841 
2842 	if (rxfifosz == 0)
2843 		rxfifosz = priv->dma_cap.rx_fifo_size;
2844 	if (txfifosz == 0)
2845 		txfifosz = priv->dma_cap.tx_fifo_size;
2846 
2847 	/* Adjust for real per queue fifo size */
2848 	rxfifosz /= rx_channels_count;
2849 	txfifosz /= tx_channels_count;
2850 
2851 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2852 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2853 }
2854 
2855 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2856 {
2857 	int ret;
2858 
2859 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2860 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2861 	if (ret && (ret != -EINVAL)) {
2862 		stmmac_global_err(priv);
2863 		return true;
2864 	}
2865 
2866 	return false;
2867 }
2868 
2869 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2870 {
2871 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2872 						 &priv->xstats, chan, dir);
2873 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2874 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2875 	struct stmmac_channel *ch = &priv->channel[chan];
2876 	struct napi_struct *rx_napi;
2877 	struct napi_struct *tx_napi;
2878 	unsigned long flags;
2879 
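	/* With an XSK pool attached, both RX and TX are handled by the
	 * combined rxtx NAPI instance.
	 */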
2880 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2881 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2882 
2883 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2884 		if (napi_schedule_prep(rx_napi)) {
2885 			spin_lock_irqsave(&ch->lock, flags);
2886 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2887 			spin_unlock_irqrestore(&ch->lock, flags);
2888 			__napi_schedule(rx_napi);
2889 		}
2890 	}
2891 
2892 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2893 		if (napi_schedule_prep(tx_napi)) {
2894 			spin_lock_irqsave(&ch->lock, flags);
2895 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2896 			spin_unlock_irqrestore(&ch->lock, flags);
2897 			__napi_schedule(tx_napi);
2898 		}
2899 	}
2900 
2901 	return status;
2902 }
2903 
2904 /**
2905  * stmmac_dma_interrupt - DMA ISR
2906  * @priv: driver private structure
2907  * Description: this is the DMA ISR. It is called by the main ISR.
2908  * It calls the dwmac dma routine and schedules the poll method when there
2909  * is work that can be done.
2910  */
2911 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2912 {
2913 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2914 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2915 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2916 				tx_channel_count : rx_channel_count;
2917 	u32 chan;
2918 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2919 
2920 	/* Make sure we never check beyond our status buffer. */
2921 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2922 		channels_to_check = ARRAY_SIZE(status);
2923 
2924 	for (chan = 0; chan < channels_to_check; chan++)
2925 		status[chan] = stmmac_napi_check(priv, chan,
2926 						 DMA_DIR_RXTX);
2927 
2928 	for (chan = 0; chan < tx_channel_count; chan++) {
2929 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2930 			/* Try to bump up the dma threshold on this failure */
2931 			stmmac_bump_dma_threshold(priv, chan);
2932 		} else if (unlikely(status[chan] == tx_hard_error)) {
2933 			stmmac_tx_err(priv, chan);
2934 		}
2935 	}
2936 }
2937 
2938 /**
2939  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2940  * @priv: driver private structure
2941  * Description: this masks the MMC irq since the counters are managed in SW.
2942  */
2943 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2944 {
2945 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2946 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2947 
2948 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2949 
2950 	if (priv->dma_cap.rmon) {
2951 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2952 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2953 	} else
2954 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2955 }
2956 
2957 /**
2958  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2959  * @priv: driver private structure
2960  * Description:
2961  *  newer GMAC chip generations have a register that indicates the
2962  *  presence of the optional features/functions.
2963  *  This can also be used to override the value passed through the
2964  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2965  */
2966 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2967 {
2968 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2969 }
2970 
2971 /**
2972  * stmmac_check_ether_addr - check if the MAC addr is valid
2973  * @priv: driver private structure
2974  * Description:
2975  * it verifies that the MAC address is valid; if it is not, the address is
2976  * read from the HW or, failing that, a random MAC address is generated
2977  */
2978 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2979 {
2980 	u8 addr[ETH_ALEN];
2981 
2982 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2983 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2984 		if (is_valid_ether_addr(addr))
2985 			eth_hw_addr_set(priv->dev, addr);
2986 		else
2987 			eth_hw_addr_random(priv->dev);
2988 		dev_info(priv->device, "device MAC address %pM\n",
2989 			 priv->dev->dev_addr);
2990 	}
2991 }
2992 
2993 /**
2994  * stmmac_init_dma_engine - DMA init.
2995  * @priv: driver private structure
2996  * Description:
2997  * It inits the DMA by invoking the specific MAC/GMAC callback.
2998  * Some DMA parameters can be passed from the platform;
2999  * if they are not passed, a default is kept for the MAC or GMAC.
3000  */
3001 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3002 {
3003 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3004 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3005 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3006 	struct stmmac_rx_queue *rx_q;
3007 	struct stmmac_tx_queue *tx_q;
3008 	u32 chan = 0;
3009 	int atds = 0;
3010 	int ret = 0;
3011 
3012 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3013 		dev_err(priv->device, "Invalid DMA configuration\n");
3014 		return -EINVAL;
3015 	}
3016 
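	/* The alternate descriptor size (ATDS) is only set when extended
	 * descriptors are used in ring mode.
	 */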
3017 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3018 		atds = 1;
3019 
3020 	ret = stmmac_reset(priv, priv->ioaddr);
3021 	if (ret) {
3022 		dev_err(priv->device, "Failed to reset the dma\n");
3023 		return ret;
3024 	}
3025 
3026 	/* DMA Configuration */
3027 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
3028 
3029 	if (priv->plat->axi)
3030 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3031 
3032 	/* DMA CSR Channel configuration */
3033 	for (chan = 0; chan < dma_csr_ch; chan++) {
3034 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3035 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3036 	}
3037 
3038 	/* DMA RX Channel Configuration */
3039 	for (chan = 0; chan < rx_channels_count; chan++) {
3040 		rx_q = &priv->dma_conf.rx_queue[chan];
3041 
3042 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3043 				    rx_q->dma_rx_phy, chan);
3044 
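		/* The RX tail pointer only covers descriptors that actually
		 * have a buffer attached (buf_alloc_num may be smaller than
		 * the ring size, e.g. for XSK TX-only setups).
		 */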
3045 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3046 				     (rx_q->buf_alloc_num *
3047 				      sizeof(struct dma_desc));
3048 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3049 				       rx_q->rx_tail_addr, chan);
3050 	}
3051 
3052 	/* DMA TX Channel Configuration */
3053 	for (chan = 0; chan < tx_channels_count; chan++) {
3054 		tx_q = &priv->dma_conf.tx_queue[chan];
3055 
3056 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3057 				    tx_q->dma_tx_phy, chan);
3058 
3059 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3060 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3061 				       tx_q->tx_tail_addr, chan);
3062 	}
3063 
3064 	return ret;
3065 }
3066 
3067 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3068 {
3069 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3070 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3071 	struct stmmac_channel *ch;
3072 	struct napi_struct *napi;
3073 
3074 	if (!tx_coal_timer)
3075 		return;
3076 
3077 	ch = &priv->channel[tx_q->queue_index];
3078 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3079 
3080 	/* Arm the timer only if napi is not already scheduled.
3081 	 * If napi is scheduled, try to cancel any pending timer; it will be
3082 	 * armed again on the next scheduled napi.
3083 	 */
3084 	if (unlikely(!napi_is_scheduled(napi)))
3085 		hrtimer_start(&tx_q->txtimer,
3086 			      STMMAC_COAL_TIMER(tx_coal_timer),
3087 			      HRTIMER_MODE_REL);
3088 	else
3089 		hrtimer_try_to_cancel(&tx_q->txtimer);
3090 }
3091 
3092 /**
3093  * stmmac_tx_timer - mitigation sw timer for tx.
3094  * @t: data pointer
3095  * Description:
3096  * This is the timer handler that schedules the TX NAPI which, in turn, runs stmmac_tx_clean.
3097  */
3098 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3099 {
3100 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3101 	struct stmmac_priv *priv = tx_q->priv_data;
3102 	struct stmmac_channel *ch;
3103 	struct napi_struct *napi;
3104 
3105 	ch = &priv->channel[tx_q->queue_index];
3106 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3107 
3108 	if (likely(napi_schedule_prep(napi))) {
3109 		unsigned long flags;
3110 
3111 		spin_lock_irqsave(&ch->lock, flags);
3112 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3113 		spin_unlock_irqrestore(&ch->lock, flags);
3114 		__napi_schedule(napi);
3115 	}
3116 
3117 	return HRTIMER_NORESTART;
3118 }
3119 
3120 /**
3121  * stmmac_init_coalesce - init mitigation options.
3122  * @priv: driver private structure
3123  * Description:
3124  * This inits the coalesce parameters: i.e. timer rate,
3125  * timer handler and default threshold used for enabling the
3126  * interrupt on completion bit.
3127  */
3128 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3129 {
3130 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3131 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3132 	u32 chan;
3133 
3134 	for (chan = 0; chan < tx_channel_count; chan++) {
3135 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3136 
3137 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3138 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3139 
3140 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3141 		tx_q->txtimer.function = stmmac_tx_timer;
3142 	}
3143 
3144 	for (chan = 0; chan < rx_channel_count; chan++)
3145 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3146 }
3147 
3148 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3149 {
3150 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3151 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3152 	u32 chan;
3153 
3154 	/* set TX ring length */
3155 	for (chan = 0; chan < tx_channels_count; chan++)
3156 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3157 				       (priv->dma_conf.dma_tx_size - 1), chan);
3158 
3159 	/* set RX ring length */
3160 	for (chan = 0; chan < rx_channels_count; chan++)
3161 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3162 				       (priv->dma_conf.dma_rx_size - 1), chan);
3163 }
3164 
3165 /**
3166  *  stmmac_set_tx_queue_weight - Set TX queue weight
3167  *  @priv: driver private structure
3168  *  Description: It is used for setting the TX queue weights
3169  */
3170 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3171 {
3172 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3173 	u32 weight;
3174 	u32 queue;
3175 
3176 	for (queue = 0; queue < tx_queues_count; queue++) {
3177 		weight = priv->plat->tx_queues_cfg[queue].weight;
3178 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3179 	}
3180 }
3181 
3182 /**
3183  *  stmmac_configure_cbs - Configure CBS in TX queue
3184  *  @priv: driver private structure
3185  *  Description: It is used for configuring CBS in AVB TX queues
3186  */
3187 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3188 {
3189 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3190 	u32 mode_to_use;
3191 	u32 queue;
3192 
3193 	/* queue 0 is reserved for legacy traffic */
3194 	for (queue = 1; queue < tx_queues_count; queue++) {
3195 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3196 		if (mode_to_use == MTL_QUEUE_DCB)
3197 			continue;
3198 
3199 		stmmac_config_cbs(priv, priv->hw,
3200 				priv->plat->tx_queues_cfg[queue].send_slope,
3201 				priv->plat->tx_queues_cfg[queue].idle_slope,
3202 				priv->plat->tx_queues_cfg[queue].high_credit,
3203 				priv->plat->tx_queues_cfg[queue].low_credit,
3204 				queue);
3205 	}
3206 }
3207 
3208 /**
3209  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3210  *  @priv: driver private structure
3211  *  Description: It is used for mapping RX queues to RX dma channels
3212  */
3213 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3214 {
3215 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3216 	u32 queue;
3217 	u32 chan;
3218 
3219 	for (queue = 0; queue < rx_queues_count; queue++) {
3220 		chan = priv->plat->rx_queues_cfg[queue].chan;
3221 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3222 	}
3223 }
3224 
3225 /**
3226  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3227  *  @priv: driver private structure
3228  *  Description: It is used for configuring the RX Queue Priority
3229  */
3230 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3231 {
3232 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3233 	u32 queue;
3234 	u32 prio;
3235 
3236 	for (queue = 0; queue < rx_queues_count; queue++) {
3237 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3238 			continue;
3239 
3240 		prio = priv->plat->rx_queues_cfg[queue].prio;
3241 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3242 	}
3243 }
3244 
3245 /**
3246  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3247  *  @priv: driver private structure
3248  *  Description: It is used for configuring the TX Queue Priority
3249  */
3250 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3251 {
3252 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3253 	u32 queue;
3254 	u32 prio;
3255 
3256 	for (queue = 0; queue < tx_queues_count; queue++) {
3257 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3258 			continue;
3259 
3260 		prio = priv->plat->tx_queues_cfg[queue].prio;
3261 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3262 	}
3263 }
3264 
3265 /**
3266  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3267  *  @priv: driver private structure
3268  *  Description: It is used for configuring the RX queue routing
3269  */
3270 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3271 {
3272 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3273 	u32 queue;
3274 	u8 packet;
3275 
3276 	for (queue = 0; queue < rx_queues_count; queue++) {
3277 		/* no specific packet type routing specified for the queue */
3278 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3279 			continue;
3280 
3281 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3282 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3283 	}
3284 }
3285 
3286 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3287 {
3288 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3289 		priv->rss.enable = false;
3290 		return;
3291 	}
3292 
3293 	if (priv->dev->features & NETIF_F_RXHASH)
3294 		priv->rss.enable = true;
3295 	else
3296 		priv->rss.enable = false;
3297 
3298 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3299 			     priv->plat->rx_queues_to_use);
3300 }
3301 
3302 /**
3303  *  stmmac_mtl_configuration - Configure MTL
3304  *  @priv: driver private structure
3305  *  Description: It is used for configuring MTL
3306  */
3307 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3308 {
3309 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3310 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3311 
3312 	if (tx_queues_count > 1)
3313 		stmmac_set_tx_queue_weight(priv);
3314 
3315 	/* Configure MTL RX algorithms */
3316 	if (rx_queues_count > 1)
3317 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3318 				priv->plat->rx_sched_algorithm);
3319 
3320 	/* Configure MTL TX algorithms */
3321 	if (tx_queues_count > 1)
3322 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3323 				priv->plat->tx_sched_algorithm);
3324 
3325 	/* Configure CBS in AVB TX queues */
3326 	if (tx_queues_count > 1)
3327 		stmmac_configure_cbs(priv);
3328 
3329 	/* Map RX MTL to DMA channels */
3330 	stmmac_rx_queue_dma_chan_map(priv);
3331 
3332 	/* Enable MAC RX Queues */
3333 	stmmac_mac_enable_rx_queues(priv);
3334 
3335 	/* Set RX priorities */
3336 	if (rx_queues_count > 1)
3337 		stmmac_mac_config_rx_queues_prio(priv);
3338 
3339 	/* Set TX priorities */
3340 	if (tx_queues_count > 1)
3341 		stmmac_mac_config_tx_queues_prio(priv);
3342 
3343 	/* Set RX routing */
3344 	if (rx_queues_count > 1)
3345 		stmmac_mac_config_rx_queues_routing(priv);
3346 
3347 	/* Receive Side Scaling */
3348 	if (rx_queues_count > 1)
3349 		stmmac_mac_config_rss(priv);
3350 }
3351 
3352 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3353 {
3354 	if (priv->dma_cap.asp) {
3355 		netdev_info(priv->dev, "Enabling Safety Features\n");
3356 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3357 					  priv->plat->safety_feat_cfg);
3358 	} else {
3359 		netdev_info(priv->dev, "No Safety Features support found\n");
3360 	}
3361 }
3362 
3363 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3364 {
3365 	char *name;
3366 
3367 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3368 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3369 
3370 	name = priv->wq_name;
3371 	sprintf(name, "%s-fpe", priv->dev->name);
3372 
3373 	priv->fpe_wq = create_singlethread_workqueue(name);
3374 	if (!priv->fpe_wq) {
3375 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3376 
3377 		return -ENOMEM;
3378 	}
3379 	netdev_info(priv->dev, "FPE workqueue start\n");
3380 
3381 	return 0;
3382 }
3383 
3384 /**
3385  * stmmac_hw_setup - setup mac in a usable state.
3386  *  @dev : pointer to the device structure.
3387  *  @ptp_register: register PTP if set
3388  *  Description:
3389  *  this is the main function to set up the HW in a usable state: the
3390  *  dma engine is reset, the core registers are configured (e.g. AXI,
3391  *  Checksum features, timers) and the DMA is ready to start receiving and
3392  *  transmitting.
3393  *  Return value:
3394  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3395  *  file on failure.
3396  */
3397 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3398 {
3399 	struct stmmac_priv *priv = netdev_priv(dev);
3400 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3401 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3402 	bool sph_en;
3403 	u32 chan;
3404 	int ret;
3405 
3406 	/* DMA initialization and SW reset */
3407 	ret = stmmac_init_dma_engine(priv);
3408 	if (ret < 0) {
3409 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3410 			   __func__);
3411 		return ret;
3412 	}
3413 
3414 	/* Copy the MAC addr into the HW  */
3415 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3416 
3417 	/* PS and related bits will be programmed according to the speed */
3418 	if (priv->hw->pcs) {
3419 		int speed = priv->plat->mac_port_sel_speed;
3420 
3421 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3422 		    (speed == SPEED_1000)) {
3423 			priv->hw->ps = speed;
3424 		} else {
3425 			dev_warn(priv->device, "invalid port speed\n");
3426 			priv->hw->ps = 0;
3427 		}
3428 	}
3429 
3430 	/* Initialize the MAC Core */
3431 	stmmac_core_init(priv, priv->hw, dev);
3432 
3433 	/* Initialize MTL */
3434 	stmmac_mtl_configuration(priv);
3435 
3436 	/* Initialize Safety Features */
3437 	stmmac_safety_feat_configuration(priv);
3438 
3439 	ret = stmmac_rx_ipc(priv, priv->hw);
3440 	if (!ret) {
3441 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3442 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3443 		priv->hw->rx_csum = 0;
3444 	}
3445 
3446 	/* Enable the MAC Rx/Tx */
3447 	stmmac_mac_set(priv, priv->ioaddr, true);
3448 
3449 	/* Set the HW DMA mode and the COE */
3450 	stmmac_dma_operation_mode(priv);
3451 
3452 	stmmac_mmc_setup(priv);
3453 
3454 	if (ptp_register) {
3455 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3456 		if (ret < 0)
3457 			netdev_warn(priv->dev,
3458 				    "failed to enable PTP reference clock: %pe\n",
3459 				    ERR_PTR(ret));
3460 	}
3461 
3462 	ret = stmmac_init_ptp(priv);
3463 	if (ret == -EOPNOTSUPP)
3464 		netdev_info(priv->dev, "PTP not supported by HW\n");
3465 	else if (ret)
3466 		netdev_warn(priv->dev, "PTP init failed\n");
3467 	else if (ptp_register)
3468 		stmmac_ptp_register(priv);
3469 
3470 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3471 
3472 	/* Convert the timer from msec to usec */
3473 	if (!priv->tx_lpi_timer)
3474 		priv->tx_lpi_timer = eee_timer * 1000;
3475 
3476 	if (priv->use_riwt) {
3477 		u32 queue;
3478 
3479 		for (queue = 0; queue < rx_cnt; queue++) {
3480 			if (!priv->rx_riwt[queue])
3481 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3482 
3483 			stmmac_rx_watchdog(priv, priv->ioaddr,
3484 					   priv->rx_riwt[queue], queue);
3485 		}
3486 	}
3487 
3488 	if (priv->hw->pcs)
3489 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3490 
3491 	/* set TX and RX rings length */
3492 	stmmac_set_rings_length(priv);
3493 
3494 	/* Enable TSO */
3495 	if (priv->tso) {
3496 		for (chan = 0; chan < tx_cnt; chan++) {
3497 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3498 
3499 			/* TSO and TBS cannot co-exist */
3500 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3501 				continue;
3502 
3503 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3504 		}
3505 	}
3506 
3507 	/* Enable Split Header */
3508 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3509 	for (chan = 0; chan < rx_cnt; chan++)
3510 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3511 
3512 
3513 	/* VLAN Tag Insertion */
3514 	if (priv->dma_cap.vlins)
3515 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3516 
3517 	/* TBS */
3518 	for (chan = 0; chan < tx_cnt; chan++) {
3519 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3520 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3521 
3522 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3523 	}
3524 
3525 	/* Configure real RX and TX queues */
3526 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3527 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3528 
3529 	/* Start the ball rolling... */
3530 	stmmac_start_all_dma(priv);
3531 
3532 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3533 
3534 	if (priv->dma_cap.fpesel) {
3535 		stmmac_fpe_start_wq(priv);
3536 
3537 		if (priv->plat->fpe_cfg->enable)
3538 			stmmac_fpe_handshake(priv, true);
3539 	}
3540 
3541 	return 0;
3542 }
3543 
3544 static void stmmac_hw_teardown(struct net_device *dev)
3545 {
3546 	struct stmmac_priv *priv = netdev_priv(dev);
3547 
3548 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3549 }
3550 
3551 static void stmmac_free_irq(struct net_device *dev,
3552 			    enum request_irq_err irq_err, int irq_idx)
3553 {
3554 	struct stmmac_priv *priv = netdev_priv(dev);
3555 	int j;
3556 
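	/* The cases below intentionally fall through: starting from the point
	 * of failure, every IRQ requested before it is released in reverse
	 * order of allocation.
	 */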
3557 	switch (irq_err) {
3558 	case REQ_IRQ_ERR_ALL:
3559 		irq_idx = priv->plat->tx_queues_to_use;
3560 		fallthrough;
3561 	case REQ_IRQ_ERR_TX:
3562 		for (j = irq_idx - 1; j >= 0; j--) {
3563 			if (priv->tx_irq[j] > 0) {
3564 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3565 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3566 			}
3567 		}
3568 		irq_idx = priv->plat->rx_queues_to_use;
3569 		fallthrough;
3570 	case REQ_IRQ_ERR_RX:
3571 		for (j = irq_idx - 1; j >= 0; j--) {
3572 			if (priv->rx_irq[j] > 0) {
3573 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3574 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3575 			}
3576 		}
3577 
3578 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3579 			free_irq(priv->sfty_ue_irq, dev);
3580 		fallthrough;
3581 	case REQ_IRQ_ERR_SFTY_UE:
3582 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3583 			free_irq(priv->sfty_ce_irq, dev);
3584 		fallthrough;
3585 	case REQ_IRQ_ERR_SFTY_CE:
3586 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3587 			free_irq(priv->lpi_irq, dev);
3588 		fallthrough;
3589 	case REQ_IRQ_ERR_LPI:
3590 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3591 			free_irq(priv->wol_irq, dev);
3592 		fallthrough;
3593 	case REQ_IRQ_ERR_WOL:
3594 		free_irq(dev->irq, dev);
3595 		fallthrough;
3596 	case REQ_IRQ_ERR_MAC:
3597 	case REQ_IRQ_ERR_NO:
3598 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3599 		break;
3600 	}
3601 }
3602 
3603 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3604 {
3605 	struct stmmac_priv *priv = netdev_priv(dev);
3606 	enum request_irq_err irq_err;
3607 	cpumask_t cpu_mask;
3608 	int irq_idx = 0;
3609 	char *int_name;
3610 	int ret;
3611 	int i;
3612 
3613 	/* For common interrupt */
3614 	int_name = priv->int_name_mac;
3615 	sprintf(int_name, "%s:%s", dev->name, "mac");
3616 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3617 			  0, int_name, dev);
3618 	if (unlikely(ret < 0)) {
3619 		netdev_err(priv->dev,
3620 			   "%s: alloc mac MSI %d (error: %d)\n",
3621 			   __func__, dev->irq, ret);
3622 		irq_err = REQ_IRQ_ERR_MAC;
3623 		goto irq_error;
3624 	}
3625 
3626 	/* Request the Wake IRQ in case another line
3627 	 * is used for WoL
3628 	 */
3629 	priv->wol_irq_disabled = true;
3630 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3631 		int_name = priv->int_name_wol;
3632 		sprintf(int_name, "%s:%s", dev->name, "wol");
3633 		ret = request_irq(priv->wol_irq,
3634 				  stmmac_mac_interrupt,
3635 				  0, int_name, dev);
3636 		if (unlikely(ret < 0)) {
3637 			netdev_err(priv->dev,
3638 				   "%s: alloc wol MSI %d (error: %d)\n",
3639 				   __func__, priv->wol_irq, ret);
3640 			irq_err = REQ_IRQ_ERR_WOL;
3641 			goto irq_error;
3642 		}
3643 	}
3644 
3645 	/* Request the LPI IRQ in case another line
3646 	 * is used for LPI
3647 	 */
3648 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3649 		int_name = priv->int_name_lpi;
3650 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3651 		ret = request_irq(priv->lpi_irq,
3652 				  stmmac_mac_interrupt,
3653 				  0, int_name, dev);
3654 		if (unlikely(ret < 0)) {
3655 			netdev_err(priv->dev,
3656 				   "%s: alloc lpi MSI %d (error: %d)\n",
3657 				   __func__, priv->lpi_irq, ret);
3658 			irq_err = REQ_IRQ_ERR_LPI;
3659 			goto irq_error;
3660 		}
3661 	}
3662 
3663 	/* Request the Safety Feature Correctable Error line in
3664 	 * case another line is used
3665 	 */
3666 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3667 		int_name = priv->int_name_sfty_ce;
3668 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3669 		ret = request_irq(priv->sfty_ce_irq,
3670 				  stmmac_safety_interrupt,
3671 				  0, int_name, dev);
3672 		if (unlikely(ret < 0)) {
3673 			netdev_err(priv->dev,
3674 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3675 				   __func__, priv->sfty_ce_irq, ret);
3676 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3677 			goto irq_error;
3678 		}
3679 	}
3680 
3681 	/* Request the Safety Feature Uncorrectable Error line in
3682 	 * case another line is used
3683 	 */
3684 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3685 		int_name = priv->int_name_sfty_ue;
3686 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3687 		ret = request_irq(priv->sfty_ue_irq,
3688 				  stmmac_safety_interrupt,
3689 				  0, int_name, dev);
3690 		if (unlikely(ret < 0)) {
3691 			netdev_err(priv->dev,
3692 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3693 				   __func__, priv->sfty_ue_irq, ret);
3694 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3695 			goto irq_error;
3696 		}
3697 	}
3698 
3699 	/* Request Rx MSI irq */
3700 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3701 		if (i >= MTL_MAX_RX_QUEUES)
3702 			break;
3703 		if (priv->rx_irq[i] == 0)
3704 			continue;
3705 
3706 		int_name = priv->int_name_rx_irq[i];
3707 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3708 		ret = request_irq(priv->rx_irq[i],
3709 				  stmmac_msi_intr_rx,
3710 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3711 		if (unlikely(ret < 0)) {
3712 			netdev_err(priv->dev,
3713 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3714 				   __func__, i, priv->rx_irq[i], ret);
3715 			irq_err = REQ_IRQ_ERR_RX;
3716 			irq_idx = i;
3717 			goto irq_error;
3718 		}
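		/* Spread the per-queue RX interrupts across the online CPUs
		 * as an initial affinity hint.
		 */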
3719 		cpumask_clear(&cpu_mask);
3720 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3721 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3722 	}
3723 
3724 	/* Request Tx MSI irq */
3725 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3726 		if (i >= MTL_MAX_TX_QUEUES)
3727 			break;
3728 		if (priv->tx_irq[i] == 0)
3729 			continue;
3730 
3731 		int_name = priv->int_name_tx_irq[i];
3732 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3733 		ret = request_irq(priv->tx_irq[i],
3734 				  stmmac_msi_intr_tx,
3735 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3736 		if (unlikely(ret < 0)) {
3737 			netdev_err(priv->dev,
3738 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3739 				   __func__, i, priv->tx_irq[i], ret);
3740 			irq_err = REQ_IRQ_ERR_TX;
3741 			irq_idx = i;
3742 			goto irq_error;
3743 		}
3744 		cpumask_clear(&cpu_mask);
3745 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3746 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3747 	}
3748 
3749 	return 0;
3750 
3751 irq_error:
3752 	stmmac_free_irq(dev, irq_err, irq_idx);
3753 	return ret;
3754 }
3755 
3756 static int stmmac_request_irq_single(struct net_device *dev)
3757 {
3758 	struct stmmac_priv *priv = netdev_priv(dev);
3759 	enum request_irq_err irq_err;
3760 	int ret;
3761 
3762 	ret = request_irq(dev->irq, stmmac_interrupt,
3763 			  IRQF_SHARED, dev->name, dev);
3764 	if (unlikely(ret < 0)) {
3765 		netdev_err(priv->dev,
3766 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3767 			   __func__, dev->irq, ret);
3768 		irq_err = REQ_IRQ_ERR_MAC;
3769 		goto irq_error;
3770 	}
3771 
3772 	/* Request the Wake IRQ in case another line
3773 	 * is used for WoL
3774 	 */
3775 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3776 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3777 				  IRQF_SHARED, dev->name, dev);
3778 		if (unlikely(ret < 0)) {
3779 			netdev_err(priv->dev,
3780 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3781 				   __func__, priv->wol_irq, ret);
3782 			irq_err = REQ_IRQ_ERR_WOL;
3783 			goto irq_error;
3784 		}
3785 	}
3786 
3787 	/* Request the LPI IRQ in case another line is used for LPI */
3788 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3789 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3790 				  IRQF_SHARED, dev->name, dev);
3791 		if (unlikely(ret < 0)) {
3792 			netdev_err(priv->dev,
3793 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3794 				   __func__, priv->lpi_irq, ret);
3795 			irq_err = REQ_IRQ_ERR_LPI;
3796 			goto irq_error;
3797 		}
3798 	}
3799 
3800 	return 0;
3801 
3802 irq_error:
3803 	stmmac_free_irq(dev, irq_err, 0);
3804 	return ret;
3805 }
3806 
3807 static int stmmac_request_irq(struct net_device *dev)
3808 {
3809 	struct stmmac_priv *priv = netdev_priv(dev);
3810 	int ret;
3811 
3812 	/* Request the IRQ lines */
3813 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3814 		ret = stmmac_request_irq_multi_msi(dev);
3815 	else
3816 		ret = stmmac_request_irq_single(dev);
3817 
3818 	return ret;
3819 }
3820 
3821 /**
3822  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3823  *  @priv: driver private structure
3824  *  @mtu: MTU to setup the dma queue and buf with
3825  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3826  *  Allocate the Tx/Rx DMA queues and init them.
3827  *  Return value:
3828  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3829  */
3830 static struct stmmac_dma_conf *
3831 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3832 {
3833 	struct stmmac_dma_conf *dma_conf;
3834 	int chan, bfsize, ret;
3835 
3836 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3837 	if (!dma_conf) {
3838 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3839 			   __func__);
3840 		return ERR_PTR(-ENOMEM);
3841 	}
3842 
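	/* Select the RX DMA buffer size for this MTU: try the 16 KiB buffer
	 * size first (when usable for this MTU) and otherwise derive the
	 * size from the MTU.
	 */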
3843 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3844 	if (bfsize < 0)
3845 		bfsize = 0;
3846 
3847 	if (bfsize < BUF_SIZE_16KiB)
3848 		bfsize = stmmac_set_bfsize(mtu, 0);
3849 
3850 	dma_conf->dma_buf_sz = bfsize;
3851 	/* Choose the Tx/Rx ring sizes from the ones already defined in the
3852 	 * priv struct, if any.
3853 	 */
3854 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3855 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3856 
3857 	if (!dma_conf->dma_tx_size)
3858 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3859 	if (!dma_conf->dma_rx_size)
3860 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3861 
3862 	/* Check for TBS before allocating the TX descriptors */
3863 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3864 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3865 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3866 
3867 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3868 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3869 	}
3870 
3871 	ret = alloc_dma_desc_resources(priv, dma_conf);
3872 	if (ret < 0) {
3873 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3874 			   __func__);
3875 		goto alloc_error;
3876 	}
3877 
3878 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3879 	if (ret < 0) {
3880 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3881 			   __func__);
3882 		goto init_error;
3883 	}
3884 
3885 	return dma_conf;
3886 
3887 init_error:
3888 	free_dma_desc_resources(priv, dma_conf);
3889 alloc_error:
3890 	kfree(dma_conf);
3891 	return ERR_PTR(ret);
3892 }
3893 
3894 /**
3895  *  __stmmac_open - open entry point of the driver
3896  *  @dev : pointer to the device structure.
3897  *  @dma_conf :  structure holding the DMA configuration and queues to use
3898  *  Description:
3899  *  This function is the open entry point of the driver.
3900  *  Return value:
3901  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3902  *  file on failure.
3903  */
3904 static int __stmmac_open(struct net_device *dev,
3905 			 struct stmmac_dma_conf *dma_conf)
3906 {
3907 	struct stmmac_priv *priv = netdev_priv(dev);
3908 	int mode = priv->plat->phy_interface;
3909 	u32 chan;
3910 	int ret;
3911 
3912 	ret = pm_runtime_resume_and_get(priv->device);
3913 	if (ret < 0)
3914 		return ret;
3915 
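	/* Only attach a PHY when the link is not entirely handled by a PCS
	 * (TBI/RTBI, an XPCS in C73 mode, or a Lynx PCS).
	 */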
3916 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3917 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3918 	    (!priv->hw->xpcs ||
3919 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3920 	    !priv->hw->lynx_pcs) {
3921 		ret = stmmac_init_phy(dev);
3922 		if (ret) {
3923 			netdev_err(priv->dev,
3924 				   "%s: Cannot attach to PHY (error: %d)\n",
3925 				   __func__, ret);
3926 			goto init_phy_error;
3927 		}
3928 	}
3929 
3930 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3931 
3932 	buf_sz = dma_conf->dma_buf_sz;
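	/* Preserve any per-queue TBS enable state configured earlier so it
	 * is not lost when the new dma_conf is copied over priv->dma_conf.
	 */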
3933 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3934 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3935 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3936 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3937 
3938 	stmmac_reset_queues_param(priv);
3939 
3940 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3941 	    priv->plat->serdes_powerup) {
3942 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3943 		if (ret < 0) {
3944 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3945 				   __func__);
3946 			goto init_error;
3947 		}
3948 	}
3949 
3950 	ret = stmmac_hw_setup(dev, true);
3951 	if (ret < 0) {
3952 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3953 		goto init_error;
3954 	}
3955 
3956 	stmmac_init_coalesce(priv);
3957 
3958 	phylink_start(priv->phylink);
3959 	/* We may have called phylink_speed_down before */
3960 	phylink_speed_up(priv->phylink);
3961 
3962 	ret = stmmac_request_irq(dev);
3963 	if (ret)
3964 		goto irq_error;
3965 
3966 	stmmac_enable_all_queues(priv);
3967 	netif_tx_start_all_queues(priv->dev);
3968 	stmmac_enable_all_dma_irq(priv);
3969 
3970 	return 0;
3971 
3972 irq_error:
3973 	phylink_stop(priv->phylink);
3974 
3975 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3976 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3977 
3978 	stmmac_hw_teardown(dev);
3979 init_error:
3980 	phylink_disconnect_phy(priv->phylink);
3981 init_phy_error:
3982 	pm_runtime_put(priv->device);
3983 	return ret;
3984 }
3985 
3986 static int stmmac_open(struct net_device *dev)
3987 {
3988 	struct stmmac_priv *priv = netdev_priv(dev);
3989 	struct stmmac_dma_conf *dma_conf;
3990 	int ret;
3991 
3992 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3993 	if (IS_ERR(dma_conf))
3994 		return PTR_ERR(dma_conf);
3995 
3996 	ret = __stmmac_open(dev, dma_conf);
3997 	if (ret)
3998 		free_dma_desc_resources(priv, dma_conf);
3999 
4000 	kfree(dma_conf);
4001 	return ret;
4002 }
4003 
4004 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
4005 {
4006 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
4007 
4008 	if (priv->fpe_wq)
4009 		destroy_workqueue(priv->fpe_wq);
4010 
4011 	netdev_info(priv->dev, "FPE workqueue stop");
4012 }
4013 
4014 /**
4015  *  stmmac_release - close entry point of the driver
4016  *  @dev : device pointer.
4017  *  Description:
4018  *  This is the stop entry point of the driver.
4019  */
4020 static int stmmac_release(struct net_device *dev)
4021 {
4022 	struct stmmac_priv *priv = netdev_priv(dev);
4023 	u32 chan;
4024 
4025 	if (device_may_wakeup(priv->device))
4026 		phylink_speed_down(priv->phylink, false);
4027 	/* Stop and disconnect the PHY */
4028 	phylink_stop(priv->phylink);
4029 	phylink_disconnect_phy(priv->phylink);
4030 
4031 	stmmac_disable_all_queues(priv);
4032 
4033 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4034 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4035 
4036 	netif_tx_disable(dev);
4037 
4038 	/* Free the IRQ lines */
4039 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4040 
4041 	if (priv->eee_enabled) {
4042 		priv->tx_path_in_lpi_mode = false;
4043 		del_timer_sync(&priv->eee_ctrl_timer);
4044 	}
4045 
4046 	/* Stop TX/RX DMA and clear the descriptors */
4047 	stmmac_stop_all_dma(priv);
4048 
4049 	/* Release and free the Rx/Tx resources */
4050 	free_dma_desc_resources(priv, &priv->dma_conf);
4051 
4052 	/* Disable the MAC Rx/Tx */
4053 	stmmac_mac_set(priv, priv->ioaddr, false);
4054 
4055 	/* Power down the SerDes if present */
4056 	if (priv->plat->serdes_powerdown)
4057 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4058 
4059 	netif_carrier_off(dev);
4060 
4061 	stmmac_release_ptp(priv);
4062 
4063 	pm_runtime_put(priv->device);
4064 
4065 	if (priv->dma_cap.fpesel)
4066 		stmmac_fpe_stop_wq(priv);
4067 
4068 	return 0;
4069 }
4070 
4071 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4072 			       struct stmmac_tx_queue *tx_q)
4073 {
4074 	u16 tag = 0x0, inner_tag = 0x0;
4075 	u32 inner_type = 0x0;
4076 	struct dma_desc *p;
4077 
4078 	if (!priv->dma_cap.vlins)
4079 		return false;
4080 	if (!skb_vlan_tag_present(skb))
4081 		return false;
4082 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4083 		inner_tag = skb_vlan_tag_get(skb);
4084 		inner_type = STMMAC_VLAN_INSERT;
4085 	}
4086 
4087 	tag = skb_vlan_tag_get(skb);
4088 
4089 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4090 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4091 	else
4092 		p = &tx_q->dma_tx[tx_q->cur_tx];
4093 
4094 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4095 		return false;
4096 
4097 	stmmac_set_tx_owner(priv, p);
4098 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4099 	return true;
4100 }
4101 
4102 /**
4103  *  stmmac_tso_allocator - fill TX descriptors for a TSO payload buffer
4104  *  @priv: driver private structure
4105  *  @des: buffer start address
4106  *  @total_len: total length to fill in descriptors
4107  *  @last_segment: condition for the last descriptor
4108  *  @queue: TX queue index
4109  *  Description:
4110  *  This function fills a descriptor and requests new descriptors according
4111  *  to the buffer length to fill.
4112  */
4113 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4114 				 int total_len, bool last_segment, u32 queue)
4115 {
4116 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4117 	struct dma_desc *desc;
4118 	u32 buff_size;
4119 	int tmp_len;
4120 
4121 	tmp_len = total_len;
4122 
4123 	while (tmp_len > 0) {
4124 		dma_addr_t curr_addr;
4125 
4126 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4127 						priv->dma_conf.dma_tx_size);
4128 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4129 
4130 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4131 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4132 		else
4133 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4134 
4135 		curr_addr = des + (total_len - tmp_len);
4136 		if (priv->dma_cap.addr64 <= 32)
4137 			desc->des0 = cpu_to_le32(curr_addr);
4138 		else
4139 			stmmac_set_desc_addr(priv, desc, curr_addr);
4140 
4141 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4142 			    TSO_MAX_BUFF_SIZE : tmp_len;
4143 
4144 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4145 				0, 1,
4146 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4147 				0, 0);
4148 
4149 		tmp_len -= TSO_MAX_BUFF_SIZE;
4150 	}
4151 }
4152 
4153 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4154 {
4155 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4156 	int desc_size;
4157 
4158 	if (likely(priv->extend_desc))
4159 		desc_size = sizeof(struct dma_extended_desc);
4160 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4161 		desc_size = sizeof(struct dma_edesc);
4162 	else
4163 		desc_size = sizeof(struct dma_desc);
4164 
4165 	/* The own bit must be the latest setting done when preparing the
4166 	 * descriptor and then a barrier is needed to make sure that
4167 	 * all is coherent before granting the DMA engine.
4168 	 */
4169 	wmb();
4170 
4171 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4172 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4173 }
4174 
4175 /**
4176  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4177  *  @skb : the socket buffer
4178  *  @dev : device pointer
4179  *  Description: this is the transmit function that is called on TSO frames
4180  *  (support available on GMAC4 and newer chips).
4181  *  The diagram below shows the ring programming in case of TSO frames:
4182  *
4183  *  First Descriptor
4184  *   --------
4185  *   | DES0 |---> buffer1 = L2/L3/L4 header
4186  *   | DES1 |---> TCP Payload (can continue on next descr...)
4187  *   | DES2 |---> buffer 1 and 2 len
4188  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4189  *   --------
4190  *	|
4191  *     ...
4192  *	|
4193  *   --------
4194  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4195  *   | DES1 | --|
4196  *   | DES2 | --> buffer 1 and 2 len
4197  *   | DES3 |
4198  *   --------
4199  *
4200  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when the MSS changes.
4201  */
4202 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4203 {
4204 	struct dma_desc *desc, *first, *mss_desc = NULL;
4205 	struct stmmac_priv *priv = netdev_priv(dev);
4206 	int nfrags = skb_shinfo(skb)->nr_frags;
4207 	u32 queue = skb_get_queue_mapping(skb);
4208 	unsigned int first_entry, tx_packets;
4209 	struct stmmac_txq_stats *txq_stats;
4210 	int tmp_pay_len = 0, first_tx;
4211 	struct stmmac_tx_queue *tx_q;
4212 	bool has_vlan, set_ic;
4213 	u8 proto_hdr_len, hdr;
4214 	u32 pay_len, mss;
4215 	dma_addr_t des;
4216 	int i;
4217 
4218 	tx_q = &priv->dma_conf.tx_queue[queue];
4219 	txq_stats = &priv->xstats.txq_stats[queue];
4220 	first_tx = tx_q->cur_tx;
4221 
4222 	/* Compute header lengths */
4223 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4224 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4225 		hdr = sizeof(struct udphdr);
4226 	} else {
4227 		proto_hdr_len = skb_tcp_all_headers(skb);
4228 		hdr = tcp_hdrlen(skb);
4229 	}
4230 
4231 	/* Desc availability based on threshold should be enough safe */
4232 	if (unlikely(stmmac_tx_avail(priv, queue) <
4233 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4234 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4235 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4236 								queue));
4237 			/* This is a hard error, log it. */
4238 			netdev_err(priv->dev,
4239 				   "%s: Tx Ring full when queue awake\n",
4240 				   __func__);
4241 		}
4242 		return NETDEV_TX_BUSY;
4243 	}
4244 
4245 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4246 
4247 	mss = skb_shinfo(skb)->gso_size;
4248 
4249 	/* set new MSS value if needed */
4250 	if (mss != tx_q->mss) {
4251 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4252 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4253 		else
4254 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4255 
4256 		stmmac_set_mss(priv, mss_desc, mss);
4257 		tx_q->mss = mss;
4258 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4259 						priv->dma_conf.dma_tx_size);
4260 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4261 	}
4262 
4263 	if (netif_msg_tx_queued(priv)) {
4264 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4265 			__func__, hdr, proto_hdr_len, pay_len, mss);
4266 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4267 			skb->data_len);
4268 	}
4269 
4270 	/* Check if VLAN can be inserted by HW */
4271 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4272 
4273 	first_entry = tx_q->cur_tx;
4274 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4275 
4276 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4277 		desc = &tx_q->dma_entx[first_entry].basic;
4278 	else
4279 		desc = &tx_q->dma_tx[first_entry];
4280 	first = desc;
4281 
4282 	if (has_vlan)
4283 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4284 
4285 	/* first descriptor: fill Headers on Buf1 */
4286 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4287 			     DMA_TO_DEVICE);
4288 	if (dma_mapping_error(priv->device, des))
4289 		goto dma_map_err;
4290 
4291 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4292 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4293 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4294 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4295 
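	/* With 32-bit descriptor addressing the first descriptor carries the
	 * headers in buffer 1 and the start of the payload in buffer 2;
	 * otherwise only buffer 1 is programmed here and the whole payload is
	 * laid out by stmmac_tso_allocator() starting after the headers.
	 */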
4296 	if (priv->dma_cap.addr64 <= 32) {
4297 		first->des0 = cpu_to_le32(des);
4298 
4299 		/* Fill start of payload in buff2 of first descriptor */
4300 		if (pay_len)
4301 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4302 
4303 		/* If needed take extra descriptors to fill the remaining payload */
4304 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4305 	} else {
4306 		stmmac_set_desc_addr(priv, first, des);
4307 		tmp_pay_len = pay_len;
4308 		des += proto_hdr_len;
4309 		pay_len = 0;
4310 	}
4311 
4312 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4313 
4314 	/* Prepare fragments */
4315 	for (i = 0; i < nfrags; i++) {
4316 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4317 
4318 		des = skb_frag_dma_map(priv->device, frag, 0,
4319 				       skb_frag_size(frag),
4320 				       DMA_TO_DEVICE);
4321 		if (dma_mapping_error(priv->device, des))
4322 			goto dma_map_err;
4323 
4324 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4325 				     (i == nfrags - 1), queue);
4326 
4327 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4328 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4329 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4330 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4331 	}
4332 
4333 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4334 
4335 	/* Only the last descriptor gets to point to the skb. */
4336 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4337 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4338 
4339 	/* Manage tx mitigation */
4340 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4341 	tx_q->tx_count_frames += tx_packets;
4342 
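	/* Request a TX completion interrupt when a HW timestamp is pending or
	 * once the tx_coal_frames threshold is crossed, so the TX ring is
	 * cleaned often enough without raising an interrupt per frame.
	 */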
4343 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4344 		set_ic = true;
4345 	else if (!priv->tx_coal_frames[queue])
4346 		set_ic = false;
4347 	else if (tx_packets > priv->tx_coal_frames[queue])
4348 		set_ic = true;
4349 	else if ((tx_q->tx_count_frames %
4350 		  priv->tx_coal_frames[queue]) < tx_packets)
4351 		set_ic = true;
4352 	else
4353 		set_ic = false;
4354 
4355 	if (set_ic) {
4356 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4357 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4358 		else
4359 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4360 
4361 		tx_q->tx_count_frames = 0;
4362 		stmmac_set_tx_ic(priv, desc);
4363 	}
4364 
4365 	/* We've used all descriptors we need for this skb, however,
4366 	 * advance cur_tx so that it references a fresh descriptor.
4367 	 * ndo_start_xmit will fill this descriptor the next time it's
4368 	 * called and stmmac_tx_clean may clean up to this descriptor.
4369 	 */
4370 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4371 
4372 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4373 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4374 			  __func__);
4375 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4376 	}
4377 
4378 	u64_stats_update_begin(&txq_stats->q_syncp);
4379 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4380 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4381 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4382 	if (set_ic)
4383 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4384 	u64_stats_update_end(&txq_stats->q_syncp);
4385 
4386 	if (priv->sarc_type)
4387 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4388 
4389 	skb_tx_timestamp(skb);
4390 
4391 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4392 		     priv->hwts_tx_en)) {
4393 		/* declare that device is doing timestamping */
4394 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4395 		stmmac_enable_tx_timestamp(priv, first);
4396 	}
4397 
4398 	/* Complete the first descriptor before granting the DMA */
4399 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4400 			proto_hdr_len,
4401 			pay_len,
4402 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4403 			hdr / 4, (skb->len - proto_hdr_len));
4404 
4405 	/* If context desc is used to change MSS */
4406 	if (mss_desc) {
4407 		/* Make sure that first descriptor has been completely
4408 		 * written, including its own bit. This is because the MSS
4409 		 * context descriptor sits before the first descriptor, so we
4410 		 * need to make sure that its own bit is the last thing written.
4411 		 */
4412 		dma_wmb();
4413 		stmmac_set_tx_owner(priv, mss_desc);
4414 	}
4415 
4416 	if (netif_msg_pktdata(priv)) {
4417 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4418 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4419 			tx_q->cur_tx, first, nfrags);
4420 		pr_info(">>> frame to be transmitted: ");
4421 		print_pkt(skb->data, skb_headlen(skb));
4422 	}
4423 
4424 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4425 
4426 	stmmac_flush_tx_descriptors(priv, queue);
4427 	stmmac_tx_timer_arm(priv, queue);
4428 
4429 	return NETDEV_TX_OK;
4430 
4431 dma_map_err:
4432 	dev_err(priv->device, "Tx dma map failed\n");
4433 	dev_kfree_skb(skb);
4434 	priv->xstats.tx_dropped++;
4435 	return NETDEV_TX_OK;
4436 }
4437 
4438 /**
4439  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4440  * @skb: socket buffer to check
4441  *
4442  * Check if a packet has an ethertype that will trigger the IP header checks
4443  * and IP/TCP checksum engine of the stmmac core.
4444  *
4445  * Return: true if the ethertype can trigger the checksum engine, false
4446  * otherwise
4447  */
4448 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4449 {
4450 	int depth = 0;
4451 	__be16 proto;
4452 
4453 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4454 				    &depth);
4455 
4456 	return (depth <= ETH_HLEN) &&
4457 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4458 }
4459 
4460 /**
4461  *  stmmac_xmit - Tx entry point of the driver
4462  *  @skb : the socket buffer
4463  *  @dev : device pointer
4464  *  Description : this is the tx entry point of the driver.
4465  *  It programs the chain or the ring and supports oversized frames
4466  *  and SG feature.
4467  */
4468 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4469 {
4470 	unsigned int first_entry, tx_packets, enh_desc;
4471 	struct stmmac_priv *priv = netdev_priv(dev);
4472 	unsigned int nopaged_len = skb_headlen(skb);
4473 	int i, csum_insertion = 0, is_jumbo = 0;
4474 	u32 queue = skb_get_queue_mapping(skb);
4475 	int nfrags = skb_shinfo(skb)->nr_frags;
4476 	int gso = skb_shinfo(skb)->gso_type;
4477 	struct stmmac_txq_stats *txq_stats;
4478 	struct dma_edesc *tbs_desc = NULL;
4479 	struct dma_desc *desc, *first;
4480 	struct stmmac_tx_queue *tx_q;
4481 	bool has_vlan, set_ic;
4482 	int entry, first_tx;
4483 	dma_addr_t des;
4484 
4485 	tx_q = &priv->dma_conf.tx_queue[queue];
4486 	txq_stats = &priv->xstats.txq_stats[queue];
4487 	first_tx = tx_q->cur_tx;
4488 
4489 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4490 		stmmac_disable_eee_mode(priv);
4491 
4492 	/* Manage oversized TCP frames for GMAC4 device */
4493 	if (skb_is_gso(skb) && priv->tso) {
4494 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4495 			return stmmac_tso_xmit(skb, dev);
4496 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4497 			return stmmac_tso_xmit(skb, dev);
4498 	}
4499 
4500 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4501 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4502 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4503 								queue));
4504 			/* This is a hard error, log it. */
4505 			netdev_err(priv->dev,
4506 				   "%s: Tx Ring full when queue awake\n",
4507 				   __func__);
4508 		}
4509 		return NETDEV_TX_BUSY;
4510 	}
4511 
4512 	/* Check if VLAN can be inserted by HW */
4513 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4514 
4515 	entry = tx_q->cur_tx;
4516 	first_entry = entry;
4517 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4518 
4519 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4520 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4521 	 * queues. In that case, checksum offloading for those queues that don't
4522 	 * support tx coe needs to fall back to software checksum calculation.
4523 	 *
4524 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4525 	 * also have to be checksummed in software.
4526 	 */
4527 	if (csum_insertion &&
4528 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4529 	     !stmmac_has_ip_ethertype(skb))) {
4530 		if (unlikely(skb_checksum_help(skb)))
4531 			goto dma_map_err;
4532 		csum_insertion = !csum_insertion;
4533 	}
4534 
4535 	if (likely(priv->extend_desc))
4536 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4537 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4538 		desc = &tx_q->dma_entx[entry].basic;
4539 	else
4540 		desc = tx_q->dma_tx + entry;
4541 
4542 	first = desc;
4543 
4544 	if (has_vlan)
4545 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4546 
4547 	enh_desc = priv->plat->enh_desc;
4548 	/* To program the descriptors according to the size of the frame */
4549 	if (enh_desc)
4550 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4551 
4552 	if (unlikely(is_jumbo)) {
4553 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4554 		if (unlikely(entry < 0) && (entry != -EINVAL))
4555 			goto dma_map_err;
4556 	}
4557 
4558 	for (i = 0; i < nfrags; i++) {
4559 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4560 		int len = skb_frag_size(frag);
4561 		bool last_segment = (i == (nfrags - 1));
4562 
4563 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4564 		WARN_ON(tx_q->tx_skbuff[entry]);
4565 
4566 		if (likely(priv->extend_desc))
4567 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4568 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4569 			desc = &tx_q->dma_entx[entry].basic;
4570 		else
4571 			desc = tx_q->dma_tx + entry;
4572 
4573 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4574 				       DMA_TO_DEVICE);
4575 		if (dma_mapping_error(priv->device, des))
4576 			goto dma_map_err; /* should reuse desc w/o issues */
4577 
4578 		tx_q->tx_skbuff_dma[entry].buf = des;
4579 
4580 		stmmac_set_desc_addr(priv, desc, des);
4581 
4582 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4583 		tx_q->tx_skbuff_dma[entry].len = len;
4584 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4585 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4586 
4587 		/* Prepare the descriptor and set the own bit too */
4588 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4589 				priv->mode, 1, last_segment, skb->len);
4590 	}
4591 
4592 	/* Only the last descriptor gets to point to the skb. */
4593 	tx_q->tx_skbuff[entry] = skb;
4594 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4595 
4596 	/* According to the coalesce parameter the IC bit for the latest
4597 	 * segment is reset and the timer re-started to clean the tx status.
4598 	 * This approach takes care of the fragments: desc is the first
4599 	 * element in case of no SG.
4600 	 */
4601 	tx_packets = (entry + 1) - first_tx;
4602 	tx_q->tx_count_frames += tx_packets;
4603 
4604 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4605 		set_ic = true;
4606 	else if (!priv->tx_coal_frames[queue])
4607 		set_ic = false;
4608 	else if (tx_packets > priv->tx_coal_frames[queue])
4609 		set_ic = true;
4610 	else if ((tx_q->tx_count_frames %
4611 		  priv->tx_coal_frames[queue]) < tx_packets)
4612 		set_ic = true;
4613 	else
4614 		set_ic = false;
4615 
4616 	if (set_ic) {
4617 		if (likely(priv->extend_desc))
4618 			desc = &tx_q->dma_etx[entry].basic;
4619 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4620 			desc = &tx_q->dma_entx[entry].basic;
4621 		else
4622 			desc = &tx_q->dma_tx[entry];
4623 
4624 		tx_q->tx_count_frames = 0;
4625 		stmmac_set_tx_ic(priv, desc);
4626 	}
4627 
4628 	/* We've used all descriptors we need for this skb, however,
4629 	 * advance cur_tx so that it references a fresh descriptor.
4630 	 * ndo_start_xmit will fill this descriptor the next time it's
4631 	 * called and stmmac_tx_clean may clean up to this descriptor.
4632 	 */
4633 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4634 	tx_q->cur_tx = entry;
4635 
4636 	if (netif_msg_pktdata(priv)) {
4637 		netdev_dbg(priv->dev,
4638 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4639 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4640 			   entry, first, nfrags);
4641 
4642 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4643 		print_pkt(skb->data, skb->len);
4644 	}
4645 
4646 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4647 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4648 			  __func__);
4649 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4650 	}
4651 
4652 	u64_stats_update_begin(&txq_stats->q_syncp);
4653 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4654 	if (set_ic)
4655 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4656 	u64_stats_update_end(&txq_stats->q_syncp);
4657 
4658 	if (priv->sarc_type)
4659 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4660 
4661 	skb_tx_timestamp(skb);
4662 
4663 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4664 	 * problems because all the descriptors are actually ready to be
4665 	 * passed to the DMA engine.
4666 	 */
4667 	if (likely(!is_jumbo)) {
4668 		bool last_segment = (nfrags == 0);
4669 
4670 		des = dma_map_single(priv->device, skb->data,
4671 				     nopaged_len, DMA_TO_DEVICE);
4672 		if (dma_mapping_error(priv->device, des))
4673 			goto dma_map_err;
4674 
4675 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4676 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4677 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4678 
4679 		stmmac_set_desc_addr(priv, first, des);
4680 
4681 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4682 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4683 
4684 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4685 			     priv->hwts_tx_en)) {
4686 			/* declare that device is doing timestamping */
4687 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4688 			stmmac_enable_tx_timestamp(priv, first);
4689 		}
4690 
4691 		/* Prepare the first descriptor setting the OWN bit too */
4692 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4693 				csum_insertion, priv->mode, 0, last_segment,
4694 				skb->len);
4695 	}
4696 
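	/* When TBS is enabled on this queue, program the launch time taken
	 * from skb->tstamp into the enhanced descriptor.
	 */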
4697 	if (tx_q->tbs & STMMAC_TBS_EN) {
4698 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4699 
4700 		tbs_desc = &tx_q->dma_entx[first_entry];
4701 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4702 	}
4703 
4704 	stmmac_set_tx_owner(priv, first);
4705 
4706 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4707 
4708 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4709 
4710 	stmmac_flush_tx_descriptors(priv, queue);
4711 	stmmac_tx_timer_arm(priv, queue);
4712 
4713 	return NETDEV_TX_OK;
4714 
4715 dma_map_err:
4716 	netdev_err(priv->dev, "Tx DMA map failed\n");
4717 	dev_kfree_skb(skb);
4718 	priv->xstats.tx_dropped++;
4719 	return NETDEV_TX_OK;
4720 }
4721 
4722 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4723 {
4724 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4725 	__be16 vlan_proto = veth->h_vlan_proto;
4726 	u16 vlanid;
4727 
4728 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4729 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4730 	    (vlan_proto == htons(ETH_P_8021AD) &&
4731 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4732 		/* pop the vlan tag */
4733 		vlanid = ntohs(veth->h_vlan_TCI);
4734 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4735 		skb_pull(skb, VLAN_HLEN);
4736 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4737 	}
4738 }
4739 
4740 /**
4741  * stmmac_rx_refill - refill the used preallocated RX buffers
4742  * @priv: driver private structure
4743  * @queue: RX queue index
4744  * Description : this is to refill the RX buffers used by the reception
4745  * process, which is based on zero-copy.
4746  */
4747 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4748 {
4749 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4750 	int dirty = stmmac_rx_dirty(priv, queue);
4751 	unsigned int entry = rx_q->dirty_rx;
4752 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4753 
4754 	if (priv->dma_cap.host_dma_width <= 32)
4755 		gfp |= GFP_DMA32;
4756 
4757 	while (dirty-- > 0) {
4758 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4759 		struct dma_desc *p;
4760 		bool use_rx_wd;
4761 
4762 		if (priv->extend_desc)
4763 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4764 		else
4765 			p = rx_q->dma_rx + entry;
4766 
4767 		if (!buf->page) {
4768 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4769 			if (!buf->page)
4770 				break;
4771 		}
4772 
4773 		if (priv->sph && !buf->sec_page) {
4774 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4775 			if (!buf->sec_page)
4776 				break;
4777 
4778 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4779 		}
4780 
4781 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4782 
4783 		stmmac_set_desc_addr(priv, p, buf->addr);
4784 		if (priv->sph)
4785 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4786 		else
4787 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4788 		stmmac_refill_desc3(priv, rx_q, p);
4789 
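		/* Decide whether to request the RX watchdog for this
		 * descriptor based on the RX interrupt coalescing settings.
		 */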
4790 		rx_q->rx_count_frames++;
4791 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4792 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4793 			rx_q->rx_count_frames = 0;
4794 
4795 		use_rx_wd = !priv->rx_coal_frames[queue];
4796 		use_rx_wd |= rx_q->rx_count_frames > 0;
4797 		if (!priv->use_riwt)
4798 			use_rx_wd = false;
4799 
4800 		dma_wmb();
4801 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4802 
4803 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4804 	}
4805 	rx_q->dirty_rx = entry;
4806 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4807 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4808 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4809 }
4810 
4811 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4812 				       struct dma_desc *p,
4813 				       int status, unsigned int len)
4814 {
4815 	unsigned int plen = 0, hlen = 0;
4816 	int coe = priv->hw->rx_csum;
4817 
4818 	/* Not first descriptor, buffer is always zero */
4819 	if (priv->sph && len)
4820 		return 0;
4821 
4822 	/* First descriptor, get split header length */
4823 	stmmac_get_rx_header_len(priv, p, &hlen);
4824 	if (priv->sph && hlen) {
4825 		priv->xstats.rx_split_hdr_pkt_n++;
4826 		return hlen;
4827 	}
4828 
4829 	/* First descriptor, not last descriptor and not split header */
4830 	if (status & rx_not_ls)
4831 		return priv->dma_conf.dma_buf_sz;
4832 
4833 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4834 
4835 	/* First descriptor and last descriptor and not split header */
4836 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4837 }
4838 
4839 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4840 				       struct dma_desc *p,
4841 				       int status, unsigned int len)
4842 {
4843 	int coe = priv->hw->rx_csum;
4844 	unsigned int plen = 0;
4845 
4846 	/* Not split header, buffer is not available */
4847 	/* Not using split header: buffer 2 is not used */
4848 		return 0;
4849 
4850 	/* Not last descriptor */
4851 	if (status & rx_not_ls)
4852 		return priv->dma_conf.dma_buf_sz;
4853 
4854 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4855 
4856 	/* Last descriptor */
4857 	return plen - len;
4858 }
4859 
4860 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4861 				struct xdp_frame *xdpf, bool dma_map)
4862 {
4863 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4864 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4865 	unsigned int entry = tx_q->cur_tx;
4866 	struct dma_desc *tx_desc;
4867 	dma_addr_t dma_addr;
4868 	bool set_ic;
4869 
4870 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4871 		return STMMAC_XDP_CONSUMED;
4872 
4873 	if (likely(priv->extend_desc))
4874 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4875 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4876 		tx_desc = &tx_q->dma_entx[entry].basic;
4877 	else
4878 		tx_desc = tx_q->dma_tx + entry;
4879 
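	/* dma_map == true means the frame is not backed by the RX page_pool
	 * and needs a fresh DMA mapping; XDP_TX frames reuse the page_pool
	 * mapping and only need a sync for the device.
	 */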
4880 	if (dma_map) {
4881 		dma_addr = dma_map_single(priv->device, xdpf->data,
4882 					  xdpf->len, DMA_TO_DEVICE);
4883 		if (dma_mapping_error(priv->device, dma_addr))
4884 			return STMMAC_XDP_CONSUMED;
4885 
4886 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4887 	} else {
4888 		struct page *page = virt_to_page(xdpf->data);
4889 
4890 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4891 			   xdpf->headroom;
4892 		dma_sync_single_for_device(priv->device, dma_addr,
4893 					   xdpf->len, DMA_BIDIRECTIONAL);
4894 
4895 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4896 	}
4897 
4898 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4899 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4900 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4901 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4902 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4903 
4904 	tx_q->xdpf[entry] = xdpf;
4905 
4906 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4907 
4908 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4909 			       true, priv->mode, true, true,
4910 			       xdpf->len);
4911 
4912 	tx_q->tx_count_frames++;
4913 
4914 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4915 		set_ic = true;
4916 	else
4917 		set_ic = false;
4918 
4919 	if (set_ic) {
4920 		tx_q->tx_count_frames = 0;
4921 		stmmac_set_tx_ic(priv, tx_desc);
4922 		u64_stats_update_begin(&txq_stats->q_syncp);
4923 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4924 		u64_stats_update_end(&txq_stats->q_syncp);
4925 	}
4926 
4927 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4928 
4929 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4930 	tx_q->cur_tx = entry;
4931 
4932 	return STMMAC_XDP_TX;
4933 }
4934 
4935 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4936 				   int cpu)
4937 {
4938 	int index = cpu;
4939 
4940 	if (unlikely(index < 0))
4941 		index = 0;
4942 
4943 	while (index >= priv->plat->tx_queues_to_use)
4944 		index -= priv->plat->tx_queues_to_use;
4945 
4946 	return index;
4947 }
4948 
4949 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4950 				struct xdp_buff *xdp)
4951 {
4952 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4953 	int cpu = smp_processor_id();
4954 	struct netdev_queue *nq;
4955 	int queue;
4956 	int res;
4957 
4958 	if (unlikely(!xdpf))
4959 		return STMMAC_XDP_CONSUMED;
4960 
4961 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4962 	nq = netdev_get_tx_queue(priv->dev, queue);
4963 
4964 	__netif_tx_lock(nq, cpu);
4965 	/* Avoids TX time-out as we are sharing with slow path */
4966 	txq_trans_cond_update(nq);
4967 
4968 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4969 	if (res == STMMAC_XDP_TX)
4970 		stmmac_flush_tx_descriptors(priv, queue);
4971 
4972 	__netif_tx_unlock(nq);
4973 
4974 	return res;
4975 }
4976 
4977 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4978 				 struct bpf_prog *prog,
4979 				 struct xdp_buff *xdp)
4980 {
4981 	u32 act;
4982 	int res;
4983 
4984 	act = bpf_prog_run_xdp(prog, xdp);
4985 	switch (act) {
4986 	case XDP_PASS:
4987 		res = STMMAC_XDP_PASS;
4988 		break;
4989 	case XDP_TX:
4990 		res = stmmac_xdp_xmit_back(priv, xdp);
4991 		break;
4992 	case XDP_REDIRECT:
4993 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4994 			res = STMMAC_XDP_CONSUMED;
4995 		else
4996 			res = STMMAC_XDP_REDIRECT;
4997 		break;
4998 	default:
4999 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5000 		fallthrough;
5001 	case XDP_ABORTED:
5002 		trace_xdp_exception(priv->dev, prog, act);
5003 		fallthrough;
5004 	case XDP_DROP:
5005 		res = STMMAC_XDP_CONSUMED;
5006 		break;
5007 	}
5008 
5009 	return res;
5010 }
5011 
5012 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5013 					   struct xdp_buff *xdp)
5014 {
5015 	struct bpf_prog *prog;
5016 	int res;
5017 
5018 	prog = READ_ONCE(priv->xdp_prog);
5019 	if (!prog) {
5020 		res = STMMAC_XDP_PASS;
5021 		goto out;
5022 	}
5023 
5024 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5025 out:
5026 	return ERR_PTR(-res);
5027 }
5028 
5029 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5030 				   int xdp_status)
5031 {
5032 	int cpu = smp_processor_id();
5033 	int queue;
5034 
5035 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5036 
5037 	if (xdp_status & STMMAC_XDP_TX)
5038 		stmmac_tx_timer_arm(priv, queue);
5039 
5040 	if (xdp_status & STMMAC_XDP_REDIRECT)
5041 		xdp_do_flush();
5042 }
5043 
5044 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5045 					       struct xdp_buff *xdp)
5046 {
5047 	unsigned int metasize = xdp->data - xdp->data_meta;
5048 	unsigned int datasize = xdp->data_end - xdp->data;
5049 	struct sk_buff *skb;
5050 
5051 	skb = __napi_alloc_skb(&ch->rxtx_napi,
5052 			       xdp->data_end - xdp->data_hard_start,
5053 			       GFP_ATOMIC | __GFP_NOWARN);
5054 	if (unlikely(!skb))
5055 		return NULL;
5056 
5057 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5058 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5059 	if (metasize)
5060 		skb_metadata_set(skb, metasize);
5061 
5062 	return skb;
5063 }
5064 
5065 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5066 				   struct dma_desc *p, struct dma_desc *np,
5067 				   struct xdp_buff *xdp)
5068 {
5069 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5070 	struct stmmac_channel *ch = &priv->channel[queue];
5071 	unsigned int len = xdp->data_end - xdp->data;
5072 	enum pkt_hash_types hash_type;
5073 	int coe = priv->hw->rx_csum;
5074 	struct sk_buff *skb;
5075 	u32 hash;
5076 
5077 	skb = stmmac_construct_skb_zc(ch, xdp);
5078 	if (!skb) {
5079 		priv->xstats.rx_dropped++;
5080 		return;
5081 	}
5082 
5083 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5084 	if (priv->hw->hw_vlan_en)
5085 		/* MAC level stripping. */
5086 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5087 	else
5088 		/* Driver level stripping. */
5089 		stmmac_rx_vlan(priv->dev, skb);
5090 	skb->protocol = eth_type_trans(skb, priv->dev);
5091 
5092 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5093 		skb_checksum_none_assert(skb);
5094 	else
5095 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5096 
5097 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5098 		skb_set_hash(skb, hash, hash_type);
5099 
5100 	skb_record_rx_queue(skb, queue);
5101 	napi_gro_receive(&ch->rxtx_napi, skb);
5102 
5103 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5104 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5105 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5106 	u64_stats_update_end(&rxq_stats->napi_syncp);
5107 }
5108 
5109 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5110 {
5111 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5112 	unsigned int entry = rx_q->dirty_rx;
5113 	struct dma_desc *rx_desc = NULL;
5114 	bool ret = true;
5115 
5116 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5117 
5118 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5119 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5120 		dma_addr_t dma_addr;
5121 		bool use_rx_wd;
5122 
5123 		if (!buf->xdp) {
5124 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5125 			if (!buf->xdp) {
5126 				ret = false;
5127 				break;
5128 			}
5129 		}
5130 
5131 		if (priv->extend_desc)
5132 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5133 		else
5134 			rx_desc = rx_q->dma_rx + entry;
5135 
5136 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5137 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5138 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5139 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5140 
5141 		rx_q->rx_count_frames++;
5142 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5143 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5144 			rx_q->rx_count_frames = 0;
5145 
5146 		use_rx_wd = !priv->rx_coal_frames[queue];
5147 		use_rx_wd |= rx_q->rx_count_frames > 0;
5148 		if (!priv->use_riwt)
5149 			use_rx_wd = false;
5150 
5151 		dma_wmb();
5152 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5153 
5154 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5155 	}
5156 
5157 	if (rx_desc) {
5158 		rx_q->dirty_rx = entry;
5159 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5160 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5161 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5162 	}
5163 
5164 	return ret;
5165 }
5166 
5167 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5168 {
5169 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5170 	 * to represent incoming packet, whereas cb field in the same structure
5171 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5172 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5173 	 */
5174 	return (struct stmmac_xdp_buff *)xdp;
5175 }
5176 
5177 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5178 {
5179 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5180 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5181 	unsigned int count = 0, error = 0, len = 0;
5182 	int dirty = stmmac_rx_dirty(priv, queue);
5183 	unsigned int next_entry = rx_q->cur_rx;
5184 	u32 rx_errors = 0, rx_dropped = 0;
5185 	unsigned int desc_size;
5186 	struct bpf_prog *prog;
5187 	bool failure = false;
5188 	int xdp_status = 0;
5189 	int status = 0;
5190 
5191 	if (netif_msg_rx_status(priv)) {
5192 		void *rx_head;
5193 
5194 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5195 		if (priv->extend_desc) {
5196 			rx_head = (void *)rx_q->dma_erx;
5197 			desc_size = sizeof(struct dma_extended_desc);
5198 		} else {
5199 			rx_head = (void *)rx_q->dma_rx;
5200 			desc_size = sizeof(struct dma_desc);
5201 		}
5202 
5203 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5204 				    rx_q->dma_rx_phy, desc_size);
5205 	}
5206 	while (count < limit) {
5207 		struct stmmac_rx_buffer *buf;
5208 		struct stmmac_xdp_buff *ctx;
5209 		unsigned int buf1_len = 0;
5210 		struct dma_desc *np, *p;
5211 		int entry;
5212 		int res;
5213 
5214 		if (!count && rx_q->state_saved) {
5215 			error = rx_q->state.error;
5216 			len = rx_q->state.len;
5217 		} else {
5218 			rx_q->state_saved = false;
5219 			error = 0;
5220 			len = 0;
5221 		}
5222 
5223 		if (count >= limit)
5224 			break;
5225 
5226 read_again:
5227 		buf1_len = 0;
5228 		entry = next_entry;
5229 		buf = &rx_q->buf_pool[entry];
5230 
5231 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5232 			failure = failure ||
5233 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5234 			dirty = 0;
5235 		}
5236 
5237 		if (priv->extend_desc)
5238 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5239 		else
5240 			p = rx_q->dma_rx + entry;
5241 
5242 		/* read the status of the incoming frame */
5243 		status = stmmac_rx_status(priv, &priv->xstats, p);
5244 		/* check if managed by the DMA otherwise go ahead */
5245 		if (unlikely(status & dma_own))
5246 			break;
5247 
5248 		/* Prefetch the next RX descriptor */
5249 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5250 						priv->dma_conf.dma_rx_size);
5251 		next_entry = rx_q->cur_rx;
5252 
5253 		if (priv->extend_desc)
5254 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5255 		else
5256 			np = rx_q->dma_rx + next_entry;
5257 
5258 		prefetch(np);
5259 
5260 		/* Ensure a valid XSK buffer before proceeding */
5261 		if (!buf->xdp)
5262 			break;
5263 
5264 		if (priv->extend_desc)
5265 			stmmac_rx_extended_status(priv, &priv->xstats,
5266 						  rx_q->dma_erx + entry);
5267 		if (unlikely(status == discard_frame)) {
5268 			xsk_buff_free(buf->xdp);
5269 			buf->xdp = NULL;
5270 			dirty++;
5271 			error = 1;
5272 			if (!priv->hwts_rx_en)
5273 				rx_errors++;
5274 		}
5275 
5276 		if (unlikely(error && (status & rx_not_ls)))
5277 			goto read_again;
5278 		if (unlikely(error)) {
5279 			count++;
5280 			continue;
5281 		}
5282 
5283 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5284 		if (likely(status & rx_not_ls)) {
5285 			xsk_buff_free(buf->xdp);
5286 			buf->xdp = NULL;
5287 			dirty++;
5288 			count++;
5289 			goto read_again;
5290 		}
5291 
5292 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5293 		ctx->priv = priv;
5294 		ctx->desc = p;
5295 		ctx->ndesc = np;
5296 
5297 		/* XDP ZC frames only support primary buffers for now */
5298 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5299 		len += buf1_len;
5300 
5301 		/* ACS is disabled; strip manually. */
5302 		if (likely(!(status & rx_not_ls))) {
5303 			buf1_len -= ETH_FCS_LEN;
5304 			len -= ETH_FCS_LEN;
5305 		}
5306 
5307 		/* RX buffer is good and fits into an XSK pool buffer */
5308 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5309 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5310 
5311 		prog = READ_ONCE(priv->xdp_prog);
5312 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5313 
5314 		switch (res) {
5315 		case STMMAC_XDP_PASS:
5316 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5317 			xsk_buff_free(buf->xdp);
5318 			break;
5319 		case STMMAC_XDP_CONSUMED:
5320 			xsk_buff_free(buf->xdp);
5321 			rx_dropped++;
5322 			break;
5323 		case STMMAC_XDP_TX:
5324 		case STMMAC_XDP_REDIRECT:
5325 			xdp_status |= res;
5326 			break;
5327 		}
5328 
5329 		buf->xdp = NULL;
5330 		dirty++;
5331 		count++;
5332 	}
5333 
5334 	if (status & rx_not_ls) {
5335 		rx_q->state_saved = true;
5336 		rx_q->state.error = error;
5337 		rx_q->state.len = len;
5338 	}
5339 
5340 	stmmac_finalize_xdp_rx(priv, xdp_status);
5341 
5342 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5343 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5344 	u64_stats_update_end(&rxq_stats->napi_syncp);
5345 
5346 	priv->xstats.rx_dropped += rx_dropped;
5347 	priv->xstats.rx_errors += rx_errors;
5348 
5349 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5350 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5351 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5352 		else
5353 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5354 
5355 		return (int)count;
5356 	}
5357 
5358 	return failure ? limit : (int)count;
5359 }
5360 
5361 /**
5362  * stmmac_rx - manage the receive process
5363  * @priv: driver private structure
5364  * @limit: napi budget
5365  * @queue: RX queue index.
5366  * Description: this is the function called by the napi poll method.
5367  * It gets all the frames inside the ring.
5368  */
5369 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5370 {
5371 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5372 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5373 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5374 	struct stmmac_channel *ch = &priv->channel[queue];
5375 	unsigned int count = 0, error = 0, len = 0;
5376 	int status = 0, coe = priv->hw->rx_csum;
5377 	unsigned int next_entry = rx_q->cur_rx;
5378 	enum dma_data_direction dma_dir;
5379 	unsigned int desc_size;
5380 	struct sk_buff *skb = NULL;
5381 	struct stmmac_xdp_buff ctx;
5382 	int xdp_status = 0;
5383 	int buf_sz;
5384 
5385 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5386 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5387 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5388 
5389 	if (netif_msg_rx_status(priv)) {
5390 		void *rx_head;
5391 
5392 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5393 		if (priv->extend_desc) {
5394 			rx_head = (void *)rx_q->dma_erx;
5395 			desc_size = sizeof(struct dma_extended_desc);
5396 		} else {
5397 			rx_head = (void *)rx_q->dma_rx;
5398 			desc_size = sizeof(struct dma_desc);
5399 		}
5400 
5401 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5402 				    rx_q->dma_rx_phy, desc_size);
5403 	}
5404 	while (count < limit) {
5405 		unsigned int buf1_len = 0, buf2_len = 0;
5406 		enum pkt_hash_types hash_type;
5407 		struct stmmac_rx_buffer *buf;
5408 		struct dma_desc *np, *p;
5409 		int entry;
5410 		u32 hash;
5411 
5412 		if (!count && rx_q->state_saved) {
5413 			skb = rx_q->state.skb;
5414 			error = rx_q->state.error;
5415 			len = rx_q->state.len;
5416 		} else {
5417 			rx_q->state_saved = false;
5418 			skb = NULL;
5419 			error = 0;
5420 			len = 0;
5421 		}
5422 
5423 read_again:
5424 		if (count >= limit)
5425 			break;
5426 
5427 		buf1_len = 0;
5428 		buf2_len = 0;
5429 		entry = next_entry;
5430 		buf = &rx_q->buf_pool[entry];
5431 
5432 		if (priv->extend_desc)
5433 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5434 		else
5435 			p = rx_q->dma_rx + entry;
5436 
5437 		/* read the status of the incoming frame */
5438 		status = stmmac_rx_status(priv, &priv->xstats, p);
5439 		/* check if managed by the DMA otherwise go ahead */
5440 		if (unlikely(status & dma_own))
5441 			break;
5442 
5443 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5444 						priv->dma_conf.dma_rx_size);
5445 		next_entry = rx_q->cur_rx;
5446 
5447 		if (priv->extend_desc)
5448 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5449 		else
5450 			np = rx_q->dma_rx + next_entry;
5451 
5452 		prefetch(np);
5453 
5454 		if (priv->extend_desc)
5455 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5456 		if (unlikely(status == discard_frame)) {
5457 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5458 			buf->page = NULL;
5459 			error = 1;
5460 			if (!priv->hwts_rx_en)
5461 				rx_errors++;
5462 		}
5463 
5464 		if (unlikely(error && (status & rx_not_ls)))
5465 			goto read_again;
5466 		if (unlikely(error)) {
5467 			dev_kfree_skb(skb);
5468 			skb = NULL;
5469 			count++;
5470 			continue;
5471 		}
5472 
5473 		/* Buffer is good. Go on. */
5474 
5475 		prefetch(page_address(buf->page) + buf->page_offset);
5476 		if (buf->sec_page)
5477 			prefetch(page_address(buf->sec_page));
5478 
5479 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5480 		len += buf1_len;
5481 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5482 		len += buf2_len;
5483 
5484 		/* ACS is disabled; strip manually. */
5485 		if (likely(!(status & rx_not_ls))) {
5486 			if (buf2_len) {
5487 				buf2_len -= ETH_FCS_LEN;
5488 				len -= ETH_FCS_LEN;
5489 			} else if (buf1_len) {
5490 				buf1_len -= ETH_FCS_LEN;
5491 				len -= ETH_FCS_LEN;
5492 			}
5493 		}
5494 
5495 		if (!skb) {
5496 			unsigned int pre_len, sync_len;
5497 
5498 			dma_sync_single_for_cpu(priv->device, buf->addr,
5499 						buf1_len, dma_dir);
5500 
5501 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5502 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5503 					 buf->page_offset, buf1_len, true);
5504 
5505 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5506 				  buf->page_offset;
5507 
5508 			ctx.priv = priv;
5509 			ctx.desc = p;
5510 			ctx.ndesc = np;
5511 
5512 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5513 			/* Due to xdp_adjust_tail: the for_device DMA sync
5514 			 * must cover the maximum length the CPU touched.
5515 			 */
5516 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5517 				   buf->page_offset;
5518 			sync_len = max(sync_len, pre_len);
5519 
5520 			/* For any verdict other than XDP_PASS */
5521 			if (IS_ERR(skb)) {
5522 				unsigned int xdp_res = -PTR_ERR(skb);
5523 
5524 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5525 					page_pool_put_page(rx_q->page_pool,
5526 							   virt_to_head_page(ctx.xdp.data),
5527 							   sync_len, true);
5528 					buf->page = NULL;
5529 					rx_dropped++;
5530 
5531 					/* Clear skb as it was used to carry
5532 					 * the XDP verdict, not a real buffer.
5533 					 */
5534 					skb = NULL;
5535 
5536 					if (unlikely((status & rx_not_ls)))
5537 						goto read_again;
5538 
5539 					count++;
5540 					continue;
5541 				} else if (xdp_res & (STMMAC_XDP_TX |
5542 						      STMMAC_XDP_REDIRECT)) {
5543 					xdp_status |= xdp_res;
5544 					buf->page = NULL;
5545 					skb = NULL;
5546 					count++;
5547 					continue;
5548 				}
5549 			}
5550 		}
5551 
5552 		if (!skb) {
5553 			/* XDP program may expand or reduce tail */
5554 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5555 
5556 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5557 			if (!skb) {
5558 				rx_dropped++;
5559 				count++;
5560 				goto drain_data;
5561 			}
5562 
5563 			/* XDP program may adjust header */
5564 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5565 			skb_put(skb, buf1_len);
5566 
5567 			/* Data payload copied into SKB, page ready for recycle */
5568 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5569 			buf->page = NULL;
5570 		} else if (buf1_len) {
5571 			dma_sync_single_for_cpu(priv->device, buf->addr,
5572 						buf1_len, dma_dir);
5573 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5574 					buf->page, buf->page_offset, buf1_len,
5575 					priv->dma_conf.dma_buf_sz);
5576 
5577 			/* Data payload appended into SKB */
5578 			skb_mark_for_recycle(skb);
5579 			buf->page = NULL;
5580 		}
5581 
5582 		if (buf2_len) {
5583 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5584 						buf2_len, dma_dir);
5585 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5586 					buf->sec_page, 0, buf2_len,
5587 					priv->dma_conf.dma_buf_sz);
5588 
5589 			/* Data payload appended into SKB */
5590 			skb_mark_for_recycle(skb);
5591 			buf->sec_page = NULL;
5592 		}
5593 
5594 drain_data:
5595 		if (likely(status & rx_not_ls))
5596 			goto read_again;
5597 		if (!skb)
5598 			continue;
5599 
5600 		/* Got entire packet into SKB. Finish it. */
5601 
5602 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5603 
5604 		if (priv->hw->hw_vlan_en)
5605 			/* MAC level stripping. */
5606 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5607 		else
5608 			/* Driver level stripping. */
5609 			stmmac_rx_vlan(priv->dev, skb);
5610 
5611 		skb->protocol = eth_type_trans(skb, priv->dev);
5612 
5613 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5614 			skb_checksum_none_assert(skb);
5615 		else
5616 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5617 
5618 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5619 			skb_set_hash(skb, hash, hash_type);
5620 
5621 		skb_record_rx_queue(skb, queue);
5622 		napi_gro_receive(&ch->rx_napi, skb);
5623 		skb = NULL;
5624 
5625 		rx_packets++;
5626 		rx_bytes += len;
5627 		count++;
5628 	}
5629 
5630 	if (status & rx_not_ls || skb) {
5631 		rx_q->state_saved = true;
5632 		rx_q->state.skb = skb;
5633 		rx_q->state.error = error;
5634 		rx_q->state.len = len;
5635 	}
5636 
5637 	stmmac_finalize_xdp_rx(priv, xdp_status);
5638 
5639 	stmmac_rx_refill(priv, queue);
5640 
5641 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5642 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5643 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5644 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5645 	u64_stats_update_end(&rxq_stats->napi_syncp);
5646 
5647 	priv->xstats.rx_dropped += rx_dropped;
5648 	priv->xstats.rx_errors += rx_errors;
5649 
5650 	return count;
5651 }
5652 
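/**
 * stmmac_napi_poll_rx - RX napi poll handler
 * @napi: napi instance embedded in the channel
 * @budget: napi budget
 * Description: processes up to @budget received frames and re-enables the
 * RX DMA interrupt for the channel once all pending work is done.
 */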
5653 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5654 {
5655 	struct stmmac_channel *ch =
5656 		container_of(napi, struct stmmac_channel, rx_napi);
5657 	struct stmmac_priv *priv = ch->priv_data;
5658 	struct stmmac_rxq_stats *rxq_stats;
5659 	u32 chan = ch->index;
5660 	int work_done;
5661 
5662 	rxq_stats = &priv->xstats.rxq_stats[chan];
5663 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5664 	u64_stats_inc(&rxq_stats->napi.poll);
5665 	u64_stats_update_end(&rxq_stats->napi_syncp);
5666 
5667 	work_done = stmmac_rx(priv, budget, chan);
5668 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5669 		unsigned long flags;
5670 
5671 		spin_lock_irqsave(&ch->lock, flags);
5672 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5673 		spin_unlock_irqrestore(&ch->lock, flags);
5674 	}
5675 
5676 	return work_done;
5677 }
5678 
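/**
 * stmmac_napi_poll_tx - TX napi poll handler
 * @napi: napi instance embedded in the channel
 * @budget: napi budget
 * Description: reclaims completed TX descriptors, re-enables the TX DMA
 * interrupt when all work is done and re-arms the TX coalescing timer if
 * packets are still pending.
 */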
5679 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5680 {
5681 	struct stmmac_channel *ch =
5682 		container_of(napi, struct stmmac_channel, tx_napi);
5683 	struct stmmac_priv *priv = ch->priv_data;
5684 	struct stmmac_txq_stats *txq_stats;
5685 	bool pending_packets = false;
5686 	u32 chan = ch->index;
5687 	int work_done;
5688 
5689 	txq_stats = &priv->xstats.txq_stats[chan];
5690 	u64_stats_update_begin(&txq_stats->napi_syncp);
5691 	u64_stats_inc(&txq_stats->napi.poll);
5692 	u64_stats_update_end(&txq_stats->napi_syncp);
5693 
5694 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5695 	work_done = min(work_done, budget);
5696 
5697 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5698 		unsigned long flags;
5699 
5700 		spin_lock_irqsave(&ch->lock, flags);
5701 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5702 		spin_unlock_irqrestore(&ch->lock, flags);
5703 	}
5704 
5705 	/* TX still has packets to handle; check if we need to arm the tx timer */
5706 	if (pending_packets)
5707 		stmmac_tx_timer_arm(priv, chan);
5708 
5709 	return work_done;
5710 }
5711 
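/**
 * stmmac_napi_poll_rxtx - combined RX/TX napi poll handler (XDP zero-copy)
 * @napi: napi instance embedded in the channel
 * @budget: napi budget
 * Description: services both directions of a channel bound to an XSK pool.
 * Interrupts are only re-enabled once both RX and TX work are complete.
 */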
5712 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5713 {
5714 	struct stmmac_channel *ch =
5715 		container_of(napi, struct stmmac_channel, rxtx_napi);
5716 	struct stmmac_priv *priv = ch->priv_data;
5717 	bool tx_pending_packets = false;
5718 	int rx_done, tx_done, rxtx_done;
5719 	struct stmmac_rxq_stats *rxq_stats;
5720 	struct stmmac_txq_stats *txq_stats;
5721 	u32 chan = ch->index;
5722 
5723 	rxq_stats = &priv->xstats.rxq_stats[chan];
5724 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5725 	u64_stats_inc(&rxq_stats->napi.poll);
5726 	u64_stats_update_end(&rxq_stats->napi_syncp);
5727 
5728 	txq_stats = &priv->xstats.txq_stats[chan];
5729 	u64_stats_update_begin(&txq_stats->napi_syncp);
5730 	u64_stats_inc(&txq_stats->napi.poll);
5731 	u64_stats_update_end(&txq_stats->napi_syncp);
5732 
5733 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5734 	tx_done = min(tx_done, budget);
5735 
5736 	rx_done = stmmac_rx_zc(priv, budget, chan);
5737 
5738 	rxtx_done = max(tx_done, rx_done);
5739 
5740 	/* If either TX or RX work is not complete, return budget
5741 	 * and keep polling
5742 	 */
5743 	if (rxtx_done >= budget)
5744 		return budget;
5745 
5746 	/* all work done, exit the polling mode */
5747 	if (napi_complete_done(napi, rxtx_done)) {
5748 		unsigned long flags;
5749 
5750 		spin_lock_irqsave(&ch->lock, flags);
5751 		/* Both RX and TX work are complete,
5752 		 * so enable both RX & TX IRQs.
5753 		 */
5754 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5755 		spin_unlock_irqrestore(&ch->lock, flags);
5756 	}
5757 
5758 	/* TX still has packets to handle; check if we need to arm the tx timer */
5759 	if (tx_pending_packets)
5760 		stmmac_tx_timer_arm(priv, chan);
5761 
5762 	return min(rxtx_done, budget - 1);
5763 }
5764 
5765 /**
5766  *  stmmac_tx_timeout
5767  *  @dev : Pointer to net device structure
5768  *  @txqueue: the index of the hanging transmit queue
5769  *  Description: this function is called when a packet transmission fails to
5770  *   complete within a reasonable time. The driver will mark the error in the
5771  *   netdev structure and arrange for the device to be reset to a sane state
5772  *   in order to transmit a new packet.
5773  */
5774 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5775 {
5776 	struct stmmac_priv *priv = netdev_priv(dev);
5777 
5778 	stmmac_global_err(priv);
5779 }
5780 
5781 /**
5782  *  stmmac_set_rx_mode - entry point for multicast addressing
5783  *  @dev : pointer to the device structure
5784  *  Description:
5785  *  This function is a driver entry point which gets called by the kernel
5786  *  whenever multicast addresses must be enabled/disabled.
5787  *  Return value:
5788  *  void.
5789  */
5790 static void stmmac_set_rx_mode(struct net_device *dev)
5791 {
5792 	struct stmmac_priv *priv = netdev_priv(dev);
5793 
5794 	stmmac_set_filter(priv, priv->hw, dev);
5795 }
5796 
5797 /**
5798  *  stmmac_change_mtu - entry point to change MTU size for the device.
5799  *  @dev : device pointer.
5800  *  @new_mtu : the new MTU size for the device.
5801  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5802  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5803  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5804  *  Return value:
5805  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5806  *  file on failure.
5807  */
5808 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5809 {
5810 	struct stmmac_priv *priv = netdev_priv(dev);
5811 	int txfifosz = priv->plat->tx_fifo_size;
5812 	struct stmmac_dma_conf *dma_conf;
5813 	const int mtu = new_mtu;
5814 	int ret;
5815 
5816 	if (txfifosz == 0)
5817 		txfifosz = priv->dma_cap.tx_fifo_size;
5818 
5819 	txfifosz /= priv->plat->tx_queues_to_use;
5820 
5821 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5822 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5823 		return -EINVAL;
5824 	}
5825 
5826 	new_mtu = STMMAC_ALIGN(new_mtu);
5827 
5828 	/* If this condition is true, the FIFO is too small or the MTU is too large */
5829 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5830 		return -EINVAL;
5831 
5832 	if (netif_running(dev)) {
5833 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5834 		/* Try to allocate the new DMA conf with the new mtu */
5835 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5836 		if (IS_ERR(dma_conf)) {
5837 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5838 				   mtu);
5839 			return PTR_ERR(dma_conf);
5840 		}
5841 
5842 		stmmac_release(dev);
5843 
5844 		ret = __stmmac_open(dev, dma_conf);
5845 		if (ret) {
5846 			free_dma_desc_resources(priv, dma_conf);
5847 			kfree(dma_conf);
5848 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5849 			return ret;
5850 		}
5851 
5852 		kfree(dma_conf);
5853 
5854 		stmmac_set_rx_mode(dev);
5855 	}
5856 
5857 	dev->mtu = mtu;
5858 	netdev_update_features(dev);
5859 
5860 	return 0;
5861 }
5862 
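/**
 * stmmac_fix_features - adjust the requested feature set to the HW caps
 * @dev: device pointer
 * @features: features requested by the stack
 * Description: masks out checksum offloads that the platform cannot
 * provide and caches the TSO setting requested through ethtool.
 */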
5863 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5864 					     netdev_features_t features)
5865 {
5866 	struct stmmac_priv *priv = netdev_priv(dev);
5867 
5868 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5869 		features &= ~NETIF_F_RXCSUM;
5870 
5871 	if (!priv->plat->tx_coe)
5872 		features &= ~NETIF_F_CSUM_MASK;
5873 
5874 	/* Some GMAC devices have buggy Jumbo frame support that
5875 	 * requires the Tx COE to be disabled for oversized frames
5876 	 * (due to limited buffer sizes). In this case we disable
5877 	 * the TX csum insertion in the TDES and do not use SF.
5878 	 */
5879 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5880 		features &= ~NETIF_F_CSUM_MASK;
5881 
5882 	/* Disable tso if asked by ethtool */
5883 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5884 		if (features & NETIF_F_TSO)
5885 			priv->tso = true;
5886 		else
5887 			priv->tso = false;
5888 	}
5889 
5890 	return features;
5891 }
5892 
5893 static int stmmac_set_features(struct net_device *netdev,
5894 			       netdev_features_t features)
5895 {
5896 	struct stmmac_priv *priv = netdev_priv(netdev);
5897 
5898 	/* Keep the COE Type in case checksumming is supported */
5899 	if (features & NETIF_F_RXCSUM)
5900 		priv->hw->rx_csum = priv->plat->rx_coe;
5901 	else
5902 		priv->hw->rx_csum = 0;
5903 	/* No check needed because rx_coe has been set before and it will be
5904 	 * fixed in case of issue.
5905 	 */
5906 	stmmac_rx_ipc(priv, priv->hw);
5907 
5908 	if (priv->sph_cap) {
5909 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5910 		u32 chan;
5911 
5912 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5913 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5914 	}
5915 
5916 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5917 		priv->hw->hw_vlan_en = true;
5918 	else
5919 		priv->hw->hw_vlan_en = false;
5920 
5921 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5922 
5923 	return 0;
5924 }
5925 
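/**
 * stmmac_fpe_event_status - handle Frame Preemption (FPE) handshake events
 * @priv: driver private structure
 * @status: FPE event bits reported by the hardware
 * Description: updates the local and link-partner FPE states according to
 * the verify/response mPackets seen on the link and schedules the FPE
 * workqueue task when needed.
 */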
5926 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5927 {
5928 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5929 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5930 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5931 	bool *hs_enable = &fpe_cfg->hs_enable;
5932 
5933 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5934 		return;
5935 
5936 	/* If LP has sent verify mPacket, LP is FPE capable */
5937 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5938 		if (*lp_state < FPE_STATE_CAPABLE)
5939 			*lp_state = FPE_STATE_CAPABLE;
5940 
5941 		/* If the user has requested FPE enable, respond quickly */
5942 		if (*hs_enable)
5943 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5944 						fpe_cfg,
5945 						MPACKET_RESPONSE);
5946 	}
5947 
5948 	/* If Local has sent verify mPacket, Local is FPE capable */
5949 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5950 		if (*lo_state < FPE_STATE_CAPABLE)
5951 			*lo_state = FPE_STATE_CAPABLE;
5952 	}
5953 
5954 	/* If LP has sent response mPacket, LP is entering FPE ON */
5955 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5956 		*lp_state = FPE_STATE_ENTERING_ON;
5957 
5958 	/* If Local has sent response mPacket, Local is entering FPE ON */
5959 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5960 		*lo_state = FPE_STATE_ENTERING_ON;
5961 
5962 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5963 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5964 	    priv->fpe_wq) {
5965 		queue_work(priv->fpe_wq, &priv->fpe_task);
5966 	}
5967 }
5968 
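/**
 * stmmac_common_interrupt - handle the MAC (non-DMA) interrupt sources
 * @priv: driver private structure
 * Description: services EST, FPE, LPI, MTL per-queue, PCS link and
 * timestamp events, and records a wakeup event when the interrupt is a
 * wake-up source.
 */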
5969 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5970 {
5971 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5972 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5973 	u32 queues_count;
5974 	u32 queue;
5975 	bool xmac;
5976 
5977 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5978 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5979 
5980 	if (priv->irq_wake)
5981 		pm_wakeup_event(priv->device, 0);
5982 
5983 	if (priv->dma_cap.estsel)
5984 		stmmac_est_irq_status(priv, priv, priv->dev,
5985 				      &priv->xstats, tx_cnt);
5986 
5987 	if (priv->dma_cap.fpesel) {
5988 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5989 						   priv->dev);
5990 
5991 		stmmac_fpe_event_status(priv, status);
5992 	}
5993 
5994 	/* To handle the GMAC's own interrupts */
5995 	if ((priv->plat->has_gmac) || xmac) {
5996 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5997 
5998 		if (unlikely(status)) {
5999 			/* For LPI we need to save the tx status */
6000 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6001 				priv->tx_path_in_lpi_mode = true;
6002 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6003 				priv->tx_path_in_lpi_mode = false;
6004 		}
6005 
6006 		for (queue = 0; queue < queues_count; queue++) {
6007 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
6008 							    queue);
6009 		}
6010 
6011 		/* PCS link status */
6012 		if (priv->hw->pcs &&
6013 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6014 			if (priv->xstats.pcs_link)
6015 				netif_carrier_on(priv->dev);
6016 			else
6017 				netif_carrier_off(priv->dev);
6018 		}
6019 
6020 		stmmac_timestamp_interrupt(priv, priv);
6021 	}
6022 }
6023 
6024 /**
6025  *  stmmac_interrupt - main ISR
6026  *  @irq: interrupt number.
6027  *  @dev_id: to pass the net device pointer.
6028  *  Description: this is the main driver interrupt service routine.
6029  *  It can call:
6030  *  o DMA service routine (to manage incoming frame reception and transmission
6031  *    status)
6032  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6033  *    interrupts.
6034  */
6035 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6036 {
6037 	struct net_device *dev = (struct net_device *)dev_id;
6038 	struct stmmac_priv *priv = netdev_priv(dev);
6039 
6040 	/* Check if adapter is up */
6041 	if (test_bit(STMMAC_DOWN, &priv->state))
6042 		return IRQ_HANDLED;
6043 
6044 	/* Check if a fatal error happened */
6045 	if (stmmac_safety_feat_interrupt(priv))
6046 		return IRQ_HANDLED;
6047 
6048 	/* To handle Common interrupts */
6049 	stmmac_common_interrupt(priv);
6050 
6051 	/* To handle DMA interrupts */
6052 	stmmac_dma_interrupt(priv);
6053 
6054 	return IRQ_HANDLED;
6055 }
6056 
6057 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6058 {
6059 	struct net_device *dev = (struct net_device *)dev_id;
6060 	struct stmmac_priv *priv = netdev_priv(dev);
6061 
6062 	if (unlikely(!dev)) {
6063 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6064 		return IRQ_NONE;
6065 	}
6066 
6067 	/* Check if adapter is up */
6068 	if (test_bit(STMMAC_DOWN, &priv->state))
6069 		return IRQ_HANDLED;
6070 
6071 	/* To handle Common interrupts */
6072 	stmmac_common_interrupt(priv);
6073 
6074 	return IRQ_HANDLED;
6075 }
6076 
6077 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6078 {
6079 	struct net_device *dev = (struct net_device *)dev_id;
6080 	struct stmmac_priv *priv = netdev_priv(dev);
6081 
6082 	if (unlikely(!dev)) {
6083 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6084 		return IRQ_NONE;
6085 	}
6086 
6087 	/* Check if adapter is up */
6088 	if (test_bit(STMMAC_DOWN, &priv->state))
6089 		return IRQ_HANDLED;
6090 
6091 	/* Check if a fatal error happened */
6092 	stmmac_safety_feat_interrupt(priv);
6093 
6094 	return IRQ_HANDLED;
6095 }
6096 
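/**
 * stmmac_msi_intr_tx - per-channel TX MSI interrupt handler
 * @irq: interrupt number
 * @data: pointer to the TX queue that raised the interrupt
 * Description: checks the DMA interrupt status for the channel and, on hard
 * TX errors, bumps the DMA threshold or resets the TX path.
 */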
6097 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6098 {
6099 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6100 	struct stmmac_dma_conf *dma_conf;
6101 	int chan = tx_q->queue_index;
6102 	struct stmmac_priv *priv;
6103 	int status;
6104 
6105 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6106 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6107 
6108 	if (unlikely(!data)) {
6109 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6110 		return IRQ_NONE;
6111 	}
6112 
6113 	/* Check if adapter is up */
6114 	if (test_bit(STMMAC_DOWN, &priv->state))
6115 		return IRQ_HANDLED;
6116 
6117 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6118 
6119 	if (unlikely(status & tx_hard_error_bump_tc)) {
6120 		/* Try to bump up the dma threshold on this failure */
6121 		stmmac_bump_dma_threshold(priv, chan);
6122 	} else if (unlikely(status == tx_hard_error)) {
6123 		stmmac_tx_err(priv, chan);
6124 	}
6125 
6126 	return IRQ_HANDLED;
6127 }
6128 
6129 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6130 {
6131 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6132 	struct stmmac_dma_conf *dma_conf;
6133 	int chan = rx_q->queue_index;
6134 	struct stmmac_priv *priv;
6135 
6136 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6137 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6138 
6139 	if (unlikely(!data)) {
6140 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6141 		return IRQ_NONE;
6142 	}
6143 
6144 	/* Check if adapter is up */
6145 	if (test_bit(STMMAC_DOWN, &priv->state))
6146 		return IRQ_HANDLED;
6147 
6148 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6149 
6150 	return IRQ_HANDLED;
6151 }
6152 
6153 /**
6154  *  stmmac_ioctl - Entry point for the Ioctl
6155  *  @dev: Device pointer.
6156  *  @rq: An IOCTL-specific structure that can contain a pointer to
6157  *  a proprietary structure used to pass information to the driver.
6158  *  @cmd: IOCTL command
6159  *  Description:
6160  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6161  */
6162 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6163 {
6164 	struct stmmac_priv *priv = netdev_priv(dev);
6165 	int ret = -EOPNOTSUPP;
6166 
6167 	if (!netif_running(dev))
6168 		return -EINVAL;
6169 
6170 	switch (cmd) {
6171 	case SIOCGMIIPHY:
6172 	case SIOCGMIIREG:
6173 	case SIOCSMIIREG:
6174 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6175 		break;
6176 	case SIOCSHWTSTAMP:
6177 		ret = stmmac_hwtstamp_set(dev, rq);
6178 		break;
6179 	case SIOCGHWTSTAMP:
6180 		ret = stmmac_hwtstamp_get(dev, rq);
6181 		break;
6182 	default:
6183 		break;
6184 	}
6185 
6186 	return ret;
6187 }
6188 
6189 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6190 				    void *cb_priv)
6191 {
6192 	struct stmmac_priv *priv = cb_priv;
6193 	int ret = -EOPNOTSUPP;
6194 
6195 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6196 		return ret;
6197 
6198 	__stmmac_disable_all_queues(priv);
6199 
6200 	switch (type) {
6201 	case TC_SETUP_CLSU32:
6202 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6203 		break;
6204 	case TC_SETUP_CLSFLOWER:
6205 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6206 		break;
6207 	default:
6208 		break;
6209 	}
6210 
6211 	stmmac_enable_all_queues(priv);
6212 	return ret;
6213 }
6214 
6215 static LIST_HEAD(stmmac_block_cb_list);
6216 
6217 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6218 			   void *type_data)
6219 {
6220 	struct stmmac_priv *priv = netdev_priv(ndev);
6221 
6222 	switch (type) {
6223 	case TC_QUERY_CAPS:
6224 		return stmmac_tc_query_caps(priv, priv, type_data);
6225 	case TC_SETUP_BLOCK:
6226 		return flow_block_cb_setup_simple(type_data,
6227 						  &stmmac_block_cb_list,
6228 						  stmmac_setup_tc_block_cb,
6229 						  priv, priv, true);
6230 	case TC_SETUP_QDISC_CBS:
6231 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6232 	case TC_SETUP_QDISC_TAPRIO:
6233 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6234 	case TC_SETUP_QDISC_ETF:
6235 		return stmmac_tc_setup_etf(priv, priv, type_data);
6236 	default:
6237 		return -EOPNOTSUPP;
6238 	}
6239 }
6240 
6241 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6242 			       struct net_device *sb_dev)
6243 {
6244 	int gso = skb_shinfo(skb)->gso_type;
6245 
6246 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6247 		/*
6248 		 * There is no way to determine the number of TSO/USO
6249 		 * capable Queues. Let's always use Queue 0
6250 		 * because if TSO/USO is supported then at least this
6251 		 * one will be capable.
6252 		 */
6253 		return 0;
6254 	}
6255 
6256 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6257 }
6258 
6259 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6260 {
6261 	struct stmmac_priv *priv = netdev_priv(ndev);
6262 	int ret = 0;
6263 
6264 	ret = pm_runtime_resume_and_get(priv->device);
6265 	if (ret < 0)
6266 		return ret;
6267 
6268 	ret = eth_mac_addr(ndev, addr);
6269 	if (ret)
6270 		goto set_mac_error;
6271 
6272 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6273 
6274 set_mac_error:
6275 	pm_runtime_put(priv->device);
6276 
6277 	return ret;
6278 }
6279 
6280 #ifdef CONFIG_DEBUG_FS
6281 static struct dentry *stmmac_fs_dir;
6282 
6283 static void sysfs_display_ring(void *head, int size, int extend_desc,
6284 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6285 {
6286 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6287 	struct dma_desc *p = (struct dma_desc *)head;
6288 	unsigned int desc_size;
6289 	dma_addr_t dma_addr;
6290 	int i;
6291 
6292 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6293 	for (i = 0; i < size; i++) {
6294 		dma_addr = dma_phy_addr + i * desc_size;
6295 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6296 				i, &dma_addr,
6297 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6298 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6299 		if (extend_desc)
6300 			p = &(++ep)->basic;
6301 		else
6302 			p++;
6303 	}
6304 }
6305 
6306 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6307 {
6308 	struct net_device *dev = seq->private;
6309 	struct stmmac_priv *priv = netdev_priv(dev);
6310 	u32 rx_count = priv->plat->rx_queues_to_use;
6311 	u32 tx_count = priv->plat->tx_queues_to_use;
6312 	u32 queue;
6313 
6314 	if ((dev->flags & IFF_UP) == 0)
6315 		return 0;
6316 
6317 	for (queue = 0; queue < rx_count; queue++) {
6318 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6319 
6320 		seq_printf(seq, "RX Queue %d:\n", queue);
6321 
6322 		if (priv->extend_desc) {
6323 			seq_printf(seq, "Extended descriptor ring:\n");
6324 			sysfs_display_ring((void *)rx_q->dma_erx,
6325 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6326 		} else {
6327 			seq_printf(seq, "Descriptor ring:\n");
6328 			sysfs_display_ring((void *)rx_q->dma_rx,
6329 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6330 		}
6331 	}
6332 
6333 	for (queue = 0; queue < tx_count; queue++) {
6334 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6335 
6336 		seq_printf(seq, "TX Queue %d:\n", queue);
6337 
6338 		if (priv->extend_desc) {
6339 			seq_printf(seq, "Extended descriptor ring:\n");
6340 			sysfs_display_ring((void *)tx_q->dma_etx,
6341 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6342 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6343 			seq_printf(seq, "Descriptor ring:\n");
6344 			sysfs_display_ring((void *)tx_q->dma_tx,
6345 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6346 		}
6347 	}
6348 
6349 	return 0;
6350 }
6351 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6352 
6353 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6354 {
6355 	static const char * const dwxgmac_timestamp_source[] = {
6356 		"None",
6357 		"Internal",
6358 		"External",
6359 		"Both",
6360 	};
6361 	static const char * const dwxgmac_safety_feature_desc[] = {
6362 		"No",
6363 		"All Safety Features with ECC and Parity",
6364 		"All Safety Features without ECC or Parity",
6365 		"All Safety Features with Parity Only",
6366 		"ECC Only",
6367 		"UNDEFINED",
6368 		"UNDEFINED",
6369 		"UNDEFINED",
6370 	};
6371 	struct net_device *dev = seq->private;
6372 	struct stmmac_priv *priv = netdev_priv(dev);
6373 
6374 	if (!priv->hw_cap_support) {
6375 		seq_printf(seq, "DMA HW features not supported\n");
6376 		return 0;
6377 	}
6378 
6379 	seq_printf(seq, "==============================\n");
6380 	seq_printf(seq, "\tDMA HW features\n");
6381 	seq_printf(seq, "==============================\n");
6382 
6383 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6384 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6385 	seq_printf(seq, "\t1000 Mbps: %s\n",
6386 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6387 	seq_printf(seq, "\tHalf duplex: %s\n",
6388 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6389 	if (priv->plat->has_xgmac) {
6390 		seq_printf(seq,
6391 			   "\tNumber of Additional MAC address registers: %d\n",
6392 			   priv->dma_cap.multi_addr);
6393 	} else {
6394 		seq_printf(seq, "\tHash Filter: %s\n",
6395 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6396 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6397 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6398 	}
6399 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6400 		   (priv->dma_cap.pcs) ? "Y" : "N");
6401 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6402 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6403 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6404 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6405 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6406 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6407 	seq_printf(seq, "\tRMON module: %s\n",
6408 		   (priv->dma_cap.rmon) ? "Y" : "N");
6409 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6410 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6411 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6412 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6413 	if (priv->plat->has_xgmac)
6414 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6415 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6416 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6417 		   (priv->dma_cap.eee) ? "Y" : "N");
6418 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6419 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6420 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6421 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6422 	    priv->plat->has_xgmac) {
6423 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6424 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6425 	} else {
6426 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6427 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6428 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6429 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6430 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6431 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6432 	}
6433 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6434 		   priv->dma_cap.number_rx_channel);
6435 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6436 		   priv->dma_cap.number_tx_channel);
6437 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6438 		   priv->dma_cap.number_rx_queues);
6439 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6440 		   priv->dma_cap.number_tx_queues);
6441 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6442 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6443 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6444 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6445 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6446 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6447 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6448 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6449 		   priv->dma_cap.pps_out_num);
6450 	seq_printf(seq, "\tSafety Features: %s\n",
6451 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6452 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6453 		   priv->dma_cap.frpsel ? "Y" : "N");
6454 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6455 		   priv->dma_cap.host_dma_width);
6456 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6457 		   priv->dma_cap.rssen ? "Y" : "N");
6458 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6459 		   priv->dma_cap.vlhash ? "Y" : "N");
6460 	seq_printf(seq, "\tSplit Header: %s\n",
6461 		   priv->dma_cap.sphen ? "Y" : "N");
6462 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6463 		   priv->dma_cap.vlins ? "Y" : "N");
6464 	seq_printf(seq, "\tDouble VLAN: %s\n",
6465 		   priv->dma_cap.dvlan ? "Y" : "N");
6466 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6467 		   priv->dma_cap.l3l4fnum);
6468 	seq_printf(seq, "\tARP Offloading: %s\n",
6469 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6470 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6471 		   priv->dma_cap.estsel ? "Y" : "N");
6472 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6473 		   priv->dma_cap.fpesel ? "Y" : "N");
6474 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6475 		   priv->dma_cap.tbssel ? "Y" : "N");
6476 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6477 		   priv->dma_cap.tbs_ch_num);
6478 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6479 		   priv->dma_cap.sgfsel ? "Y" : "N");
6480 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6481 		   BIT(priv->dma_cap.ttsfd) >> 1);
6482 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6483 		   priv->dma_cap.numtc);
6484 	seq_printf(seq, "\tDCB Feature: %s\n",
6485 		   priv->dma_cap.dcben ? "Y" : "N");
6486 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6487 		   priv->dma_cap.advthword ? "Y" : "N");
6488 	seq_printf(seq, "\tPTP Offload: %s\n",
6489 		   priv->dma_cap.ptoen ? "Y" : "N");
6490 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6491 		   priv->dma_cap.osten ? "Y" : "N");
6492 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6493 		   priv->dma_cap.pfcen ? "Y" : "N");
6494 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6495 		   BIT(priv->dma_cap.frpes) << 6);
6496 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6497 		   BIT(priv->dma_cap.frpbs) << 6);
6498 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6499 		   priv->dma_cap.frppipe_num);
6500 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6501 		   priv->dma_cap.nrvf_num ?
6502 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6503 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6504 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6505 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6506 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6507 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6508 		   priv->dma_cap.cbtisel ? "Y" : "N");
6509 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6510 		   priv->dma_cap.aux_snapshot_n);
6511 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6512 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6513 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6514 		   priv->dma_cap.edma ? "Y" : "N");
6515 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6516 		   priv->dma_cap.ediffc ? "Y" : "N");
6517 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6518 		   priv->dma_cap.vxn ? "Y" : "N");
6519 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6520 		   priv->dma_cap.dbgmem ? "Y" : "N");
6521 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6522 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6523 	return 0;
6524 }
6525 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6526 
6527 /* Use network device events to rename debugfs file entries.
6528  */
6529 static int stmmac_device_event(struct notifier_block *unused,
6530 			       unsigned long event, void *ptr)
6531 {
6532 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6533 	struct stmmac_priv *priv = netdev_priv(dev);
6534 
6535 	if (dev->netdev_ops != &stmmac_netdev_ops)
6536 		goto done;
6537 
6538 	switch (event) {
6539 	case NETDEV_CHANGENAME:
6540 		if (priv->dbgfs_dir)
6541 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6542 							 priv->dbgfs_dir,
6543 							 stmmac_fs_dir,
6544 							 dev->name);
6545 		break;
6546 	}
6547 done:
6548 	return NOTIFY_DONE;
6549 }
6550 
6551 static struct notifier_block stmmac_notifier = {
6552 	.notifier_call = stmmac_device_event,
6553 };
6554 
6555 static void stmmac_init_fs(struct net_device *dev)
6556 {
6557 	struct stmmac_priv *priv = netdev_priv(dev);
6558 
6559 	rtnl_lock();
6560 
6561 	/* Create per netdev entries */
6562 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6563 
6564 	/* Entry to report DMA RX/TX rings */
6565 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6566 			    &stmmac_rings_status_fops);
6567 
6568 	/* Entry to report the DMA HW features */
6569 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6570 			    &stmmac_dma_cap_fops);
6571 
6572 	rtnl_unlock();
6573 }
6574 
6575 static void stmmac_exit_fs(struct net_device *dev)
6576 {
6577 	struct stmmac_priv *priv = netdev_priv(dev);
6578 
6579 	debugfs_remove_recursive(priv->dbgfs_dir);
6580 }
6581 #endif /* CONFIG_DEBUG_FS */
6582 
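/* Compute the bit-reversed little-endian CRC32 of a VLAN ID, as used by the
 * hardware VLAN hash filter. Only the 12 VID bits are fed into the CRC.
 */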
6583 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6584 {
6585 	unsigned char *data = (unsigned char *)&vid_le;
6586 	unsigned char data_byte = 0;
6587 	u32 crc = ~0x0;
6588 	u32 temp = 0;
6589 	int i, bits;
6590 
6591 	bits = get_bitmask_order(VLAN_VID_MASK);
6592 	for (i = 0; i < bits; i++) {
6593 		if ((i % 8) == 0)
6594 			data_byte = data[i / 8];
6595 
6596 		temp = ((crc & 1) ^ data_byte) & 1;
6597 		crc >>= 1;
6598 		data_byte >>= 1;
6599 
6600 		if (temp)
6601 			crc ^= 0xedb88320;
6602 	}
6603 
6604 	return crc;
6605 }
6606 
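/* Rebuild the VLAN hash filter from the set of active VLAN IDs, or fall back
 * to a single perfect-match entry when the hardware has no VLAN hash support.
 */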
6607 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6608 {
6609 	u32 crc, hash = 0;
6610 	__le16 pmatch = 0;
6611 	int count = 0;
6612 	u16 vid = 0;
6613 
6614 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6615 		__le16 vid_le = cpu_to_le16(vid);
6616 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6617 		hash |= (1 << crc);
6618 		count++;
6619 	}
6620 
6621 	if (!priv->dma_cap.vlhash) {
6622 		if (count > 2) /* VID = 0 always passes filter */
6623 			return -EOPNOTSUPP;
6624 
6625 		pmatch = cpu_to_le16(vid);
6626 		hash = 0;
6627 	}
6628 
6629 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6630 }
6631 
6632 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6633 {
6634 	struct stmmac_priv *priv = netdev_priv(ndev);
6635 	bool is_double = false;
6636 	int ret;
6637 
6638 	ret = pm_runtime_resume_and_get(priv->device);
6639 	if (ret < 0)
6640 		return ret;
6641 
6642 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6643 		is_double = true;
6644 
6645 	set_bit(vid, priv->active_vlans);
6646 	ret = stmmac_vlan_update(priv, is_double);
6647 	if (ret) {
6648 		clear_bit(vid, priv->active_vlans);
6649 		goto err_pm_put;
6650 	}
6651 
6652 	if (priv->hw->num_vlan) {
6653 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6654 		if (ret)
6655 			goto err_pm_put;
6656 	}
6657 err_pm_put:
6658 	pm_runtime_put(priv->device);
6659 
6660 	return ret;
6661 }
6662 
6663 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6664 {
6665 	struct stmmac_priv *priv = netdev_priv(ndev);
6666 	bool is_double = false;
6667 	int ret;
6668 
6669 	ret = pm_runtime_resume_and_get(priv->device);
6670 	if (ret < 0)
6671 		return ret;
6672 
6673 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6674 		is_double = true;
6675 
6676 	clear_bit(vid, priv->active_vlans);
6677 
6678 	if (priv->hw->num_vlan) {
6679 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6680 		if (ret)
6681 			goto del_vlan_error;
6682 	}
6683 
6684 	ret = stmmac_vlan_update(priv, is_double);
6685 
6686 del_vlan_error:
6687 	pm_runtime_put(priv->device);
6688 
6689 	return ret;
6690 }
6691 
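/* ndo_bpf callback: attach/detach an XDP program or set up an AF_XDP (XSK)
 * buffer pool for a given queue.
 */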
6692 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6693 {
6694 	struct stmmac_priv *priv = netdev_priv(dev);
6695 
6696 	switch (bpf->command) {
6697 	case XDP_SETUP_PROG:
6698 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6699 	case XDP_SETUP_XSK_POOL:
6700 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6701 					     bpf->xsk.queue_id);
6702 	default:
6703 		return -EOPNOTSUPP;
6704 	}
6705 }
6706 
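/* ndo_xdp_xmit callback: transmit a batch of XDP frames on the TX queue
 * mapped to the current CPU, sharing the queue lock with the slow path.
 * Returns the number of frames actually queued.
 */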
6707 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6708 			   struct xdp_frame **frames, u32 flags)
6709 {
6710 	struct stmmac_priv *priv = netdev_priv(dev);
6711 	int cpu = smp_processor_id();
6712 	struct netdev_queue *nq;
6713 	int i, nxmit = 0;
6714 	int queue;
6715 
6716 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6717 		return -ENETDOWN;
6718 
6719 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6720 		return -EINVAL;
6721 
6722 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6723 	nq = netdev_get_tx_queue(priv->dev, queue);
6724 
6725 	__netif_tx_lock(nq, cpu);
6726 	/* Avoids TX time-out as we are sharing with slow path */
6727 	txq_trans_cond_update(nq);
6728 
6729 	for (i = 0; i < num_frames; i++) {
6730 		int res;
6731 
6732 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6733 		if (res == STMMAC_XDP_CONSUMED)
6734 			break;
6735 
6736 		nxmit++;
6737 	}
6738 
6739 	if (flags & XDP_XMIT_FLUSH) {
6740 		stmmac_flush_tx_descriptors(priv, queue);
6741 		stmmac_tx_timer_arm(priv, queue);
6742 	}
6743 
6744 	__netif_tx_unlock(nq);
6745 
6746 	return nxmit;
6747 }
6748 
6749 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6750 {
6751 	struct stmmac_channel *ch = &priv->channel[queue];
6752 	unsigned long flags;
6753 
6754 	spin_lock_irqsave(&ch->lock, flags);
6755 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6756 	spin_unlock_irqrestore(&ch->lock, flags);
6757 
6758 	stmmac_stop_rx_dma(priv, queue);
6759 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6760 }
6761 
6762 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6763 {
6764 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6765 	struct stmmac_channel *ch = &priv->channel[queue];
6766 	unsigned long flags;
6767 	u32 buf_size;
6768 	int ret;
6769 
6770 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6771 	if (ret) {
6772 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6773 		return;
6774 	}
6775 
6776 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6777 	if (ret) {
6778 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6779 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6780 		return;
6781 	}
6782 
6783 	stmmac_reset_rx_queue(priv, queue);
6784 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6785 
6786 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6787 			    rx_q->dma_rx_phy, rx_q->queue_index);
6788 
6789 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6790 			     sizeof(struct dma_desc));
6791 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6792 			       rx_q->rx_tail_addr, rx_q->queue_index);
6793 
6794 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6795 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6796 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6797 				      buf_size,
6798 				      rx_q->queue_index);
6799 	} else {
6800 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6801 				      priv->dma_conf.dma_buf_sz,
6802 				      rx_q->queue_index);
6803 	}
6804 
6805 	stmmac_start_rx_dma(priv, queue);
6806 
6807 	spin_lock_irqsave(&ch->lock, flags);
6808 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6809 	spin_unlock_irqrestore(&ch->lock, flags);
6810 }
6811 
6812 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6813 {
6814 	struct stmmac_channel *ch = &priv->channel[queue];
6815 	unsigned long flags;
6816 
6817 	spin_lock_irqsave(&ch->lock, flags);
6818 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6819 	spin_unlock_irqrestore(&ch->lock, flags);
6820 
6821 	stmmac_stop_tx_dma(priv, queue);
6822 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6823 }
6824 
6825 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6826 {
6827 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6828 	struct stmmac_channel *ch = &priv->channel[queue];
6829 	unsigned long flags;
6830 	int ret;
6831 
6832 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6833 	if (ret) {
6834 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6835 		return;
6836 	}
6837 
6838 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6839 	if (ret) {
6840 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6841 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6842 		return;
6843 	}
6844 
6845 	stmmac_reset_tx_queue(priv, queue);
6846 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6847 
6848 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6849 			    tx_q->dma_tx_phy, tx_q->queue_index);
6850 
6851 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6852 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6853 
6854 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6855 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6856 			       tx_q->tx_tail_addr, tx_q->queue_index);
6857 
6858 	stmmac_start_tx_dma(priv, queue);
6859 
6860 	spin_lock_irqsave(&ch->lock, flags);
6861 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6862 	spin_unlock_irqrestore(&ch->lock, flags);
6863 }
6864 
6865 void stmmac_xdp_release(struct net_device *dev)
6866 {
6867 	struct stmmac_priv *priv = netdev_priv(dev);
6868 	u32 chan;
6869 
6870 	/* Ensure tx function is not running */
6871 	netif_tx_disable(dev);
6872 
6873 	/* Disable NAPI process */
6874 	stmmac_disable_all_queues(priv);
6875 
6876 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6877 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6878 
6879 	/* Free the IRQ lines */
6880 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6881 
6882 	/* Stop TX/RX DMA channels */
6883 	stmmac_stop_all_dma(priv);
6884 
6885 	/* Release and free the Rx/Tx resources */
6886 	free_dma_desc_resources(priv, &priv->dma_conf);
6887 
6888 	/* Disable the MAC Rx/Tx */
6889 	stmmac_mac_set(priv, priv->ioaddr, false);
6890 
6891 	/* set trans_start so we don't get spurious
6892 	 * watchdogs during reset
6893 	 */
6894 	netif_trans_update(dev);
6895 	netif_carrier_off(dev);
6896 }
6897 
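/**
 * stmmac_xdp_open - (re)open the device for XDP/AF_XDP use
 * @dev: device pointer
 * Description: reallocates and reprograms the DMA rings, restarts the DMA
 * channels, requests the IRQ lines and re-enables NAPI after the rings have
 * been reconfigured for XDP.
 */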
6898 int stmmac_xdp_open(struct net_device *dev)
6899 {
6900 	struct stmmac_priv *priv = netdev_priv(dev);
6901 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6902 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6903 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6904 	struct stmmac_rx_queue *rx_q;
6905 	struct stmmac_tx_queue *tx_q;
6906 	u32 buf_size;
6907 	bool sph_en;
6908 	u32 chan;
6909 	int ret;
6910 
6911 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6912 	if (ret < 0) {
6913 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6914 			   __func__);
6915 		goto dma_desc_error;
6916 	}
6917 
6918 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6919 	if (ret < 0) {
6920 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6921 			   __func__);
6922 		goto init_error;
6923 	}
6924 
6925 	stmmac_reset_queues_param(priv);
6926 
6927 	/* DMA CSR Channel configuration */
6928 	for (chan = 0; chan < dma_csr_ch; chan++) {
6929 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6930 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6931 	}
6932 
6933 	/* Adjust Split header */
6934 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6935 
6936 	/* DMA RX Channel Configuration */
6937 	for (chan = 0; chan < rx_cnt; chan++) {
6938 		rx_q = &priv->dma_conf.rx_queue[chan];
6939 
6940 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6941 				    rx_q->dma_rx_phy, chan);
6942 
6943 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6944 				     (rx_q->buf_alloc_num *
6945 				      sizeof(struct dma_desc));
6946 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6947 				       rx_q->rx_tail_addr, chan);
6948 
6949 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6950 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6951 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6952 					      buf_size,
6953 					      rx_q->queue_index);
6954 		} else {
6955 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6956 					      priv->dma_conf.dma_buf_sz,
6957 					      rx_q->queue_index);
6958 		}
6959 
6960 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6961 	}
6962 
6963 	/* DMA TX Channel Configuration */
6964 	for (chan = 0; chan < tx_cnt; chan++) {
6965 		tx_q = &priv->dma_conf.tx_queue[chan];
6966 
6967 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6968 				    tx_q->dma_tx_phy, chan);
6969 
6970 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6971 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6972 				       tx_q->tx_tail_addr, chan);
6973 
6974 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6975 		tx_q->txtimer.function = stmmac_tx_timer;
6976 	}
6977 
6978 	/* Enable the MAC Rx/Tx */
6979 	stmmac_mac_set(priv, priv->ioaddr, true);
6980 
6981 	/* Start Rx & Tx DMA Channels */
6982 	stmmac_start_all_dma(priv);
6983 
6984 	ret = stmmac_request_irq(dev);
6985 	if (ret)
6986 		goto irq_error;
6987 
6988 	/* Enable NAPI process */
6989 	stmmac_enable_all_queues(priv);
6990 	netif_carrier_on(dev);
6991 	netif_tx_start_all_queues(dev);
6992 	stmmac_enable_all_dma_irq(priv);
6993 
6994 	return 0;
6995 
6996 irq_error:
6997 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6998 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6999 
7000 	stmmac_hw_teardown(dev);
7001 init_error:
7002 	free_dma_desc_resources(priv, &priv->dma_conf);
7003 dma_desc_error:
7004 	return ret;
7005 }
7006 
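/* stmmac_xsk_wakeup - .ndo_xsk_wakeup callback used by the AF_XDP core to
 * kick RX/TX processing on a zero-copy queue. It validates the device,
 * queue and pool state, then schedules the combined rxtx NAPI if it is
 * not already running.
 */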
7007 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7008 {
7009 	struct stmmac_priv *priv = netdev_priv(dev);
7010 	struct stmmac_rx_queue *rx_q;
7011 	struct stmmac_tx_queue *tx_q;
7012 	struct stmmac_channel *ch;
7013 
7014 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7015 	    !netif_carrier_ok(priv->dev))
7016 		return -ENETDOWN;
7017 
7018 	if (!stmmac_xdp_is_enabled(priv))
7019 		return -EINVAL;
7020 
7021 	if (queue >= priv->plat->rx_queues_to_use ||
7022 	    queue >= priv->plat->tx_queues_to_use)
7023 		return -EINVAL;
7024 
7025 	rx_q = &priv->dma_conf.rx_queue[queue];
7026 	tx_q = &priv->dma_conf.tx_queue[queue];
7027 	ch = &priv->channel[queue];
7028 
7029 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7030 		return -EINVAL;
7031 
7032 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7033 		/* EQoS does not have a per-DMA channel SW interrupt,
7034 		 * so we schedule the rxtx NAPI straight away.
7035 		 */
7036 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7037 			__napi_schedule(&ch->rxtx_napi);
7038 	}
7039 
7040 	return 0;
7041 }
7042 
7043 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7044 {
7045 	struct stmmac_priv *priv = netdev_priv(dev);
7046 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7047 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7048 	unsigned int start;
7049 	int q;
7050 
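	/* Per-queue counters are sampled under a u64_stats seqcount retry
	 * loop so that 64-bit values are read consistently on 32-bit systems.
	 */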
7051 	for (q = 0; q < tx_cnt; q++) {
7052 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7053 		u64 tx_packets;
7054 		u64 tx_bytes;
7055 
7056 		do {
7057 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7058 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7059 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7060 		do {
7061 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7062 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7063 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7064 
7065 		stats->tx_packets += tx_packets;
7066 		stats->tx_bytes += tx_bytes;
7067 	}
7068 
7069 	for (q = 0; q < rx_cnt; q++) {
7070 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7071 		u64 rx_packets;
7072 		u64 rx_bytes;
7073 
7074 		do {
7075 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7076 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7077 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7078 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7079 
7080 		stats->rx_packets += rx_packets;
7081 		stats->rx_bytes += rx_bytes;
7082 	}
7083 
7084 	stats->rx_dropped = priv->xstats.rx_dropped;
7085 	stats->rx_errors = priv->xstats.rx_errors;
7086 	stats->tx_dropped = priv->xstats.tx_dropped;
7087 	stats->tx_errors = priv->xstats.tx_errors;
7088 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7089 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7090 	stats->rx_length_errors = priv->xstats.rx_length;
7091 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7092 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7093 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7094 }
7095 
7096 static const struct net_device_ops stmmac_netdev_ops = {
7097 	.ndo_open = stmmac_open,
7098 	.ndo_start_xmit = stmmac_xmit,
7099 	.ndo_stop = stmmac_release,
7100 	.ndo_change_mtu = stmmac_change_mtu,
7101 	.ndo_fix_features = stmmac_fix_features,
7102 	.ndo_set_features = stmmac_set_features,
7103 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7104 	.ndo_tx_timeout = stmmac_tx_timeout,
7105 	.ndo_eth_ioctl = stmmac_ioctl,
7106 	.ndo_get_stats64 = stmmac_get_stats64,
7107 	.ndo_setup_tc = stmmac_setup_tc,
7108 	.ndo_select_queue = stmmac_select_queue,
7109 	.ndo_set_mac_address = stmmac_set_mac_address,
7110 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7111 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7112 	.ndo_bpf = stmmac_bpf,
7113 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7114 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7115 };
7116 
7117 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7118 {
7119 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7120 		return;
7121 	if (test_bit(STMMAC_DOWN, &priv->state))
7122 		return;
7123 
7124 	netdev_err(priv->dev, "Reset adapter.\n");
7125 
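	/* Recover by closing and re-opening the device under the RTNL lock,
	 * serializing against other resets via the STMMAC_RESETING bit.
	 */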
7126 	rtnl_lock();
7127 	netif_trans_update(priv->dev);
7128 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7129 		usleep_range(1000, 2000);
7130 
7131 	set_bit(STMMAC_DOWN, &priv->state);
7132 	dev_close(priv->dev);
7133 	dev_open(priv->dev, NULL);
7134 	clear_bit(STMMAC_DOWN, &priv->state);
7135 	clear_bit(STMMAC_RESETING, &priv->state);
7136 	rtnl_unlock();
7137 }
7138 
7139 static void stmmac_service_task(struct work_struct *work)
7140 {
7141 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7142 			service_task);
7143 
7144 	stmmac_reset_subtask(priv);
7145 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7146 }
7147 
7148 /**
7149  *  stmmac_hw_init - Init the MAC device
7150  *  @priv: driver private structure
7151  *  Description: this function configures the MAC device according to
7152  *  platform parameters and the HW capability register. It prepares the
7153  *  driver to use either ring or chain mode and to set up either enhanced
7154  *  or normal descriptors.
7155  */
7156 static int stmmac_hw_init(struct stmmac_priv *priv)
7157 {
7158 	int ret;
7159 
7160 	/* dwmac-sun8i only works in chain mode */
7161 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7162 		chain_mode = 1;
7163 	priv->chain_mode = chain_mode;
7164 
7165 	/* Initialize HW Interface */
7166 	ret = stmmac_hwif_init(priv);
7167 	if (ret)
7168 		return ret;
7169 
7170 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7171 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7172 	if (priv->hw_cap_support) {
7173 		dev_info(priv->device, "DMA HW capability register supported\n");
7174 
7175 		/* Some GMAC/DMA configuration fields passed in from the
7176 		 * platform (e.g. enh_desc, tx_coe) can be overridden
7177 		 * with the values from the HW capability register,
7178 		 * if supported.
7179 		 */
7180 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7181 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7182 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7183 		priv->hw->pmt = priv->plat->pmt;
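		/* Convert the encoded hash table size into the number of
		 * multicast filter bins and cache its log2 for hashing.
		 */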
7184 		if (priv->dma_cap.hash_tb_sz) {
7185 			priv->hw->multicast_filter_bins =
7186 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7187 			priv->hw->mcast_bits_log2 =
7188 					ilog2(priv->hw->multicast_filter_bins);
7189 		}
7190 
7191 		/* TXCOE doesn't work in thresh DMA mode */
7192 		if (priv->plat->force_thresh_dma_mode)
7193 			priv->plat->tx_coe = 0;
7194 		else
7195 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7196 
7197 		/* In case of GMAC4 rx_coe is from HW cap register. */
7198 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7199 
7200 		if (priv->dma_cap.rx_coe_type2)
7201 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7202 		else if (priv->dma_cap.rx_coe_type1)
7203 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7204 
7205 	} else {
7206 		dev_info(priv->device, "No HW DMA feature register supported\n");
7207 	}
7208 
7209 	if (priv->plat->rx_coe) {
7210 		priv->hw->rx_csum = priv->plat->rx_coe;
7211 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7212 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7213 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7214 	}
7215 	if (priv->plat->tx_coe)
7216 		dev_info(priv->device, "TX Checksum insertion supported\n");
7217 
7218 	if (priv->plat->pmt) {
7219 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7220 		device_set_wakeup_capable(priv->device, 1);
7221 	}
7222 
7223 	if (priv->dma_cap.tsoen)
7224 		dev_info(priv->device, "TSO supported\n");
7225 
7226 	priv->hw->vlan_fail_q_en =
7227 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7228 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7229 
7230 	/* Run HW quirks, if any */
7231 	if (priv->hwif_quirks) {
7232 		ret = priv->hwif_quirks(priv);
7233 		if (ret)
7234 			return ret;
7235 	}
7236 
7237 	/* Rx Watchdog is available in cores newer than 3.40.
7238 	 * In some cases, e.g. on buggy HW, this feature has to be
7239 	 * disabled; this can be done by passing the riwt_off field
7240 	 * from the platform.
7241 	 */
7242 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7243 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7244 		priv->use_riwt = 1;
7245 		dev_info(priv->device,
7246 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7247 	}
7248 
7249 	return 0;
7250 }
7251 
7252 static void stmmac_napi_add(struct net_device *dev)
7253 {
7254 	struct stmmac_priv *priv = netdev_priv(dev);
7255 	u32 queue, maxq;
7256 
7257 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7258 
7259 	for (queue = 0; queue < maxq; queue++) {
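	/* Register up to three NAPI contexts per channel: an RX NAPI if the
	 * channel has an RX queue, a TX NAPI if it has a TX queue, and a
	 * combined rxtx NAPI (scheduled e.g. from the XSK wakeup path above)
	 * when it has both.
	 */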
7260 		struct stmmac_channel *ch = &priv->channel[queue];
7261 
7262 		ch->priv_data = priv;
7263 		ch->index = queue;
7264 		spin_lock_init(&ch->lock);
7265 
7266 		if (queue < priv->plat->rx_queues_to_use) {
7267 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7268 		}
7269 		if (queue < priv->plat->tx_queues_to_use) {
7270 			netif_napi_add_tx(dev, &ch->tx_napi,
7271 					  stmmac_napi_poll_tx);
7272 		}
7273 		if (queue < priv->plat->rx_queues_to_use &&
7274 		    queue < priv->plat->tx_queues_to_use) {
7275 			netif_napi_add(dev, &ch->rxtx_napi,
7276 				       stmmac_napi_poll_rxtx);
7277 		}
7278 	}
7279 }
7280 
7281 static void stmmac_napi_del(struct net_device *dev)
7282 {
7283 	struct stmmac_priv *priv = netdev_priv(dev);
7284 	u32 queue, maxq;
7285 
7286 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7287 
7288 	for (queue = 0; queue < maxq; queue++) {
7289 		struct stmmac_channel *ch = &priv->channel[queue];
7290 
7291 		if (queue < priv->plat->rx_queues_to_use)
7292 			netif_napi_del(&ch->rx_napi);
7293 		if (queue < priv->plat->tx_queues_to_use)
7294 			netif_napi_del(&ch->tx_napi);
7295 		if (queue < priv->plat->rx_queues_to_use &&
7296 		    queue < priv->plat->tx_queues_to_use) {
7297 			netif_napi_del(&ch->rxtx_napi);
7298 		}
7299 	}
7300 }
7301 
7302 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7303 {
7304 	struct stmmac_priv *priv = netdev_priv(dev);
7305 	int ret = 0, i;
7306 
7307 	if (netif_running(dev))
7308 		stmmac_release(dev);
7309 
7310 	stmmac_napi_del(dev);
7311 
7312 	priv->plat->rx_queues_to_use = rx_cnt;
7313 	priv->plat->tx_queues_to_use = tx_cnt;
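	/* Refresh the default RSS indirection table for the new RX queue
	 * count, unless the user has explicitly configured one.
	 */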
7314 	if (!netif_is_rxfh_configured(dev))
7315 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7316 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7317 									rx_cnt);
7318 
7319 	stmmac_set_half_duplex(priv);
7320 	stmmac_napi_add(dev);
7321 
7322 	if (netif_running(dev))
7323 		ret = stmmac_open(dev);
7324 
7325 	return ret;
7326 }
7327 
7328 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7329 {
7330 	struct stmmac_priv *priv = netdev_priv(dev);
7331 	int ret = 0;
7332 
7333 	if (netif_running(dev))
7334 		stmmac_release(dev);
7335 
7336 	priv->dma_conf.dma_rx_size = rx_size;
7337 	priv->dma_conf.dma_tx_size = tx_size;
7338 
7339 	if (netif_running(dev))
7340 		ret = stmmac_open(dev);
7341 
7342 	return ret;
7343 }
7344 
7345 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7346 static void stmmac_fpe_lp_task(struct work_struct *work)
7347 {
7348 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7349 						fpe_task);
7350 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7351 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7352 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7353 	bool *hs_enable = &fpe_cfg->hs_enable;
7354 	bool *enable = &fpe_cfg->enable;
7355 	int retries = 20;
7356 
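	/* Poll the local/link-partner FPE state for up to ~10 seconds
	 * (20 retries x 500 ms), re-sending verify mPackets until both
	 * stations report FPE ON or the handshake is switched off.
	 */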
7357 	while (retries-- > 0) {
7358 		/* Bail out immediately if FPE handshake is OFF */
7359 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7360 			break;
7361 
7362 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7363 		    *lp_state == FPE_STATE_ENTERING_ON) {
7364 			stmmac_fpe_configure(priv, priv->ioaddr,
7365 					     fpe_cfg,
7366 					     priv->plat->tx_queues_to_use,
7367 					     priv->plat->rx_queues_to_use,
7368 					     *enable);
7369 
7370 			netdev_info(priv->dev, "configured FPE\n");
7371 
7372 			*lo_state = FPE_STATE_ON;
7373 			*lp_state = FPE_STATE_ON;
7374 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7375 			break;
7376 		}
7377 
7378 		if ((*lo_state == FPE_STATE_CAPABLE ||
7379 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7380 		     *lp_state != FPE_STATE_ON) {
7381 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7382 				    *lo_state, *lp_state);
7383 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7384 						fpe_cfg,
7385 						MPACKET_VERIFY);
7386 		}
7387 		/* Sleep then retry */
7388 		msleep(500);
7389 	}
7390 
7391 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7392 }
7393 
7394 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7395 {
7396 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7397 		if (enable) {
7398 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7399 						priv->plat->fpe_cfg,
7400 						MPACKET_VERIFY);
7401 		} else {
7402 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7403 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7404 		}
7405 
7406 		priv->plat->fpe_cfg->hs_enable = enable;
7407 	}
7408 }
7409 
7410 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7411 {
7412 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7413 	struct dma_desc *desc_contains_ts = ctx->desc;
7414 	struct stmmac_priv *priv = ctx->priv;
7415 	struct dma_desc *ndesc = ctx->ndesc;
7416 	struct dma_desc *desc = ctx->desc;
7417 	u64 ns = 0;
7418 
7419 	if (!priv->hwts_rx_en)
7420 		return -ENODATA;
7421 
7422 	/* For GMAC4/XGMAC, the valid timestamp is in the CTX (next) descriptor. */
7423 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7424 		desc_contains_ts = ndesc;
7425 
7426 	/* Check if timestamp is available */
7427 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7428 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7429 		ns -= priv->plat->cdc_error_adj;
7430 		*timestamp = ns_to_ktime(ns);
7431 		return 0;
7432 	}
7433 
7434 	return -ENODATA;
7435 }
7436 
7437 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7438 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7439 };
7440 
7441 /**
7442  * stmmac_dvr_probe
7443  * @device: device pointer
7444  * @plat_dat: platform data pointer
7445  * @res: stmmac resource pointer
7446  * Description: this is the main probe function; it allocates the
7447  * network device and the driver private structure.
7448  * Return:
7449  * returns 0 on success, otherwise a negative errno.
7450  */
7451 int stmmac_dvr_probe(struct device *device,
7452 		     struct plat_stmmacenet_data *plat_dat,
7453 		     struct stmmac_resources *res)
7454 {
7455 	struct net_device *ndev = NULL;
7456 	struct stmmac_priv *priv;
7457 	u32 rxq;
7458 	int i, ret = 0;
7459 
7460 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7461 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7462 	if (!ndev)
7463 		return -ENOMEM;
7464 
7465 	SET_NETDEV_DEV(ndev, device);
7466 
7467 	priv = netdev_priv(ndev);
7468 	priv->device = device;
7469 	priv->dev = ndev;
7470 
7471 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7472 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7473 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7474 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7475 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7476 	}
7477 
7478 	priv->xstats.pcpu_stats =
7479 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7480 	if (!priv->xstats.pcpu_stats)
7481 		return -ENOMEM;
7482 
7483 	stmmac_set_ethtool_ops(ndev);
7484 	priv->pause = pause;
7485 	priv->plat = plat_dat;
7486 	priv->ioaddr = res->addr;
7487 	priv->dev->base_addr = (unsigned long)res->addr;
7488 	priv->plat->dma_cfg->multi_msi_en =
7489 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7490 
7491 	priv->dev->irq = res->irq;
7492 	priv->wol_irq = res->wol_irq;
7493 	priv->lpi_irq = res->lpi_irq;
7494 	priv->sfty_ce_irq = res->sfty_ce_irq;
7495 	priv->sfty_ue_irq = res->sfty_ue_irq;
7496 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7497 		priv->rx_irq[i] = res->rx_irq[i];
7498 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7499 		priv->tx_irq[i] = res->tx_irq[i];
7500 
7501 	if (!is_zero_ether_addr(res->mac))
7502 		eth_hw_addr_set(priv->dev, res->mac);
7503 
7504 	dev_set_drvdata(device, priv->dev);
7505 
7506 	/* Verify driver arguments */
7507 	stmmac_verify_args();
7508 
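	/* Bitmap tracking the queues that have an AF_XDP zero-copy
	 * buffer pool attached.
	 */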
7509 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7510 	if (!priv->af_xdp_zc_qps)
7511 		return -ENOMEM;
7512 
7513 	/* Allocate workqueue */
7514 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7515 	if (!priv->wq) {
7516 		dev_err(priv->device, "failed to create workqueue\n");
7517 		ret = -ENOMEM;
7518 		goto error_wq_init;
7519 	}
7520 
7521 	INIT_WORK(&priv->service_task, stmmac_service_task);
7522 
7523 	/* Initialize the Link Partner FPE work item */
7524 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7525 
7526 	/* Override with kernel parameters if supplied XXX CRS XXX
7527 	 * this needs to have multiple instances
7528 	 */
7529 	if ((phyaddr >= 0) && (phyaddr <= 31))
7530 		priv->plat->phy_addr = phyaddr;
7531 
7532 	if (priv->plat->stmmac_rst) {
7533 		ret = reset_control_assert(priv->plat->stmmac_rst);
7534 		reset_control_deassert(priv->plat->stmmac_rst);
7535 		/* Some reset controllers provide only a reset callback
7536 		 * instead of the assert + deassert callback pair.
7537 		 */
7538 		if (ret == -ENOTSUPP)
7539 			reset_control_reset(priv->plat->stmmac_rst);
7540 	}
7541 
7542 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7543 	if (ret == -ENOTSUPP)
7544 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7545 			ERR_PTR(ret));
7546 
7547 	/* Wait a bit for the reset to take effect */
7548 	udelay(10);
7549 
7550 	/* Init MAC and get the capabilities */
7551 	ret = stmmac_hw_init(priv);
7552 	if (ret)
7553 		goto error_hw_init;
7554 
7555 	/* Only DWMAC core versions 5.20 and later support HW descriptor prefetch.
7556 	 */
7557 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7558 		priv->plat->dma_cfg->dche = false;
7559 
7560 	stmmac_check_ether_addr(priv);
7561 
7562 	ndev->netdev_ops = &stmmac_netdev_ops;
7563 
7564 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7565 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7566 
7567 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7568 			    NETIF_F_RXCSUM;
7569 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7570 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7571 
7572 	ret = stmmac_tc_init(priv, priv);
7573 	if (!ret) {
7574 		ndev->hw_features |= NETIF_F_HW_TC;
7575 	}
7576 
7577 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7578 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7579 		if (priv->plat->has_gmac4)
7580 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7581 		priv->tso = true;
7582 		dev_info(priv->device, "TSO feature enabled\n");
7583 	}
7584 
7585 	if (priv->dma_cap.sphen &&
7586 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7587 		ndev->hw_features |= NETIF_F_GRO;
7588 		priv->sph_cap = true;
7589 		priv->sph = priv->sph_cap;
7590 		dev_info(priv->device, "SPH feature enabled\n");
7591 	}
7592 
7593 	/* Ideally our host DMA address width is the same as for the
7594 	 * device. However, it may differ and then we have to use our
7595 	 * host DMA width for allocation and the device DMA width for
7596 	 * register handling.
7597 	 */
7598 	if (priv->plat->host_dma_width)
7599 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7600 	else
7601 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7602 
7603 	if (priv->dma_cap.host_dma_width) {
7604 		ret = dma_set_mask_and_coherent(device,
7605 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7606 		if (!ret) {
7607 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7608 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7609 
7610 			/*
7611 			 * If more than 32 bits can be addressed, make sure to
7612 			 * enable enhanced addressing mode.
7613 			 */
7614 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7615 				priv->plat->dma_cfg->eame = true;
7616 		} else {
7617 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7618 			if (ret) {
7619 				dev_err(priv->device, "Failed to set DMA Mask\n");
7620 				goto error_hw_init;
7621 			}
7622 
7623 			priv->dma_cap.host_dma_width = 32;
7624 		}
7625 	}
7626 
7627 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7628 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7629 #ifdef STMMAC_VLAN_TAG_USED
7630 	/* Both mac100 and gmac support receive VLAN tag detection */
7631 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7632 	ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7633 	priv->hw->hw_vlan_en = true;
7634 
7635 	if (priv->dma_cap.vlhash) {
7636 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7637 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7638 	}
7639 	if (priv->dma_cap.vlins) {
7640 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7641 		if (priv->dma_cap.dvlan)
7642 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7643 	}
7644 #endif
7645 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7646 
7647 	priv->xstats.threshold = tc;
7648 
7649 	/* Initialize RSS */
7650 	rxq = priv->plat->rx_queues_to_use;
7651 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7652 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7653 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7654 
7655 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7656 		ndev->features |= NETIF_F_RXHASH;
7657 
7658 	ndev->vlan_features |= ndev->features;
7659 	/* TSO doesn't work on VLANs yet */
7660 	ndev->vlan_features &= ~NETIF_F_TSO;
7661 
7662 	/* MTU range: 46 - hw-specific max */
7663 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7664 	if (priv->plat->has_xgmac)
7665 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7666 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7667 		ndev->max_mtu = JUMBO_LEN;
7668 	else
7669 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7670 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7671 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7672 	 */
7673 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7674 	    (priv->plat->maxmtu >= ndev->min_mtu))
7675 		ndev->max_mtu = priv->plat->maxmtu;
7676 	else if (priv->plat->maxmtu < ndev->min_mtu)
7677 		dev_warn(priv->device,
7678 			 "%s: warning: maxmtu having invalid value (%d)\n",
7679 			 __func__, priv->plat->maxmtu);
7680 
7681 	if (flow_ctrl)
7682 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7683 
7684 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7685 
7686 	/* Setup channels NAPI */
7687 	stmmac_napi_add(ndev);
7688 
7689 	mutex_init(&priv->lock);
7690 
7691 	/* If a specific clk_csr value is passed from the platform, the
7692 	 * CSR Clock Range selection is fixed and cannot be changed at
7693 	 * run-time. Otherwise, the driver tries to set the MDC clock
7694 	 * dynamically according to the actual CSR input clock
7695 	 * frequency.
7696 	 */
7697 	if (priv->plat->clk_csr >= 0)
7698 		priv->clk_csr = priv->plat->clk_csr;
7699 	else
7700 		stmmac_clk_csr_set(priv);
7701 
7702 	stmmac_check_pcs_mode(priv);
7703 
7704 	pm_runtime_get_noresume(device);
7705 	pm_runtime_set_active(device);
7706 	if (!pm_runtime_enabled(device))
7707 		pm_runtime_enable(device);
7708 
7709 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7710 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7711 		/* MDIO bus Registration */
7712 		ret = stmmac_mdio_register(ndev);
7713 		if (ret < 0) {
7714 			dev_err_probe(priv->device, ret,
7715 				      "%s: MDIO bus (id: %d) registration failed\n",
7716 				      __func__, priv->plat->bus_id);
7717 			goto error_mdio_register;
7718 		}
7719 	}
7720 
7721 	if (priv->plat->speed_mode_2500)
7722 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7723 
7724 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7725 		ret = stmmac_xpcs_setup(priv->mii);
7726 		if (ret)
7727 			goto error_xpcs_setup;
7728 	}
7729 
7730 	ret = stmmac_phy_setup(priv);
7731 	if (ret) {
7732 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7733 		goto error_phy_setup;
7734 	}
7735 
7736 	ret = register_netdev(ndev);
7737 	if (ret) {
7738 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7739 			__func__, ret);
7740 		goto error_netdev_register;
7741 	}
7742 
7743 #ifdef CONFIG_DEBUG_FS
7744 	stmmac_init_fs(ndev);
7745 #endif
7746 
7747 	if (priv->plat->dump_debug_regs)
7748 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7749 
7750 	/* Let pm_runtime_put() disable the clocks.
7751 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7752 	 */
7753 	pm_runtime_put(device);
7754 
7755 	return ret;
7756 
7757 error_netdev_register:
7758 	phylink_destroy(priv->phylink);
7759 error_xpcs_setup:
7760 error_phy_setup:
7761 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7762 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7763 		stmmac_mdio_unregister(ndev);
7764 error_mdio_register:
7765 	stmmac_napi_del(ndev);
7766 error_hw_init:
7767 	destroy_workqueue(priv->wq);
7768 error_wq_init:
7769 	bitmap_free(priv->af_xdp_zc_qps);
7770 
7771 	return ret;
7772 }
7773 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7774 
7775 /**
7776  * stmmac_dvr_remove
7777  * @dev: device pointer
7778  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7779  * changes the link status and releases the DMA descriptor rings.
7780  */
7781 void stmmac_dvr_remove(struct device *dev)
7782 {
7783 	struct net_device *ndev = dev_get_drvdata(dev);
7784 	struct stmmac_priv *priv = netdev_priv(ndev);
7785 
7786 	netdev_info(priv->dev, "%s: removing driver", __func__);
7787 
7788 	pm_runtime_get_sync(dev);
7789 
7790 	stmmac_stop_all_dma(priv);
7791 	stmmac_mac_set(priv, priv->ioaddr, false);
7792 	netif_carrier_off(ndev);
7793 	unregister_netdev(ndev);
7794 
7795 #ifdef CONFIG_DEBUG_FS
7796 	stmmac_exit_fs(ndev);
7797 #endif
7798 	phylink_destroy(priv->phylink);
7799 	if (priv->plat->stmmac_rst)
7800 		reset_control_assert(priv->plat->stmmac_rst);
7801 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7802 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7803 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7804 		stmmac_mdio_unregister(ndev);
7805 	destroy_workqueue(priv->wq);
7806 	mutex_destroy(&priv->lock);
7807 	bitmap_free(priv->af_xdp_zc_qps);
7808 
7809 	pm_runtime_disable(dev);
7810 	pm_runtime_put_noidle(dev);
7811 }
7812 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7813 
7814 /**
7815  * stmmac_suspend - suspend callback
7816  * @dev: device pointer
7817  * Description: this is the function to suspend the device and it is called
7818  * by the platform driver to stop the network queue, release the resources,
7819  * program the PMT register (for WoL), clean and release driver resources.
7820  */
7821 int stmmac_suspend(struct device *dev)
7822 {
7823 	struct net_device *ndev = dev_get_drvdata(dev);
7824 	struct stmmac_priv *priv = netdev_priv(ndev);
7825 	u32 chan;
7826 
7827 	if (!ndev || !netif_running(ndev))
7828 		return 0;
7829 
7830 	mutex_lock(&priv->lock);
7831 
7832 	netif_device_detach(ndev);
7833 
7834 	stmmac_disable_all_queues(priv);
7835 
7836 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7837 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7838 
7839 	if (priv->eee_enabled) {
7840 		priv->tx_path_in_lpi_mode = false;
7841 		del_timer_sync(&priv->eee_ctrl_timer);
7842 	}
7843 
7844 	/* Stop TX/RX DMA */
7845 	stmmac_stop_all_dma(priv);
7846 
7847 	if (priv->plat->serdes_powerdown)
7848 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7849 
7850 	/* Enable Power down mode by programming the PMT regs */
7851 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7852 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7853 		priv->irq_wake = 1;
7854 	} else {
7855 		stmmac_mac_set(priv, priv->ioaddr, false);
7856 		pinctrl_pm_select_sleep_state(priv->device);
7857 	}
7858 
7859 	mutex_unlock(&priv->lock);
7860 
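	/* If the MAC provides Wake-on-LAN, suspend phylink in WoL mode so the
	 * link stays usable for wake-up; otherwise optionally lower the link
	 * speed first and suspend phylink normally.
	 */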
7861 	rtnl_lock();
7862 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7863 		phylink_suspend(priv->phylink, true);
7864 	} else {
7865 		if (device_may_wakeup(priv->device))
7866 			phylink_speed_down(priv->phylink, false);
7867 		phylink_suspend(priv->phylink, false);
7868 	}
7869 	rtnl_unlock();
7870 
7871 	if (priv->dma_cap.fpesel) {
7872 		/* Disable FPE */
7873 		stmmac_fpe_configure(priv, priv->ioaddr,
7874 				     priv->plat->fpe_cfg,
7875 				     priv->plat->tx_queues_to_use,
7876 				     priv->plat->rx_queues_to_use, false);
7877 
7878 		stmmac_fpe_handshake(priv, false);
7879 		stmmac_fpe_stop_wq(priv);
7880 	}
7881 
7882 	priv->speed = SPEED_UNKNOWN;
7883 	return 0;
7884 }
7885 EXPORT_SYMBOL_GPL(stmmac_suspend);
7886 
7887 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7888 {
7889 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7890 
7891 	rx_q->cur_rx = 0;
7892 	rx_q->dirty_rx = 0;
7893 }
7894 
7895 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7896 {
7897 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7898 
7899 	tx_q->cur_tx = 0;
7900 	tx_q->dirty_tx = 0;
7901 	tx_q->mss = 0;
7902 
7903 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7904 }
7905 
7906 /**
7907  * stmmac_reset_queues_param - reset queue parameters
7908  * @priv: device pointer
7909  */
7910 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7911 {
7912 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7913 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7914 	u32 queue;
7915 
7916 	for (queue = 0; queue < rx_cnt; queue++)
7917 		stmmac_reset_rx_queue(priv, queue);
7918 
7919 	for (queue = 0; queue < tx_cnt; queue++)
7920 		stmmac_reset_tx_queue(priv, queue);
7921 }
7922 
7923 /**
7924  * stmmac_resume - resume callback
7925  * @dev: device pointer
7926  * Description: this function is invoked on resume to bring the DMA and the
7927  * core back into a usable state.
7928  */
7929 int stmmac_resume(struct device *dev)
7930 {
7931 	struct net_device *ndev = dev_get_drvdata(dev);
7932 	struct stmmac_priv *priv = netdev_priv(ndev);
7933 	int ret;
7934 
7935 	if (!netif_running(ndev))
7936 		return 0;
7937 
7938 	/* The Power Down bit in the PMT register is cleared
7939 	 * automatically as soon as a magic packet or a Wake-up frame
7940 	 * is received. It is still better to clear this bit manually
7941 	 * because it can cause problems when resuming from other
7942 	 * devices (e.g. serial console).
7943 	 */
7944 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7945 		mutex_lock(&priv->lock);
7946 		stmmac_pmt(priv, priv->hw, 0);
7947 		mutex_unlock(&priv->lock);
7948 		priv->irq_wake = 0;
7949 	} else {
7950 		pinctrl_pm_select_default_state(priv->device);
7951 		/* reset the phy so that it's ready */
7952 		if (priv->mii)
7953 			stmmac_mdio_reset(priv->mii);
7954 	}
7955 
7956 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7957 	    priv->plat->serdes_powerup) {
7958 		ret = priv->plat->serdes_powerup(ndev,
7959 						 priv->plat->bsp_priv);
7960 
7961 		if (ret < 0)
7962 			return ret;
7963 	}
7964 
7965 	rtnl_lock();
7966 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7967 		phylink_resume(priv->phylink);
7968 	} else {
7969 		phylink_resume(priv->phylink);
7970 		if (device_may_wakeup(priv->device))
7971 			phylink_speed_up(priv->phylink);
7972 	}
7973 	rtnl_unlock();
7974 
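	/* Reset the ring state, re-program the hardware and restart the
	 * queues under the RTNL and driver locks.
	 */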
7975 	rtnl_lock();
7976 	mutex_lock(&priv->lock);
7977 
7978 	stmmac_reset_queues_param(priv);
7979 
7980 	stmmac_free_tx_skbufs(priv);
7981 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7982 
7983 	stmmac_hw_setup(ndev, false);
7984 	stmmac_init_coalesce(priv);
7985 	stmmac_set_rx_mode(ndev);
7986 
7987 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7988 
7989 	stmmac_enable_all_queues(priv);
7990 	stmmac_enable_all_dma_irq(priv);
7991 
7992 	mutex_unlock(&priv->lock);
7993 	rtnl_unlock();
7994 
7995 	netif_device_attach(ndev);
7996 
7997 	return 0;
7998 }
7999 EXPORT_SYMBOL_GPL(stmmac_resume);
8000 
8001 #ifndef MODULE
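/* Parse the "stmmaceth=" kernel command line option: a comma-separated list
 * of key:value pairs, for example:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,tc:256
 *
 * Unknown keys are ignored; malformed values are reported and parsing stops.
 */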
8002 static int __init stmmac_cmdline_opt(char *str)
8003 {
8004 	char *opt;
8005 
8006 	if (!str || !*str)
8007 		return 1;
8008 	while ((opt = strsep(&str, ",")) != NULL) {
8009 		if (!strncmp(opt, "debug:", 6)) {
8010 			if (kstrtoint(opt + 6, 0, &debug))
8011 				goto err;
8012 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8013 			if (kstrtoint(opt + 8, 0, &phyaddr))
8014 				goto err;
8015 		} else if (!strncmp(opt, "buf_sz:", 7)) {
8016 			if (kstrtoint(opt + 7, 0, &buf_sz))
8017 				goto err;
8018 		} else if (!strncmp(opt, "tc:", 3)) {
8019 			if (kstrtoint(opt + 3, 0, &tc))
8020 				goto err;
8021 		} else if (!strncmp(opt, "watchdog:", 9)) {
8022 			if (kstrtoint(opt + 9, 0, &watchdog))
8023 				goto err;
8024 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8025 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8026 				goto err;
8027 		} else if (!strncmp(opt, "pause:", 6)) {
8028 			if (kstrtoint(opt + 6, 0, &pause))
8029 				goto err;
8030 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8031 			if (kstrtoint(opt + 10, 0, &eee_timer))
8032 				goto err;
8033 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8034 			if (kstrtoint(opt + 11, 0, &chain_mode))
8035 				goto err;
8036 		}
8037 	}
8038 	return 1;
8039 
8040 err:
8041 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8042 	return 1;
8043 }
8044 
8045 __setup("stmmaceth=", stmmac_cmdline_opt);
8046 #endif /* MODULE */
8047 
8048 static int __init stmmac_init(void)
8049 {
8050 #ifdef CONFIG_DEBUG_FS
8051 	/* Create debugfs main directory if it doesn't exist yet */
8052 	if (!stmmac_fs_dir)
8053 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8054 	register_netdevice_notifier(&stmmac_notifier);
8055 #endif
8056 
8057 	return 0;
8058 }
8059 
8060 static void __exit stmmac_exit(void)
8061 {
8062 #ifdef CONFIG_DEBUG_FS
8063 	unregister_netdevice_notifier(&stmmac_notifier);
8064 	debugfs_remove_recursive(stmmac_fs_dir);
8065 #endif
8066 }
8067 
8068 module_init(stmmac_init)
8069 module_exit(stmmac_exit)
8070 
8071 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8072 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8073 MODULE_LICENSE("GPL");
8074