xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision b1156532bc29ac9a8d1cf71510cabc8f68181540)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoid non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allow user to force to use the chain instead of the ring
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and set a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Viceversa the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* Platform provided default clk_csr would be assumed valid
308 	 * for all other cases except for the below mentioned ones.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we can not estimate the proper divider as it is not known
311 	 * the frequency of clk_csr_i. So we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function is to verify and enter in LPI mode in case of
407  * EEE.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function is to exit and disable EEE in case of
433  * LPI state is true. This is called by the xmit.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then MAC Transmitter can be moved to LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enable the LPI state and start related
468  *  timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Check if MAC core supports the EEE feature. */
475 	if (!priv->dma_cap.eee)
476 		return false;
477 
478 	mutex_lock(&priv->lock);
479 
480 	/* Check if it needs to be deactivated */
481 	if (!priv->eee_active) {
482 		if (priv->eee_enabled) {
483 			netdev_dbg(priv->dev, "disable EEE\n");
484 			stmmac_lpi_entry_timer_config(priv, 0);
485 			del_timer_sync(&priv->eee_ctrl_timer);
486 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
487 			if (priv->hw->xpcs)
488 				xpcs_config_eee(priv->hw->xpcs,
489 						priv->plat->mult_fact_100ns,
490 						false);
491 		}
492 		mutex_unlock(&priv->lock);
493 		return false;
494 	}
495 
496 	if (priv->eee_active && !priv->eee_enabled) {
497 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
498 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
499 				     eee_tw_timer);
500 		if (priv->hw->xpcs)
501 			xpcs_config_eee(priv->hw->xpcs,
502 					priv->plat->mult_fact_100ns,
503 					true);
504 	}
505 
506 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
507 		del_timer_sync(&priv->eee_ctrl_timer);
508 		priv->tx_path_in_lpi_mode = false;
509 		stmmac_lpi_entry_timer_config(priv, 1);
510 	} else {
511 		stmmac_lpi_entry_timer_config(priv, 0);
512 		mod_timer(&priv->eee_ctrl_timer,
513 			  STMMAC_LPI_T(priv->tx_lpi_timer));
514 	}
515 
516 	mutex_unlock(&priv->lock);
517 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
518 	return true;
519 }
520 
521 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
522  * @priv: driver private structure
523  * @p : descriptor pointer
524  * @skb : the socket buffer
525  * Description :
526  * This function will read timestamp from the descriptor & pass it to stack.
527  * and also perform some sanity checks.
528  */
529 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
530 				   struct dma_desc *p, struct sk_buff *skb)
531 {
532 	struct skb_shared_hwtstamps shhwtstamp;
533 	bool found = false;
534 	u64 ns = 0;
535 
536 	if (!priv->hwts_tx_en)
537 		return;
538 
539 	/* exit if skb doesn't support hw tstamp */
540 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
541 		return;
542 
543 	/* check tx tstamp status */
544 	if (stmmac_get_tx_timestamp_status(priv, p)) {
545 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
546 		found = true;
547 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
548 		found = true;
549 	}
550 
551 	if (found) {
552 		ns -= priv->plat->cdc_error_adj;
553 
554 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
555 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
556 
557 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
558 		/* pass tstamp to stack */
559 		skb_tstamp_tx(skb, &shhwtstamp);
560 	}
561 }
562 
563 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
564  * @priv: driver private structure
565  * @p : descriptor pointer
566  * @np : next descriptor pointer
567  * @skb : the socket buffer
568  * Description :
569  * This function will read received packet's timestamp from the descriptor
570  * and pass it to stack. It also perform some sanity checks.
571  */
572 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
573 				   struct dma_desc *np, struct sk_buff *skb)
574 {
575 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
576 	struct dma_desc *desc = p;
577 	u64 ns = 0;
578 
579 	if (!priv->hwts_rx_en)
580 		return;
581 	/* For GMAC4, the valid timestamp is from CTX next desc. */
582 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
583 		desc = np;
584 
585 	/* Check if timestamp is available */
586 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
587 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
588 
589 		ns -= priv->plat->cdc_error_adj;
590 
591 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
592 		shhwtstamp = skb_hwtstamps(skb);
593 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
594 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
595 	} else  {
596 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
597 	}
598 }
599 
600 /**
601  *  stmmac_hwtstamp_set - control hardware timestamping.
602  *  @dev: device pointer.
603  *  @ifr: An IOCTL specific structure, that can contain a pointer to
604  *  a proprietary structure used to pass information to the driver.
605  *  Description:
606  *  This function configures the MAC to enable/disable both outgoing(TX)
607  *  and incoming(RX) packets time stamping based on user input.
608  *  Return Value:
609  *  0 on success and an appropriate -ve integer on failure.
610  */
611 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
612 {
613 	struct stmmac_priv *priv = netdev_priv(dev);
614 	struct hwtstamp_config config;
615 	u32 ptp_v2 = 0;
616 	u32 tstamp_all = 0;
617 	u32 ptp_over_ipv4_udp = 0;
618 	u32 ptp_over_ipv6_udp = 0;
619 	u32 ptp_over_ethernet = 0;
620 	u32 snap_type_sel = 0;
621 	u32 ts_master_en = 0;
622 	u32 ts_event_en = 0;
623 
624 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
625 		netdev_alert(priv->dev, "No support for HW time stamping\n");
626 		priv->hwts_tx_en = 0;
627 		priv->hwts_rx_en = 0;
628 
629 		return -EOPNOTSUPP;
630 	}
631 
632 	if (copy_from_user(&config, ifr->ifr_data,
633 			   sizeof(config)))
634 		return -EFAULT;
635 
636 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
637 		   __func__, config.flags, config.tx_type, config.rx_filter);
638 
639 	if (config.tx_type != HWTSTAMP_TX_OFF &&
640 	    config.tx_type != HWTSTAMP_TX_ON)
641 		return -ERANGE;
642 
643 	if (priv->adv_ts) {
644 		switch (config.rx_filter) {
645 		case HWTSTAMP_FILTER_NONE:
646 			/* time stamp no incoming packet at all */
647 			config.rx_filter = HWTSTAMP_FILTER_NONE;
648 			break;
649 
650 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
651 			/* PTP v1, UDP, any kind of event packet */
652 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
653 			/* 'xmac' hardware can support Sync, Pdelay_Req and
654 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
655 			 * This leaves Delay_Req timestamps out.
656 			 * Enable all events *and* general purpose message
657 			 * timestamping
658 			 */
659 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
660 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 			break;
663 
664 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
665 			/* PTP v1, UDP, Sync packet */
666 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
667 			/* take time stamp for SYNC messages only */
668 			ts_event_en = PTP_TCR_TSEVNTENA;
669 
670 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
671 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
672 			break;
673 
674 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
675 			/* PTP v1, UDP, Delay_req packet */
676 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
677 			/* take time stamp for Delay_Req messages only */
678 			ts_master_en = PTP_TCR_TSMSTRENA;
679 			ts_event_en = PTP_TCR_TSEVNTENA;
680 
681 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
682 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
683 			break;
684 
685 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
686 			/* PTP v2, UDP, any kind of event packet */
687 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
688 			ptp_v2 = PTP_TCR_TSVER2ENA;
689 			/* take time stamp for all event messages */
690 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
691 
692 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
693 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
694 			break;
695 
696 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
697 			/* PTP v2, UDP, Sync packet */
698 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
699 			ptp_v2 = PTP_TCR_TSVER2ENA;
700 			/* take time stamp for SYNC messages only */
701 			ts_event_en = PTP_TCR_TSEVNTENA;
702 
703 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
704 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
705 			break;
706 
707 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
708 			/* PTP v2, UDP, Delay_req packet */
709 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
710 			ptp_v2 = PTP_TCR_TSVER2ENA;
711 			/* take time stamp for Delay_Req messages only */
712 			ts_master_en = PTP_TCR_TSMSTRENA;
713 			ts_event_en = PTP_TCR_TSEVNTENA;
714 
715 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
716 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
717 			break;
718 
719 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
720 			/* PTP v2/802.AS1 any layer, any kind of event packet */
721 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
722 			ptp_v2 = PTP_TCR_TSVER2ENA;
723 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
724 			if (priv->synopsys_id < DWMAC_CORE_4_10)
725 				ts_event_en = PTP_TCR_TSEVNTENA;
726 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
727 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
728 			ptp_over_ethernet = PTP_TCR_TSIPENA;
729 			break;
730 
731 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
732 			/* PTP v2/802.AS1, any layer, Sync packet */
733 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
734 			ptp_v2 = PTP_TCR_TSVER2ENA;
735 			/* take time stamp for SYNC messages only */
736 			ts_event_en = PTP_TCR_TSEVNTENA;
737 
738 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
739 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
740 			ptp_over_ethernet = PTP_TCR_TSIPENA;
741 			break;
742 
743 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
744 			/* PTP v2/802.AS1, any layer, Delay_req packet */
745 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
746 			ptp_v2 = PTP_TCR_TSVER2ENA;
747 			/* take time stamp for Delay_Req messages only */
748 			ts_master_en = PTP_TCR_TSMSTRENA;
749 			ts_event_en = PTP_TCR_TSEVNTENA;
750 
751 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
752 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
753 			ptp_over_ethernet = PTP_TCR_TSIPENA;
754 			break;
755 
756 		case HWTSTAMP_FILTER_NTP_ALL:
757 		case HWTSTAMP_FILTER_ALL:
758 			/* time stamp any incoming packet */
759 			config.rx_filter = HWTSTAMP_FILTER_ALL;
760 			tstamp_all = PTP_TCR_TSENALL;
761 			break;
762 
763 		default:
764 			return -ERANGE;
765 		}
766 	} else {
767 		switch (config.rx_filter) {
768 		case HWTSTAMP_FILTER_NONE:
769 			config.rx_filter = HWTSTAMP_FILTER_NONE;
770 			break;
771 		default:
772 			/* PTP v1, UDP, any kind of event packet */
773 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
774 			break;
775 		}
776 	}
777 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
778 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
779 
780 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
781 
782 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
783 		priv->systime_flags |= tstamp_all | ptp_v2 |
784 				       ptp_over_ethernet | ptp_over_ipv6_udp |
785 				       ptp_over_ipv4_udp | ts_event_en |
786 				       ts_master_en | snap_type_sel;
787 	}
788 
789 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
790 
791 	memcpy(&priv->tstamp_config, &config, sizeof(config));
792 
793 	return copy_to_user(ifr->ifr_data, &config,
794 			    sizeof(config)) ? -EFAULT : 0;
795 }
796 
797 /**
798  *  stmmac_hwtstamp_get - read hardware timestamping.
799  *  @dev: device pointer.
800  *  @ifr: An IOCTL specific structure, that can contain a pointer to
801  *  a proprietary structure used to pass information to the driver.
802  *  Description:
803  *  This function obtain the current hardware timestamping settings
804  *  as requested.
805  */
806 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
807 {
808 	struct stmmac_priv *priv = netdev_priv(dev);
809 	struct hwtstamp_config *config = &priv->tstamp_config;
810 
811 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
812 		return -EOPNOTSUPP;
813 
814 	return copy_to_user(ifr->ifr_data, config,
815 			    sizeof(*config)) ? -EFAULT : 0;
816 }
817 
818 /**
819  * stmmac_init_tstamp_counter - init hardware timestamping counter
820  * @priv: driver private structure
821  * @systime_flags: timestamping flags
822  * Description:
823  * Initialize hardware counter for packet timestamping.
824  * This is valid as long as the interface is open and not suspended.
825  * Will be rerun after resuming from suspend, case in which the timestamping
826  * flags updated by stmmac_hwtstamp_set() also need to be restored.
827  */
828 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
829 {
830 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
831 	struct timespec64 now;
832 	u32 sec_inc = 0;
833 	u64 temp = 0;
834 
835 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
836 		return -EOPNOTSUPP;
837 
838 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
839 	priv->systime_flags = systime_flags;
840 
841 	/* program Sub Second Increment reg */
842 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
843 					   priv->plat->clk_ptp_rate,
844 					   xmac, &sec_inc);
845 	temp = div_u64(1000000000ULL, sec_inc);
846 
847 	/* Store sub second increment for later use */
848 	priv->sub_second_inc = sec_inc;
849 
850 	/* calculate default added value:
851 	 * formula is :
852 	 * addend = (2^32)/freq_div_ratio;
853 	 * where, freq_div_ratio = 1e9ns/sec_inc
854 	 */
855 	temp = (u64)(temp << 32);
856 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
857 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
858 
859 	/* initialize system time */
860 	ktime_get_real_ts64(&now);
861 
862 	/* lower 32 bits of tv_sec are safe until y2106 */
863 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
864 
865 	return 0;
866 }
867 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
868 
869 /**
870  * stmmac_init_ptp - init PTP
871  * @priv: driver private structure
872  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
873  * This is done by looking at the HW cap. register.
874  * This function also registers the ptp driver.
875  */
876 static int stmmac_init_ptp(struct stmmac_priv *priv)
877 {
878 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
879 	int ret;
880 
881 	if (priv->plat->ptp_clk_freq_config)
882 		priv->plat->ptp_clk_freq_config(priv);
883 
884 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
885 	if (ret)
886 		return ret;
887 
888 	priv->adv_ts = 0;
889 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
890 	if (xmac && priv->dma_cap.atime_stamp)
891 		priv->adv_ts = 1;
892 	/* Dwmac 3.x core with extend_desc can support adv_ts */
893 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
894 		priv->adv_ts = 1;
895 
896 	if (priv->dma_cap.time_stamp)
897 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
898 
899 	if (priv->adv_ts)
900 		netdev_info(priv->dev,
901 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
902 
903 	priv->hwts_tx_en = 0;
904 	priv->hwts_rx_en = 0;
905 
906 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
907 		stmmac_hwtstamp_correct_latency(priv, priv);
908 
909 	return 0;
910 }
911 
912 static void stmmac_release_ptp(struct stmmac_priv *priv)
913 {
914 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
915 	stmmac_ptp_unregister(priv);
916 }
917 
918 /**
919  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
920  *  @priv: driver private structure
921  *  @duplex: duplex passed to the next function
922  *  Description: It is used for configuring the flow control in all queues
923  */
924 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
925 {
926 	u32 tx_cnt = priv->plat->tx_queues_to_use;
927 
928 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
929 			priv->pause, tx_cnt);
930 }
931 
932 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
933 					 phy_interface_t interface)
934 {
935 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
936 
937 	/* Refresh the MAC-specific capabilities */
938 	stmmac_mac_update_caps(priv);
939 
940 	config->mac_capabilities = priv->hw->link.caps;
941 
942 	if (priv->plat->max_speed)
943 		phylink_limit_mac_speed(config, priv->plat->max_speed);
944 
945 	return config->mac_capabilities;
946 }
947 
948 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
949 						 phy_interface_t interface)
950 {
951 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
952 
953 	if (priv->hw->xpcs)
954 		return &priv->hw->xpcs->pcs;
955 
956 	return priv->hw->phylink_pcs;
957 }
958 
959 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
960 			      const struct phylink_link_state *state)
961 {
962 	/* Nothing to do, xpcs_config() handles everything */
963 }
964 
965 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
966 {
967 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
968 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
969 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
970 	bool *hs_enable = &fpe_cfg->hs_enable;
971 
972 	if (is_up && *hs_enable) {
973 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
974 					MPACKET_VERIFY);
975 	} else {
976 		*lo_state = FPE_STATE_OFF;
977 		*lp_state = FPE_STATE_OFF;
978 	}
979 }
980 
981 static void stmmac_mac_link_down(struct phylink_config *config,
982 				 unsigned int mode, phy_interface_t interface)
983 {
984 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
985 
986 	stmmac_mac_set(priv, priv->ioaddr, false);
987 	priv->eee_active = false;
988 	priv->tx_lpi_enabled = false;
989 	priv->eee_enabled = stmmac_eee_init(priv);
990 	stmmac_set_eee_pls(priv, priv->hw, false);
991 
992 	if (priv->dma_cap.fpesel)
993 		stmmac_fpe_link_state_handle(priv, false);
994 }
995 
996 static void stmmac_mac_link_up(struct phylink_config *config,
997 			       struct phy_device *phy,
998 			       unsigned int mode, phy_interface_t interface,
999 			       int speed, int duplex,
1000 			       bool tx_pause, bool rx_pause)
1001 {
1002 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1003 	u32 old_ctrl, ctrl;
1004 
1005 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1006 	    priv->plat->serdes_powerup)
1007 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1008 
1009 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1010 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1011 
1012 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1013 		switch (speed) {
1014 		case SPEED_10000:
1015 			ctrl |= priv->hw->link.xgmii.speed10000;
1016 			break;
1017 		case SPEED_5000:
1018 			ctrl |= priv->hw->link.xgmii.speed5000;
1019 			break;
1020 		case SPEED_2500:
1021 			ctrl |= priv->hw->link.xgmii.speed2500;
1022 			break;
1023 		default:
1024 			return;
1025 		}
1026 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1027 		switch (speed) {
1028 		case SPEED_100000:
1029 			ctrl |= priv->hw->link.xlgmii.speed100000;
1030 			break;
1031 		case SPEED_50000:
1032 			ctrl |= priv->hw->link.xlgmii.speed50000;
1033 			break;
1034 		case SPEED_40000:
1035 			ctrl |= priv->hw->link.xlgmii.speed40000;
1036 			break;
1037 		case SPEED_25000:
1038 			ctrl |= priv->hw->link.xlgmii.speed25000;
1039 			break;
1040 		case SPEED_10000:
1041 			ctrl |= priv->hw->link.xgmii.speed10000;
1042 			break;
1043 		case SPEED_2500:
1044 			ctrl |= priv->hw->link.speed2500;
1045 			break;
1046 		case SPEED_1000:
1047 			ctrl |= priv->hw->link.speed1000;
1048 			break;
1049 		default:
1050 			return;
1051 		}
1052 	} else {
1053 		switch (speed) {
1054 		case SPEED_2500:
1055 			ctrl |= priv->hw->link.speed2500;
1056 			break;
1057 		case SPEED_1000:
1058 			ctrl |= priv->hw->link.speed1000;
1059 			break;
1060 		case SPEED_100:
1061 			ctrl |= priv->hw->link.speed100;
1062 			break;
1063 		case SPEED_10:
1064 			ctrl |= priv->hw->link.speed10;
1065 			break;
1066 		default:
1067 			return;
1068 		}
1069 	}
1070 
1071 	priv->speed = speed;
1072 
1073 	if (priv->plat->fix_mac_speed)
1074 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1075 
1076 	if (!duplex)
1077 		ctrl &= ~priv->hw->link.duplex;
1078 	else
1079 		ctrl |= priv->hw->link.duplex;
1080 
1081 	/* Flow Control operation */
1082 	if (rx_pause && tx_pause)
1083 		priv->flow_ctrl = FLOW_AUTO;
1084 	else if (rx_pause && !tx_pause)
1085 		priv->flow_ctrl = FLOW_RX;
1086 	else if (!rx_pause && tx_pause)
1087 		priv->flow_ctrl = FLOW_TX;
1088 	else
1089 		priv->flow_ctrl = FLOW_OFF;
1090 
1091 	stmmac_mac_flow_ctrl(priv, duplex);
1092 
1093 	if (ctrl != old_ctrl)
1094 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1095 
1096 	stmmac_mac_set(priv, priv->ioaddr, true);
1097 	if (phy && priv->dma_cap.eee) {
1098 		priv->eee_active =
1099 			phy_init_eee(phy, !(priv->plat->flags &
1100 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1101 		priv->eee_enabled = stmmac_eee_init(priv);
1102 		priv->tx_lpi_enabled = priv->eee_enabled;
1103 		stmmac_set_eee_pls(priv, priv->hw, true);
1104 	}
1105 
1106 	if (priv->dma_cap.fpesel)
1107 		stmmac_fpe_link_state_handle(priv, true);
1108 
1109 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1110 		stmmac_hwtstamp_correct_latency(priv, priv);
1111 }
1112 
1113 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1114 	.mac_get_caps = stmmac_mac_get_caps,
1115 	.mac_select_pcs = stmmac_mac_select_pcs,
1116 	.mac_config = stmmac_mac_config,
1117 	.mac_link_down = stmmac_mac_link_down,
1118 	.mac_link_up = stmmac_mac_link_up,
1119 };
1120 
1121 /**
1122  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1123  * @priv: driver private structure
1124  * Description: this is to verify if the HW supports the PCS.
1125  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1126  * configured for the TBI, RTBI, or SGMII PHY interface.
1127  */
1128 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1129 {
1130 	int interface = priv->plat->mac_interface;
1131 
1132 	if (priv->dma_cap.pcs) {
1133 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1134 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1135 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1136 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1137 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1138 			priv->hw->pcs = STMMAC_PCS_RGMII;
1139 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1140 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1141 			priv->hw->pcs = STMMAC_PCS_SGMII;
1142 		}
1143 	}
1144 }
1145 
1146 /**
1147  * stmmac_init_phy - PHY initialization
1148  * @dev: net device structure
1149  * Description: it initializes the driver's PHY state, and attaches the PHY
1150  * to the mac driver.
1151  *  Return value:
1152  *  0 on success
1153  */
1154 static int stmmac_init_phy(struct net_device *dev)
1155 {
1156 	struct stmmac_priv *priv = netdev_priv(dev);
1157 	struct fwnode_handle *phy_fwnode;
1158 	struct fwnode_handle *fwnode;
1159 	int ret;
1160 
1161 	if (!phylink_expects_phy(priv->phylink))
1162 		return 0;
1163 
1164 	fwnode = priv->plat->port_node;
1165 	if (!fwnode)
1166 		fwnode = dev_fwnode(priv->device);
1167 
1168 	if (fwnode)
1169 		phy_fwnode = fwnode_get_phy_node(fwnode);
1170 	else
1171 		phy_fwnode = NULL;
1172 
1173 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1174 	 * manually parse it
1175 	 */
1176 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1177 		int addr = priv->plat->phy_addr;
1178 		struct phy_device *phydev;
1179 
1180 		if (addr < 0) {
1181 			netdev_err(priv->dev, "no phy found\n");
1182 			return -ENODEV;
1183 		}
1184 
1185 		phydev = mdiobus_get_phy(priv->mii, addr);
1186 		if (!phydev) {
1187 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1188 			return -ENODEV;
1189 		}
1190 
1191 		ret = phylink_connect_phy(priv->phylink, phydev);
1192 	} else {
1193 		fwnode_handle_put(phy_fwnode);
1194 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1195 	}
1196 
1197 	if (!priv->plat->pmt) {
1198 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1199 
1200 		phylink_ethtool_get_wol(priv->phylink, &wol);
1201 		device_set_wakeup_capable(priv->device, !!wol.supported);
1202 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1203 	}
1204 
1205 	return ret;
1206 }
1207 
1208 static int stmmac_phy_setup(struct stmmac_priv *priv)
1209 {
1210 	struct stmmac_mdio_bus_data *mdio_bus_data;
1211 	int mode = priv->plat->phy_interface;
1212 	struct fwnode_handle *fwnode;
1213 	struct phylink *phylink;
1214 
1215 	priv->phylink_config.dev = &priv->dev->dev;
1216 	priv->phylink_config.type = PHYLINK_NETDEV;
1217 	priv->phylink_config.mac_managed_pm = true;
1218 
1219 	/* Stmmac always requires an RX clock for hardware initialization */
1220 	priv->phylink_config.mac_requires_rxc = true;
1221 
1222 	mdio_bus_data = priv->plat->mdio_bus_data;
1223 	if (mdio_bus_data)
1224 		priv->phylink_config.default_an_inband =
1225 			mdio_bus_data->default_an_inband;
1226 
1227 	/* Set the platform/firmware specified interface mode. Note, phylink
1228 	 * deals with the PHY interface mode, not the MAC interface mode.
1229 	 */
1230 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1231 
1232 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1233 	if (priv->hw->xpcs)
1234 		xpcs_get_interfaces(priv->hw->xpcs,
1235 				    priv->phylink_config.supported_interfaces);
1236 
1237 	fwnode = priv->plat->port_node;
1238 	if (!fwnode)
1239 		fwnode = dev_fwnode(priv->device);
1240 
1241 	phylink = phylink_create(&priv->phylink_config, fwnode,
1242 				 mode, &stmmac_phylink_mac_ops);
1243 	if (IS_ERR(phylink))
1244 		return PTR_ERR(phylink);
1245 
1246 	priv->phylink = phylink;
1247 	return 0;
1248 }
1249 
1250 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1251 				    struct stmmac_dma_conf *dma_conf)
1252 {
1253 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1254 	unsigned int desc_size;
1255 	void *head_rx;
1256 	u32 queue;
1257 
1258 	/* Display RX rings */
1259 	for (queue = 0; queue < rx_cnt; queue++) {
1260 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1261 
1262 		pr_info("\tRX Queue %u rings\n", queue);
1263 
1264 		if (priv->extend_desc) {
1265 			head_rx = (void *)rx_q->dma_erx;
1266 			desc_size = sizeof(struct dma_extended_desc);
1267 		} else {
1268 			head_rx = (void *)rx_q->dma_rx;
1269 			desc_size = sizeof(struct dma_desc);
1270 		}
1271 
1272 		/* Display RX ring */
1273 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1274 				    rx_q->dma_rx_phy, desc_size);
1275 	}
1276 }
1277 
1278 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1279 				    struct stmmac_dma_conf *dma_conf)
1280 {
1281 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1282 	unsigned int desc_size;
1283 	void *head_tx;
1284 	u32 queue;
1285 
1286 	/* Display TX rings */
1287 	for (queue = 0; queue < tx_cnt; queue++) {
1288 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1289 
1290 		pr_info("\tTX Queue %d rings\n", queue);
1291 
1292 		if (priv->extend_desc) {
1293 			head_tx = (void *)tx_q->dma_etx;
1294 			desc_size = sizeof(struct dma_extended_desc);
1295 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1296 			head_tx = (void *)tx_q->dma_entx;
1297 			desc_size = sizeof(struct dma_edesc);
1298 		} else {
1299 			head_tx = (void *)tx_q->dma_tx;
1300 			desc_size = sizeof(struct dma_desc);
1301 		}
1302 
1303 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1304 				    tx_q->dma_tx_phy, desc_size);
1305 	}
1306 }
1307 
1308 static void stmmac_display_rings(struct stmmac_priv *priv,
1309 				 struct stmmac_dma_conf *dma_conf)
1310 {
1311 	/* Display RX ring */
1312 	stmmac_display_rx_rings(priv, dma_conf);
1313 
1314 	/* Display TX ring */
1315 	stmmac_display_tx_rings(priv, dma_conf);
1316 }
1317 
1318 static int stmmac_set_bfsize(int mtu, int bufsize)
1319 {
1320 	int ret = bufsize;
1321 
1322 	if (mtu >= BUF_SIZE_8KiB)
1323 		ret = BUF_SIZE_16KiB;
1324 	else if (mtu >= BUF_SIZE_4KiB)
1325 		ret = BUF_SIZE_8KiB;
1326 	else if (mtu >= BUF_SIZE_2KiB)
1327 		ret = BUF_SIZE_4KiB;
1328 	else if (mtu > DEFAULT_BUFSIZE)
1329 		ret = BUF_SIZE_2KiB;
1330 	else
1331 		ret = DEFAULT_BUFSIZE;
1332 
1333 	return ret;
1334 }
1335 
1336 /**
1337  * stmmac_clear_rx_descriptors - clear RX descriptors
1338  * @priv: driver private structure
1339  * @dma_conf: structure to take the dma data
1340  * @queue: RX queue index
1341  * Description: this function is called to clear the RX descriptors
1342  * in case of both basic and extended descriptors are used.
1343  */
1344 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1345 					struct stmmac_dma_conf *dma_conf,
1346 					u32 queue)
1347 {
1348 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1349 	int i;
1350 
1351 	/* Clear the RX descriptors */
1352 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1353 		if (priv->extend_desc)
1354 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1355 					priv->use_riwt, priv->mode,
1356 					(i == dma_conf->dma_rx_size - 1),
1357 					dma_conf->dma_buf_sz);
1358 		else
1359 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1360 					priv->use_riwt, priv->mode,
1361 					(i == dma_conf->dma_rx_size - 1),
1362 					dma_conf->dma_buf_sz);
1363 }
1364 
1365 /**
1366  * stmmac_clear_tx_descriptors - clear tx descriptors
1367  * @priv: driver private structure
1368  * @dma_conf: structure to take the dma data
1369  * @queue: TX queue index.
1370  * Description: this function is called to clear the TX descriptors
1371  * in case of both basic and extended descriptors are used.
1372  */
1373 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1374 					struct stmmac_dma_conf *dma_conf,
1375 					u32 queue)
1376 {
1377 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1378 	int i;
1379 
1380 	/* Clear the TX descriptors */
1381 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1382 		int last = (i == (dma_conf->dma_tx_size - 1));
1383 		struct dma_desc *p;
1384 
1385 		if (priv->extend_desc)
1386 			p = &tx_q->dma_etx[i].basic;
1387 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1388 			p = &tx_q->dma_entx[i].basic;
1389 		else
1390 			p = &tx_q->dma_tx[i];
1391 
1392 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1393 	}
1394 }
1395 
1396 /**
1397  * stmmac_clear_descriptors - clear descriptors
1398  * @priv: driver private structure
1399  * @dma_conf: structure to take the dma data
1400  * Description: this function is called to clear the TX and RX descriptors
1401  * in case of both basic and extended descriptors are used.
1402  */
1403 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1404 				     struct stmmac_dma_conf *dma_conf)
1405 {
1406 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1407 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1408 	u32 queue;
1409 
1410 	/* Clear the RX descriptors */
1411 	for (queue = 0; queue < rx_queue_cnt; queue++)
1412 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1413 
1414 	/* Clear the TX descriptors */
1415 	for (queue = 0; queue < tx_queue_cnt; queue++)
1416 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1417 }
1418 
1419 /**
1420  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1421  * @priv: driver private structure
1422  * @dma_conf: structure to take the dma data
1423  * @p: descriptor pointer
1424  * @i: descriptor index
1425  * @flags: gfp flag
1426  * @queue: RX queue index
1427  * Description: this function is called to allocate a receive buffer, perform
1428  * the DMA mapping and init the descriptor.
1429  */
1430 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1431 				  struct stmmac_dma_conf *dma_conf,
1432 				  struct dma_desc *p,
1433 				  int i, gfp_t flags, u32 queue)
1434 {
1435 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1436 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1437 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1438 
1439 	if (priv->dma_cap.host_dma_width <= 32)
1440 		gfp |= GFP_DMA32;
1441 
1442 	if (!buf->page) {
1443 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1444 		if (!buf->page)
1445 			return -ENOMEM;
1446 		buf->page_offset = stmmac_rx_offset(priv);
1447 	}
1448 
1449 	if (priv->sph && !buf->sec_page) {
1450 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1451 		if (!buf->sec_page)
1452 			return -ENOMEM;
1453 
1454 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1455 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1456 	} else {
1457 		buf->sec_page = NULL;
1458 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1459 	}
1460 
1461 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1462 
1463 	stmmac_set_desc_addr(priv, p, buf->addr);
1464 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1465 		stmmac_init_desc3(priv, p);
1466 
1467 	return 0;
1468 }
1469 
1470 /**
1471  * stmmac_free_rx_buffer - free RX dma buffers
1472  * @priv: private structure
1473  * @rx_q: RX queue
1474  * @i: buffer index.
1475  */
1476 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1477 				  struct stmmac_rx_queue *rx_q,
1478 				  int i)
1479 {
1480 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1481 
1482 	if (buf->page)
1483 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1484 	buf->page = NULL;
1485 
1486 	if (buf->sec_page)
1487 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1488 	buf->sec_page = NULL;
1489 }
1490 
1491 /**
1492  * stmmac_free_tx_buffer - free RX dma buffers
1493  * @priv: private structure
1494  * @dma_conf: structure to take the dma data
1495  * @queue: RX queue index
1496  * @i: buffer index.
1497  */
1498 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1499 				  struct stmmac_dma_conf *dma_conf,
1500 				  u32 queue, int i)
1501 {
1502 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1503 
1504 	if (tx_q->tx_skbuff_dma[i].buf &&
1505 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1506 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1507 			dma_unmap_page(priv->device,
1508 				       tx_q->tx_skbuff_dma[i].buf,
1509 				       tx_q->tx_skbuff_dma[i].len,
1510 				       DMA_TO_DEVICE);
1511 		else
1512 			dma_unmap_single(priv->device,
1513 					 tx_q->tx_skbuff_dma[i].buf,
1514 					 tx_q->tx_skbuff_dma[i].len,
1515 					 DMA_TO_DEVICE);
1516 	}
1517 
1518 	if (tx_q->xdpf[i] &&
1519 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1520 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1521 		xdp_return_frame(tx_q->xdpf[i]);
1522 		tx_q->xdpf[i] = NULL;
1523 	}
1524 
1525 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1526 		tx_q->xsk_frames_done++;
1527 
1528 	if (tx_q->tx_skbuff[i] &&
1529 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1530 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1531 		tx_q->tx_skbuff[i] = NULL;
1532 	}
1533 
1534 	tx_q->tx_skbuff_dma[i].buf = 0;
1535 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1536 }
1537 
1538 /**
1539  * dma_free_rx_skbufs - free RX dma buffers
1540  * @priv: private structure
1541  * @dma_conf: structure to take the dma data
1542  * @queue: RX queue index
1543  */
1544 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1545 			       struct stmmac_dma_conf *dma_conf,
1546 			       u32 queue)
1547 {
1548 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1549 	int i;
1550 
1551 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1552 		stmmac_free_rx_buffer(priv, rx_q, i);
1553 }
1554 
1555 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1556 				   struct stmmac_dma_conf *dma_conf,
1557 				   u32 queue, gfp_t flags)
1558 {
1559 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1560 	int i;
1561 
1562 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1563 		struct dma_desc *p;
1564 		int ret;
1565 
1566 		if (priv->extend_desc)
1567 			p = &((rx_q->dma_erx + i)->basic);
1568 		else
1569 			p = rx_q->dma_rx + i;
1570 
1571 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1572 					     queue);
1573 		if (ret)
1574 			return ret;
1575 
1576 		rx_q->buf_alloc_num++;
1577 	}
1578 
1579 	return 0;
1580 }
1581 
1582 /**
1583  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1584  * @priv: private structure
1585  * @dma_conf: structure to take the dma data
1586  * @queue: RX queue index
1587  */
1588 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1589 				struct stmmac_dma_conf *dma_conf,
1590 				u32 queue)
1591 {
1592 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1593 	int i;
1594 
1595 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1596 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1597 
1598 		if (!buf->xdp)
1599 			continue;
1600 
1601 		xsk_buff_free(buf->xdp);
1602 		buf->xdp = NULL;
1603 	}
1604 }
1605 
1606 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1607 				      struct stmmac_dma_conf *dma_conf,
1608 				      u32 queue)
1609 {
1610 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1611 	int i;
1612 
1613 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1614 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1615 	 * use this macro to make sure no size violations.
1616 	 */
1617 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1618 
1619 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1620 		struct stmmac_rx_buffer *buf;
1621 		dma_addr_t dma_addr;
1622 		struct dma_desc *p;
1623 
1624 		if (priv->extend_desc)
1625 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1626 		else
1627 			p = rx_q->dma_rx + i;
1628 
1629 		buf = &rx_q->buf_pool[i];
1630 
1631 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1632 		if (!buf->xdp)
1633 			return -ENOMEM;
1634 
1635 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1636 		stmmac_set_desc_addr(priv, p, dma_addr);
1637 		rx_q->buf_alloc_num++;
1638 	}
1639 
1640 	return 0;
1641 }
1642 
1643 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1644 {
1645 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1646 		return NULL;
1647 
1648 	return xsk_get_pool_from_qid(priv->dev, queue);
1649 }
1650 
1651 /**
1652  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1653  * @priv: driver private structure
1654  * @dma_conf: structure to take the dma data
1655  * @queue: RX queue index
1656  * @flags: gfp flag.
1657  * Description: this function initializes the DMA RX descriptors
1658  * and allocates the socket buffers. It supports the chained and ring
1659  * modes.
1660  */
1661 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1662 				    struct stmmac_dma_conf *dma_conf,
1663 				    u32 queue, gfp_t flags)
1664 {
1665 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1666 	int ret;
1667 
1668 	netif_dbg(priv, probe, priv->dev,
1669 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1670 		  (u32)rx_q->dma_rx_phy);
1671 
1672 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1673 
1674 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1675 
1676 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1677 
1678 	if (rx_q->xsk_pool) {
1679 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1680 						   MEM_TYPE_XSK_BUFF_POOL,
1681 						   NULL));
1682 		netdev_info(priv->dev,
1683 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1684 			    rx_q->queue_index);
1685 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1686 	} else {
1687 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1688 						   MEM_TYPE_PAGE_POOL,
1689 						   rx_q->page_pool));
1690 		netdev_info(priv->dev,
1691 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1692 			    rx_q->queue_index);
1693 	}
1694 
1695 	if (rx_q->xsk_pool) {
1696 		/* RX XDP ZC buffer pool may not be populated, e.g.
1697 		 * xdpsock TX-only.
1698 		 */
1699 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1700 	} else {
1701 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1702 		if (ret < 0)
1703 			return -ENOMEM;
1704 	}
1705 
1706 	/* Setup the chained descriptor addresses */
1707 	if (priv->mode == STMMAC_CHAIN_MODE) {
1708 		if (priv->extend_desc)
1709 			stmmac_mode_init(priv, rx_q->dma_erx,
1710 					 rx_q->dma_rx_phy,
1711 					 dma_conf->dma_rx_size, 1);
1712 		else
1713 			stmmac_mode_init(priv, rx_q->dma_rx,
1714 					 rx_q->dma_rx_phy,
1715 					 dma_conf->dma_rx_size, 0);
1716 	}
1717 
1718 	return 0;
1719 }
1720 
1721 static int init_dma_rx_desc_rings(struct net_device *dev,
1722 				  struct stmmac_dma_conf *dma_conf,
1723 				  gfp_t flags)
1724 {
1725 	struct stmmac_priv *priv = netdev_priv(dev);
1726 	u32 rx_count = priv->plat->rx_queues_to_use;
1727 	int queue;
1728 	int ret;
1729 
1730 	/* RX INITIALIZATION */
1731 	netif_dbg(priv, probe, priv->dev,
1732 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1733 
1734 	for (queue = 0; queue < rx_count; queue++) {
1735 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1736 		if (ret)
1737 			goto err_init_rx_buffers;
1738 	}
1739 
1740 	return 0;
1741 
1742 err_init_rx_buffers:
1743 	while (queue >= 0) {
1744 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1745 
1746 		if (rx_q->xsk_pool)
1747 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1748 		else
1749 			dma_free_rx_skbufs(priv, dma_conf, queue);
1750 
1751 		rx_q->buf_alloc_num = 0;
1752 		rx_q->xsk_pool = NULL;
1753 
1754 		queue--;
1755 	}
1756 
1757 	return ret;
1758 }
1759 
1760 /**
1761  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1762  * @priv: driver private structure
1763  * @dma_conf: structure to take the dma data
1764  * @queue: TX queue index
1765  * Description: this function initializes the DMA TX descriptors
1766  * and allocates the socket buffers. It supports the chained and ring
1767  * modes.
1768  */
1769 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1770 				    struct stmmac_dma_conf *dma_conf,
1771 				    u32 queue)
1772 {
1773 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1774 	int i;
1775 
1776 	netif_dbg(priv, probe, priv->dev,
1777 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1778 		  (u32)tx_q->dma_tx_phy);
1779 
1780 	/* Setup the chained descriptor addresses */
1781 	if (priv->mode == STMMAC_CHAIN_MODE) {
1782 		if (priv->extend_desc)
1783 			stmmac_mode_init(priv, tx_q->dma_etx,
1784 					 tx_q->dma_tx_phy,
1785 					 dma_conf->dma_tx_size, 1);
1786 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1787 			stmmac_mode_init(priv, tx_q->dma_tx,
1788 					 tx_q->dma_tx_phy,
1789 					 dma_conf->dma_tx_size, 0);
1790 	}
1791 
1792 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1793 
1794 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1795 		struct dma_desc *p;
1796 
1797 		if (priv->extend_desc)
1798 			p = &((tx_q->dma_etx + i)->basic);
1799 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1800 			p = &((tx_q->dma_entx + i)->basic);
1801 		else
1802 			p = tx_q->dma_tx + i;
1803 
1804 		stmmac_clear_desc(priv, p);
1805 
1806 		tx_q->tx_skbuff_dma[i].buf = 0;
1807 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1808 		tx_q->tx_skbuff_dma[i].len = 0;
1809 		tx_q->tx_skbuff_dma[i].last_segment = false;
1810 		tx_q->tx_skbuff[i] = NULL;
1811 	}
1812 
1813 	return 0;
1814 }
1815 
1816 static int init_dma_tx_desc_rings(struct net_device *dev,
1817 				  struct stmmac_dma_conf *dma_conf)
1818 {
1819 	struct stmmac_priv *priv = netdev_priv(dev);
1820 	u32 tx_queue_cnt;
1821 	u32 queue;
1822 
1823 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1824 
1825 	for (queue = 0; queue < tx_queue_cnt; queue++)
1826 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1827 
1828 	return 0;
1829 }
1830 
1831 /**
1832  * init_dma_desc_rings - init the RX/TX descriptor rings
1833  * @dev: net device structure
1834  * @dma_conf: structure to take the dma data
1835  * @flags: gfp flag.
1836  * Description: this function initializes the DMA RX/TX descriptors
1837  * and allocates the socket buffers. It supports the chained and ring
1838  * modes.
1839  */
1840 static int init_dma_desc_rings(struct net_device *dev,
1841 			       struct stmmac_dma_conf *dma_conf,
1842 			       gfp_t flags)
1843 {
1844 	struct stmmac_priv *priv = netdev_priv(dev);
1845 	int ret;
1846 
1847 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1848 	if (ret)
1849 		return ret;
1850 
1851 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1852 
1853 	stmmac_clear_descriptors(priv, dma_conf);
1854 
1855 	if (netif_msg_hw(priv))
1856 		stmmac_display_rings(priv, dma_conf);
1857 
1858 	return ret;
1859 }
1860 
1861 /**
1862  * dma_free_tx_skbufs - free TX dma buffers
1863  * @priv: private structure
1864  * @dma_conf: structure to take the dma data
1865  * @queue: TX queue index
1866  */
1867 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1868 			       struct stmmac_dma_conf *dma_conf,
1869 			       u32 queue)
1870 {
1871 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1872 	int i;
1873 
1874 	tx_q->xsk_frames_done = 0;
1875 
1876 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1877 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1878 
1879 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1880 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1881 		tx_q->xsk_frames_done = 0;
1882 		tx_q->xsk_pool = NULL;
1883 	}
1884 }
1885 
1886 /**
1887  * stmmac_free_tx_skbufs - free TX skb buffers
1888  * @priv: private structure
1889  */
1890 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1891 {
1892 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1893 	u32 queue;
1894 
1895 	for (queue = 0; queue < tx_queue_cnt; queue++)
1896 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1897 }
1898 
1899 /**
1900  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1901  * @priv: private structure
1902  * @dma_conf: structure to take the dma data
1903  * @queue: RX queue index
1904  */
1905 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1906 					 struct stmmac_dma_conf *dma_conf,
1907 					 u32 queue)
1908 {
1909 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1910 
1911 	/* Release the DMA RX socket buffers */
1912 	if (rx_q->xsk_pool)
1913 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1914 	else
1915 		dma_free_rx_skbufs(priv, dma_conf, queue);
1916 
1917 	rx_q->buf_alloc_num = 0;
1918 	rx_q->xsk_pool = NULL;
1919 
1920 	/* Free DMA regions of consistent memory previously allocated */
1921 	if (!priv->extend_desc)
1922 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1923 				  sizeof(struct dma_desc),
1924 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1925 	else
1926 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1927 				  sizeof(struct dma_extended_desc),
1928 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1929 
1930 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1931 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1932 
1933 	kfree(rx_q->buf_pool);
1934 	if (rx_q->page_pool)
1935 		page_pool_destroy(rx_q->page_pool);
1936 }
1937 
1938 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1939 				       struct stmmac_dma_conf *dma_conf)
1940 {
1941 	u32 rx_count = priv->plat->rx_queues_to_use;
1942 	u32 queue;
1943 
1944 	/* Free RX queue resources */
1945 	for (queue = 0; queue < rx_count; queue++)
1946 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1947 }
1948 
1949 /**
1950  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1951  * @priv: private structure
1952  * @dma_conf: structure to take the dma data
1953  * @queue: TX queue index
1954  */
1955 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1956 					 struct stmmac_dma_conf *dma_conf,
1957 					 u32 queue)
1958 {
1959 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1960 	size_t size;
1961 	void *addr;
1962 
1963 	/* Release the DMA TX socket buffers */
1964 	dma_free_tx_skbufs(priv, dma_conf, queue);
1965 
1966 	if (priv->extend_desc) {
1967 		size = sizeof(struct dma_extended_desc);
1968 		addr = tx_q->dma_etx;
1969 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1970 		size = sizeof(struct dma_edesc);
1971 		addr = tx_q->dma_entx;
1972 	} else {
1973 		size = sizeof(struct dma_desc);
1974 		addr = tx_q->dma_tx;
1975 	}
1976 
1977 	size *= dma_conf->dma_tx_size;
1978 
1979 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1980 
1981 	kfree(tx_q->tx_skbuff_dma);
1982 	kfree(tx_q->tx_skbuff);
1983 }
1984 
1985 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1986 				       struct stmmac_dma_conf *dma_conf)
1987 {
1988 	u32 tx_count = priv->plat->tx_queues_to_use;
1989 	u32 queue;
1990 
1991 	/* Free TX queue resources */
1992 	for (queue = 0; queue < tx_count; queue++)
1993 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1994 }
1995 
1996 /**
1997  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1998  * @priv: private structure
1999  * @dma_conf: structure to take the dma data
2000  * @queue: RX queue index
2001  * Description: according to which descriptor can be used (extend or basic)
2002  * this function allocates the resources for TX and RX paths. In case of
2003  * reception, for example, it pre-allocated the RX socket buffer in order to
2004  * allow zero-copy mechanism.
2005  */
2006 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2007 					 struct stmmac_dma_conf *dma_conf,
2008 					 u32 queue)
2009 {
2010 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2011 	struct stmmac_channel *ch = &priv->channel[queue];
2012 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2013 	struct page_pool_params pp_params = { 0 };
2014 	unsigned int num_pages;
2015 	unsigned int napi_id;
2016 	int ret;
2017 
2018 	rx_q->queue_index = queue;
2019 	rx_q->priv_data = priv;
2020 
2021 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2022 	pp_params.pool_size = dma_conf->dma_rx_size;
2023 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2024 	pp_params.order = ilog2(num_pages);
2025 	pp_params.nid = dev_to_node(priv->device);
2026 	pp_params.dev = priv->device;
2027 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2028 	pp_params.offset = stmmac_rx_offset(priv);
2029 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2030 
2031 	rx_q->page_pool = page_pool_create(&pp_params);
2032 	if (IS_ERR(rx_q->page_pool)) {
2033 		ret = PTR_ERR(rx_q->page_pool);
2034 		rx_q->page_pool = NULL;
2035 		return ret;
2036 	}
2037 
2038 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2039 				 sizeof(*rx_q->buf_pool),
2040 				 GFP_KERNEL);
2041 	if (!rx_q->buf_pool)
2042 		return -ENOMEM;
2043 
2044 	if (priv->extend_desc) {
2045 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2046 						   dma_conf->dma_rx_size *
2047 						   sizeof(struct dma_extended_desc),
2048 						   &rx_q->dma_rx_phy,
2049 						   GFP_KERNEL);
2050 		if (!rx_q->dma_erx)
2051 			return -ENOMEM;
2052 
2053 	} else {
2054 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2055 						  dma_conf->dma_rx_size *
2056 						  sizeof(struct dma_desc),
2057 						  &rx_q->dma_rx_phy,
2058 						  GFP_KERNEL);
2059 		if (!rx_q->dma_rx)
2060 			return -ENOMEM;
2061 	}
2062 
2063 	if (stmmac_xdp_is_enabled(priv) &&
2064 	    test_bit(queue, priv->af_xdp_zc_qps))
2065 		napi_id = ch->rxtx_napi.napi_id;
2066 	else
2067 		napi_id = ch->rx_napi.napi_id;
2068 
2069 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2070 			       rx_q->queue_index,
2071 			       napi_id);
2072 	if (ret) {
2073 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2074 		return -EINVAL;
2075 	}
2076 
2077 	return 0;
2078 }
2079 
2080 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2081 				       struct stmmac_dma_conf *dma_conf)
2082 {
2083 	u32 rx_count = priv->plat->rx_queues_to_use;
2084 	u32 queue;
2085 	int ret;
2086 
2087 	/* RX queues buffers and DMA */
2088 	for (queue = 0; queue < rx_count; queue++) {
2089 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2090 		if (ret)
2091 			goto err_dma;
2092 	}
2093 
2094 	return 0;
2095 
2096 err_dma:
2097 	free_dma_rx_desc_resources(priv, dma_conf);
2098 
2099 	return ret;
2100 }
2101 
2102 /**
2103  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2104  * @priv: private structure
2105  * @dma_conf: structure to take the dma data
2106  * @queue: TX queue index
2107  * Description: according to which descriptor can be used (extend or basic)
2108  * this function allocates the resources for TX and RX paths. In case of
2109  * reception, for example, it pre-allocated the RX socket buffer in order to
2110  * allow zero-copy mechanism.
2111  */
2112 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2113 					 struct stmmac_dma_conf *dma_conf,
2114 					 u32 queue)
2115 {
2116 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2117 	size_t size;
2118 	void *addr;
2119 
2120 	tx_q->queue_index = queue;
2121 	tx_q->priv_data = priv;
2122 
2123 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2124 				      sizeof(*tx_q->tx_skbuff_dma),
2125 				      GFP_KERNEL);
2126 	if (!tx_q->tx_skbuff_dma)
2127 		return -ENOMEM;
2128 
2129 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2130 				  sizeof(struct sk_buff *),
2131 				  GFP_KERNEL);
2132 	if (!tx_q->tx_skbuff)
2133 		return -ENOMEM;
2134 
2135 	if (priv->extend_desc)
2136 		size = sizeof(struct dma_extended_desc);
2137 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2138 		size = sizeof(struct dma_edesc);
2139 	else
2140 		size = sizeof(struct dma_desc);
2141 
2142 	size *= dma_conf->dma_tx_size;
2143 
2144 	addr = dma_alloc_coherent(priv->device, size,
2145 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2146 	if (!addr)
2147 		return -ENOMEM;
2148 
2149 	if (priv->extend_desc)
2150 		tx_q->dma_etx = addr;
2151 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2152 		tx_q->dma_entx = addr;
2153 	else
2154 		tx_q->dma_tx = addr;
2155 
2156 	return 0;
2157 }
2158 
2159 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2160 				       struct stmmac_dma_conf *dma_conf)
2161 {
2162 	u32 tx_count = priv->plat->tx_queues_to_use;
2163 	u32 queue;
2164 	int ret;
2165 
2166 	/* TX queues buffers and DMA */
2167 	for (queue = 0; queue < tx_count; queue++) {
2168 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2169 		if (ret)
2170 			goto err_dma;
2171 	}
2172 
2173 	return 0;
2174 
2175 err_dma:
2176 	free_dma_tx_desc_resources(priv, dma_conf);
2177 	return ret;
2178 }
2179 
2180 /**
2181  * alloc_dma_desc_resources - alloc TX/RX resources.
2182  * @priv: private structure
2183  * @dma_conf: structure to take the dma data
2184  * Description: according to which descriptor can be used (extend or basic)
2185  * this function allocates the resources for TX and RX paths. In case of
2186  * reception, for example, it pre-allocated the RX socket buffer in order to
2187  * allow zero-copy mechanism.
2188  */
2189 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2190 				    struct stmmac_dma_conf *dma_conf)
2191 {
2192 	/* RX Allocation */
2193 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2194 
2195 	if (ret)
2196 		return ret;
2197 
2198 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2199 
2200 	return ret;
2201 }
2202 
2203 /**
2204  * free_dma_desc_resources - free dma desc resources
2205  * @priv: private structure
2206  * @dma_conf: structure to take the dma data
2207  */
2208 static void free_dma_desc_resources(struct stmmac_priv *priv,
2209 				    struct stmmac_dma_conf *dma_conf)
2210 {
2211 	/* Release the DMA TX socket buffers */
2212 	free_dma_tx_desc_resources(priv, dma_conf);
2213 
2214 	/* Release the DMA RX socket buffers later
2215 	 * to ensure all pending XDP_TX buffers are returned.
2216 	 */
2217 	free_dma_rx_desc_resources(priv, dma_conf);
2218 }
2219 
2220 /**
2221  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2222  *  @priv: driver private structure
2223  *  Description: It is used for enabling the rx queues in the MAC
2224  */
2225 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2226 {
2227 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2228 	int queue;
2229 	u8 mode;
2230 
2231 	for (queue = 0; queue < rx_queues_count; queue++) {
2232 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2233 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2234 	}
2235 }
2236 
2237 /**
2238  * stmmac_start_rx_dma - start RX DMA channel
2239  * @priv: driver private structure
2240  * @chan: RX channel index
2241  * Description:
2242  * This starts a RX DMA channel
2243  */
2244 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2245 {
2246 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2247 	stmmac_start_rx(priv, priv->ioaddr, chan);
2248 }
2249 
2250 /**
2251  * stmmac_start_tx_dma - start TX DMA channel
2252  * @priv: driver private structure
2253  * @chan: TX channel index
2254  * Description:
2255  * This starts a TX DMA channel
2256  */
2257 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2258 {
2259 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2260 	stmmac_start_tx(priv, priv->ioaddr, chan);
2261 }
2262 
2263 /**
2264  * stmmac_stop_rx_dma - stop RX DMA channel
2265  * @priv: driver private structure
2266  * @chan: RX channel index
2267  * Description:
2268  * This stops a RX DMA channel
2269  */
2270 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2271 {
2272 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2273 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2274 }
2275 
2276 /**
2277  * stmmac_stop_tx_dma - stop TX DMA channel
2278  * @priv: driver private structure
2279  * @chan: TX channel index
2280  * Description:
2281  * This stops a TX DMA channel
2282  */
2283 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2284 {
2285 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2286 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2287 }
2288 
2289 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2290 {
2291 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2292 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2293 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2294 	u32 chan;
2295 
2296 	for (chan = 0; chan < dma_csr_ch; chan++) {
2297 		struct stmmac_channel *ch = &priv->channel[chan];
2298 		unsigned long flags;
2299 
2300 		spin_lock_irqsave(&ch->lock, flags);
2301 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2302 		spin_unlock_irqrestore(&ch->lock, flags);
2303 	}
2304 }
2305 
2306 /**
2307  * stmmac_start_all_dma - start all RX and TX DMA channels
2308  * @priv: driver private structure
2309  * Description:
2310  * This starts all the RX and TX DMA channels
2311  */
2312 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2313 {
2314 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2315 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2316 	u32 chan = 0;
2317 
2318 	for (chan = 0; chan < rx_channels_count; chan++)
2319 		stmmac_start_rx_dma(priv, chan);
2320 
2321 	for (chan = 0; chan < tx_channels_count; chan++)
2322 		stmmac_start_tx_dma(priv, chan);
2323 }
2324 
2325 /**
2326  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2327  * @priv: driver private structure
2328  * Description:
2329  * This stops the RX and TX DMA channels
2330  */
2331 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2332 {
2333 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2334 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2335 	u32 chan = 0;
2336 
2337 	for (chan = 0; chan < rx_channels_count; chan++)
2338 		stmmac_stop_rx_dma(priv, chan);
2339 
2340 	for (chan = 0; chan < tx_channels_count; chan++)
2341 		stmmac_stop_tx_dma(priv, chan);
2342 }
2343 
2344 /**
2345  *  stmmac_dma_operation_mode - HW DMA operation mode
2346  *  @priv: driver private structure
2347  *  Description: it is used for configuring the DMA operation mode register in
2348  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2349  */
2350 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2351 {
2352 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2353 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2354 	int rxfifosz = priv->plat->rx_fifo_size;
2355 	int txfifosz = priv->plat->tx_fifo_size;
2356 	u32 txmode = 0;
2357 	u32 rxmode = 0;
2358 	u32 chan = 0;
2359 	u8 qmode = 0;
2360 
2361 	if (rxfifosz == 0)
2362 		rxfifosz = priv->dma_cap.rx_fifo_size;
2363 	if (txfifosz == 0)
2364 		txfifosz = priv->dma_cap.tx_fifo_size;
2365 
2366 	/* Adjust for real per queue fifo size */
2367 	rxfifosz /= rx_channels_count;
2368 	txfifosz /= tx_channels_count;
2369 
2370 	if (priv->plat->force_thresh_dma_mode) {
2371 		txmode = tc;
2372 		rxmode = tc;
2373 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2374 		/*
2375 		 * In case of GMAC, SF mode can be enabled
2376 		 * to perform the TX COE in HW. This depends on:
2377 		 * 1) TX COE if actually supported
2378 		 * 2) There is no bugged Jumbo frame support
2379 		 *    that needs to not insert csum in the TDES.
2380 		 */
2381 		txmode = SF_DMA_MODE;
2382 		rxmode = SF_DMA_MODE;
2383 		priv->xstats.threshold = SF_DMA_MODE;
2384 	} else {
2385 		txmode = tc;
2386 		rxmode = SF_DMA_MODE;
2387 	}
2388 
2389 	/* configure all channels */
2390 	for (chan = 0; chan < rx_channels_count; chan++) {
2391 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2392 		u32 buf_size;
2393 
2394 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2395 
2396 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2397 				rxfifosz, qmode);
2398 
2399 		if (rx_q->xsk_pool) {
2400 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2401 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2402 					      buf_size,
2403 					      chan);
2404 		} else {
2405 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2406 					      priv->dma_conf.dma_buf_sz,
2407 					      chan);
2408 		}
2409 	}
2410 
2411 	for (chan = 0; chan < tx_channels_count; chan++) {
2412 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2413 
2414 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2415 				txfifosz, qmode);
2416 	}
2417 }
2418 
2419 static void stmmac_xsk_request_timestamp(void *_priv)
2420 {
2421 	struct stmmac_metadata_request *meta_req = _priv;
2422 
2423 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2424 	*meta_req->set_ic = true;
2425 }
2426 
2427 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2428 {
2429 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2430 	struct stmmac_priv *priv = tx_compl->priv;
2431 	struct dma_desc *desc = tx_compl->desc;
2432 	bool found = false;
2433 	u64 ns = 0;
2434 
2435 	if (!priv->hwts_tx_en)
2436 		return 0;
2437 
2438 	/* check tx tstamp status */
2439 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2440 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2441 		found = true;
2442 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2443 		found = true;
2444 	}
2445 
2446 	if (found) {
2447 		ns -= priv->plat->cdc_error_adj;
2448 		return ns_to_ktime(ns);
2449 	}
2450 
2451 	return 0;
2452 }
2453 
2454 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2455 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2456 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2457 };
2458 
2459 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2460 {
2461 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2462 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2463 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2464 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2465 	unsigned int entry = tx_q->cur_tx;
2466 	struct dma_desc *tx_desc = NULL;
2467 	struct xdp_desc xdp_desc;
2468 	bool work_done = true;
2469 	u32 tx_set_ic_bit = 0;
2470 
2471 	/* Avoids TX time-out as we are sharing with slow path */
2472 	txq_trans_cond_update(nq);
2473 
2474 	budget = min(budget, stmmac_tx_avail(priv, queue));
2475 
2476 	while (budget-- > 0) {
2477 		struct stmmac_metadata_request meta_req;
2478 		struct xsk_tx_metadata *meta = NULL;
2479 		dma_addr_t dma_addr;
2480 		bool set_ic;
2481 
2482 		/* We are sharing with slow path and stop XSK TX desc submission when
2483 		 * available TX ring is less than threshold.
2484 		 */
2485 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2486 		    !netif_carrier_ok(priv->dev)) {
2487 			work_done = false;
2488 			break;
2489 		}
2490 
2491 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2492 			break;
2493 
2494 		if (priv->est && priv->est->enable &&
2495 		    priv->est->max_sdu[queue] &&
2496 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2497 			priv->xstats.max_sdu_txq_drop[queue]++;
2498 			continue;
2499 		}
2500 
2501 		if (likely(priv->extend_desc))
2502 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2503 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2504 			tx_desc = &tx_q->dma_entx[entry].basic;
2505 		else
2506 			tx_desc = tx_q->dma_tx + entry;
2507 
2508 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2509 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2510 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2511 
2512 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2513 
2514 		/* To return XDP buffer to XSK pool, we simple call
2515 		 * xsk_tx_completed(), so we don't need to fill up
2516 		 * 'buf' and 'xdpf'.
2517 		 */
2518 		tx_q->tx_skbuff_dma[entry].buf = 0;
2519 		tx_q->xdpf[entry] = NULL;
2520 
2521 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2522 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2523 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2524 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2525 
2526 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2527 
2528 		tx_q->tx_count_frames++;
2529 
2530 		if (!priv->tx_coal_frames[queue])
2531 			set_ic = false;
2532 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2533 			set_ic = true;
2534 		else
2535 			set_ic = false;
2536 
2537 		meta_req.priv = priv;
2538 		meta_req.tx_desc = tx_desc;
2539 		meta_req.set_ic = &set_ic;
2540 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2541 					&meta_req);
2542 		if (set_ic) {
2543 			tx_q->tx_count_frames = 0;
2544 			stmmac_set_tx_ic(priv, tx_desc);
2545 			tx_set_ic_bit++;
2546 		}
2547 
2548 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2549 				       true, priv->mode, true, true,
2550 				       xdp_desc.len);
2551 
2552 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2553 
2554 		xsk_tx_metadata_to_compl(meta,
2555 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2556 
2557 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2558 		entry = tx_q->cur_tx;
2559 	}
2560 	u64_stats_update_begin(&txq_stats->napi_syncp);
2561 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2562 	u64_stats_update_end(&txq_stats->napi_syncp);
2563 
2564 	if (tx_desc) {
2565 		stmmac_flush_tx_descriptors(priv, queue);
2566 		xsk_tx_release(pool);
2567 	}
2568 
2569 	/* Return true if all of the 3 conditions are met
2570 	 *  a) TX Budget is still available
2571 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2572 	 *     pending XSK TX for transmission)
2573 	 */
2574 	return !!budget && work_done;
2575 }
2576 
2577 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2578 {
2579 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2580 		tc += 64;
2581 
2582 		if (priv->plat->force_thresh_dma_mode)
2583 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2584 		else
2585 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2586 						      chan);
2587 
2588 		priv->xstats.threshold = tc;
2589 	}
2590 }
2591 
2592 /**
2593  * stmmac_tx_clean - to manage the transmission completion
2594  * @priv: driver private structure
2595  * @budget: napi budget limiting this functions packet handling
2596  * @queue: TX queue index
2597  * @pending_packets: signal to arm the TX coal timer
2598  * Description: it reclaims the transmit resources after transmission completes.
2599  * If some packets still needs to be handled, due to TX coalesce, set
2600  * pending_packets to true to make NAPI arm the TX coal timer.
2601  */
2602 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2603 			   bool *pending_packets)
2604 {
2605 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2606 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2607 	unsigned int bytes_compl = 0, pkts_compl = 0;
2608 	unsigned int entry, xmits = 0, count = 0;
2609 	u32 tx_packets = 0, tx_errors = 0;
2610 
2611 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2612 
2613 	tx_q->xsk_frames_done = 0;
2614 
2615 	entry = tx_q->dirty_tx;
2616 
2617 	/* Try to clean all TX complete frame in 1 shot */
2618 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2619 		struct xdp_frame *xdpf;
2620 		struct sk_buff *skb;
2621 		struct dma_desc *p;
2622 		int status;
2623 
2624 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2625 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2626 			xdpf = tx_q->xdpf[entry];
2627 			skb = NULL;
2628 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2629 			xdpf = NULL;
2630 			skb = tx_q->tx_skbuff[entry];
2631 		} else {
2632 			xdpf = NULL;
2633 			skb = NULL;
2634 		}
2635 
2636 		if (priv->extend_desc)
2637 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2638 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2639 			p = &tx_q->dma_entx[entry].basic;
2640 		else
2641 			p = tx_q->dma_tx + entry;
2642 
2643 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2644 		/* Check if the descriptor is owned by the DMA */
2645 		if (unlikely(status & tx_dma_own))
2646 			break;
2647 
2648 		count++;
2649 
2650 		/* Make sure descriptor fields are read after reading
2651 		 * the own bit.
2652 		 */
2653 		dma_rmb();
2654 
2655 		/* Just consider the last segment and ...*/
2656 		if (likely(!(status & tx_not_ls))) {
2657 			/* ... verify the status error condition */
2658 			if (unlikely(status & tx_err)) {
2659 				tx_errors++;
2660 				if (unlikely(status & tx_err_bump_tc))
2661 					stmmac_bump_dma_threshold(priv, queue);
2662 			} else {
2663 				tx_packets++;
2664 			}
2665 			if (skb) {
2666 				stmmac_get_tx_hwtstamp(priv, p, skb);
2667 			} else if (tx_q->xsk_pool &&
2668 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2669 				struct stmmac_xsk_tx_complete tx_compl = {
2670 					.priv = priv,
2671 					.desc = p,
2672 				};
2673 
2674 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2675 							 &stmmac_xsk_tx_metadata_ops,
2676 							 &tx_compl);
2677 			}
2678 		}
2679 
2680 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2681 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2682 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2683 				dma_unmap_page(priv->device,
2684 					       tx_q->tx_skbuff_dma[entry].buf,
2685 					       tx_q->tx_skbuff_dma[entry].len,
2686 					       DMA_TO_DEVICE);
2687 			else
2688 				dma_unmap_single(priv->device,
2689 						 tx_q->tx_skbuff_dma[entry].buf,
2690 						 tx_q->tx_skbuff_dma[entry].len,
2691 						 DMA_TO_DEVICE);
2692 			tx_q->tx_skbuff_dma[entry].buf = 0;
2693 			tx_q->tx_skbuff_dma[entry].len = 0;
2694 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2695 		}
2696 
2697 		stmmac_clean_desc3(priv, tx_q, p);
2698 
2699 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2700 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2701 
2702 		if (xdpf &&
2703 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2704 			xdp_return_frame_rx_napi(xdpf);
2705 			tx_q->xdpf[entry] = NULL;
2706 		}
2707 
2708 		if (xdpf &&
2709 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2710 			xdp_return_frame(xdpf);
2711 			tx_q->xdpf[entry] = NULL;
2712 		}
2713 
2714 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2715 			tx_q->xsk_frames_done++;
2716 
2717 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2718 			if (likely(skb)) {
2719 				pkts_compl++;
2720 				bytes_compl += skb->len;
2721 				dev_consume_skb_any(skb);
2722 				tx_q->tx_skbuff[entry] = NULL;
2723 			}
2724 		}
2725 
2726 		stmmac_release_tx_desc(priv, p, priv->mode);
2727 
2728 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2729 	}
2730 	tx_q->dirty_tx = entry;
2731 
2732 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2733 				  pkts_compl, bytes_compl);
2734 
2735 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2736 								queue))) &&
2737 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2738 
2739 		netif_dbg(priv, tx_done, priv->dev,
2740 			  "%s: restart transmit\n", __func__);
2741 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2742 	}
2743 
2744 	if (tx_q->xsk_pool) {
2745 		bool work_done;
2746 
2747 		if (tx_q->xsk_frames_done)
2748 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2749 
2750 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2751 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2752 
2753 		/* For XSK TX, we try to send as many as possible.
2754 		 * If XSK work done (XSK TX desc empty and budget still
2755 		 * available), return "budget - 1" to reenable TX IRQ.
2756 		 * Else, return "budget" to make NAPI continue polling.
2757 		 */
2758 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2759 					       STMMAC_XSK_TX_BUDGET_MAX);
2760 		if (work_done)
2761 			xmits = budget - 1;
2762 		else
2763 			xmits = budget;
2764 	}
2765 
2766 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2767 	    priv->eee_sw_timer_en) {
2768 		if (stmmac_enable_eee_mode(priv))
2769 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2770 	}
2771 
2772 	/* We still have pending packets, let's call for a new scheduling */
2773 	if (tx_q->dirty_tx != tx_q->cur_tx)
2774 		*pending_packets = true;
2775 
2776 	u64_stats_update_begin(&txq_stats->napi_syncp);
2777 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2778 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2779 	u64_stats_inc(&txq_stats->napi.tx_clean);
2780 	u64_stats_update_end(&txq_stats->napi_syncp);
2781 
2782 	priv->xstats.tx_errors += tx_errors;
2783 
2784 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2785 
2786 	/* Combine decisions from TX clean and XSK TX */
2787 	return max(count, xmits);
2788 }
2789 
2790 /**
2791  * stmmac_tx_err - to manage the tx error
2792  * @priv: driver private structure
2793  * @chan: channel index
2794  * Description: it cleans the descriptors and restarts the transmission
2795  * in case of transmission errors.
2796  */
2797 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2798 {
2799 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2800 
2801 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2802 
2803 	stmmac_stop_tx_dma(priv, chan);
2804 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2805 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2806 	stmmac_reset_tx_queue(priv, chan);
2807 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2808 			    tx_q->dma_tx_phy, chan);
2809 	stmmac_start_tx_dma(priv, chan);
2810 
2811 	priv->xstats.tx_errors++;
2812 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2813 }
2814 
2815 /**
2816  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2817  *  @priv: driver private structure
2818  *  @txmode: TX operating mode
2819  *  @rxmode: RX operating mode
2820  *  @chan: channel index
2821  *  Description: it is used for configuring of the DMA operation mode in
2822  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2823  *  mode.
2824  */
2825 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2826 					  u32 rxmode, u32 chan)
2827 {
2828 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2829 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2830 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2831 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2832 	int rxfifosz = priv->plat->rx_fifo_size;
2833 	int txfifosz = priv->plat->tx_fifo_size;
2834 
2835 	if (rxfifosz == 0)
2836 		rxfifosz = priv->dma_cap.rx_fifo_size;
2837 	if (txfifosz == 0)
2838 		txfifosz = priv->dma_cap.tx_fifo_size;
2839 
2840 	/* Adjust for real per queue fifo size */
2841 	rxfifosz /= rx_channels_count;
2842 	txfifosz /= tx_channels_count;
2843 
2844 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2845 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2846 }
2847 
2848 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2849 {
2850 	int ret;
2851 
2852 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2853 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2854 	if (ret && (ret != -EINVAL)) {
2855 		stmmac_global_err(priv);
2856 		return true;
2857 	}
2858 
2859 	return false;
2860 }
2861 
2862 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2863 {
2864 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2865 						 &priv->xstats, chan, dir);
2866 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2867 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2868 	struct stmmac_channel *ch = &priv->channel[chan];
2869 	struct napi_struct *rx_napi;
2870 	struct napi_struct *tx_napi;
2871 	unsigned long flags;
2872 
2873 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2874 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2875 
2876 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2877 		if (napi_schedule_prep(rx_napi)) {
2878 			spin_lock_irqsave(&ch->lock, flags);
2879 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2880 			spin_unlock_irqrestore(&ch->lock, flags);
2881 			__napi_schedule(rx_napi);
2882 		}
2883 	}
2884 
2885 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2886 		if (napi_schedule_prep(tx_napi)) {
2887 			spin_lock_irqsave(&ch->lock, flags);
2888 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2889 			spin_unlock_irqrestore(&ch->lock, flags);
2890 			__napi_schedule(tx_napi);
2891 		}
2892 	}
2893 
2894 	return status;
2895 }
2896 
2897 /**
2898  * stmmac_dma_interrupt - DMA ISR
2899  * @priv: driver private structure
2900  * Description: this is the DMA ISR. It is called by the main ISR.
2901  * It calls the dwmac dma routine and schedule poll method in case of some
2902  * work can be done.
2903  */
2904 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2905 {
2906 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2907 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2908 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2909 				tx_channel_count : rx_channel_count;
2910 	u32 chan;
2911 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2912 
2913 	/* Make sure we never check beyond our status buffer. */
2914 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2915 		channels_to_check = ARRAY_SIZE(status);
2916 
2917 	for (chan = 0; chan < channels_to_check; chan++)
2918 		status[chan] = stmmac_napi_check(priv, chan,
2919 						 DMA_DIR_RXTX);
2920 
2921 	for (chan = 0; chan < tx_channel_count; chan++) {
2922 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2923 			/* Try to bump up the dma threshold on this failure */
2924 			stmmac_bump_dma_threshold(priv, chan);
2925 		} else if (unlikely(status[chan] == tx_hard_error)) {
2926 			stmmac_tx_err(priv, chan);
2927 		}
2928 	}
2929 }
2930 
2931 /**
2932  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2933  * @priv: driver private structure
2934  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2935  */
2936 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2937 {
2938 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2939 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2940 
2941 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2942 
2943 	if (priv->dma_cap.rmon) {
2944 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2945 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2946 	} else
2947 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2948 }
2949 
2950 /**
2951  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2952  * @priv: driver private structure
2953  * Description:
2954  *  new GMAC chip generations have a new register to indicate the
2955  *  presence of the optional feature/functions.
2956  *  This can be also used to override the value passed through the
2957  *  platform and necessary for old MAC10/100 and GMAC chips.
2958  */
2959 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2960 {
2961 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2962 }
2963 
2964 /**
2965  * stmmac_check_ether_addr - check if the MAC addr is valid
2966  * @priv: driver private structure
2967  * Description:
2968  * it is to verify if the MAC address is valid, in case of failures it
2969  * generates a random MAC address
2970  */
2971 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2972 {
2973 	u8 addr[ETH_ALEN];
2974 
2975 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2976 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2977 		if (is_valid_ether_addr(addr))
2978 			eth_hw_addr_set(priv->dev, addr);
2979 		else
2980 			eth_hw_addr_random(priv->dev);
2981 		dev_info(priv->device, "device MAC address %pM\n",
2982 			 priv->dev->dev_addr);
2983 	}
2984 }
2985 
2986 /**
2987  * stmmac_init_dma_engine - DMA init.
2988  * @priv: driver private structure
2989  * Description:
2990  * It inits the DMA invoking the specific MAC/GMAC callback.
2991  * Some DMA parameters can be passed from the platform;
2992  * in case of these are not passed a default is kept for the MAC or GMAC.
2993  */
2994 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2995 {
2996 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2997 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2998 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2999 	struct stmmac_rx_queue *rx_q;
3000 	struct stmmac_tx_queue *tx_q;
3001 	u32 chan = 0;
3002 	int atds = 0;
3003 	int ret = 0;
3004 
3005 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3006 		dev_err(priv->device, "Invalid DMA configuration\n");
3007 		return -EINVAL;
3008 	}
3009 
3010 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3011 		atds = 1;
3012 
3013 	ret = stmmac_reset(priv, priv->ioaddr);
3014 	if (ret) {
3015 		dev_err(priv->device, "Failed to reset the dma\n");
3016 		return ret;
3017 	}
3018 
3019 	/* DMA Configuration */
3020 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
3021 
3022 	if (priv->plat->axi)
3023 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3024 
3025 	/* DMA CSR Channel configuration */
3026 	for (chan = 0; chan < dma_csr_ch; chan++) {
3027 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3028 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3029 	}
3030 
3031 	/* DMA RX Channel Configuration */
3032 	for (chan = 0; chan < rx_channels_count; chan++) {
3033 		rx_q = &priv->dma_conf.rx_queue[chan];
3034 
3035 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3036 				    rx_q->dma_rx_phy, chan);
3037 
3038 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3039 				     (rx_q->buf_alloc_num *
3040 				      sizeof(struct dma_desc));
3041 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3042 				       rx_q->rx_tail_addr, chan);
3043 	}
3044 
3045 	/* DMA TX Channel Configuration */
3046 	for (chan = 0; chan < tx_channels_count; chan++) {
3047 		tx_q = &priv->dma_conf.tx_queue[chan];
3048 
3049 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3050 				    tx_q->dma_tx_phy, chan);
3051 
3052 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3053 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3054 				       tx_q->tx_tail_addr, chan);
3055 	}
3056 
3057 	return ret;
3058 }
3059 
3060 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3061 {
3062 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3063 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3064 	struct stmmac_channel *ch;
3065 	struct napi_struct *napi;
3066 
3067 	if (!tx_coal_timer)
3068 		return;
3069 
3070 	ch = &priv->channel[tx_q->queue_index];
3071 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3072 
3073 	/* Arm timer only if napi is not already scheduled.
3074 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3075 	 * again in the next scheduled napi.
3076 	 */
3077 	if (unlikely(!napi_is_scheduled(napi)))
3078 		hrtimer_start(&tx_q->txtimer,
3079 			      STMMAC_COAL_TIMER(tx_coal_timer),
3080 			      HRTIMER_MODE_REL);
3081 	else
3082 		hrtimer_try_to_cancel(&tx_q->txtimer);
3083 }
3084 
3085 /**
3086  * stmmac_tx_timer - mitigation sw timer for tx.
3087  * @t: data pointer
3088  * Description:
3089  * This is the timer handler to directly invoke the stmmac_tx_clean.
3090  */
3091 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3092 {
3093 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3094 	struct stmmac_priv *priv = tx_q->priv_data;
3095 	struct stmmac_channel *ch;
3096 	struct napi_struct *napi;
3097 
3098 	ch = &priv->channel[tx_q->queue_index];
3099 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3100 
3101 	if (likely(napi_schedule_prep(napi))) {
3102 		unsigned long flags;
3103 
3104 		spin_lock_irqsave(&ch->lock, flags);
3105 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3106 		spin_unlock_irqrestore(&ch->lock, flags);
3107 		__napi_schedule(napi);
3108 	}
3109 
3110 	return HRTIMER_NORESTART;
3111 }
3112 
3113 /**
3114  * stmmac_init_coalesce - init mitigation options.
3115  * @priv: driver private structure
3116  * Description:
3117  * This inits the coalesce parameters: i.e. timer rate,
3118  * timer handler and default threshold used for enabling the
3119  * interrupt on completion bit.
3120  */
3121 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3122 {
3123 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3124 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3125 	u32 chan;
3126 
3127 	for (chan = 0; chan < tx_channel_count; chan++) {
3128 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3129 
3130 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3131 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3132 
3133 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3134 		tx_q->txtimer.function = stmmac_tx_timer;
3135 	}
3136 
3137 	for (chan = 0; chan < rx_channel_count; chan++)
3138 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3139 }
3140 
3141 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3142 {
3143 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3144 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3145 	u32 chan;
3146 
3147 	/* set TX ring length */
3148 	for (chan = 0; chan < tx_channels_count; chan++)
3149 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3150 				       (priv->dma_conf.dma_tx_size - 1), chan);
3151 
3152 	/* set RX ring length */
3153 	for (chan = 0; chan < rx_channels_count; chan++)
3154 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3155 				       (priv->dma_conf.dma_rx_size - 1), chan);
3156 }
3157 
3158 /**
3159  *  stmmac_set_tx_queue_weight - Set TX queue weight
3160  *  @priv: driver private structure
3161  *  Description: It is used for setting TX queues weight
3162  */
3163 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3164 {
3165 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3166 	u32 weight;
3167 	u32 queue;
3168 
3169 	for (queue = 0; queue < tx_queues_count; queue++) {
3170 		weight = priv->plat->tx_queues_cfg[queue].weight;
3171 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3172 	}
3173 }
3174 
3175 /**
3176  *  stmmac_configure_cbs - Configure CBS in TX queue
3177  *  @priv: driver private structure
3178  *  Description: It is used for configuring CBS in AVB TX queues
3179  */
3180 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3181 {
3182 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3183 	u32 mode_to_use;
3184 	u32 queue;
3185 
3186 	/* queue 0 is reserved for legacy traffic */
3187 	for (queue = 1; queue < tx_queues_count; queue++) {
3188 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3189 		if (mode_to_use == MTL_QUEUE_DCB)
3190 			continue;
3191 
3192 		stmmac_config_cbs(priv, priv->hw,
3193 				priv->plat->tx_queues_cfg[queue].send_slope,
3194 				priv->plat->tx_queues_cfg[queue].idle_slope,
3195 				priv->plat->tx_queues_cfg[queue].high_credit,
3196 				priv->plat->tx_queues_cfg[queue].low_credit,
3197 				queue);
3198 	}
3199 }
3200 
3201 /**
3202  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3203  *  @priv: driver private structure
3204  *  Description: It is used for mapping RX queues to RX dma channels
3205  */
3206 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3207 {
3208 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3209 	u32 queue;
3210 	u32 chan;
3211 
3212 	for (queue = 0; queue < rx_queues_count; queue++) {
3213 		chan = priv->plat->rx_queues_cfg[queue].chan;
3214 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3215 	}
3216 }
3217 
3218 /**
3219  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3220  *  @priv: driver private structure
3221  *  Description: It is used for configuring the RX Queue Priority
3222  */
3223 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3224 {
3225 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3226 	u32 queue;
3227 	u32 prio;
3228 
3229 	for (queue = 0; queue < rx_queues_count; queue++) {
3230 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3231 			continue;
3232 
3233 		prio = priv->plat->rx_queues_cfg[queue].prio;
3234 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3235 	}
3236 }
3237 
3238 /**
3239  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3240  *  @priv: driver private structure
3241  *  Description: It is used for configuring the TX Queue Priority
3242  */
3243 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3244 {
3245 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3246 	u32 queue;
3247 	u32 prio;
3248 
3249 	for (queue = 0; queue < tx_queues_count; queue++) {
3250 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3251 			continue;
3252 
3253 		prio = priv->plat->tx_queues_cfg[queue].prio;
3254 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3255 	}
3256 }
3257 
3258 /**
3259  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3260  *  @priv: driver private structure
3261  *  Description: It is used for configuring the RX queue routing
3262  */
3263 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3264 {
3265 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3266 	u32 queue;
3267 	u8 packet;
3268 
3269 	for (queue = 0; queue < rx_queues_count; queue++) {
3270 		/* no specific packet type routing specified for the queue */
3271 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3272 			continue;
3273 
3274 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3275 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3276 	}
3277 }
3278 
3279 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3280 {
3281 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3282 		priv->rss.enable = false;
3283 		return;
3284 	}
3285 
3286 	if (priv->dev->features & NETIF_F_RXHASH)
3287 		priv->rss.enable = true;
3288 	else
3289 		priv->rss.enable = false;
3290 
3291 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3292 			     priv->plat->rx_queues_to_use);
3293 }
3294 
3295 /**
3296  *  stmmac_mtl_configuration - Configure MTL
3297  *  @priv: driver private structure
3298  *  Description: It is used for configurring MTL
3299  */
3300 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3301 {
3302 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3303 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3304 
3305 	if (tx_queues_count > 1)
3306 		stmmac_set_tx_queue_weight(priv);
3307 
3308 	/* Configure MTL RX algorithms */
3309 	if (rx_queues_count > 1)
3310 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3311 				priv->plat->rx_sched_algorithm);
3312 
3313 	/* Configure MTL TX algorithms */
3314 	if (tx_queues_count > 1)
3315 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3316 				priv->plat->tx_sched_algorithm);
3317 
3318 	/* Configure CBS in AVB TX queues */
3319 	if (tx_queues_count > 1)
3320 		stmmac_configure_cbs(priv);
3321 
3322 	/* Map RX MTL to DMA channels */
3323 	stmmac_rx_queue_dma_chan_map(priv);
3324 
3325 	/* Enable MAC RX Queues */
3326 	stmmac_mac_enable_rx_queues(priv);
3327 
3328 	/* Set RX priorities */
3329 	if (rx_queues_count > 1)
3330 		stmmac_mac_config_rx_queues_prio(priv);
3331 
3332 	/* Set TX priorities */
3333 	if (tx_queues_count > 1)
3334 		stmmac_mac_config_tx_queues_prio(priv);
3335 
3336 	/* Set RX routing */
3337 	if (rx_queues_count > 1)
3338 		stmmac_mac_config_rx_queues_routing(priv);
3339 
3340 	/* Receive Side Scaling */
3341 	if (rx_queues_count > 1)
3342 		stmmac_mac_config_rss(priv);
3343 }
3344 
3345 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3346 {
3347 	if (priv->dma_cap.asp) {
3348 		netdev_info(priv->dev, "Enabling Safety Features\n");
3349 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3350 					  priv->plat->safety_feat_cfg);
3351 	} else {
3352 		netdev_info(priv->dev, "No Safety Features support found\n");
3353 	}
3354 }
3355 
3356 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3357 {
3358 	char *name;
3359 
3360 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3361 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3362 
3363 	name = priv->wq_name;
3364 	sprintf(name, "%s-fpe", priv->dev->name);
3365 
3366 	priv->fpe_wq = create_singlethread_workqueue(name);
3367 	if (!priv->fpe_wq) {
3368 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3369 
3370 		return -ENOMEM;
3371 	}
3372 	netdev_info(priv->dev, "FPE workqueue start");
3373 
3374 	return 0;
3375 }
3376 
3377 /**
3378  * stmmac_hw_setup - setup mac in a usable state.
3379  *  @dev : pointer to the device structure.
3380  *  @ptp_register: register PTP if set
3381  *  Description:
3382  *  this is the main function to setup the HW in a usable state because the
3383  *  dma engine is reset, the core registers are configured (e.g. AXI,
3384  *  Checksum features, timers). The DMA is ready to start receiving and
3385  *  transmitting.
3386  *  Return value:
3387  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3388  *  file on failure.
3389  */
3390 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3391 {
3392 	struct stmmac_priv *priv = netdev_priv(dev);
3393 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3394 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3395 	bool sph_en;
3396 	u32 chan;
3397 	int ret;
3398 
3399 	/* Make sure RX clock is enabled */
3400 	if (priv->hw->phylink_pcs)
3401 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3402 
3403 	/* DMA initialization and SW reset */
3404 	ret = stmmac_init_dma_engine(priv);
3405 	if (ret < 0) {
3406 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3407 			   __func__);
3408 		return ret;
3409 	}
3410 
3411 	/* Copy the MAC addr into the HW  */
3412 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3413 
3414 	/* PS and related bits will be programmed according to the speed */
3415 	if (priv->hw->pcs) {
3416 		int speed = priv->plat->mac_port_sel_speed;
3417 
3418 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3419 		    (speed == SPEED_1000)) {
3420 			priv->hw->ps = speed;
3421 		} else {
3422 			dev_warn(priv->device, "invalid port speed\n");
3423 			priv->hw->ps = 0;
3424 		}
3425 	}
3426 
3427 	/* Initialize the MAC Core */
3428 	stmmac_core_init(priv, priv->hw, dev);
3429 
3430 	/* Initialize MTL*/
3431 	stmmac_mtl_configuration(priv);
3432 
3433 	/* Initialize Safety Features */
3434 	stmmac_safety_feat_configuration(priv);
3435 
3436 	ret = stmmac_rx_ipc(priv, priv->hw);
3437 	if (!ret) {
3438 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3439 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3440 		priv->hw->rx_csum = 0;
3441 	}
3442 
3443 	/* Enable the MAC Rx/Tx */
3444 	stmmac_mac_set(priv, priv->ioaddr, true);
3445 
3446 	/* Set the HW DMA mode and the COE */
3447 	stmmac_dma_operation_mode(priv);
3448 
3449 	stmmac_mmc_setup(priv);
3450 
3451 	if (ptp_register) {
3452 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3453 		if (ret < 0)
3454 			netdev_warn(priv->dev,
3455 				    "failed to enable PTP reference clock: %pe\n",
3456 				    ERR_PTR(ret));
3457 	}
3458 
3459 	ret = stmmac_init_ptp(priv);
3460 	if (ret == -EOPNOTSUPP)
3461 		netdev_info(priv->dev, "PTP not supported by HW\n");
3462 	else if (ret)
3463 		netdev_warn(priv->dev, "PTP init failed\n");
3464 	else if (ptp_register)
3465 		stmmac_ptp_register(priv);
3466 
3467 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3468 
3469 	/* Convert the timer from msec to usec */
3470 	if (!priv->tx_lpi_timer)
3471 		priv->tx_lpi_timer = eee_timer * 1000;
3472 
3473 	if (priv->use_riwt) {
3474 		u32 queue;
3475 
3476 		for (queue = 0; queue < rx_cnt; queue++) {
3477 			if (!priv->rx_riwt[queue])
3478 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3479 
3480 			stmmac_rx_watchdog(priv, priv->ioaddr,
3481 					   priv->rx_riwt[queue], queue);
3482 		}
3483 	}
3484 
3485 	if (priv->hw->pcs)
3486 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3487 
3488 	/* set TX and RX rings length */
3489 	stmmac_set_rings_length(priv);
3490 
3491 	/* Enable TSO */
3492 	if (priv->tso) {
3493 		for (chan = 0; chan < tx_cnt; chan++) {
3494 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3495 
3496 			/* TSO and TBS cannot co-exist */
3497 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3498 				continue;
3499 
3500 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3501 		}
3502 	}
3503 
3504 	/* Enable Split Header */
3505 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3506 	for (chan = 0; chan < rx_cnt; chan++)
3507 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3508 
3509 
3510 	/* VLAN Tag Insertion */
3511 	if (priv->dma_cap.vlins)
3512 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3513 
3514 	/* TBS */
3515 	for (chan = 0; chan < tx_cnt; chan++) {
3516 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3517 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3518 
3519 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3520 	}
3521 
3522 	/* Configure real RX and TX queues */
3523 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3524 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3525 
3526 	/* Start the ball rolling... */
3527 	stmmac_start_all_dma(priv);
3528 
3529 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3530 
3531 	if (priv->dma_cap.fpesel) {
3532 		stmmac_fpe_start_wq(priv);
3533 
3534 		if (priv->plat->fpe_cfg->enable)
3535 			stmmac_fpe_handshake(priv, true);
3536 	}
3537 
3538 	return 0;
3539 }
3540 
3541 static void stmmac_hw_teardown(struct net_device *dev)
3542 {
3543 	struct stmmac_priv *priv = netdev_priv(dev);
3544 
3545 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3546 }
3547 
3548 static void stmmac_free_irq(struct net_device *dev,
3549 			    enum request_irq_err irq_err, int irq_idx)
3550 {
3551 	struct stmmac_priv *priv = netdev_priv(dev);
3552 	int j;
3553 
3554 	switch (irq_err) {
3555 	case REQ_IRQ_ERR_ALL:
3556 		irq_idx = priv->plat->tx_queues_to_use;
3557 		fallthrough;
3558 	case REQ_IRQ_ERR_TX:
3559 		for (j = irq_idx - 1; j >= 0; j--) {
3560 			if (priv->tx_irq[j] > 0) {
3561 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3562 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3563 			}
3564 		}
3565 		irq_idx = priv->plat->rx_queues_to_use;
3566 		fallthrough;
3567 	case REQ_IRQ_ERR_RX:
3568 		for (j = irq_idx - 1; j >= 0; j--) {
3569 			if (priv->rx_irq[j] > 0) {
3570 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3571 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3572 			}
3573 		}
3574 
3575 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3576 			free_irq(priv->sfty_ue_irq, dev);
3577 		fallthrough;
3578 	case REQ_IRQ_ERR_SFTY_UE:
3579 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3580 			free_irq(priv->sfty_ce_irq, dev);
3581 		fallthrough;
3582 	case REQ_IRQ_ERR_SFTY_CE:
3583 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3584 			free_irq(priv->lpi_irq, dev);
3585 		fallthrough;
3586 	case REQ_IRQ_ERR_LPI:
3587 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3588 			free_irq(priv->wol_irq, dev);
3589 		fallthrough;
3590 	case REQ_IRQ_ERR_SFTY:
3591 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3592 			free_irq(priv->sfty_irq, dev);
3593 		fallthrough;
3594 	case REQ_IRQ_ERR_WOL:
3595 		free_irq(dev->irq, dev);
3596 		fallthrough;
3597 	case REQ_IRQ_ERR_MAC:
3598 	case REQ_IRQ_ERR_NO:
3599 		/* If MAC IRQ request error, no more IRQ to free */
3600 		break;
3601 	}
3602 }
3603 
3604 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3605 {
3606 	struct stmmac_priv *priv = netdev_priv(dev);
3607 	enum request_irq_err irq_err;
3608 	cpumask_t cpu_mask;
3609 	int irq_idx = 0;
3610 	char *int_name;
3611 	int ret;
3612 	int i;
3613 
3614 	/* For common interrupt */
3615 	int_name = priv->int_name_mac;
3616 	sprintf(int_name, "%s:%s", dev->name, "mac");
3617 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3618 			  0, int_name, dev);
3619 	if (unlikely(ret < 0)) {
3620 		netdev_err(priv->dev,
3621 			   "%s: alloc mac MSI %d (error: %d)\n",
3622 			   __func__, dev->irq, ret);
3623 		irq_err = REQ_IRQ_ERR_MAC;
3624 		goto irq_error;
3625 	}
3626 
3627 	/* Request the Wake IRQ in case of another line
3628 	 * is used for WoL
3629 	 */
3630 	priv->wol_irq_disabled = true;
3631 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3632 		int_name = priv->int_name_wol;
3633 		sprintf(int_name, "%s:%s", dev->name, "wol");
3634 		ret = request_irq(priv->wol_irq,
3635 				  stmmac_mac_interrupt,
3636 				  0, int_name, dev);
3637 		if (unlikely(ret < 0)) {
3638 			netdev_err(priv->dev,
3639 				   "%s: alloc wol MSI %d (error: %d)\n",
3640 				   __func__, priv->wol_irq, ret);
3641 			irq_err = REQ_IRQ_ERR_WOL;
3642 			goto irq_error;
3643 		}
3644 	}
3645 
3646 	/* Request the LPI IRQ in case of another line
3647 	 * is used for LPI
3648 	 */
3649 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3650 		int_name = priv->int_name_lpi;
3651 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3652 		ret = request_irq(priv->lpi_irq,
3653 				  stmmac_mac_interrupt,
3654 				  0, int_name, dev);
3655 		if (unlikely(ret < 0)) {
3656 			netdev_err(priv->dev,
3657 				   "%s: alloc lpi MSI %d (error: %d)\n",
3658 				   __func__, priv->lpi_irq, ret);
3659 			irq_err = REQ_IRQ_ERR_LPI;
3660 			goto irq_error;
3661 		}
3662 	}
3663 
3664 	/* Request the common Safety Feature Correctible/Uncorrectible
3665 	 * Error line in case of another line is used
3666 	 */
3667 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3668 		int_name = priv->int_name_sfty;
3669 		sprintf(int_name, "%s:%s", dev->name, "safety");
3670 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3671 				  0, int_name, dev);
3672 		if (unlikely(ret < 0)) {
3673 			netdev_err(priv->dev,
3674 				   "%s: alloc sfty MSI %d (error: %d)\n",
3675 				   __func__, priv->sfty_irq, ret);
3676 			irq_err = REQ_IRQ_ERR_SFTY;
3677 			goto irq_error;
3678 		}
3679 	}
3680 
3681 	/* Request the Safety Feature Correctible Error line in
3682 	 * case of another line is used
3683 	 */
3684 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3685 		int_name = priv->int_name_sfty_ce;
3686 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3687 		ret = request_irq(priv->sfty_ce_irq,
3688 				  stmmac_safety_interrupt,
3689 				  0, int_name, dev);
3690 		if (unlikely(ret < 0)) {
3691 			netdev_err(priv->dev,
3692 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3693 				   __func__, priv->sfty_ce_irq, ret);
3694 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3695 			goto irq_error;
3696 		}
3697 	}
3698 
3699 	/* Request the Safety Feature Uncorrectible Error line in
3700 	 * case of another line is used
3701 	 */
3702 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3703 		int_name = priv->int_name_sfty_ue;
3704 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3705 		ret = request_irq(priv->sfty_ue_irq,
3706 				  stmmac_safety_interrupt,
3707 				  0, int_name, dev);
3708 		if (unlikely(ret < 0)) {
3709 			netdev_err(priv->dev,
3710 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3711 				   __func__, priv->sfty_ue_irq, ret);
3712 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3713 			goto irq_error;
3714 		}
3715 	}
3716 
3717 	/* Request Rx MSI irq */
3718 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3719 		if (i >= MTL_MAX_RX_QUEUES)
3720 			break;
3721 		if (priv->rx_irq[i] == 0)
3722 			continue;
3723 
3724 		int_name = priv->int_name_rx_irq[i];
3725 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3726 		ret = request_irq(priv->rx_irq[i],
3727 				  stmmac_msi_intr_rx,
3728 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3729 		if (unlikely(ret < 0)) {
3730 			netdev_err(priv->dev,
3731 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3732 				   __func__, i, priv->rx_irq[i], ret);
3733 			irq_err = REQ_IRQ_ERR_RX;
3734 			irq_idx = i;
3735 			goto irq_error;
3736 		}
3737 		cpumask_clear(&cpu_mask);
3738 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3739 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3740 	}
3741 
3742 	/* Request Tx MSI irq */
3743 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3744 		if (i >= MTL_MAX_TX_QUEUES)
3745 			break;
3746 		if (priv->tx_irq[i] == 0)
3747 			continue;
3748 
3749 		int_name = priv->int_name_tx_irq[i];
3750 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3751 		ret = request_irq(priv->tx_irq[i],
3752 				  stmmac_msi_intr_tx,
3753 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3754 		if (unlikely(ret < 0)) {
3755 			netdev_err(priv->dev,
3756 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3757 				   __func__, i, priv->tx_irq[i], ret);
3758 			irq_err = REQ_IRQ_ERR_TX;
3759 			irq_idx = i;
3760 			goto irq_error;
3761 		}
3762 		cpumask_clear(&cpu_mask);
3763 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3764 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3765 	}
3766 
3767 	return 0;
3768 
3769 irq_error:
3770 	stmmac_free_irq(dev, irq_err, irq_idx);
3771 	return ret;
3772 }
3773 
3774 static int stmmac_request_irq_single(struct net_device *dev)
3775 {
3776 	struct stmmac_priv *priv = netdev_priv(dev);
3777 	enum request_irq_err irq_err;
3778 	int ret;
3779 
3780 	ret = request_irq(dev->irq, stmmac_interrupt,
3781 			  IRQF_SHARED, dev->name, dev);
3782 	if (unlikely(ret < 0)) {
3783 		netdev_err(priv->dev,
3784 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3785 			   __func__, dev->irq, ret);
3786 		irq_err = REQ_IRQ_ERR_MAC;
3787 		goto irq_error;
3788 	}
3789 
3790 	/* Request the Wake IRQ in case of another line
3791 	 * is used for WoL
3792 	 */
3793 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3794 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3795 				  IRQF_SHARED, dev->name, dev);
3796 		if (unlikely(ret < 0)) {
3797 			netdev_err(priv->dev,
3798 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3799 				   __func__, priv->wol_irq, ret);
3800 			irq_err = REQ_IRQ_ERR_WOL;
3801 			goto irq_error;
3802 		}
3803 	}
3804 
3805 	/* Request the IRQ lines */
3806 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3807 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3808 				  IRQF_SHARED, dev->name, dev);
3809 		if (unlikely(ret < 0)) {
3810 			netdev_err(priv->dev,
3811 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3812 				   __func__, priv->lpi_irq, ret);
3813 			irq_err = REQ_IRQ_ERR_LPI;
3814 			goto irq_error;
3815 		}
3816 	}
3817 
3818 	/* Request the common Safety Feature Correctible/Uncorrectible
3819 	 * Error line in case of another line is used
3820 	 */
3821 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3822 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3823 				  IRQF_SHARED, dev->name, dev);
3824 		if (unlikely(ret < 0)) {
3825 			netdev_err(priv->dev,
3826 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3827 				   __func__, priv->sfty_irq, ret);
3828 			irq_err = REQ_IRQ_ERR_SFTY;
3829 			goto irq_error;
3830 		}
3831 	}
3832 
3833 	return 0;
3834 
3835 irq_error:
3836 	stmmac_free_irq(dev, irq_err, 0);
3837 	return ret;
3838 }
3839 
3840 static int stmmac_request_irq(struct net_device *dev)
3841 {
3842 	struct stmmac_priv *priv = netdev_priv(dev);
3843 	int ret;
3844 
3845 	/* Request the IRQ lines */
3846 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3847 		ret = stmmac_request_irq_multi_msi(dev);
3848 	else
3849 		ret = stmmac_request_irq_single(dev);
3850 
3851 	return ret;
3852 }
3853 
3854 /**
3855  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3856  *  @priv: driver private structure
3857  *  @mtu: MTU to setup the dma queue and buf with
3858  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3859  *  Allocate the Tx/Rx DMA queue and init them.
3860  *  Return value:
3861  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3862  */
3863 static struct stmmac_dma_conf *
3864 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3865 {
3866 	struct stmmac_dma_conf *dma_conf;
3867 	int chan, bfsize, ret;
3868 
3869 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3870 	if (!dma_conf) {
3871 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3872 			   __func__);
3873 		return ERR_PTR(-ENOMEM);
3874 	}
3875 
3876 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3877 	if (bfsize < 0)
3878 		bfsize = 0;
3879 
3880 	if (bfsize < BUF_SIZE_16KiB)
3881 		bfsize = stmmac_set_bfsize(mtu, 0);
3882 
3883 	dma_conf->dma_buf_sz = bfsize;
3884 	/* Chose the tx/rx size from the already defined one in the
3885 	 * priv struct. (if defined)
3886 	 */
3887 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3888 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3889 
3890 	if (!dma_conf->dma_tx_size)
3891 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3892 	if (!dma_conf->dma_rx_size)
3893 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3894 
3895 	/* Earlier check for TBS */
3896 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3897 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3898 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3899 
3900 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3901 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3902 	}
3903 
3904 	ret = alloc_dma_desc_resources(priv, dma_conf);
3905 	if (ret < 0) {
3906 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3907 			   __func__);
3908 		goto alloc_error;
3909 	}
3910 
3911 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3912 	if (ret < 0) {
3913 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3914 			   __func__);
3915 		goto init_error;
3916 	}
3917 
3918 	return dma_conf;
3919 
3920 init_error:
3921 	free_dma_desc_resources(priv, dma_conf);
3922 alloc_error:
3923 	kfree(dma_conf);
3924 	return ERR_PTR(ret);
3925 }
3926 
3927 /**
3928  *  __stmmac_open - open entry point of the driver
3929  *  @dev : pointer to the device structure.
3930  *  @dma_conf :  structure to take the dma data
3931  *  Description:
3932  *  This function is the open entry point of the driver.
3933  *  Return value:
3934  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3935  *  file on failure.
3936  */
3937 static int __stmmac_open(struct net_device *dev,
3938 			 struct stmmac_dma_conf *dma_conf)
3939 {
3940 	struct stmmac_priv *priv = netdev_priv(dev);
3941 	int mode = priv->plat->phy_interface;
3942 	u32 chan;
3943 	int ret;
3944 
3945 	ret = pm_runtime_resume_and_get(priv->device);
3946 	if (ret < 0)
3947 		return ret;
3948 
3949 	if ((!priv->hw->xpcs ||
3950 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3951 		ret = stmmac_init_phy(dev);
3952 		if (ret) {
3953 			netdev_err(priv->dev,
3954 				   "%s: Cannot attach to PHY (error: %d)\n",
3955 				   __func__, ret);
3956 			goto init_phy_error;
3957 		}
3958 	}
3959 
3960 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3961 
3962 	buf_sz = dma_conf->dma_buf_sz;
3963 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3964 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3965 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3966 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3967 
3968 	stmmac_reset_queues_param(priv);
3969 
3970 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3971 	    priv->plat->serdes_powerup) {
3972 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3973 		if (ret < 0) {
3974 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3975 				   __func__);
3976 			goto init_error;
3977 		}
3978 	}
3979 
3980 	ret = stmmac_hw_setup(dev, true);
3981 	if (ret < 0) {
3982 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3983 		goto init_error;
3984 	}
3985 
3986 	stmmac_init_coalesce(priv);
3987 
3988 	phylink_start(priv->phylink);
3989 	/* We may have called phylink_speed_down before */
3990 	phylink_speed_up(priv->phylink);
3991 
3992 	ret = stmmac_request_irq(dev);
3993 	if (ret)
3994 		goto irq_error;
3995 
3996 	stmmac_enable_all_queues(priv);
3997 	netif_tx_start_all_queues(priv->dev);
3998 	stmmac_enable_all_dma_irq(priv);
3999 
4000 	return 0;
4001 
4002 irq_error:
4003 	phylink_stop(priv->phylink);
4004 
4005 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4006 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4007 
4008 	stmmac_hw_teardown(dev);
4009 init_error:
4010 	phylink_disconnect_phy(priv->phylink);
4011 init_phy_error:
4012 	pm_runtime_put(priv->device);
4013 	return ret;
4014 }
4015 
4016 static int stmmac_open(struct net_device *dev)
4017 {
4018 	struct stmmac_priv *priv = netdev_priv(dev);
4019 	struct stmmac_dma_conf *dma_conf;
4020 	int ret;
4021 
4022 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4023 	if (IS_ERR(dma_conf))
4024 		return PTR_ERR(dma_conf);
4025 
4026 	ret = __stmmac_open(dev, dma_conf);
4027 	if (ret)
4028 		free_dma_desc_resources(priv, dma_conf);
4029 
4030 	kfree(dma_conf);
4031 	return ret;
4032 }
4033 
4034 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
4035 {
4036 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
4037 
4038 	if (priv->fpe_wq) {
4039 		destroy_workqueue(priv->fpe_wq);
4040 		priv->fpe_wq = NULL;
4041 	}
4042 
4043 	netdev_info(priv->dev, "FPE workqueue stop");
4044 }
4045 
4046 /**
4047  *  stmmac_release - close entry point of the driver
4048  *  @dev : device pointer.
4049  *  Description:
4050  *  This is the stop entry point of the driver.
4051  */
4052 static int stmmac_release(struct net_device *dev)
4053 {
4054 	struct stmmac_priv *priv = netdev_priv(dev);
4055 	u32 chan;
4056 
4057 	if (device_may_wakeup(priv->device))
4058 		phylink_speed_down(priv->phylink, false);
4059 	/* Stop and disconnect the PHY */
4060 	phylink_stop(priv->phylink);
4061 	phylink_disconnect_phy(priv->phylink);
4062 
4063 	stmmac_disable_all_queues(priv);
4064 
4065 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4066 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4067 
4068 	netif_tx_disable(dev);
4069 
4070 	/* Free the IRQ lines */
4071 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4072 
4073 	if (priv->eee_enabled) {
4074 		priv->tx_path_in_lpi_mode = false;
4075 		del_timer_sync(&priv->eee_ctrl_timer);
4076 	}
4077 
4078 	/* Stop TX/RX DMA and clear the descriptors */
4079 	stmmac_stop_all_dma(priv);
4080 
4081 	/* Release and free the Rx/Tx resources */
4082 	free_dma_desc_resources(priv, &priv->dma_conf);
4083 
4084 	/* Disable the MAC Rx/Tx */
4085 	stmmac_mac_set(priv, priv->ioaddr, false);
4086 
4087 	/* Powerdown Serdes if there is */
4088 	if (priv->plat->serdes_powerdown)
4089 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4090 
4091 	stmmac_release_ptp(priv);
4092 
4093 	pm_runtime_put(priv->device);
4094 
4095 	if (priv->dma_cap.fpesel)
4096 		stmmac_fpe_stop_wq(priv);
4097 
4098 	return 0;
4099 }
4100 
4101 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4102 			       struct stmmac_tx_queue *tx_q)
4103 {
4104 	u16 tag = 0x0, inner_tag = 0x0;
4105 	u32 inner_type = 0x0;
4106 	struct dma_desc *p;
4107 
4108 	if (!priv->dma_cap.vlins)
4109 		return false;
4110 	if (!skb_vlan_tag_present(skb))
4111 		return false;
4112 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4113 		inner_tag = skb_vlan_tag_get(skb);
4114 		inner_type = STMMAC_VLAN_INSERT;
4115 	}
4116 
4117 	tag = skb_vlan_tag_get(skb);
4118 
4119 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4120 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4121 	else
4122 		p = &tx_q->dma_tx[tx_q->cur_tx];
4123 
4124 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4125 		return false;
4126 
4127 	stmmac_set_tx_owner(priv, p);
4128 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4129 	return true;
4130 }
4131 
4132 /**
4133  *  stmmac_tso_allocator - close entry point of the driver
4134  *  @priv: driver private structure
4135  *  @des: buffer start address
4136  *  @total_len: total length to fill in descriptors
4137  *  @last_segment: condition for the last descriptor
4138  *  @queue: TX queue index
4139  *  Description:
4140  *  This function fills descriptor and request new descriptors according to
4141  *  buffer length to fill
4142  */
4143 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4144 				 int total_len, bool last_segment, u32 queue)
4145 {
4146 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4147 	struct dma_desc *desc;
4148 	u32 buff_size;
4149 	int tmp_len;
4150 
4151 	tmp_len = total_len;
4152 
4153 	while (tmp_len > 0) {
4154 		dma_addr_t curr_addr;
4155 
4156 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4157 						priv->dma_conf.dma_tx_size);
4158 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4159 
4160 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4161 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4162 		else
4163 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4164 
4165 		curr_addr = des + (total_len - tmp_len);
4166 		if (priv->dma_cap.addr64 <= 32)
4167 			desc->des0 = cpu_to_le32(curr_addr);
4168 		else
4169 			stmmac_set_desc_addr(priv, desc, curr_addr);
4170 
4171 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4172 			    TSO_MAX_BUFF_SIZE : tmp_len;
4173 
4174 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4175 				0, 1,
4176 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4177 				0, 0);
4178 
4179 		tmp_len -= TSO_MAX_BUFF_SIZE;
4180 	}
4181 }
4182 
4183 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4184 {
4185 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4186 	int desc_size;
4187 
4188 	if (likely(priv->extend_desc))
4189 		desc_size = sizeof(struct dma_extended_desc);
4190 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4191 		desc_size = sizeof(struct dma_edesc);
4192 	else
4193 		desc_size = sizeof(struct dma_desc);
4194 
4195 	/* The own bit must be the latest setting done when prepare the
4196 	 * descriptor and then barrier is needed to make sure that
4197 	 * all is coherent before granting the DMA engine.
4198 	 */
4199 	wmb();
4200 
4201 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4202 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4203 }
4204 
4205 /**
4206  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4207  *  @skb : the socket buffer
4208  *  @dev : device pointer
4209  *  Description: this is the transmit function that is called on TSO frames
4210  *  (support available on GMAC4 and newer chips).
4211  *  Diagram below show the ring programming in case of TSO frames:
4212  *
4213  *  First Descriptor
4214  *   --------
4215  *   | DES0 |---> buffer1 = L2/L3/L4 header
4216  *   | DES1 |---> TCP Payload (can continue on next descr...)
4217  *   | DES2 |---> buffer 1 and 2 len
4218  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4219  *   --------
4220  *	|
4221  *     ...
4222  *	|
4223  *   --------
4224  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4225  *   | DES1 | --|
4226  *   | DES2 | --> buffer 1 and 2 len
4227  *   | DES3 |
4228  *   --------
4229  *
4230  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4231  */
4232 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4233 {
4234 	struct dma_desc *desc, *first, *mss_desc = NULL;
4235 	struct stmmac_priv *priv = netdev_priv(dev);
4236 	int nfrags = skb_shinfo(skb)->nr_frags;
4237 	u32 queue = skb_get_queue_mapping(skb);
4238 	unsigned int first_entry, tx_packets;
4239 	struct stmmac_txq_stats *txq_stats;
4240 	int tmp_pay_len = 0, first_tx;
4241 	struct stmmac_tx_queue *tx_q;
4242 	bool has_vlan, set_ic;
4243 	u8 proto_hdr_len, hdr;
4244 	u32 pay_len, mss;
4245 	dma_addr_t des;
4246 	int i;
4247 
4248 	tx_q = &priv->dma_conf.tx_queue[queue];
4249 	txq_stats = &priv->xstats.txq_stats[queue];
4250 	first_tx = tx_q->cur_tx;
4251 
4252 	/* Compute header lengths */
4253 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4254 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4255 		hdr = sizeof(struct udphdr);
4256 	} else {
4257 		proto_hdr_len = skb_tcp_all_headers(skb);
4258 		hdr = tcp_hdrlen(skb);
4259 	}
4260 
4261 	/* Desc availability based on threshold should be enough safe */
4262 	if (unlikely(stmmac_tx_avail(priv, queue) <
4263 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4264 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4265 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4266 								queue));
4267 			/* This is a hard error, log it. */
4268 			netdev_err(priv->dev,
4269 				   "%s: Tx Ring full when queue awake\n",
4270 				   __func__);
4271 		}
4272 		return NETDEV_TX_BUSY;
4273 	}
4274 
4275 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4276 
4277 	mss = skb_shinfo(skb)->gso_size;
4278 
4279 	/* set new MSS value if needed */
4280 	if (mss != tx_q->mss) {
4281 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4282 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4283 		else
4284 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4285 
4286 		stmmac_set_mss(priv, mss_desc, mss);
4287 		tx_q->mss = mss;
4288 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4289 						priv->dma_conf.dma_tx_size);
4290 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4291 	}
4292 
4293 	if (netif_msg_tx_queued(priv)) {
4294 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4295 			__func__, hdr, proto_hdr_len, pay_len, mss);
4296 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4297 			skb->data_len);
4298 	}
4299 
4300 	/* Check if VLAN can be inserted by HW */
4301 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4302 
4303 	first_entry = tx_q->cur_tx;
4304 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4305 
4306 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4307 		desc = &tx_q->dma_entx[first_entry].basic;
4308 	else
4309 		desc = &tx_q->dma_tx[first_entry];
4310 	first = desc;
4311 
4312 	if (has_vlan)
4313 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4314 
4315 	/* first descriptor: fill Headers on Buf1 */
4316 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4317 			     DMA_TO_DEVICE);
4318 	if (dma_mapping_error(priv->device, des))
4319 		goto dma_map_err;
4320 
4321 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4322 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4323 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4324 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4325 
4326 	if (priv->dma_cap.addr64 <= 32) {
4327 		first->des0 = cpu_to_le32(des);
4328 
4329 		/* Fill start of payload in buff2 of first descriptor */
4330 		if (pay_len)
4331 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4332 
4333 		/* If needed take extra descriptors to fill the remaining payload */
4334 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4335 	} else {
4336 		stmmac_set_desc_addr(priv, first, des);
4337 		tmp_pay_len = pay_len;
4338 		des += proto_hdr_len;
4339 		pay_len = 0;
4340 	}
4341 
4342 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4343 
4344 	/* Prepare fragments */
4345 	for (i = 0; i < nfrags; i++) {
4346 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4347 
4348 		des = skb_frag_dma_map(priv->device, frag, 0,
4349 				       skb_frag_size(frag),
4350 				       DMA_TO_DEVICE);
4351 		if (dma_mapping_error(priv->device, des))
4352 			goto dma_map_err;
4353 
4354 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4355 				     (i == nfrags - 1), queue);
4356 
4357 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4358 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4359 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4360 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4361 	}
4362 
4363 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4364 
4365 	/* Only the last descriptor gets to point to the skb. */
4366 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4367 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4368 
4369 	/* Manage tx mitigation */
4370 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4371 	tx_q->tx_count_frames += tx_packets;
4372 
4373 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4374 		set_ic = true;
4375 	else if (!priv->tx_coal_frames[queue])
4376 		set_ic = false;
4377 	else if (tx_packets > priv->tx_coal_frames[queue])
4378 		set_ic = true;
4379 	else if ((tx_q->tx_count_frames %
4380 		  priv->tx_coal_frames[queue]) < tx_packets)
4381 		set_ic = true;
4382 	else
4383 		set_ic = false;
4384 
4385 	if (set_ic) {
4386 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4387 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4388 		else
4389 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4390 
4391 		tx_q->tx_count_frames = 0;
4392 		stmmac_set_tx_ic(priv, desc);
4393 	}
4394 
4395 	/* We've used all descriptors we need for this skb, however,
4396 	 * advance cur_tx so that it references a fresh descriptor.
4397 	 * ndo_start_xmit will fill this descriptor the next time it's
4398 	 * called and stmmac_tx_clean may clean up to this descriptor.
4399 	 */
4400 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4401 
4402 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4403 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4404 			  __func__);
4405 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4406 	}
4407 
4408 	u64_stats_update_begin(&txq_stats->q_syncp);
4409 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4410 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4411 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4412 	if (set_ic)
4413 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4414 	u64_stats_update_end(&txq_stats->q_syncp);
4415 
4416 	if (priv->sarc_type)
4417 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4418 
4419 	skb_tx_timestamp(skb);
4420 
4421 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4422 		     priv->hwts_tx_en)) {
4423 		/* declare that device is doing timestamping */
4424 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4425 		stmmac_enable_tx_timestamp(priv, first);
4426 	}
4427 
4428 	/* Complete the first descriptor before granting the DMA */
4429 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4430 			proto_hdr_len,
4431 			pay_len,
4432 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4433 			hdr / 4, (skb->len - proto_hdr_len));
4434 
4435 	/* If context desc is used to change MSS */
4436 	if (mss_desc) {
4437 		/* Make sure that first descriptor has been completely
4438 		 * written, including its own bit. This is because MSS is
4439 		 * actually before first descriptor, so we need to make
4440 		 * sure that MSS's own bit is the last thing written.
4441 		 */
4442 		dma_wmb();
4443 		stmmac_set_tx_owner(priv, mss_desc);
4444 	}
4445 
4446 	if (netif_msg_pktdata(priv)) {
4447 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4448 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4449 			tx_q->cur_tx, first, nfrags);
4450 		pr_info(">>> frame to be transmitted: ");
4451 		print_pkt(skb->data, skb_headlen(skb));
4452 	}
4453 
4454 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4455 
4456 	stmmac_flush_tx_descriptors(priv, queue);
4457 	stmmac_tx_timer_arm(priv, queue);
4458 
4459 	return NETDEV_TX_OK;
4460 
4461 dma_map_err:
4462 	dev_err(priv->device, "Tx dma map failed\n");
4463 	dev_kfree_skb(skb);
4464 	priv->xstats.tx_dropped++;
4465 	return NETDEV_TX_OK;
4466 }
4467 
4468 /**
4469  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4470  * @skb: socket buffer to check
4471  *
4472  * Check if a packet has an ethertype that will trigger the IP header checks
4473  * and IP/TCP checksum engine of the stmmac core.
4474  *
4475  * Return: true if the ethertype can trigger the checksum engine, false
4476  * otherwise
4477  */
4478 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4479 {
4480 	int depth = 0;
4481 	__be16 proto;
4482 
4483 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4484 				    &depth);
4485 
4486 	return (depth <= ETH_HLEN) &&
4487 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4488 }
4489 
4490 /**
4491  *  stmmac_xmit - Tx entry point of the driver
4492  *  @skb : the socket buffer
4493  *  @dev : device pointer
4494  *  Description : this is the tx entry point of the driver.
4495  *  It programs the chain or the ring and supports oversized frames
4496  *  and SG feature.
4497  */
4498 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4499 {
4500 	unsigned int first_entry, tx_packets, enh_desc;
4501 	struct stmmac_priv *priv = netdev_priv(dev);
4502 	unsigned int nopaged_len = skb_headlen(skb);
4503 	int i, csum_insertion = 0, is_jumbo = 0;
4504 	u32 queue = skb_get_queue_mapping(skb);
4505 	int nfrags = skb_shinfo(skb)->nr_frags;
4506 	int gso = skb_shinfo(skb)->gso_type;
4507 	struct stmmac_txq_stats *txq_stats;
4508 	struct dma_edesc *tbs_desc = NULL;
4509 	struct dma_desc *desc, *first;
4510 	struct stmmac_tx_queue *tx_q;
4511 	bool has_vlan, set_ic;
4512 	int entry, first_tx;
4513 	dma_addr_t des;
4514 
4515 	tx_q = &priv->dma_conf.tx_queue[queue];
4516 	txq_stats = &priv->xstats.txq_stats[queue];
4517 	first_tx = tx_q->cur_tx;
4518 
4519 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4520 		stmmac_disable_eee_mode(priv);
4521 
4522 	/* Manage oversized TCP frames for GMAC4 device */
4523 	if (skb_is_gso(skb) && priv->tso) {
4524 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4525 			return stmmac_tso_xmit(skb, dev);
4526 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4527 			return stmmac_tso_xmit(skb, dev);
4528 	}
4529 
4530 	if (priv->est && priv->est->enable &&
4531 	    priv->est->max_sdu[queue] &&
4532 	    skb->len > priv->est->max_sdu[queue]){
4533 		priv->xstats.max_sdu_txq_drop[queue]++;
4534 		goto max_sdu_err;
4535 	}
4536 
4537 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4538 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4539 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4540 								queue));
4541 			/* This is a hard error, log it. */
4542 			netdev_err(priv->dev,
4543 				   "%s: Tx Ring full when queue awake\n",
4544 				   __func__);
4545 		}
4546 		return NETDEV_TX_BUSY;
4547 	}
4548 
4549 	/* Check if VLAN can be inserted by HW */
4550 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4551 
4552 	entry = tx_q->cur_tx;
4553 	first_entry = entry;
4554 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4555 
4556 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4557 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4558 	 * queues. In that case, checksum offloading for those queues that don't
4559 	 * support tx coe needs to fallback to software checksum calculation.
4560 	 *
4561 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4562 	 * also have to be checksummed in software.
4563 	 */
4564 	if (csum_insertion &&
4565 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4566 	     !stmmac_has_ip_ethertype(skb))) {
4567 		if (unlikely(skb_checksum_help(skb)))
4568 			goto dma_map_err;
4569 		csum_insertion = !csum_insertion;
4570 	}
4571 
4572 	if (likely(priv->extend_desc))
4573 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4574 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4575 		desc = &tx_q->dma_entx[entry].basic;
4576 	else
4577 		desc = tx_q->dma_tx + entry;
4578 
4579 	first = desc;
4580 
4581 	if (has_vlan)
4582 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4583 
4584 	enh_desc = priv->plat->enh_desc;
4585 	/* To program the descriptors according to the size of the frame */
4586 	if (enh_desc)
4587 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4588 
4589 	if (unlikely(is_jumbo)) {
4590 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4591 		if (unlikely(entry < 0) && (entry != -EINVAL))
4592 			goto dma_map_err;
4593 	}
4594 
4595 	for (i = 0; i < nfrags; i++) {
4596 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4597 		int len = skb_frag_size(frag);
4598 		bool last_segment = (i == (nfrags - 1));
4599 
4600 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4601 		WARN_ON(tx_q->tx_skbuff[entry]);
4602 
4603 		if (likely(priv->extend_desc))
4604 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4605 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4606 			desc = &tx_q->dma_entx[entry].basic;
4607 		else
4608 			desc = tx_q->dma_tx + entry;
4609 
4610 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4611 				       DMA_TO_DEVICE);
4612 		if (dma_mapping_error(priv->device, des))
4613 			goto dma_map_err; /* should reuse desc w/o issues */
4614 
4615 		tx_q->tx_skbuff_dma[entry].buf = des;
4616 
4617 		stmmac_set_desc_addr(priv, desc, des);
4618 
4619 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4620 		tx_q->tx_skbuff_dma[entry].len = len;
4621 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4622 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4623 
4624 		/* Prepare the descriptor and set the own bit too */
4625 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4626 				priv->mode, 1, last_segment, skb->len);
4627 	}
4628 
4629 	/* Only the last descriptor gets to point to the skb. */
4630 	tx_q->tx_skbuff[entry] = skb;
4631 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4632 
4633 	/* According to the coalesce parameter the IC bit for the latest
4634 	 * segment is reset and the timer re-started to clean the tx status.
4635 	 * This approach takes care about the fragments: desc is the first
4636 	 * element in case of no SG.
4637 	 */
4638 	tx_packets = (entry + 1) - first_tx;
4639 	tx_q->tx_count_frames += tx_packets;
4640 
4641 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4642 		set_ic = true;
4643 	else if (!priv->tx_coal_frames[queue])
4644 		set_ic = false;
4645 	else if (tx_packets > priv->tx_coal_frames[queue])
4646 		set_ic = true;
4647 	else if ((tx_q->tx_count_frames %
4648 		  priv->tx_coal_frames[queue]) < tx_packets)
4649 		set_ic = true;
4650 	else
4651 		set_ic = false;
4652 
4653 	if (set_ic) {
4654 		if (likely(priv->extend_desc))
4655 			desc = &tx_q->dma_etx[entry].basic;
4656 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4657 			desc = &tx_q->dma_entx[entry].basic;
4658 		else
4659 			desc = &tx_q->dma_tx[entry];
4660 
4661 		tx_q->tx_count_frames = 0;
4662 		stmmac_set_tx_ic(priv, desc);
4663 	}
4664 
4665 	/* We've used all descriptors we need for this skb, however,
4666 	 * advance cur_tx so that it references a fresh descriptor.
4667 	 * ndo_start_xmit will fill this descriptor the next time it's
4668 	 * called and stmmac_tx_clean may clean up to this descriptor.
4669 	 */
4670 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4671 	tx_q->cur_tx = entry;
4672 
4673 	if (netif_msg_pktdata(priv)) {
4674 		netdev_dbg(priv->dev,
4675 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4676 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4677 			   entry, first, nfrags);
4678 
4679 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4680 		print_pkt(skb->data, skb->len);
4681 	}
4682 
4683 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4684 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4685 			  __func__);
4686 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4687 	}
4688 
4689 	u64_stats_update_begin(&txq_stats->q_syncp);
4690 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4691 	if (set_ic)
4692 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4693 	u64_stats_update_end(&txq_stats->q_syncp);
4694 
4695 	if (priv->sarc_type)
4696 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4697 
4698 	skb_tx_timestamp(skb);
4699 
4700 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4701 	 * problems because all the descriptors are actually ready to be
4702 	 * passed to the DMA engine.
4703 	 */
4704 	if (likely(!is_jumbo)) {
4705 		bool last_segment = (nfrags == 0);
4706 
4707 		des = dma_map_single(priv->device, skb->data,
4708 				     nopaged_len, DMA_TO_DEVICE);
4709 		if (dma_mapping_error(priv->device, des))
4710 			goto dma_map_err;
4711 
4712 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4713 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4714 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4715 
4716 		stmmac_set_desc_addr(priv, first, des);
4717 
4718 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4719 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4720 
4721 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4722 			     priv->hwts_tx_en)) {
4723 			/* declare that device is doing timestamping */
4724 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4725 			stmmac_enable_tx_timestamp(priv, first);
4726 		}
4727 
4728 		/* Prepare the first descriptor setting the OWN bit too */
4729 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4730 				csum_insertion, priv->mode, 0, last_segment,
4731 				skb->len);
4732 	}
4733 
4734 	if (tx_q->tbs & STMMAC_TBS_EN) {
4735 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4736 
4737 		tbs_desc = &tx_q->dma_entx[first_entry];
4738 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4739 	}
4740 
4741 	stmmac_set_tx_owner(priv, first);
4742 
4743 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4744 
4745 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4746 
4747 	stmmac_flush_tx_descriptors(priv, queue);
4748 	stmmac_tx_timer_arm(priv, queue);
4749 
4750 	return NETDEV_TX_OK;
4751 
4752 dma_map_err:
4753 	netdev_err(priv->dev, "Tx DMA map failed\n");
4754 max_sdu_err:
4755 	dev_kfree_skb(skb);
4756 	priv->xstats.tx_dropped++;
4757 	return NETDEV_TX_OK;
4758 }
4759 
4760 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4761 {
4762 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4763 	__be16 vlan_proto = veth->h_vlan_proto;
4764 	u16 vlanid;
4765 
4766 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4767 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4768 	    (vlan_proto == htons(ETH_P_8021AD) &&
4769 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4770 		/* pop the vlan tag */
4771 		vlanid = ntohs(veth->h_vlan_TCI);
4772 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4773 		skb_pull(skb, VLAN_HLEN);
4774 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4775 	}
4776 }
4777 
4778 /**
4779  * stmmac_rx_refill - refill used skb preallocated buffers
4780  * @priv: driver private structure
4781  * @queue: RX queue index
4782  * Description : this is to reallocate the skb for the reception process
4783  * that is based on zero-copy.
4784  */
4785 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4786 {
4787 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4788 	int dirty = stmmac_rx_dirty(priv, queue);
4789 	unsigned int entry = rx_q->dirty_rx;
4790 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4791 
4792 	if (priv->dma_cap.host_dma_width <= 32)
4793 		gfp |= GFP_DMA32;
4794 
4795 	while (dirty-- > 0) {
4796 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4797 		struct dma_desc *p;
4798 		bool use_rx_wd;
4799 
4800 		if (priv->extend_desc)
4801 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4802 		else
4803 			p = rx_q->dma_rx + entry;
4804 
4805 		if (!buf->page) {
4806 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4807 			if (!buf->page)
4808 				break;
4809 		}
4810 
4811 		if (priv->sph && !buf->sec_page) {
4812 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4813 			if (!buf->sec_page)
4814 				break;
4815 
4816 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4817 		}
4818 
4819 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4820 
4821 		stmmac_set_desc_addr(priv, p, buf->addr);
4822 		if (priv->sph)
4823 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4824 		else
4825 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4826 		stmmac_refill_desc3(priv, rx_q, p);
4827 
4828 		rx_q->rx_count_frames++;
4829 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4830 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4831 			rx_q->rx_count_frames = 0;
4832 
4833 		use_rx_wd = !priv->rx_coal_frames[queue];
4834 		use_rx_wd |= rx_q->rx_count_frames > 0;
4835 		if (!priv->use_riwt)
4836 			use_rx_wd = false;
4837 
4838 		dma_wmb();
4839 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4840 
4841 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4842 	}
4843 	rx_q->dirty_rx = entry;
4844 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4845 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4846 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4847 }
4848 
4849 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4850 				       struct dma_desc *p,
4851 				       int status, unsigned int len)
4852 {
4853 	unsigned int plen = 0, hlen = 0;
4854 	int coe = priv->hw->rx_csum;
4855 
4856 	/* Not first descriptor, buffer is always zero */
4857 	if (priv->sph && len)
4858 		return 0;
4859 
4860 	/* First descriptor, get split header length */
4861 	stmmac_get_rx_header_len(priv, p, &hlen);
4862 	if (priv->sph && hlen) {
4863 		priv->xstats.rx_split_hdr_pkt_n++;
4864 		return hlen;
4865 	}
4866 
4867 	/* First descriptor, not last descriptor and not split header */
4868 	if (status & rx_not_ls)
4869 		return priv->dma_conf.dma_buf_sz;
4870 
4871 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4872 
4873 	/* First descriptor and last descriptor and not split header */
4874 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4875 }
4876 
4877 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4878 				       struct dma_desc *p,
4879 				       int status, unsigned int len)
4880 {
4881 	int coe = priv->hw->rx_csum;
4882 	unsigned int plen = 0;
4883 
4884 	/* Not split header, buffer is not available */
4885 	if (!priv->sph)
4886 		return 0;
4887 
4888 	/* Not last descriptor */
4889 	if (status & rx_not_ls)
4890 		return priv->dma_conf.dma_buf_sz;
4891 
4892 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4893 
4894 	/* Last descriptor */
4895 	return plen - len;
4896 }
4897 
4898 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4899 				struct xdp_frame *xdpf, bool dma_map)
4900 {
4901 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4902 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4903 	unsigned int entry = tx_q->cur_tx;
4904 	struct dma_desc *tx_desc;
4905 	dma_addr_t dma_addr;
4906 	bool set_ic;
4907 
4908 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4909 		return STMMAC_XDP_CONSUMED;
4910 
4911 	if (priv->est && priv->est->enable &&
4912 	    priv->est->max_sdu[queue] &&
4913 	    xdpf->len > priv->est->max_sdu[queue]) {
4914 		priv->xstats.max_sdu_txq_drop[queue]++;
4915 		return STMMAC_XDP_CONSUMED;
4916 	}
4917 
4918 	if (likely(priv->extend_desc))
4919 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4920 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4921 		tx_desc = &tx_q->dma_entx[entry].basic;
4922 	else
4923 		tx_desc = tx_q->dma_tx + entry;
4924 
4925 	if (dma_map) {
4926 		dma_addr = dma_map_single(priv->device, xdpf->data,
4927 					  xdpf->len, DMA_TO_DEVICE);
4928 		if (dma_mapping_error(priv->device, dma_addr))
4929 			return STMMAC_XDP_CONSUMED;
4930 
4931 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4932 	} else {
4933 		struct page *page = virt_to_page(xdpf->data);
4934 
4935 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4936 			   xdpf->headroom;
4937 		dma_sync_single_for_device(priv->device, dma_addr,
4938 					   xdpf->len, DMA_BIDIRECTIONAL);
4939 
4940 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4941 	}
4942 
4943 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4944 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4945 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4946 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4947 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4948 
4949 	tx_q->xdpf[entry] = xdpf;
4950 
4951 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4952 
4953 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4954 			       true, priv->mode, true, true,
4955 			       xdpf->len);
4956 
4957 	tx_q->tx_count_frames++;
4958 
4959 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4960 		set_ic = true;
4961 	else
4962 		set_ic = false;
4963 
4964 	if (set_ic) {
4965 		tx_q->tx_count_frames = 0;
4966 		stmmac_set_tx_ic(priv, tx_desc);
4967 		u64_stats_update_begin(&txq_stats->q_syncp);
4968 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4969 		u64_stats_update_end(&txq_stats->q_syncp);
4970 	}
4971 
4972 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4973 
4974 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4975 	tx_q->cur_tx = entry;
4976 
4977 	return STMMAC_XDP_TX;
4978 }
4979 
4980 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4981 				   int cpu)
4982 {
4983 	int index = cpu;
4984 
4985 	if (unlikely(index < 0))
4986 		index = 0;
4987 
4988 	while (index >= priv->plat->tx_queues_to_use)
4989 		index -= priv->plat->tx_queues_to_use;
4990 
4991 	return index;
4992 }
4993 
4994 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4995 				struct xdp_buff *xdp)
4996 {
4997 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4998 	int cpu = smp_processor_id();
4999 	struct netdev_queue *nq;
5000 	int queue;
5001 	int res;
5002 
5003 	if (unlikely(!xdpf))
5004 		return STMMAC_XDP_CONSUMED;
5005 
5006 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5007 	nq = netdev_get_tx_queue(priv->dev, queue);
5008 
5009 	__netif_tx_lock(nq, cpu);
5010 	/* Avoids TX time-out as we are sharing with slow path */
5011 	txq_trans_cond_update(nq);
5012 
5013 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5014 	if (res == STMMAC_XDP_TX)
5015 		stmmac_flush_tx_descriptors(priv, queue);
5016 
5017 	__netif_tx_unlock(nq);
5018 
5019 	return res;
5020 }
5021 
5022 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5023 				 struct bpf_prog *prog,
5024 				 struct xdp_buff *xdp)
5025 {
5026 	u32 act;
5027 	int res;
5028 
5029 	act = bpf_prog_run_xdp(prog, xdp);
5030 	switch (act) {
5031 	case XDP_PASS:
5032 		res = STMMAC_XDP_PASS;
5033 		break;
5034 	case XDP_TX:
5035 		res = stmmac_xdp_xmit_back(priv, xdp);
5036 		break;
5037 	case XDP_REDIRECT:
5038 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5039 			res = STMMAC_XDP_CONSUMED;
5040 		else
5041 			res = STMMAC_XDP_REDIRECT;
5042 		break;
5043 	default:
5044 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5045 		fallthrough;
5046 	case XDP_ABORTED:
5047 		trace_xdp_exception(priv->dev, prog, act);
5048 		fallthrough;
5049 	case XDP_DROP:
5050 		res = STMMAC_XDP_CONSUMED;
5051 		break;
5052 	}
5053 
5054 	return res;
5055 }
5056 
5057 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5058 					   struct xdp_buff *xdp)
5059 {
5060 	struct bpf_prog *prog;
5061 	int res;
5062 
5063 	prog = READ_ONCE(priv->xdp_prog);
5064 	if (!prog) {
5065 		res = STMMAC_XDP_PASS;
5066 		goto out;
5067 	}
5068 
5069 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5070 out:
5071 	return ERR_PTR(-res);
5072 }
5073 
5074 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5075 				   int xdp_status)
5076 {
5077 	int cpu = smp_processor_id();
5078 	int queue;
5079 
5080 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5081 
5082 	if (xdp_status & STMMAC_XDP_TX)
5083 		stmmac_tx_timer_arm(priv, queue);
5084 
5085 	if (xdp_status & STMMAC_XDP_REDIRECT)
5086 		xdp_do_flush();
5087 }
5088 
5089 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5090 					       struct xdp_buff *xdp)
5091 {
5092 	unsigned int metasize = xdp->data - xdp->data_meta;
5093 	unsigned int datasize = xdp->data_end - xdp->data;
5094 	struct sk_buff *skb;
5095 
5096 	skb = napi_alloc_skb(&ch->rxtx_napi,
5097 			     xdp->data_end - xdp->data_hard_start);
5098 	if (unlikely(!skb))
5099 		return NULL;
5100 
5101 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5102 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5103 	if (metasize)
5104 		skb_metadata_set(skb, metasize);
5105 
5106 	return skb;
5107 }
5108 
5109 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5110 				   struct dma_desc *p, struct dma_desc *np,
5111 				   struct xdp_buff *xdp)
5112 {
5113 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5114 	struct stmmac_channel *ch = &priv->channel[queue];
5115 	unsigned int len = xdp->data_end - xdp->data;
5116 	enum pkt_hash_types hash_type;
5117 	int coe = priv->hw->rx_csum;
5118 	struct sk_buff *skb;
5119 	u32 hash;
5120 
5121 	skb = stmmac_construct_skb_zc(ch, xdp);
5122 	if (!skb) {
5123 		priv->xstats.rx_dropped++;
5124 		return;
5125 	}
5126 
5127 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5128 	if (priv->hw->hw_vlan_en)
5129 		/* MAC level stripping. */
5130 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5131 	else
5132 		/* Driver level stripping. */
5133 		stmmac_rx_vlan(priv->dev, skb);
5134 	skb->protocol = eth_type_trans(skb, priv->dev);
5135 
5136 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5137 		skb_checksum_none_assert(skb);
5138 	else
5139 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5140 
5141 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5142 		skb_set_hash(skb, hash, hash_type);
5143 
5144 	skb_record_rx_queue(skb, queue);
5145 	napi_gro_receive(&ch->rxtx_napi, skb);
5146 
5147 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5148 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5149 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5150 	u64_stats_update_end(&rxq_stats->napi_syncp);
5151 }
5152 
5153 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5154 {
5155 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5156 	unsigned int entry = rx_q->dirty_rx;
5157 	struct dma_desc *rx_desc = NULL;
5158 	bool ret = true;
5159 
5160 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5161 
5162 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5163 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5164 		dma_addr_t dma_addr;
5165 		bool use_rx_wd;
5166 
5167 		if (!buf->xdp) {
5168 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5169 			if (!buf->xdp) {
5170 				ret = false;
5171 				break;
5172 			}
5173 		}
5174 
5175 		if (priv->extend_desc)
5176 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5177 		else
5178 			rx_desc = rx_q->dma_rx + entry;
5179 
5180 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5181 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5182 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5183 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5184 
5185 		rx_q->rx_count_frames++;
5186 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5187 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5188 			rx_q->rx_count_frames = 0;
5189 
5190 		use_rx_wd = !priv->rx_coal_frames[queue];
5191 		use_rx_wd |= rx_q->rx_count_frames > 0;
5192 		if (!priv->use_riwt)
5193 			use_rx_wd = false;
5194 
5195 		dma_wmb();
5196 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5197 
5198 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5199 	}
5200 
5201 	if (rx_desc) {
5202 		rx_q->dirty_rx = entry;
5203 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5204 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5205 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5206 	}
5207 
5208 	return ret;
5209 }
5210 
5211 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5212 {
5213 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5214 	 * to represent incoming packet, whereas cb field in the same structure
5215 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5216 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5217 	 */
5218 	return (struct stmmac_xdp_buff *)xdp;
5219 }
5220 
5221 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5222 {
5223 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5224 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5225 	unsigned int count = 0, error = 0, len = 0;
5226 	int dirty = stmmac_rx_dirty(priv, queue);
5227 	unsigned int next_entry = rx_q->cur_rx;
5228 	u32 rx_errors = 0, rx_dropped = 0;
5229 	unsigned int desc_size;
5230 	struct bpf_prog *prog;
5231 	bool failure = false;
5232 	int xdp_status = 0;
5233 	int status = 0;
5234 
5235 	if (netif_msg_rx_status(priv)) {
5236 		void *rx_head;
5237 
5238 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5239 		if (priv->extend_desc) {
5240 			rx_head = (void *)rx_q->dma_erx;
5241 			desc_size = sizeof(struct dma_extended_desc);
5242 		} else {
5243 			rx_head = (void *)rx_q->dma_rx;
5244 			desc_size = sizeof(struct dma_desc);
5245 		}
5246 
5247 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5248 				    rx_q->dma_rx_phy, desc_size);
5249 	}
5250 	while (count < limit) {
5251 		struct stmmac_rx_buffer *buf;
5252 		struct stmmac_xdp_buff *ctx;
5253 		unsigned int buf1_len = 0;
5254 		struct dma_desc *np, *p;
5255 		int entry;
5256 		int res;
5257 
5258 		if (!count && rx_q->state_saved) {
5259 			error = rx_q->state.error;
5260 			len = rx_q->state.len;
5261 		} else {
5262 			rx_q->state_saved = false;
5263 			error = 0;
5264 			len = 0;
5265 		}
5266 
5267 		if (count >= limit)
5268 			break;
5269 
5270 read_again:
5271 		buf1_len = 0;
5272 		entry = next_entry;
5273 		buf = &rx_q->buf_pool[entry];
5274 
5275 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5276 			failure = failure ||
5277 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5278 			dirty = 0;
5279 		}
5280 
5281 		if (priv->extend_desc)
5282 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5283 		else
5284 			p = rx_q->dma_rx + entry;
5285 
5286 		/* read the status of the incoming frame */
5287 		status = stmmac_rx_status(priv, &priv->xstats, p);
5288 		/* check if managed by the DMA otherwise go ahead */
5289 		if (unlikely(status & dma_own))
5290 			break;
5291 
5292 		/* Prefetch the next RX descriptor */
5293 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5294 						priv->dma_conf.dma_rx_size);
5295 		next_entry = rx_q->cur_rx;
5296 
5297 		if (priv->extend_desc)
5298 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5299 		else
5300 			np = rx_q->dma_rx + next_entry;
5301 
5302 		prefetch(np);
5303 
5304 		/* Ensure a valid XSK buffer before proceed */
5305 		if (!buf->xdp)
5306 			break;
5307 
5308 		if (priv->extend_desc)
5309 			stmmac_rx_extended_status(priv, &priv->xstats,
5310 						  rx_q->dma_erx + entry);
5311 		if (unlikely(status == discard_frame)) {
5312 			xsk_buff_free(buf->xdp);
5313 			buf->xdp = NULL;
5314 			dirty++;
5315 			error = 1;
5316 			if (!priv->hwts_rx_en)
5317 				rx_errors++;
5318 		}
5319 
5320 		if (unlikely(error && (status & rx_not_ls)))
5321 			goto read_again;
5322 		if (unlikely(error)) {
5323 			count++;
5324 			continue;
5325 		}
5326 
5327 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5328 		if (likely(status & rx_not_ls)) {
5329 			xsk_buff_free(buf->xdp);
5330 			buf->xdp = NULL;
5331 			dirty++;
5332 			count++;
5333 			goto read_again;
5334 		}
5335 
5336 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5337 		ctx->priv = priv;
5338 		ctx->desc = p;
5339 		ctx->ndesc = np;
5340 
5341 		/* XDP ZC Frame only support primary buffers for now */
5342 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5343 		len += buf1_len;
5344 
5345 		/* ACS is disabled; strip manually. */
5346 		if (likely(!(status & rx_not_ls))) {
5347 			buf1_len -= ETH_FCS_LEN;
5348 			len -= ETH_FCS_LEN;
5349 		}
5350 
5351 		/* RX buffer is good and fit into a XSK pool buffer */
5352 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5353 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5354 
5355 		prog = READ_ONCE(priv->xdp_prog);
5356 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5357 
5358 		switch (res) {
5359 		case STMMAC_XDP_PASS:
5360 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5361 			xsk_buff_free(buf->xdp);
5362 			break;
5363 		case STMMAC_XDP_CONSUMED:
5364 			xsk_buff_free(buf->xdp);
5365 			rx_dropped++;
5366 			break;
5367 		case STMMAC_XDP_TX:
5368 		case STMMAC_XDP_REDIRECT:
5369 			xdp_status |= res;
5370 			break;
5371 		}
5372 
5373 		buf->xdp = NULL;
5374 		dirty++;
5375 		count++;
5376 	}
5377 
5378 	if (status & rx_not_ls) {
5379 		rx_q->state_saved = true;
5380 		rx_q->state.error = error;
5381 		rx_q->state.len = len;
5382 	}
5383 
5384 	stmmac_finalize_xdp_rx(priv, xdp_status);
5385 
5386 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5387 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5388 	u64_stats_update_end(&rxq_stats->napi_syncp);
5389 
5390 	priv->xstats.rx_dropped += rx_dropped;
5391 	priv->xstats.rx_errors += rx_errors;
5392 
5393 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5394 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5395 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5396 		else
5397 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5398 
5399 		return (int)count;
5400 	}
5401 
5402 	return failure ? limit : (int)count;
5403 }
5404 
5405 /**
5406  * stmmac_rx - manage the receive process
5407  * @priv: driver private structure
5408  * @limit: napi bugget
5409  * @queue: RX queue index.
5410  * Description :  this the function called by the napi poll method.
5411  * It gets all the frames inside the ring.
5412  */
5413 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5414 {
5415 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5416 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5417 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5418 	struct stmmac_channel *ch = &priv->channel[queue];
5419 	unsigned int count = 0, error = 0, len = 0;
5420 	int status = 0, coe = priv->hw->rx_csum;
5421 	unsigned int next_entry = rx_q->cur_rx;
5422 	enum dma_data_direction dma_dir;
5423 	unsigned int desc_size;
5424 	struct sk_buff *skb = NULL;
5425 	struct stmmac_xdp_buff ctx;
5426 	int xdp_status = 0;
5427 	int buf_sz;
5428 
5429 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5430 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5431 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5432 
5433 	if (netif_msg_rx_status(priv)) {
5434 		void *rx_head;
5435 
5436 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5437 		if (priv->extend_desc) {
5438 			rx_head = (void *)rx_q->dma_erx;
5439 			desc_size = sizeof(struct dma_extended_desc);
5440 		} else {
5441 			rx_head = (void *)rx_q->dma_rx;
5442 			desc_size = sizeof(struct dma_desc);
5443 		}
5444 
5445 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5446 				    rx_q->dma_rx_phy, desc_size);
5447 	}
5448 	while (count < limit) {
5449 		unsigned int buf1_len = 0, buf2_len = 0;
5450 		enum pkt_hash_types hash_type;
5451 		struct stmmac_rx_buffer *buf;
5452 		struct dma_desc *np, *p;
5453 		int entry;
5454 		u32 hash;
5455 
5456 		if (!count && rx_q->state_saved) {
5457 			skb = rx_q->state.skb;
5458 			error = rx_q->state.error;
5459 			len = rx_q->state.len;
5460 		} else {
5461 			rx_q->state_saved = false;
5462 			skb = NULL;
5463 			error = 0;
5464 			len = 0;
5465 		}
5466 
5467 read_again:
5468 		if (count >= limit)
5469 			break;
5470 
5471 		buf1_len = 0;
5472 		buf2_len = 0;
5473 		entry = next_entry;
5474 		buf = &rx_q->buf_pool[entry];
5475 
5476 		if (priv->extend_desc)
5477 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5478 		else
5479 			p = rx_q->dma_rx + entry;
5480 
5481 		/* read the status of the incoming frame */
5482 		status = stmmac_rx_status(priv, &priv->xstats, p);
5483 		/* check if managed by the DMA otherwise go ahead */
5484 		if (unlikely(status & dma_own))
5485 			break;
5486 
5487 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5488 						priv->dma_conf.dma_rx_size);
5489 		next_entry = rx_q->cur_rx;
5490 
5491 		if (priv->extend_desc)
5492 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5493 		else
5494 			np = rx_q->dma_rx + next_entry;
5495 
5496 		prefetch(np);
5497 
5498 		if (priv->extend_desc)
5499 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5500 		if (unlikely(status == discard_frame)) {
5501 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5502 			buf->page = NULL;
5503 			error = 1;
5504 			if (!priv->hwts_rx_en)
5505 				rx_errors++;
5506 		}
5507 
5508 		if (unlikely(error && (status & rx_not_ls)))
5509 			goto read_again;
5510 		if (unlikely(error)) {
5511 			dev_kfree_skb(skb);
5512 			skb = NULL;
5513 			count++;
5514 			continue;
5515 		}
5516 
5517 		/* Buffer is good. Go on. */
5518 
5519 		prefetch(page_address(buf->page) + buf->page_offset);
5520 		if (buf->sec_page)
5521 			prefetch(page_address(buf->sec_page));
5522 
5523 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5524 		len += buf1_len;
5525 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5526 		len += buf2_len;
5527 
5528 		/* ACS is disabled; strip manually. */
5529 		if (likely(!(status & rx_not_ls))) {
5530 			if (buf2_len) {
5531 				buf2_len -= ETH_FCS_LEN;
5532 				len -= ETH_FCS_LEN;
5533 			} else if (buf1_len) {
5534 				buf1_len -= ETH_FCS_LEN;
5535 				len -= ETH_FCS_LEN;
5536 			}
5537 		}
5538 
5539 		if (!skb) {
5540 			unsigned int pre_len, sync_len;
5541 
5542 			dma_sync_single_for_cpu(priv->device, buf->addr,
5543 						buf1_len, dma_dir);
5544 
5545 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5546 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5547 					 buf->page_offset, buf1_len, true);
5548 
5549 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5550 				  buf->page_offset;
5551 
5552 			ctx.priv = priv;
5553 			ctx.desc = p;
5554 			ctx.ndesc = np;
5555 
5556 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5557 			/* Due xdp_adjust_tail: DMA sync for_device
5558 			 * cover max len CPU touch
5559 			 */
5560 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5561 				   buf->page_offset;
5562 			sync_len = max(sync_len, pre_len);
5563 
5564 			/* For Not XDP_PASS verdict */
5565 			if (IS_ERR(skb)) {
5566 				unsigned int xdp_res = -PTR_ERR(skb);
5567 
5568 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5569 					page_pool_put_page(rx_q->page_pool,
5570 							   virt_to_head_page(ctx.xdp.data),
5571 							   sync_len, true);
5572 					buf->page = NULL;
5573 					rx_dropped++;
5574 
5575 					/* Clear skb as it was set as
5576 					 * status by XDP program.
5577 					 */
5578 					skb = NULL;
5579 
5580 					if (unlikely((status & rx_not_ls)))
5581 						goto read_again;
5582 
5583 					count++;
5584 					continue;
5585 				} else if (xdp_res & (STMMAC_XDP_TX |
5586 						      STMMAC_XDP_REDIRECT)) {
5587 					xdp_status |= xdp_res;
5588 					buf->page = NULL;
5589 					skb = NULL;
5590 					count++;
5591 					continue;
5592 				}
5593 			}
5594 		}
5595 
5596 		if (!skb) {
5597 			/* XDP program may expand or reduce tail */
5598 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5599 
5600 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5601 			if (!skb) {
5602 				rx_dropped++;
5603 				count++;
5604 				goto drain_data;
5605 			}
5606 
5607 			/* XDP program may adjust header */
5608 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5609 			skb_put(skb, buf1_len);
5610 
5611 			/* Data payload copied into SKB, page ready for recycle */
5612 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5613 			buf->page = NULL;
5614 		} else if (buf1_len) {
5615 			dma_sync_single_for_cpu(priv->device, buf->addr,
5616 						buf1_len, dma_dir);
5617 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5618 					buf->page, buf->page_offset, buf1_len,
5619 					priv->dma_conf.dma_buf_sz);
5620 
5621 			/* Data payload appended into SKB */
5622 			skb_mark_for_recycle(skb);
5623 			buf->page = NULL;
5624 		}
5625 
5626 		if (buf2_len) {
5627 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5628 						buf2_len, dma_dir);
5629 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5630 					buf->sec_page, 0, buf2_len,
5631 					priv->dma_conf.dma_buf_sz);
5632 
5633 			/* Data payload appended into SKB */
5634 			skb_mark_for_recycle(skb);
5635 			buf->sec_page = NULL;
5636 		}
5637 
5638 drain_data:
5639 		if (likely(status & rx_not_ls))
5640 			goto read_again;
5641 		if (!skb)
5642 			continue;
5643 
5644 		/* Got entire packet into SKB. Finish it. */
5645 
5646 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5647 
5648 		if (priv->hw->hw_vlan_en)
5649 			/* MAC level stripping. */
5650 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5651 		else
5652 			/* Driver level stripping. */
5653 			stmmac_rx_vlan(priv->dev, skb);
5654 
5655 		skb->protocol = eth_type_trans(skb, priv->dev);
5656 
5657 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5658 			skb_checksum_none_assert(skb);
5659 		else
5660 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5661 
5662 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5663 			skb_set_hash(skb, hash, hash_type);
5664 
5665 		skb_record_rx_queue(skb, queue);
5666 		napi_gro_receive(&ch->rx_napi, skb);
5667 		skb = NULL;
5668 
5669 		rx_packets++;
5670 		rx_bytes += len;
5671 		count++;
5672 	}
5673 
5674 	if (status & rx_not_ls || skb) {
5675 		rx_q->state_saved = true;
5676 		rx_q->state.skb = skb;
5677 		rx_q->state.error = error;
5678 		rx_q->state.len = len;
5679 	}
5680 
5681 	stmmac_finalize_xdp_rx(priv, xdp_status);
5682 
5683 	stmmac_rx_refill(priv, queue);
5684 
5685 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5686 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5687 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5688 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5689 	u64_stats_update_end(&rxq_stats->napi_syncp);
5690 
5691 	priv->xstats.rx_dropped += rx_dropped;
5692 	priv->xstats.rx_errors += rx_errors;
5693 
5694 	return count;
5695 }
5696 
5697 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5698 {
5699 	struct stmmac_channel *ch =
5700 		container_of(napi, struct stmmac_channel, rx_napi);
5701 	struct stmmac_priv *priv = ch->priv_data;
5702 	struct stmmac_rxq_stats *rxq_stats;
5703 	u32 chan = ch->index;
5704 	int work_done;
5705 
5706 	rxq_stats = &priv->xstats.rxq_stats[chan];
5707 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5708 	u64_stats_inc(&rxq_stats->napi.poll);
5709 	u64_stats_update_end(&rxq_stats->napi_syncp);
5710 
5711 	work_done = stmmac_rx(priv, budget, chan);
5712 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5713 		unsigned long flags;
5714 
5715 		spin_lock_irqsave(&ch->lock, flags);
5716 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5717 		spin_unlock_irqrestore(&ch->lock, flags);
5718 	}
5719 
5720 	return work_done;
5721 }
5722 
5723 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5724 {
5725 	struct stmmac_channel *ch =
5726 		container_of(napi, struct stmmac_channel, tx_napi);
5727 	struct stmmac_priv *priv = ch->priv_data;
5728 	struct stmmac_txq_stats *txq_stats;
5729 	bool pending_packets = false;
5730 	u32 chan = ch->index;
5731 	int work_done;
5732 
5733 	txq_stats = &priv->xstats.txq_stats[chan];
5734 	u64_stats_update_begin(&txq_stats->napi_syncp);
5735 	u64_stats_inc(&txq_stats->napi.poll);
5736 	u64_stats_update_end(&txq_stats->napi_syncp);
5737 
5738 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5739 	work_done = min(work_done, budget);
5740 
5741 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5742 		unsigned long flags;
5743 
5744 		spin_lock_irqsave(&ch->lock, flags);
5745 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5746 		spin_unlock_irqrestore(&ch->lock, flags);
5747 	}
5748 
5749 	/* TX still have packet to handle, check if we need to arm tx timer */
5750 	if (pending_packets)
5751 		stmmac_tx_timer_arm(priv, chan);
5752 
5753 	return work_done;
5754 }
5755 
5756 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5757 {
5758 	struct stmmac_channel *ch =
5759 		container_of(napi, struct stmmac_channel, rxtx_napi);
5760 	struct stmmac_priv *priv = ch->priv_data;
5761 	bool tx_pending_packets = false;
5762 	int rx_done, tx_done, rxtx_done;
5763 	struct stmmac_rxq_stats *rxq_stats;
5764 	struct stmmac_txq_stats *txq_stats;
5765 	u32 chan = ch->index;
5766 
5767 	rxq_stats = &priv->xstats.rxq_stats[chan];
5768 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5769 	u64_stats_inc(&rxq_stats->napi.poll);
5770 	u64_stats_update_end(&rxq_stats->napi_syncp);
5771 
5772 	txq_stats = &priv->xstats.txq_stats[chan];
5773 	u64_stats_update_begin(&txq_stats->napi_syncp);
5774 	u64_stats_inc(&txq_stats->napi.poll);
5775 	u64_stats_update_end(&txq_stats->napi_syncp);
5776 
5777 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5778 	tx_done = min(tx_done, budget);
5779 
5780 	rx_done = stmmac_rx_zc(priv, budget, chan);
5781 
5782 	rxtx_done = max(tx_done, rx_done);
5783 
5784 	/* If either TX or RX work is not complete, return budget
5785 	 * and keep pooling
5786 	 */
5787 	if (rxtx_done >= budget)
5788 		return budget;
5789 
5790 	/* all work done, exit the polling mode */
5791 	if (napi_complete_done(napi, rxtx_done)) {
5792 		unsigned long flags;
5793 
5794 		spin_lock_irqsave(&ch->lock, flags);
5795 		/* Both RX and TX work done are compelte,
5796 		 * so enable both RX & TX IRQs.
5797 		 */
5798 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5799 		spin_unlock_irqrestore(&ch->lock, flags);
5800 	}
5801 
5802 	/* TX still have packet to handle, check if we need to arm tx timer */
5803 	if (tx_pending_packets)
5804 		stmmac_tx_timer_arm(priv, chan);
5805 
5806 	return min(rxtx_done, budget - 1);
5807 }
5808 
5809 /**
5810  *  stmmac_tx_timeout
5811  *  @dev : Pointer to net device structure
5812  *  @txqueue: the index of the hanging transmit queue
5813  *  Description: this function is called when a packet transmission fails to
5814  *   complete within a reasonable time. The driver will mark the error in the
5815  *   netdev structure and arrange for the device to be reset to a sane state
5816  *   in order to transmit a new packet.
5817  */
5818 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5819 {
5820 	struct stmmac_priv *priv = netdev_priv(dev);
5821 
5822 	stmmac_global_err(priv);
5823 }
5824 
5825 /**
5826  *  stmmac_set_rx_mode - entry point for multicast addressing
5827  *  @dev : pointer to the device structure
5828  *  Description:
5829  *  This function is a driver entry point which gets called by the kernel
5830  *  whenever multicast addresses must be enabled/disabled.
5831  *  Return value:
5832  *  void.
5833  */
5834 static void stmmac_set_rx_mode(struct net_device *dev)
5835 {
5836 	struct stmmac_priv *priv = netdev_priv(dev);
5837 
5838 	stmmac_set_filter(priv, priv->hw, dev);
5839 }
5840 
5841 /**
5842  *  stmmac_change_mtu - entry point to change MTU size for the device.
5843  *  @dev : device pointer.
5844  *  @new_mtu : the new MTU size for the device.
5845  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5846  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5847  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5848  *  Return value:
5849  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5850  *  file on failure.
5851  */
5852 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5853 {
5854 	struct stmmac_priv *priv = netdev_priv(dev);
5855 	int txfifosz = priv->plat->tx_fifo_size;
5856 	struct stmmac_dma_conf *dma_conf;
5857 	const int mtu = new_mtu;
5858 	int ret;
5859 
5860 	if (txfifosz == 0)
5861 		txfifosz = priv->dma_cap.tx_fifo_size;
5862 
5863 	txfifosz /= priv->plat->tx_queues_to_use;
5864 
5865 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5866 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5867 		return -EINVAL;
5868 	}
5869 
5870 	new_mtu = STMMAC_ALIGN(new_mtu);
5871 
5872 	/* If condition true, FIFO is too small or MTU too large */
5873 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5874 		return -EINVAL;
5875 
5876 	if (netif_running(dev)) {
5877 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5878 		/* Try to allocate the new DMA conf with the new mtu */
5879 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5880 		if (IS_ERR(dma_conf)) {
5881 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5882 				   mtu);
5883 			return PTR_ERR(dma_conf);
5884 		}
5885 
5886 		stmmac_release(dev);
5887 
5888 		ret = __stmmac_open(dev, dma_conf);
5889 		if (ret) {
5890 			free_dma_desc_resources(priv, dma_conf);
5891 			kfree(dma_conf);
5892 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5893 			return ret;
5894 		}
5895 
5896 		kfree(dma_conf);
5897 
5898 		stmmac_set_rx_mode(dev);
5899 	}
5900 
5901 	WRITE_ONCE(dev->mtu, mtu);
5902 	netdev_update_features(dev);
5903 
5904 	return 0;
5905 }
5906 
5907 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5908 					     netdev_features_t features)
5909 {
5910 	struct stmmac_priv *priv = netdev_priv(dev);
5911 
5912 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5913 		features &= ~NETIF_F_RXCSUM;
5914 
5915 	if (!priv->plat->tx_coe)
5916 		features &= ~NETIF_F_CSUM_MASK;
5917 
5918 	/* Some GMAC devices have a bugged Jumbo frame support that
5919 	 * needs to have the Tx COE disabled for oversized frames
5920 	 * (due to limited buffer sizes). In this case we disable
5921 	 * the TX csum insertion in the TDES and not use SF.
5922 	 */
5923 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5924 		features &= ~NETIF_F_CSUM_MASK;
5925 
5926 	/* Disable tso if asked by ethtool */
5927 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5928 		if (features & NETIF_F_TSO)
5929 			priv->tso = true;
5930 		else
5931 			priv->tso = false;
5932 	}
5933 
5934 	return features;
5935 }
5936 
5937 static int stmmac_set_features(struct net_device *netdev,
5938 			       netdev_features_t features)
5939 {
5940 	struct stmmac_priv *priv = netdev_priv(netdev);
5941 
5942 	/* Keep the COE Type in case of csum is supporting */
5943 	if (features & NETIF_F_RXCSUM)
5944 		priv->hw->rx_csum = priv->plat->rx_coe;
5945 	else
5946 		priv->hw->rx_csum = 0;
5947 	/* No check needed because rx_coe has been set before and it will be
5948 	 * fixed in case of issue.
5949 	 */
5950 	stmmac_rx_ipc(priv, priv->hw);
5951 
5952 	if (priv->sph_cap) {
5953 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5954 		u32 chan;
5955 
5956 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5957 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5958 	}
5959 
5960 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5961 		priv->hw->hw_vlan_en = true;
5962 	else
5963 		priv->hw->hw_vlan_en = false;
5964 
5965 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5966 
5967 	return 0;
5968 }
5969 
5970 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5971 {
5972 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5973 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5974 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5975 	bool *hs_enable = &fpe_cfg->hs_enable;
5976 
5977 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5978 		return;
5979 
5980 	/* If LP has sent verify mPacket, LP is FPE capable */
5981 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5982 		if (*lp_state < FPE_STATE_CAPABLE)
5983 			*lp_state = FPE_STATE_CAPABLE;
5984 
5985 		/* If user has requested FPE enable, quickly response */
5986 		if (*hs_enable)
5987 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5988 						fpe_cfg,
5989 						MPACKET_RESPONSE);
5990 	}
5991 
5992 	/* If Local has sent verify mPacket, Local is FPE capable */
5993 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5994 		if (*lo_state < FPE_STATE_CAPABLE)
5995 			*lo_state = FPE_STATE_CAPABLE;
5996 	}
5997 
5998 	/* If LP has sent response mPacket, LP is entering FPE ON */
5999 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
6000 		*lp_state = FPE_STATE_ENTERING_ON;
6001 
6002 	/* If Local has sent response mPacket, Local is entering FPE ON */
6003 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
6004 		*lo_state = FPE_STATE_ENTERING_ON;
6005 
6006 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
6007 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
6008 	    priv->fpe_wq) {
6009 		queue_work(priv->fpe_wq, &priv->fpe_task);
6010 	}
6011 }
6012 
6013 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6014 {
6015 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6016 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6017 	u32 queues_count;
6018 	u32 queue;
6019 	bool xmac;
6020 
6021 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6022 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6023 
6024 	if (priv->irq_wake)
6025 		pm_wakeup_event(priv->device, 0);
6026 
6027 	if (priv->dma_cap.estsel)
6028 		stmmac_est_irq_status(priv, priv, priv->dev,
6029 				      &priv->xstats, tx_cnt);
6030 
6031 	if (priv->dma_cap.fpesel) {
6032 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
6033 						   priv->dev);
6034 
6035 		stmmac_fpe_event_status(priv, status);
6036 	}
6037 
6038 	/* To handle GMAC own interrupts */
6039 	if ((priv->plat->has_gmac) || xmac) {
6040 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6041 
6042 		if (unlikely(status)) {
6043 			/* For LPI we need to save the tx status */
6044 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6045 				priv->tx_path_in_lpi_mode = true;
6046 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6047 				priv->tx_path_in_lpi_mode = false;
6048 		}
6049 
6050 		for (queue = 0; queue < queues_count; queue++)
6051 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6052 
6053 		/* PCS link status */
6054 		if (priv->hw->pcs &&
6055 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6056 			if (priv->xstats.pcs_link)
6057 				netif_carrier_on(priv->dev);
6058 			else
6059 				netif_carrier_off(priv->dev);
6060 		}
6061 
6062 		stmmac_timestamp_interrupt(priv, priv);
6063 	}
6064 }
6065 
6066 /**
6067  *  stmmac_interrupt - main ISR
6068  *  @irq: interrupt number.
6069  *  @dev_id: to pass the net device pointer.
6070  *  Description: this is the main driver interrupt service routine.
6071  *  It can call:
6072  *  o DMA service routine (to manage incoming frame reception and transmission
6073  *    status)
6074  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6075  *    interrupts.
6076  */
6077 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6078 {
6079 	struct net_device *dev = (struct net_device *)dev_id;
6080 	struct stmmac_priv *priv = netdev_priv(dev);
6081 
6082 	/* Check if adapter is up */
6083 	if (test_bit(STMMAC_DOWN, &priv->state))
6084 		return IRQ_HANDLED;
6085 
6086 	/* Check ASP error if it isn't delivered via an individual IRQ */
6087 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6088 		return IRQ_HANDLED;
6089 
6090 	/* To handle Common interrupts */
6091 	stmmac_common_interrupt(priv);
6092 
6093 	/* To handle DMA interrupts */
6094 	stmmac_dma_interrupt(priv);
6095 
6096 	return IRQ_HANDLED;
6097 }
6098 
6099 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6100 {
6101 	struct net_device *dev = (struct net_device *)dev_id;
6102 	struct stmmac_priv *priv = netdev_priv(dev);
6103 
6104 	/* Check if adapter is up */
6105 	if (test_bit(STMMAC_DOWN, &priv->state))
6106 		return IRQ_HANDLED;
6107 
6108 	/* To handle Common interrupts */
6109 	stmmac_common_interrupt(priv);
6110 
6111 	return IRQ_HANDLED;
6112 }
6113 
6114 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6115 {
6116 	struct net_device *dev = (struct net_device *)dev_id;
6117 	struct stmmac_priv *priv = netdev_priv(dev);
6118 
6119 	/* Check if adapter is up */
6120 	if (test_bit(STMMAC_DOWN, &priv->state))
6121 		return IRQ_HANDLED;
6122 
6123 	/* Check if a fatal error happened */
6124 	stmmac_safety_feat_interrupt(priv);
6125 
6126 	return IRQ_HANDLED;
6127 }
6128 
6129 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6130 {
6131 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6132 	struct stmmac_dma_conf *dma_conf;
6133 	int chan = tx_q->queue_index;
6134 	struct stmmac_priv *priv;
6135 	int status;
6136 
6137 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6138 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6139 
6140 	/* Check if adapter is up */
6141 	if (test_bit(STMMAC_DOWN, &priv->state))
6142 		return IRQ_HANDLED;
6143 
6144 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6145 
6146 	if (unlikely(status & tx_hard_error_bump_tc)) {
6147 		/* Try to bump up the dma threshold on this failure */
6148 		stmmac_bump_dma_threshold(priv, chan);
6149 	} else if (unlikely(status == tx_hard_error)) {
6150 		stmmac_tx_err(priv, chan);
6151 	}
6152 
6153 	return IRQ_HANDLED;
6154 }
6155 
6156 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6157 {
6158 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6159 	struct stmmac_dma_conf *dma_conf;
6160 	int chan = rx_q->queue_index;
6161 	struct stmmac_priv *priv;
6162 
6163 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6164 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6165 
6166 	/* Check if adapter is up */
6167 	if (test_bit(STMMAC_DOWN, &priv->state))
6168 		return IRQ_HANDLED;
6169 
6170 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6171 
6172 	return IRQ_HANDLED;
6173 }
6174 
6175 /**
6176  *  stmmac_ioctl - Entry point for the Ioctl
6177  *  @dev: Device pointer.
6178  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6179  *  a proprietary structure used to pass information to the driver.
6180  *  @cmd: IOCTL command
6181  *  Description:
6182  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6183  */
6184 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6185 {
6186 	struct stmmac_priv *priv = netdev_priv (dev);
6187 	int ret = -EOPNOTSUPP;
6188 
6189 	if (!netif_running(dev))
6190 		return -EINVAL;
6191 
6192 	switch (cmd) {
6193 	case SIOCGMIIPHY:
6194 	case SIOCGMIIREG:
6195 	case SIOCSMIIREG:
6196 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6197 		break;
6198 	case SIOCSHWTSTAMP:
6199 		ret = stmmac_hwtstamp_set(dev, rq);
6200 		break;
6201 	case SIOCGHWTSTAMP:
6202 		ret = stmmac_hwtstamp_get(dev, rq);
6203 		break;
6204 	default:
6205 		break;
6206 	}
6207 
6208 	return ret;
6209 }
6210 
6211 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6212 				    void *cb_priv)
6213 {
6214 	struct stmmac_priv *priv = cb_priv;
6215 	int ret = -EOPNOTSUPP;
6216 
6217 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6218 		return ret;
6219 
6220 	__stmmac_disable_all_queues(priv);
6221 
6222 	switch (type) {
6223 	case TC_SETUP_CLSU32:
6224 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6225 		break;
6226 	case TC_SETUP_CLSFLOWER:
6227 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6228 		break;
6229 	default:
6230 		break;
6231 	}
6232 
6233 	stmmac_enable_all_queues(priv);
6234 	return ret;
6235 }
6236 
6237 static LIST_HEAD(stmmac_block_cb_list);
6238 
6239 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6240 			   void *type_data)
6241 {
6242 	struct stmmac_priv *priv = netdev_priv(ndev);
6243 
6244 	switch (type) {
6245 	case TC_QUERY_CAPS:
6246 		return stmmac_tc_query_caps(priv, priv, type_data);
6247 	case TC_SETUP_BLOCK:
6248 		return flow_block_cb_setup_simple(type_data,
6249 						  &stmmac_block_cb_list,
6250 						  stmmac_setup_tc_block_cb,
6251 						  priv, priv, true);
6252 	case TC_SETUP_QDISC_CBS:
6253 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6254 	case TC_SETUP_QDISC_TAPRIO:
6255 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6256 	case TC_SETUP_QDISC_ETF:
6257 		return stmmac_tc_setup_etf(priv, priv, type_data);
6258 	default:
6259 		return -EOPNOTSUPP;
6260 	}
6261 }
6262 
6263 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6264 			       struct net_device *sb_dev)
6265 {
6266 	int gso = skb_shinfo(skb)->gso_type;
6267 
6268 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6269 		/*
6270 		 * There is no way to determine the number of TSO/USO
6271 		 * capable Queues. Let's use always the Queue 0
6272 		 * because if TSO/USO is supported then at least this
6273 		 * one will be capable.
6274 		 */
6275 		return 0;
6276 	}
6277 
6278 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6279 }
6280 
6281 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6282 {
6283 	struct stmmac_priv *priv = netdev_priv(ndev);
6284 	int ret = 0;
6285 
6286 	ret = pm_runtime_resume_and_get(priv->device);
6287 	if (ret < 0)
6288 		return ret;
6289 
6290 	ret = eth_mac_addr(ndev, addr);
6291 	if (ret)
6292 		goto set_mac_error;
6293 
6294 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6295 
6296 set_mac_error:
6297 	pm_runtime_put(priv->device);
6298 
6299 	return ret;
6300 }
6301 
6302 #ifdef CONFIG_DEBUG_FS
6303 static struct dentry *stmmac_fs_dir;
6304 
6305 static void sysfs_display_ring(void *head, int size, int extend_desc,
6306 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6307 {
6308 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6309 	struct dma_desc *p = (struct dma_desc *)head;
6310 	unsigned int desc_size;
6311 	dma_addr_t dma_addr;
6312 	int i;
6313 
6314 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6315 	for (i = 0; i < size; i++) {
6316 		dma_addr = dma_phy_addr + i * desc_size;
6317 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6318 				i, &dma_addr,
6319 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6320 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6321 		if (extend_desc)
6322 			p = &(++ep)->basic;
6323 		else
6324 			p++;
6325 	}
6326 }
6327 
6328 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6329 {
6330 	struct net_device *dev = seq->private;
6331 	struct stmmac_priv *priv = netdev_priv(dev);
6332 	u32 rx_count = priv->plat->rx_queues_to_use;
6333 	u32 tx_count = priv->plat->tx_queues_to_use;
6334 	u32 queue;
6335 
6336 	if ((dev->flags & IFF_UP) == 0)
6337 		return 0;
6338 
6339 	for (queue = 0; queue < rx_count; queue++) {
6340 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6341 
6342 		seq_printf(seq, "RX Queue %d:\n", queue);
6343 
6344 		if (priv->extend_desc) {
6345 			seq_printf(seq, "Extended descriptor ring:\n");
6346 			sysfs_display_ring((void *)rx_q->dma_erx,
6347 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6348 		} else {
6349 			seq_printf(seq, "Descriptor ring:\n");
6350 			sysfs_display_ring((void *)rx_q->dma_rx,
6351 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6352 		}
6353 	}
6354 
6355 	for (queue = 0; queue < tx_count; queue++) {
6356 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6357 
6358 		seq_printf(seq, "TX Queue %d:\n", queue);
6359 
6360 		if (priv->extend_desc) {
6361 			seq_printf(seq, "Extended descriptor ring:\n");
6362 			sysfs_display_ring((void *)tx_q->dma_etx,
6363 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6364 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6365 			seq_printf(seq, "Descriptor ring:\n");
6366 			sysfs_display_ring((void *)tx_q->dma_tx,
6367 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6368 		}
6369 	}
6370 
6371 	return 0;
6372 }
6373 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6374 
6375 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6376 {
6377 	static const char * const dwxgmac_timestamp_source[] = {
6378 		"None",
6379 		"Internal",
6380 		"External",
6381 		"Both",
6382 	};
6383 	static const char * const dwxgmac_safety_feature_desc[] = {
6384 		"No",
6385 		"All Safety Features with ECC and Parity",
6386 		"All Safety Features without ECC or Parity",
6387 		"All Safety Features with Parity Only",
6388 		"ECC Only",
6389 		"UNDEFINED",
6390 		"UNDEFINED",
6391 		"UNDEFINED",
6392 	};
6393 	struct net_device *dev = seq->private;
6394 	struct stmmac_priv *priv = netdev_priv(dev);
6395 
6396 	if (!priv->hw_cap_support) {
6397 		seq_printf(seq, "DMA HW features not supported\n");
6398 		return 0;
6399 	}
6400 
6401 	seq_printf(seq, "==============================\n");
6402 	seq_printf(seq, "\tDMA HW features\n");
6403 	seq_printf(seq, "==============================\n");
6404 
6405 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6406 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6407 	seq_printf(seq, "\t1000 Mbps: %s\n",
6408 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6409 	seq_printf(seq, "\tHalf duplex: %s\n",
6410 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6411 	if (priv->plat->has_xgmac) {
6412 		seq_printf(seq,
6413 			   "\tNumber of Additional MAC address registers: %d\n",
6414 			   priv->dma_cap.multi_addr);
6415 	} else {
6416 		seq_printf(seq, "\tHash Filter: %s\n",
6417 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6418 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6419 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6420 	}
6421 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6422 		   (priv->dma_cap.pcs) ? "Y" : "N");
6423 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6424 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6425 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6426 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6427 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6428 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6429 	seq_printf(seq, "\tRMON module: %s\n",
6430 		   (priv->dma_cap.rmon) ? "Y" : "N");
6431 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6432 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6433 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6434 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6435 	if (priv->plat->has_xgmac)
6436 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6437 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6438 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6439 		   (priv->dma_cap.eee) ? "Y" : "N");
6440 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6441 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6442 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6443 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6444 	    priv->plat->has_xgmac) {
6445 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6446 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6447 	} else {
6448 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6449 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6450 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6451 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6452 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6453 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6454 	}
6455 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6456 		   priv->dma_cap.number_rx_channel);
6457 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6458 		   priv->dma_cap.number_tx_channel);
6459 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6460 		   priv->dma_cap.number_rx_queues);
6461 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6462 		   priv->dma_cap.number_tx_queues);
6463 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6464 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6465 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6466 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6467 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6468 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6469 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6470 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6471 		   priv->dma_cap.pps_out_num);
6472 	seq_printf(seq, "\tSafety Features: %s\n",
6473 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6474 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6475 		   priv->dma_cap.frpsel ? "Y" : "N");
6476 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6477 		   priv->dma_cap.host_dma_width);
6478 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6479 		   priv->dma_cap.rssen ? "Y" : "N");
6480 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6481 		   priv->dma_cap.vlhash ? "Y" : "N");
6482 	seq_printf(seq, "\tSplit Header: %s\n",
6483 		   priv->dma_cap.sphen ? "Y" : "N");
6484 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6485 		   priv->dma_cap.vlins ? "Y" : "N");
6486 	seq_printf(seq, "\tDouble VLAN: %s\n",
6487 		   priv->dma_cap.dvlan ? "Y" : "N");
6488 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6489 		   priv->dma_cap.l3l4fnum);
6490 	seq_printf(seq, "\tARP Offloading: %s\n",
6491 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6492 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6493 		   priv->dma_cap.estsel ? "Y" : "N");
6494 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6495 		   priv->dma_cap.fpesel ? "Y" : "N");
6496 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6497 		   priv->dma_cap.tbssel ? "Y" : "N");
6498 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6499 		   priv->dma_cap.tbs_ch_num);
6500 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6501 		   priv->dma_cap.sgfsel ? "Y" : "N");
6502 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6503 		   BIT(priv->dma_cap.ttsfd) >> 1);
6504 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6505 		   priv->dma_cap.numtc);
6506 	seq_printf(seq, "\tDCB Feature: %s\n",
6507 		   priv->dma_cap.dcben ? "Y" : "N");
6508 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6509 		   priv->dma_cap.advthword ? "Y" : "N");
6510 	seq_printf(seq, "\tPTP Offload: %s\n",
6511 		   priv->dma_cap.ptoen ? "Y" : "N");
6512 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6513 		   priv->dma_cap.osten ? "Y" : "N");
6514 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6515 		   priv->dma_cap.pfcen ? "Y" : "N");
6516 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6517 		   BIT(priv->dma_cap.frpes) << 6);
6518 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6519 		   BIT(priv->dma_cap.frpbs) << 6);
6520 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6521 		   priv->dma_cap.frppipe_num);
6522 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6523 		   priv->dma_cap.nrvf_num ?
6524 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6525 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6526 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6527 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6528 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6529 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6530 		   priv->dma_cap.cbtisel ? "Y" : "N");
6531 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6532 		   priv->dma_cap.aux_snapshot_n);
6533 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6534 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6535 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6536 		   priv->dma_cap.edma ? "Y" : "N");
6537 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6538 		   priv->dma_cap.ediffc ? "Y" : "N");
6539 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6540 		   priv->dma_cap.vxn ? "Y" : "N");
6541 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6542 		   priv->dma_cap.dbgmem ? "Y" : "N");
6543 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6544 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6545 	return 0;
6546 }
6547 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6548 
6549 /* Use network device events to rename debugfs file entries.
6550  */
6551 static int stmmac_device_event(struct notifier_block *unused,
6552 			       unsigned long event, void *ptr)
6553 {
6554 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6555 	struct stmmac_priv *priv = netdev_priv(dev);
6556 
6557 	if (dev->netdev_ops != &stmmac_netdev_ops)
6558 		goto done;
6559 
6560 	switch (event) {
6561 	case NETDEV_CHANGENAME:
6562 		if (priv->dbgfs_dir)
6563 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6564 							 priv->dbgfs_dir,
6565 							 stmmac_fs_dir,
6566 							 dev->name);
6567 		break;
6568 	}
6569 done:
6570 	return NOTIFY_DONE;
6571 }
6572 
6573 static struct notifier_block stmmac_notifier = {
6574 	.notifier_call = stmmac_device_event,
6575 };
6576 
6577 static void stmmac_init_fs(struct net_device *dev)
6578 {
6579 	struct stmmac_priv *priv = netdev_priv(dev);
6580 
6581 	rtnl_lock();
6582 
6583 	/* Create per netdev entries */
6584 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6585 
6586 	/* Entry to report DMA RX/TX rings */
6587 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6588 			    &stmmac_rings_status_fops);
6589 
6590 	/* Entry to report the DMA HW features */
6591 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6592 			    &stmmac_dma_cap_fops);
6593 
6594 	rtnl_unlock();
6595 }
6596 
6597 static void stmmac_exit_fs(struct net_device *dev)
6598 {
6599 	struct stmmac_priv *priv = netdev_priv(dev);
6600 
6601 	debugfs_remove_recursive(priv->dbgfs_dir);
6602 }
6603 #endif /* CONFIG_DEBUG_FS */
6604 
6605 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6606 {
6607 	unsigned char *data = (unsigned char *)&vid_le;
6608 	unsigned char data_byte = 0;
6609 	u32 crc = ~0x0;
6610 	u32 temp = 0;
6611 	int i, bits;
6612 
6613 	bits = get_bitmask_order(VLAN_VID_MASK);
6614 	for (i = 0; i < bits; i++) {
6615 		if ((i % 8) == 0)
6616 			data_byte = data[i / 8];
6617 
6618 		temp = ((crc & 1) ^ data_byte) & 1;
6619 		crc >>= 1;
6620 		data_byte >>= 1;
6621 
6622 		if (temp)
6623 			crc ^= 0xedb88320;
6624 	}
6625 
6626 	return crc;
6627 }
6628 
6629 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6630 {
6631 	u32 crc, hash = 0;
6632 	__le16 pmatch = 0;
6633 	int count = 0;
6634 	u16 vid = 0;
6635 
6636 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6637 		__le16 vid_le = cpu_to_le16(vid);
6638 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6639 		hash |= (1 << crc);
6640 		count++;
6641 	}
6642 
6643 	if (!priv->dma_cap.vlhash) {
6644 		if (count > 2) /* VID = 0 always passes filter */
6645 			return -EOPNOTSUPP;
6646 
6647 		pmatch = cpu_to_le16(vid);
6648 		hash = 0;
6649 	}
6650 
6651 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6652 }
6653 
6654 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6655 {
6656 	struct stmmac_priv *priv = netdev_priv(ndev);
6657 	bool is_double = false;
6658 	int ret;
6659 
6660 	ret = pm_runtime_resume_and_get(priv->device);
6661 	if (ret < 0)
6662 		return ret;
6663 
6664 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6665 		is_double = true;
6666 
6667 	set_bit(vid, priv->active_vlans);
6668 	ret = stmmac_vlan_update(priv, is_double);
6669 	if (ret) {
6670 		clear_bit(vid, priv->active_vlans);
6671 		goto err_pm_put;
6672 	}
6673 
6674 	if (priv->hw->num_vlan) {
6675 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6676 		if (ret)
6677 			goto err_pm_put;
6678 	}
6679 err_pm_put:
6680 	pm_runtime_put(priv->device);
6681 
6682 	return ret;
6683 }
6684 
6685 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6686 {
6687 	struct stmmac_priv *priv = netdev_priv(ndev);
6688 	bool is_double = false;
6689 	int ret;
6690 
6691 	ret = pm_runtime_resume_and_get(priv->device);
6692 	if (ret < 0)
6693 		return ret;
6694 
6695 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6696 		is_double = true;
6697 
6698 	clear_bit(vid, priv->active_vlans);
6699 
6700 	if (priv->hw->num_vlan) {
6701 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6702 		if (ret)
6703 			goto del_vlan_error;
6704 	}
6705 
6706 	ret = stmmac_vlan_update(priv, is_double);
6707 
6708 del_vlan_error:
6709 	pm_runtime_put(priv->device);
6710 
6711 	return ret;
6712 }
6713 
6714 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6715 {
6716 	struct stmmac_priv *priv = netdev_priv(dev);
6717 
6718 	switch (bpf->command) {
6719 	case XDP_SETUP_PROG:
6720 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6721 	case XDP_SETUP_XSK_POOL:
6722 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6723 					     bpf->xsk.queue_id);
6724 	default:
6725 		return -EOPNOTSUPP;
6726 	}
6727 }
6728 
6729 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6730 			   struct xdp_frame **frames, u32 flags)
6731 {
6732 	struct stmmac_priv *priv = netdev_priv(dev);
6733 	int cpu = smp_processor_id();
6734 	struct netdev_queue *nq;
6735 	int i, nxmit = 0;
6736 	int queue;
6737 
6738 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6739 		return -ENETDOWN;
6740 
6741 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6742 		return -EINVAL;
6743 
6744 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6745 	nq = netdev_get_tx_queue(priv->dev, queue);
6746 
6747 	__netif_tx_lock(nq, cpu);
6748 	/* Avoids TX time-out as we are sharing with slow path */
6749 	txq_trans_cond_update(nq);
6750 
6751 	for (i = 0; i < num_frames; i++) {
6752 		int res;
6753 
6754 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6755 		if (res == STMMAC_XDP_CONSUMED)
6756 			break;
6757 
6758 		nxmit++;
6759 	}
6760 
6761 	if (flags & XDP_XMIT_FLUSH) {
6762 		stmmac_flush_tx_descriptors(priv, queue);
6763 		stmmac_tx_timer_arm(priv, queue);
6764 	}
6765 
6766 	__netif_tx_unlock(nq);
6767 
6768 	return nxmit;
6769 }
6770 
6771 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6772 {
6773 	struct stmmac_channel *ch = &priv->channel[queue];
6774 	unsigned long flags;
6775 
6776 	spin_lock_irqsave(&ch->lock, flags);
6777 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6778 	spin_unlock_irqrestore(&ch->lock, flags);
6779 
6780 	stmmac_stop_rx_dma(priv, queue);
6781 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6782 }
6783 
6784 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6785 {
6786 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6787 	struct stmmac_channel *ch = &priv->channel[queue];
6788 	unsigned long flags;
6789 	u32 buf_size;
6790 	int ret;
6791 
6792 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6793 	if (ret) {
6794 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6795 		return;
6796 	}
6797 
6798 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6799 	if (ret) {
6800 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6801 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6802 		return;
6803 	}
6804 
6805 	stmmac_reset_rx_queue(priv, queue);
6806 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6807 
6808 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6809 			    rx_q->dma_rx_phy, rx_q->queue_index);
6810 
6811 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6812 			     sizeof(struct dma_desc));
6813 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6814 			       rx_q->rx_tail_addr, rx_q->queue_index);
6815 
6816 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6817 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6818 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6819 				      buf_size,
6820 				      rx_q->queue_index);
6821 	} else {
6822 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6823 				      priv->dma_conf.dma_buf_sz,
6824 				      rx_q->queue_index);
6825 	}
6826 
6827 	stmmac_start_rx_dma(priv, queue);
6828 
6829 	spin_lock_irqsave(&ch->lock, flags);
6830 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6831 	spin_unlock_irqrestore(&ch->lock, flags);
6832 }
6833 
6834 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6835 {
6836 	struct stmmac_channel *ch = &priv->channel[queue];
6837 	unsigned long flags;
6838 
6839 	spin_lock_irqsave(&ch->lock, flags);
6840 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6841 	spin_unlock_irqrestore(&ch->lock, flags);
6842 
6843 	stmmac_stop_tx_dma(priv, queue);
6844 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6845 }
6846 
6847 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6848 {
6849 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6850 	struct stmmac_channel *ch = &priv->channel[queue];
6851 	unsigned long flags;
6852 	int ret;
6853 
6854 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6855 	if (ret) {
6856 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6857 		return;
6858 	}
6859 
6860 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6861 	if (ret) {
6862 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6863 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6864 		return;
6865 	}
6866 
6867 	stmmac_reset_tx_queue(priv, queue);
6868 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6869 
6870 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6871 			    tx_q->dma_tx_phy, tx_q->queue_index);
6872 
6873 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6874 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6875 
6876 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6877 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6878 			       tx_q->tx_tail_addr, tx_q->queue_index);
6879 
6880 	stmmac_start_tx_dma(priv, queue);
6881 
6882 	spin_lock_irqsave(&ch->lock, flags);
6883 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6884 	spin_unlock_irqrestore(&ch->lock, flags);
6885 }
6886 
6887 void stmmac_xdp_release(struct net_device *dev)
6888 {
6889 	struct stmmac_priv *priv = netdev_priv(dev);
6890 	u32 chan;
6891 
6892 	/* Ensure tx function is not running */
6893 	netif_tx_disable(dev);
6894 
6895 	/* Disable NAPI process */
6896 	stmmac_disable_all_queues(priv);
6897 
6898 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6899 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6900 
6901 	/* Free the IRQ lines */
6902 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6903 
6904 	/* Stop TX/RX DMA channels */
6905 	stmmac_stop_all_dma(priv);
6906 
6907 	/* Release and free the Rx/Tx resources */
6908 	free_dma_desc_resources(priv, &priv->dma_conf);
6909 
6910 	/* Disable the MAC Rx/Tx */
6911 	stmmac_mac_set(priv, priv->ioaddr, false);
6912 
6913 	/* set trans_start so we don't get spurious
6914 	 * watchdogs during reset
6915 	 */
6916 	netif_trans_update(dev);
6917 	netif_carrier_off(dev);
6918 }
6919 
6920 int stmmac_xdp_open(struct net_device *dev)
6921 {
6922 	struct stmmac_priv *priv = netdev_priv(dev);
6923 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6924 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6925 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6926 	struct stmmac_rx_queue *rx_q;
6927 	struct stmmac_tx_queue *tx_q;
6928 	u32 buf_size;
6929 	bool sph_en;
6930 	u32 chan;
6931 	int ret;
6932 
6933 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6934 	if (ret < 0) {
6935 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6936 			   __func__);
6937 		goto dma_desc_error;
6938 	}
6939 
6940 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6941 	if (ret < 0) {
6942 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6943 			   __func__);
6944 		goto init_error;
6945 	}
6946 
6947 	stmmac_reset_queues_param(priv);
6948 
6949 	/* DMA CSR Channel configuration */
6950 	for (chan = 0; chan < dma_csr_ch; chan++) {
6951 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6952 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6953 	}
6954 
6955 	/* Adjust Split header */
6956 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6957 
6958 	/* DMA RX Channel Configuration */
6959 	for (chan = 0; chan < rx_cnt; chan++) {
6960 		rx_q = &priv->dma_conf.rx_queue[chan];
6961 
6962 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6963 				    rx_q->dma_rx_phy, chan);
6964 
6965 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6966 				     (rx_q->buf_alloc_num *
6967 				      sizeof(struct dma_desc));
6968 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6969 				       rx_q->rx_tail_addr, chan);
6970 
6971 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6972 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6973 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6974 					      buf_size,
6975 					      rx_q->queue_index);
6976 		} else {
6977 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6978 					      priv->dma_conf.dma_buf_sz,
6979 					      rx_q->queue_index);
6980 		}
6981 
6982 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6983 	}
6984 
6985 	/* DMA TX Channel Configuration */
6986 	for (chan = 0; chan < tx_cnt; chan++) {
6987 		tx_q = &priv->dma_conf.tx_queue[chan];
6988 
6989 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6990 				    tx_q->dma_tx_phy, chan);
6991 
6992 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6993 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6994 				       tx_q->tx_tail_addr, chan);
6995 
6996 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6997 		tx_q->txtimer.function = stmmac_tx_timer;
6998 	}
6999 
7000 	/* Enable the MAC Rx/Tx */
7001 	stmmac_mac_set(priv, priv->ioaddr, true);
7002 
7003 	/* Start Rx & Tx DMA Channels */
7004 	stmmac_start_all_dma(priv);
7005 
7006 	ret = stmmac_request_irq(dev);
7007 	if (ret)
7008 		goto irq_error;
7009 
7010 	/* Enable NAPI process*/
7011 	stmmac_enable_all_queues(priv);
7012 	netif_carrier_on(dev);
7013 	netif_tx_start_all_queues(dev);
7014 	stmmac_enable_all_dma_irq(priv);
7015 
7016 	return 0;
7017 
7018 irq_error:
7019 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7020 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7021 
7022 	stmmac_hw_teardown(dev);
7023 init_error:
7024 	free_dma_desc_resources(priv, &priv->dma_conf);
7025 dma_desc_error:
7026 	return ret;
7027 }
7028 
7029 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7030 {
7031 	struct stmmac_priv *priv = netdev_priv(dev);
7032 	struct stmmac_rx_queue *rx_q;
7033 	struct stmmac_tx_queue *tx_q;
7034 	struct stmmac_channel *ch;
7035 
7036 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7037 	    !netif_carrier_ok(priv->dev))
7038 		return -ENETDOWN;
7039 
7040 	if (!stmmac_xdp_is_enabled(priv))
7041 		return -EINVAL;
7042 
7043 	if (queue >= priv->plat->rx_queues_to_use ||
7044 	    queue >= priv->plat->tx_queues_to_use)
7045 		return -EINVAL;
7046 
7047 	rx_q = &priv->dma_conf.rx_queue[queue];
7048 	tx_q = &priv->dma_conf.tx_queue[queue];
7049 	ch = &priv->channel[queue];
7050 
7051 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7052 		return -EINVAL;
7053 
7054 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7055 		/* EQoS does not have per-DMA channel SW interrupt,
7056 		 * so we schedule RX Napi straight-away.
7057 		 */
7058 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7059 			__napi_schedule(&ch->rxtx_napi);
7060 	}
7061 
7062 	return 0;
7063 }
7064 
7065 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7066 {
7067 	struct stmmac_priv *priv = netdev_priv(dev);
7068 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7069 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7070 	unsigned int start;
7071 	int q;
7072 
7073 	for (q = 0; q < tx_cnt; q++) {
7074 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7075 		u64 tx_packets;
7076 		u64 tx_bytes;
7077 
7078 		do {
7079 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7080 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7081 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7082 		do {
7083 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7084 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7085 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7086 
7087 		stats->tx_packets += tx_packets;
7088 		stats->tx_bytes += tx_bytes;
7089 	}
7090 
7091 	for (q = 0; q < rx_cnt; q++) {
7092 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7093 		u64 rx_packets;
7094 		u64 rx_bytes;
7095 
7096 		do {
7097 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7098 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7099 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7100 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7101 
7102 		stats->rx_packets += rx_packets;
7103 		stats->rx_bytes += rx_bytes;
7104 	}
7105 
7106 	stats->rx_dropped = priv->xstats.rx_dropped;
7107 	stats->rx_errors = priv->xstats.rx_errors;
7108 	stats->tx_dropped = priv->xstats.tx_dropped;
7109 	stats->tx_errors = priv->xstats.tx_errors;
7110 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7111 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7112 	stats->rx_length_errors = priv->xstats.rx_length;
7113 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7114 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7115 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7116 }
7117 
7118 static const struct net_device_ops stmmac_netdev_ops = {
7119 	.ndo_open = stmmac_open,
7120 	.ndo_start_xmit = stmmac_xmit,
7121 	.ndo_stop = stmmac_release,
7122 	.ndo_change_mtu = stmmac_change_mtu,
7123 	.ndo_fix_features = stmmac_fix_features,
7124 	.ndo_set_features = stmmac_set_features,
7125 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7126 	.ndo_tx_timeout = stmmac_tx_timeout,
7127 	.ndo_eth_ioctl = stmmac_ioctl,
7128 	.ndo_get_stats64 = stmmac_get_stats64,
7129 	.ndo_setup_tc = stmmac_setup_tc,
7130 	.ndo_select_queue = stmmac_select_queue,
7131 	.ndo_set_mac_address = stmmac_set_mac_address,
7132 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7133 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7134 	.ndo_bpf = stmmac_bpf,
7135 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7136 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7137 };
7138 
7139 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7140 {
7141 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7142 		return;
7143 	if (test_bit(STMMAC_DOWN, &priv->state))
7144 		return;
7145 
7146 	netdev_err(priv->dev, "Reset adapter.\n");
7147 
7148 	rtnl_lock();
7149 	netif_trans_update(priv->dev);
7150 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7151 		usleep_range(1000, 2000);
7152 
7153 	set_bit(STMMAC_DOWN, &priv->state);
7154 	dev_close(priv->dev);
7155 	dev_open(priv->dev, NULL);
7156 	clear_bit(STMMAC_DOWN, &priv->state);
7157 	clear_bit(STMMAC_RESETING, &priv->state);
7158 	rtnl_unlock();
7159 }
7160 
7161 static void stmmac_service_task(struct work_struct *work)
7162 {
7163 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7164 			service_task);
7165 
7166 	stmmac_reset_subtask(priv);
7167 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7168 }
7169 
7170 /**
7171  *  stmmac_hw_init - Init the MAC device
7172  *  @priv: driver private structure
7173  *  Description: this function is to configure the MAC device according to
7174  *  some platform parameters or the HW capability register. It prepares the
7175  *  driver to use either ring or chain modes and to setup either enhanced or
7176  *  normal descriptors.
7177  */
7178 static int stmmac_hw_init(struct stmmac_priv *priv)
7179 {
7180 	int ret;
7181 
7182 	/* dwmac-sun8i only work in chain mode */
7183 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7184 		chain_mode = 1;
7185 	priv->chain_mode = chain_mode;
7186 
7187 	/* Initialize HW Interface */
7188 	ret = stmmac_hwif_init(priv);
7189 	if (ret)
7190 		return ret;
7191 
7192 	/* Get the HW capability (new GMAC newer than 3.50a) */
7193 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7194 	if (priv->hw_cap_support) {
7195 		dev_info(priv->device, "DMA HW capability register supported\n");
7196 
7197 		/* We can override some gmac/dma configuration fields: e.g.
7198 		 * enh_desc, tx_coe (e.g. that are passed through the
7199 		 * platform) with the values from the HW capability
7200 		 * register (if supported).
7201 		 */
7202 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7203 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7204 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7205 		priv->hw->pmt = priv->plat->pmt;
7206 		if (priv->dma_cap.hash_tb_sz) {
7207 			priv->hw->multicast_filter_bins =
7208 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7209 			priv->hw->mcast_bits_log2 =
7210 					ilog2(priv->hw->multicast_filter_bins);
7211 		}
7212 
7213 		/* TXCOE doesn't work in thresh DMA mode */
7214 		if (priv->plat->force_thresh_dma_mode)
7215 			priv->plat->tx_coe = 0;
7216 		else
7217 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7218 
7219 		/* In case of GMAC4 rx_coe is from HW cap register. */
7220 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7221 
7222 		if (priv->dma_cap.rx_coe_type2)
7223 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7224 		else if (priv->dma_cap.rx_coe_type1)
7225 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7226 
7227 	} else {
7228 		dev_info(priv->device, "No HW DMA feature register supported\n");
7229 	}
7230 
7231 	if (priv->plat->rx_coe) {
7232 		priv->hw->rx_csum = priv->plat->rx_coe;
7233 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7234 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7235 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7236 	}
7237 	if (priv->plat->tx_coe)
7238 		dev_info(priv->device, "TX Checksum insertion supported\n");
7239 
7240 	if (priv->plat->pmt) {
7241 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7242 		device_set_wakeup_capable(priv->device, 1);
7243 	}
7244 
7245 	if (priv->dma_cap.tsoen)
7246 		dev_info(priv->device, "TSO supported\n");
7247 
7248 	priv->hw->vlan_fail_q_en =
7249 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7250 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7251 
7252 	/* Run HW quirks, if any */
7253 	if (priv->hwif_quirks) {
7254 		ret = priv->hwif_quirks(priv);
7255 		if (ret)
7256 			return ret;
7257 	}
7258 
7259 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7260 	 * In some case, for example on bugged HW this feature
7261 	 * has to be disable and this can be done by passing the
7262 	 * riwt_off field from the platform.
7263 	 */
7264 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7265 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7266 		priv->use_riwt = 1;
7267 		dev_info(priv->device,
7268 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7269 	}
7270 
7271 	return 0;
7272 }
7273 
7274 static void stmmac_napi_add(struct net_device *dev)
7275 {
7276 	struct stmmac_priv *priv = netdev_priv(dev);
7277 	u32 queue, maxq;
7278 
7279 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7280 
7281 	for (queue = 0; queue < maxq; queue++) {
7282 		struct stmmac_channel *ch = &priv->channel[queue];
7283 
7284 		ch->priv_data = priv;
7285 		ch->index = queue;
7286 		spin_lock_init(&ch->lock);
7287 
7288 		if (queue < priv->plat->rx_queues_to_use) {
7289 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7290 		}
7291 		if (queue < priv->plat->tx_queues_to_use) {
7292 			netif_napi_add_tx(dev, &ch->tx_napi,
7293 					  stmmac_napi_poll_tx);
7294 		}
7295 		if (queue < priv->plat->rx_queues_to_use &&
7296 		    queue < priv->plat->tx_queues_to_use) {
7297 			netif_napi_add(dev, &ch->rxtx_napi,
7298 				       stmmac_napi_poll_rxtx);
7299 		}
7300 	}
7301 }
7302 
7303 static void stmmac_napi_del(struct net_device *dev)
7304 {
7305 	struct stmmac_priv *priv = netdev_priv(dev);
7306 	u32 queue, maxq;
7307 
7308 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7309 
7310 	for (queue = 0; queue < maxq; queue++) {
7311 		struct stmmac_channel *ch = &priv->channel[queue];
7312 
7313 		if (queue < priv->plat->rx_queues_to_use)
7314 			netif_napi_del(&ch->rx_napi);
7315 		if (queue < priv->plat->tx_queues_to_use)
7316 			netif_napi_del(&ch->tx_napi);
7317 		if (queue < priv->plat->rx_queues_to_use &&
7318 		    queue < priv->plat->tx_queues_to_use) {
7319 			netif_napi_del(&ch->rxtx_napi);
7320 		}
7321 	}
7322 }
7323 
7324 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7325 {
7326 	struct stmmac_priv *priv = netdev_priv(dev);
7327 	int ret = 0, i;
7328 
7329 	if (netif_running(dev))
7330 		stmmac_release(dev);
7331 
7332 	stmmac_napi_del(dev);
7333 
7334 	priv->plat->rx_queues_to_use = rx_cnt;
7335 	priv->plat->tx_queues_to_use = tx_cnt;
7336 	if (!netif_is_rxfh_configured(dev))
7337 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7338 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7339 									rx_cnt);
7340 
7341 	stmmac_napi_add(dev);
7342 
7343 	if (netif_running(dev))
7344 		ret = stmmac_open(dev);
7345 
7346 	return ret;
7347 }
7348 
7349 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7350 {
7351 	struct stmmac_priv *priv = netdev_priv(dev);
7352 	int ret = 0;
7353 
7354 	if (netif_running(dev))
7355 		stmmac_release(dev);
7356 
7357 	priv->dma_conf.dma_rx_size = rx_size;
7358 	priv->dma_conf.dma_tx_size = tx_size;
7359 
7360 	if (netif_running(dev))
7361 		ret = stmmac_open(dev);
7362 
7363 	return ret;
7364 }
7365 
7366 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7367 static void stmmac_fpe_lp_task(struct work_struct *work)
7368 {
7369 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7370 						fpe_task);
7371 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7372 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7373 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7374 	bool *hs_enable = &fpe_cfg->hs_enable;
7375 	bool *enable = &fpe_cfg->enable;
7376 	int retries = 20;
7377 
7378 	while (retries-- > 0) {
7379 		/* Bail out immediately if FPE handshake is OFF */
7380 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7381 			break;
7382 
7383 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7384 		    *lp_state == FPE_STATE_ENTERING_ON) {
7385 			stmmac_fpe_configure(priv, priv->ioaddr,
7386 					     fpe_cfg,
7387 					     priv->plat->tx_queues_to_use,
7388 					     priv->plat->rx_queues_to_use,
7389 					     *enable);
7390 
7391 			netdev_info(priv->dev, "configured FPE\n");
7392 
7393 			*lo_state = FPE_STATE_ON;
7394 			*lp_state = FPE_STATE_ON;
7395 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7396 			break;
7397 		}
7398 
7399 		if ((*lo_state == FPE_STATE_CAPABLE ||
7400 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7401 		     *lp_state != FPE_STATE_ON) {
7402 			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
7403 				    *lo_state, *lp_state);
7404 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7405 						fpe_cfg,
7406 						MPACKET_VERIFY);
7407 		}
7408 		/* Sleep then retry */
7409 		msleep(500);
7410 	}
7411 
7412 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7413 }
7414 
7415 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7416 {
7417 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7418 		if (enable) {
7419 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7420 						priv->plat->fpe_cfg,
7421 						MPACKET_VERIFY);
7422 		} else {
7423 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7424 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7425 		}
7426 
7427 		priv->plat->fpe_cfg->hs_enable = enable;
7428 	}
7429 }
7430 
7431 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7432 {
7433 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7434 	struct dma_desc *desc_contains_ts = ctx->desc;
7435 	struct stmmac_priv *priv = ctx->priv;
7436 	struct dma_desc *ndesc = ctx->ndesc;
7437 	struct dma_desc *desc = ctx->desc;
7438 	u64 ns = 0;
7439 
7440 	if (!priv->hwts_rx_en)
7441 		return -ENODATA;
7442 
7443 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7444 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7445 		desc_contains_ts = ndesc;
7446 
7447 	/* Check if timestamp is available */
7448 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7449 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7450 		ns -= priv->plat->cdc_error_adj;
7451 		*timestamp = ns_to_ktime(ns);
7452 		return 0;
7453 	}
7454 
7455 	return -ENODATA;
7456 }
7457 
7458 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7459 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7460 };
7461 
7462 /**
7463  * stmmac_dvr_probe
7464  * @device: device pointer
7465  * @plat_dat: platform data pointer
7466  * @res: stmmac resource pointer
7467  * Description: this is the main probe function used to
7468  * call the alloc_etherdev, allocate the priv structure.
7469  * Return:
7470  * returns 0 on success, otherwise errno.
7471  */
7472 int stmmac_dvr_probe(struct device *device,
7473 		     struct plat_stmmacenet_data *plat_dat,
7474 		     struct stmmac_resources *res)
7475 {
7476 	struct net_device *ndev = NULL;
7477 	struct stmmac_priv *priv;
7478 	u32 rxq;
7479 	int i, ret = 0;
7480 
7481 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7482 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7483 	if (!ndev)
7484 		return -ENOMEM;
7485 
7486 	SET_NETDEV_DEV(ndev, device);
7487 
7488 	priv = netdev_priv(ndev);
7489 	priv->device = device;
7490 	priv->dev = ndev;
7491 
7492 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7493 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7494 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7495 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7496 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7497 	}
7498 
7499 	priv->xstats.pcpu_stats =
7500 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7501 	if (!priv->xstats.pcpu_stats)
7502 		return -ENOMEM;
7503 
7504 	stmmac_set_ethtool_ops(ndev);
7505 	priv->pause = pause;
7506 	priv->plat = plat_dat;
7507 	priv->ioaddr = res->addr;
7508 	priv->dev->base_addr = (unsigned long)res->addr;
7509 	priv->plat->dma_cfg->multi_msi_en =
7510 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7511 
7512 	priv->dev->irq = res->irq;
7513 	priv->wol_irq = res->wol_irq;
7514 	priv->lpi_irq = res->lpi_irq;
7515 	priv->sfty_irq = res->sfty_irq;
7516 	priv->sfty_ce_irq = res->sfty_ce_irq;
7517 	priv->sfty_ue_irq = res->sfty_ue_irq;
7518 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7519 		priv->rx_irq[i] = res->rx_irq[i];
7520 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7521 		priv->tx_irq[i] = res->tx_irq[i];
7522 
7523 	if (!is_zero_ether_addr(res->mac))
7524 		eth_hw_addr_set(priv->dev, res->mac);
7525 
7526 	dev_set_drvdata(device, priv->dev);
7527 
7528 	/* Verify driver arguments */
7529 	stmmac_verify_args();
7530 
7531 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7532 	if (!priv->af_xdp_zc_qps)
7533 		return -ENOMEM;
7534 
7535 	/* Allocate workqueue */
7536 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7537 	if (!priv->wq) {
7538 		dev_err(priv->device, "failed to create workqueue\n");
7539 		ret = -ENOMEM;
7540 		goto error_wq_init;
7541 	}
7542 
7543 	INIT_WORK(&priv->service_task, stmmac_service_task);
7544 
7545 	/* Initialize Link Partner FPE workqueue */
7546 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7547 
7548 	/* Override with kernel parameters if supplied XXX CRS XXX
7549 	 * this needs to have multiple instances
7550 	 */
7551 	if ((phyaddr >= 0) && (phyaddr <= 31))
7552 		priv->plat->phy_addr = phyaddr;
7553 
7554 	if (priv->plat->stmmac_rst) {
7555 		ret = reset_control_assert(priv->plat->stmmac_rst);
7556 		reset_control_deassert(priv->plat->stmmac_rst);
7557 		/* Some reset controllers have only reset callback instead of
7558 		 * assert + deassert callbacks pair.
7559 		 */
7560 		if (ret == -ENOTSUPP)
7561 			reset_control_reset(priv->plat->stmmac_rst);
7562 	}
7563 
7564 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7565 	if (ret == -ENOTSUPP)
7566 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7567 			ERR_PTR(ret));
7568 
7569 	/* Wait a bit for the reset to take effect */
7570 	udelay(10);
7571 
7572 	/* Init MAC and get the capabilities */
7573 	ret = stmmac_hw_init(priv);
7574 	if (ret)
7575 		goto error_hw_init;
7576 
7577 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7578 	 */
7579 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7580 		priv->plat->dma_cfg->dche = false;
7581 
7582 	stmmac_check_ether_addr(priv);
7583 
7584 	ndev->netdev_ops = &stmmac_netdev_ops;
7585 
7586 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7587 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7588 
7589 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7590 			    NETIF_F_RXCSUM;
7591 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7592 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7593 
7594 	ret = stmmac_tc_init(priv, priv);
7595 	if (!ret) {
7596 		ndev->hw_features |= NETIF_F_HW_TC;
7597 	}
7598 
7599 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7600 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7601 		if (priv->plat->has_gmac4)
7602 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7603 		priv->tso = true;
7604 		dev_info(priv->device, "TSO feature enabled\n");
7605 	}
7606 
7607 	if (priv->dma_cap.sphen &&
7608 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7609 		ndev->hw_features |= NETIF_F_GRO;
7610 		priv->sph_cap = true;
7611 		priv->sph = priv->sph_cap;
7612 		dev_info(priv->device, "SPH feature enabled\n");
7613 	}
7614 
7615 	/* Ideally our host DMA address width is the same as for the
7616 	 * device. However, it may differ and then we have to use our
7617 	 * host DMA width for allocation and the device DMA width for
7618 	 * register handling.
7619 	 */
7620 	if (priv->plat->host_dma_width)
7621 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7622 	else
7623 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7624 
7625 	if (priv->dma_cap.host_dma_width) {
7626 		ret = dma_set_mask_and_coherent(device,
7627 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7628 		if (!ret) {
7629 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7630 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7631 
7632 			/*
7633 			 * If more than 32 bits can be addressed, make sure to
7634 			 * enable enhanced addressing mode.
7635 			 */
7636 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7637 				priv->plat->dma_cfg->eame = true;
7638 		} else {
7639 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7640 			if (ret) {
7641 				dev_err(priv->device, "Failed to set DMA Mask\n");
7642 				goto error_hw_init;
7643 			}
7644 
7645 			priv->dma_cap.host_dma_width = 32;
7646 		}
7647 	}
7648 
7649 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7650 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7651 #ifdef STMMAC_VLAN_TAG_USED
7652 	/* Both mac100 and gmac support receive VLAN tag detection */
7653 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7654 	ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7655 	priv->hw->hw_vlan_en = true;
7656 
7657 	if (priv->dma_cap.vlhash) {
7658 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7659 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7660 	}
7661 	if (priv->dma_cap.vlins) {
7662 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7663 		if (priv->dma_cap.dvlan)
7664 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7665 	}
7666 #endif
7667 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7668 
7669 	priv->xstats.threshold = tc;
7670 
7671 	/* Initialize RSS */
7672 	rxq = priv->plat->rx_queues_to_use;
7673 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7674 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7675 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7676 
7677 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7678 		ndev->features |= NETIF_F_RXHASH;
7679 
7680 	ndev->vlan_features |= ndev->features;
7681 	/* TSO doesn't work on VLANs yet */
7682 	ndev->vlan_features &= ~NETIF_F_TSO;
7683 
7684 	/* MTU range: 46 - hw-specific max */
7685 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7686 	if (priv->plat->has_xgmac)
7687 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7688 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7689 		ndev->max_mtu = JUMBO_LEN;
7690 	else
7691 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7692 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7693 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7694 	 */
7695 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7696 	    (priv->plat->maxmtu >= ndev->min_mtu))
7697 		ndev->max_mtu = priv->plat->maxmtu;
7698 	else if (priv->plat->maxmtu < ndev->min_mtu)
7699 		dev_warn(priv->device,
7700 			 "%s: warning: maxmtu having invalid value (%d)\n",
7701 			 __func__, priv->plat->maxmtu);
7702 
7703 	if (flow_ctrl)
7704 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7705 
7706 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7707 
7708 	/* Setup channels NAPI */
7709 	stmmac_napi_add(ndev);
7710 
7711 	mutex_init(&priv->lock);
7712 
7713 	/* If a specific clk_csr value is passed from the platform
7714 	 * this means that the CSR Clock Range selection cannot be
7715 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7716 	 * set the MDC clock dynamically according to the csr actual
7717 	 * clock input.
7718 	 */
7719 	if (priv->plat->clk_csr >= 0)
7720 		priv->clk_csr = priv->plat->clk_csr;
7721 	else
7722 		stmmac_clk_csr_set(priv);
7723 
7724 	stmmac_check_pcs_mode(priv);
7725 
7726 	pm_runtime_get_noresume(device);
7727 	pm_runtime_set_active(device);
7728 	if (!pm_runtime_enabled(device))
7729 		pm_runtime_enable(device);
7730 
7731 	ret = stmmac_mdio_register(ndev);
7732 	if (ret < 0) {
7733 		dev_err_probe(priv->device, ret,
7734 			      "MDIO bus (id: %d) registration failed\n",
7735 			      priv->plat->bus_id);
7736 		goto error_mdio_register;
7737 	}
7738 
7739 	if (priv->plat->speed_mode_2500)
7740 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7741 
7742 	ret = stmmac_pcs_setup(ndev);
7743 	if (ret)
7744 		goto error_pcs_setup;
7745 
7746 	ret = stmmac_phy_setup(priv);
7747 	if (ret) {
7748 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7749 		goto error_phy_setup;
7750 	}
7751 
7752 	ret = register_netdev(ndev);
7753 	if (ret) {
7754 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7755 			__func__, ret);
7756 		goto error_netdev_register;
7757 	}
7758 
7759 #ifdef CONFIG_DEBUG_FS
7760 	stmmac_init_fs(ndev);
7761 #endif
7762 
7763 	if (priv->plat->dump_debug_regs)
7764 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7765 
7766 	/* Let pm_runtime_put() disable the clocks.
7767 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7768 	 */
7769 	pm_runtime_put(device);
7770 
7771 	return ret;
7772 
7773 error_netdev_register:
7774 	phylink_destroy(priv->phylink);
7775 error_phy_setup:
7776 	stmmac_pcs_clean(ndev);
7777 error_pcs_setup:
7778 	stmmac_mdio_unregister(ndev);
7779 error_mdio_register:
7780 	stmmac_napi_del(ndev);
7781 error_hw_init:
7782 	destroy_workqueue(priv->wq);
7783 error_wq_init:
7784 	bitmap_free(priv->af_xdp_zc_qps);
7785 
7786 	return ret;
7787 }
7788 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7789 
7790 /**
7791  * stmmac_dvr_remove
7792  * @dev: device pointer
7793  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7794  * changes the link status, releases the DMA descriptor rings.
7795  */
7796 void stmmac_dvr_remove(struct device *dev)
7797 {
7798 	struct net_device *ndev = dev_get_drvdata(dev);
7799 	struct stmmac_priv *priv = netdev_priv(ndev);
7800 
7801 	netdev_info(priv->dev, "%s: removing driver", __func__);
7802 
7803 	pm_runtime_get_sync(dev);
7804 
7805 	stmmac_stop_all_dma(priv);
7806 	stmmac_mac_set(priv, priv->ioaddr, false);
7807 	unregister_netdev(ndev);
7808 
7809 #ifdef CONFIG_DEBUG_FS
7810 	stmmac_exit_fs(ndev);
7811 #endif
7812 	phylink_destroy(priv->phylink);
7813 	if (priv->plat->stmmac_rst)
7814 		reset_control_assert(priv->plat->stmmac_rst);
7815 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7816 
7817 	stmmac_pcs_clean(ndev);
7818 	stmmac_mdio_unregister(ndev);
7819 
7820 	destroy_workqueue(priv->wq);
7821 	mutex_destroy(&priv->lock);
7822 	bitmap_free(priv->af_xdp_zc_qps);
7823 
7824 	pm_runtime_disable(dev);
7825 	pm_runtime_put_noidle(dev);
7826 }
7827 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7828 
7829 /**
7830  * stmmac_suspend - suspend callback
7831  * @dev: device pointer
7832  * Description: this is the function to suspend the device and it is called
7833  * by the platform driver to stop the network queue, release the resources,
7834  * program the PMT register (for WoL), clean and release driver resources.
7835  */
7836 int stmmac_suspend(struct device *dev)
7837 {
7838 	struct net_device *ndev = dev_get_drvdata(dev);
7839 	struct stmmac_priv *priv = netdev_priv(ndev);
7840 	u32 chan;
7841 
7842 	if (!ndev || !netif_running(ndev))
7843 		return 0;
7844 
7845 	mutex_lock(&priv->lock);
7846 
7847 	netif_device_detach(ndev);
7848 
7849 	stmmac_disable_all_queues(priv);
7850 
7851 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7852 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7853 
7854 	if (priv->eee_enabled) {
7855 		priv->tx_path_in_lpi_mode = false;
7856 		del_timer_sync(&priv->eee_ctrl_timer);
7857 	}
7858 
7859 	/* Stop TX/RX DMA */
7860 	stmmac_stop_all_dma(priv);
7861 
7862 	if (priv->plat->serdes_powerdown)
7863 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7864 
7865 	/* Enable Power down mode by programming the PMT regs */
7866 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7867 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7868 		priv->irq_wake = 1;
7869 	} else {
7870 		stmmac_mac_set(priv, priv->ioaddr, false);
7871 		pinctrl_pm_select_sleep_state(priv->device);
7872 	}
7873 
7874 	mutex_unlock(&priv->lock);
7875 
7876 	rtnl_lock();
7877 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7878 		phylink_suspend(priv->phylink, true);
7879 	} else {
7880 		if (device_may_wakeup(priv->device))
7881 			phylink_speed_down(priv->phylink, false);
7882 		phylink_suspend(priv->phylink, false);
7883 	}
7884 	rtnl_unlock();
7885 
7886 	if (priv->dma_cap.fpesel) {
7887 		/* Disable FPE */
7888 		stmmac_fpe_configure(priv, priv->ioaddr,
7889 				     priv->plat->fpe_cfg,
7890 				     priv->plat->tx_queues_to_use,
7891 				     priv->plat->rx_queues_to_use, false);
7892 
7893 		stmmac_fpe_handshake(priv, false);
7894 		stmmac_fpe_stop_wq(priv);
7895 	}
7896 
7897 	priv->speed = SPEED_UNKNOWN;
7898 	return 0;
7899 }
7900 EXPORT_SYMBOL_GPL(stmmac_suspend);
7901 
7902 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7903 {
7904 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7905 
7906 	rx_q->cur_rx = 0;
7907 	rx_q->dirty_rx = 0;
7908 }
7909 
7910 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7911 {
7912 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7913 
7914 	tx_q->cur_tx = 0;
7915 	tx_q->dirty_tx = 0;
7916 	tx_q->mss = 0;
7917 
7918 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7919 }
7920 
7921 /**
7922  * stmmac_reset_queues_param - reset queue parameters
7923  * @priv: device pointer
7924  */
7925 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7926 {
7927 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7928 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7929 	u32 queue;
7930 
7931 	for (queue = 0; queue < rx_cnt; queue++)
7932 		stmmac_reset_rx_queue(priv, queue);
7933 
7934 	for (queue = 0; queue < tx_cnt; queue++)
7935 		stmmac_reset_tx_queue(priv, queue);
7936 }
7937 
7938 /**
7939  * stmmac_resume - resume callback
7940  * @dev: device pointer
7941  * Description: when resume this function is invoked to setup the DMA and CORE
7942  * in a usable state.
7943  */
7944 int stmmac_resume(struct device *dev)
7945 {
7946 	struct net_device *ndev = dev_get_drvdata(dev);
7947 	struct stmmac_priv *priv = netdev_priv(ndev);
7948 	int ret;
7949 
7950 	if (!netif_running(ndev))
7951 		return 0;
7952 
7953 	/* Power Down bit, into the PM register, is cleared
7954 	 * automatically as soon as a magic packet or a Wake-up frame
7955 	 * is received. Anyway, it's better to manually clear
7956 	 * this bit because it can generate problems while resuming
7957 	 * from another devices (e.g. serial console).
7958 	 */
7959 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7960 		mutex_lock(&priv->lock);
7961 		stmmac_pmt(priv, priv->hw, 0);
7962 		mutex_unlock(&priv->lock);
7963 		priv->irq_wake = 0;
7964 	} else {
7965 		pinctrl_pm_select_default_state(priv->device);
7966 		/* reset the phy so that it's ready */
7967 		if (priv->mii)
7968 			stmmac_mdio_reset(priv->mii);
7969 	}
7970 
7971 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7972 	    priv->plat->serdes_powerup) {
7973 		ret = priv->plat->serdes_powerup(ndev,
7974 						 priv->plat->bsp_priv);
7975 
7976 		if (ret < 0)
7977 			return ret;
7978 	}
7979 
7980 	rtnl_lock();
7981 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7982 		phylink_resume(priv->phylink);
7983 	} else {
7984 		phylink_resume(priv->phylink);
7985 		if (device_may_wakeup(priv->device))
7986 			phylink_speed_up(priv->phylink);
7987 	}
7988 	rtnl_unlock();
7989 
7990 	rtnl_lock();
7991 	mutex_lock(&priv->lock);
7992 
7993 	stmmac_reset_queues_param(priv);
7994 
7995 	stmmac_free_tx_skbufs(priv);
7996 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7997 
7998 	stmmac_hw_setup(ndev, false);
7999 	stmmac_init_coalesce(priv);
8000 	stmmac_set_rx_mode(ndev);
8001 
8002 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8003 
8004 	stmmac_enable_all_queues(priv);
8005 	stmmac_enable_all_dma_irq(priv);
8006 
8007 	mutex_unlock(&priv->lock);
8008 	rtnl_unlock();
8009 
8010 	netif_device_attach(ndev);
8011 
8012 	return 0;
8013 }
8014 EXPORT_SYMBOL_GPL(stmmac_resume);
8015 
8016 #ifndef MODULE
8017 static int __init stmmac_cmdline_opt(char *str)
8018 {
8019 	char *opt;
8020 
8021 	if (!str || !*str)
8022 		return 1;
8023 	while ((opt = strsep(&str, ",")) != NULL) {
8024 		if (!strncmp(opt, "debug:", 6)) {
8025 			if (kstrtoint(opt + 6, 0, &debug))
8026 				goto err;
8027 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8028 			if (kstrtoint(opt + 8, 0, &phyaddr))
8029 				goto err;
8030 		} else if (!strncmp(opt, "buf_sz:", 7)) {
8031 			if (kstrtoint(opt + 7, 0, &buf_sz))
8032 				goto err;
8033 		} else if (!strncmp(opt, "tc:", 3)) {
8034 			if (kstrtoint(opt + 3, 0, &tc))
8035 				goto err;
8036 		} else if (!strncmp(opt, "watchdog:", 9)) {
8037 			if (kstrtoint(opt + 9, 0, &watchdog))
8038 				goto err;
8039 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8040 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8041 				goto err;
8042 		} else if (!strncmp(opt, "pause:", 6)) {
8043 			if (kstrtoint(opt + 6, 0, &pause))
8044 				goto err;
8045 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8046 			if (kstrtoint(opt + 10, 0, &eee_timer))
8047 				goto err;
8048 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8049 			if (kstrtoint(opt + 11, 0, &chain_mode))
8050 				goto err;
8051 		}
8052 	}
8053 	return 1;
8054 
8055 err:
8056 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8057 	return 1;
8058 }
8059 
8060 __setup("stmmaceth=", stmmac_cmdline_opt);
8061 #endif /* MODULE */
8062 
8063 static int __init stmmac_init(void)
8064 {
8065 #ifdef CONFIG_DEBUG_FS
8066 	/* Create debugfs main directory if it doesn't exist yet */
8067 	if (!stmmac_fs_dir)
8068 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8069 	register_netdevice_notifier(&stmmac_notifier);
8070 #endif
8071 
8072 	return 0;
8073 }
8074 
8075 static void __exit stmmac_exit(void)
8076 {
8077 #ifdef CONFIG_DEBUG_FS
8078 	unregister_netdevice_notifier(&stmmac_notifier);
8079 	debugfs_remove_recursive(stmmac_fs_dir);
8080 #endif
8081 }
8082 
8083 module_init(stmmac_init)
8084 module_exit(stmmac_exit)
8085 
8086 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8087 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8088 MODULE_LICENSE("GPL");
8089