xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 2eff01ee2881becc9daaa0d53477ec202136b1f4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54 
55 /* As long as the interface is active, we keep the timestamping counter enabled
56  * with fine resolution and binary rollover. This avoid non-monotonic behavior
57  * (clock jumps) when changing timestamping settings at runtime.
58  */
59 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 				 PTP_TCR_TSCTRLSSR)
61 
62 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
64 
65 /* Module parameters */
66 #define TX_TIMEO	5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70 
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74 
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78 
79 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
80 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
81 
82 /* Limit to make sure XDP TX and slow path can coexist */
83 #define STMMAC_XSK_TX_BUDGET_MAX	256
84 #define STMMAC_TX_XSK_AVAIL		16
85 #define STMMAC_RX_FILL_BATCH		16
86 
87 #define STMMAC_XDP_PASS		0
88 #define STMMAC_XDP_CONSUMED	BIT(0)
89 #define STMMAC_XDP_TX		BIT(1)
90 #define STMMAC_XDP_REDIRECT	BIT(2)
91 
92 static int flow_ctrl = FLOW_AUTO;
93 module_param(flow_ctrl, int, 0644);
94 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
95 
96 static int pause = PAUSE_TIME;
97 module_param(pause, int, 0644);
98 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
99 
100 #define TC_DEFAULT 64
101 static int tc = TC_DEFAULT;
102 module_param(tc, int, 0644);
103 MODULE_PARM_DESC(tc, "DMA threshold control value");
104 
105 #define	DEFAULT_BUFSIZE	1536
106 static int buf_sz = DEFAULT_BUFSIZE;
107 module_param(buf_sz, int, 0644);
108 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
109 
110 #define	STMMAC_RX_COPYBREAK	256
111 
112 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
113 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
114 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
115 
116 #define STMMAC_DEFAULT_LPI_TIMER	1000
117 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
118 module_param(eee_timer, int, 0644);
119 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
120 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
121 
122 /* By default the driver will use the ring mode to manage tx and rx descriptors,
123  * but allow user to force to use the chain instead of the ring
124  */
125 static unsigned int chain_mode;
126 module_param(chain_mode, int, 0444);
127 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
128 
129 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
130 /* For MSI interrupts handling */
131 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
133 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
134 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
135 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
138 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
139 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
140 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
141 					  u32 rxmode, u32 chan);
142 
143 #ifdef CONFIG_DEBUG_FS
144 static const struct net_device_ops stmmac_netdev_ops;
145 static void stmmac_init_fs(struct net_device *dev);
146 static void stmmac_exit_fs(struct net_device *dev);
147 #endif
148 
149 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
150 
151 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
152 {
153 	int ret = 0;
154 
155 	if (enabled) {
156 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
157 		if (ret)
158 			return ret;
159 		ret = clk_prepare_enable(priv->plat->pclk);
160 		if (ret) {
161 			clk_disable_unprepare(priv->plat->stmmac_clk);
162 			return ret;
163 		}
164 		if (priv->plat->clks_config) {
165 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
166 			if (ret) {
167 				clk_disable_unprepare(priv->plat->stmmac_clk);
168 				clk_disable_unprepare(priv->plat->pclk);
169 				return ret;
170 			}
171 		}
172 	} else {
173 		clk_disable_unprepare(priv->plat->stmmac_clk);
174 		clk_disable_unprepare(priv->plat->pclk);
175 		if (priv->plat->clks_config)
176 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
177 	}
178 
179 	return ret;
180 }
181 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
182 
183 /**
184  * stmmac_verify_args - verify the driver parameters.
185  * Description: it checks the driver parameters and set a default in case of
186  * errors.
187  */
188 static void stmmac_verify_args(void)
189 {
190 	if (unlikely(watchdog < 0))
191 		watchdog = TX_TIMEO;
192 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
193 		buf_sz = DEFAULT_BUFSIZE;
194 	if (unlikely(flow_ctrl > 1))
195 		flow_ctrl = FLOW_AUTO;
196 	else if (likely(flow_ctrl < 0))
197 		flow_ctrl = FLOW_OFF;
198 	if (unlikely((pause < 0) || (pause > 0xffff)))
199 		pause = PAUSE_TIME;
200 	if (eee_timer < 0)
201 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
202 }
203 
204 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
205 {
206 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
207 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
208 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
209 	u32 queue;
210 
211 	for (queue = 0; queue < maxq; queue++) {
212 		struct stmmac_channel *ch = &priv->channel[queue];
213 
214 		if (stmmac_xdp_is_enabled(priv) &&
215 		    test_bit(queue, priv->af_xdp_zc_qps)) {
216 			napi_disable(&ch->rxtx_napi);
217 			continue;
218 		}
219 
220 		if (queue < rx_queues_cnt)
221 			napi_disable(&ch->rx_napi);
222 		if (queue < tx_queues_cnt)
223 			napi_disable(&ch->tx_napi);
224 	}
225 }
226 
227 /**
228  * stmmac_disable_all_queues - Disable all queues
229  * @priv: driver private structure
230  */
231 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
232 {
233 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
234 	struct stmmac_rx_queue *rx_q;
235 	u32 queue;
236 
237 	/* synchronize_rcu() needed for pending XDP buffers to drain */
238 	for (queue = 0; queue < rx_queues_cnt; queue++) {
239 		rx_q = &priv->dma_conf.rx_queue[queue];
240 		if (rx_q->xsk_pool) {
241 			synchronize_rcu();
242 			break;
243 		}
244 	}
245 
246 	__stmmac_disable_all_queues(priv);
247 }
248 
249 /**
250  * stmmac_enable_all_queues - Enable all queues
251  * @priv: driver private structure
252  */
253 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
254 {
255 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
256 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
257 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
258 	u32 queue;
259 
260 	for (queue = 0; queue < maxq; queue++) {
261 		struct stmmac_channel *ch = &priv->channel[queue];
262 
263 		if (stmmac_xdp_is_enabled(priv) &&
264 		    test_bit(queue, priv->af_xdp_zc_qps)) {
265 			napi_enable(&ch->rxtx_napi);
266 			continue;
267 		}
268 
269 		if (queue < rx_queues_cnt)
270 			napi_enable(&ch->rx_napi);
271 		if (queue < tx_queues_cnt)
272 			napi_enable(&ch->tx_napi);
273 	}
274 }
275 
276 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
277 {
278 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
279 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
280 		queue_work(priv->wq, &priv->service_task);
281 }
282 
283 static void stmmac_global_err(struct stmmac_priv *priv)
284 {
285 	netif_carrier_off(priv->dev);
286 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
287 	stmmac_service_event_schedule(priv);
288 }
289 
290 /**
291  * stmmac_clk_csr_set - dynamically set the MDC clock
292  * @priv: driver private structure
293  * Description: this is to dynamically set the MDC clock according to the csr
294  * clock input.
295  * Note:
296  *	If a specific clk_csr value is passed from the platform
297  *	this means that the CSR Clock Range selection cannot be
298  *	changed at run-time and it is fixed (as reported in the driver
299  *	documentation). Viceversa the driver will try to set the MDC
300  *	clock dynamically according to the actual clock input.
301  */
302 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
303 {
304 	u32 clk_rate;
305 
306 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
307 
308 	/* Platform provided default clk_csr would be assumed valid
309 	 * for all other cases except for the below mentioned ones.
310 	 * For values higher than the IEEE 802.3 specified frequency
311 	 * we can not estimate the proper divider as it is not known
312 	 * the frequency of clk_csr_i. So we do not change the default
313 	 * divider.
314 	 */
315 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
316 		if (clk_rate < CSR_F_35M)
317 			priv->clk_csr = STMMAC_CSR_20_35M;
318 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
319 			priv->clk_csr = STMMAC_CSR_35_60M;
320 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
321 			priv->clk_csr = STMMAC_CSR_60_100M;
322 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
323 			priv->clk_csr = STMMAC_CSR_100_150M;
324 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
325 			priv->clk_csr = STMMAC_CSR_150_250M;
326 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
327 			priv->clk_csr = STMMAC_CSR_250_300M;
328 	}
329 
330 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
331 		if (clk_rate > 160000000)
332 			priv->clk_csr = 0x03;
333 		else if (clk_rate > 80000000)
334 			priv->clk_csr = 0x02;
335 		else if (clk_rate > 40000000)
336 			priv->clk_csr = 0x01;
337 		else
338 			priv->clk_csr = 0;
339 	}
340 
341 	if (priv->plat->has_xgmac) {
342 		if (clk_rate > 400000000)
343 			priv->clk_csr = 0x5;
344 		else if (clk_rate > 350000000)
345 			priv->clk_csr = 0x4;
346 		else if (clk_rate > 300000000)
347 			priv->clk_csr = 0x3;
348 		else if (clk_rate > 250000000)
349 			priv->clk_csr = 0x2;
350 		else if (clk_rate > 150000000)
351 			priv->clk_csr = 0x1;
352 		else
353 			priv->clk_csr = 0x0;
354 	}
355 }
356 
357 static void print_pkt(unsigned char *buf, int len)
358 {
359 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
360 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
361 }
362 
363 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
364 {
365 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
366 	u32 avail;
367 
368 	if (tx_q->dirty_tx > tx_q->cur_tx)
369 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
370 	else
371 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
372 
373 	return avail;
374 }
375 
376 /**
377  * stmmac_rx_dirty - Get RX queue dirty
378  * @priv: driver private structure
379  * @queue: RX queue index
380  */
381 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
382 {
383 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
384 	u32 dirty;
385 
386 	if (rx_q->dirty_rx <= rx_q->cur_rx)
387 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
388 	else
389 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
390 
391 	return dirty;
392 }
393 
394 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
395 {
396 	int tx_lpi_timer;
397 
398 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
399 	priv->eee_sw_timer_en = en ? 0 : 1;
400 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
401 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
402 }
403 
404 /**
405  * stmmac_enable_eee_mode - check and enter in LPI mode
406  * @priv: driver private structure
407  * Description: this function is to verify and enter in LPI mode in case of
408  * EEE.
409  */
410 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
411 {
412 	u32 tx_cnt = priv->plat->tx_queues_to_use;
413 	u32 queue;
414 
415 	/* check if all TX queues have the work finished */
416 	for (queue = 0; queue < tx_cnt; queue++) {
417 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
418 
419 		if (tx_q->dirty_tx != tx_q->cur_tx)
420 			return -EBUSY; /* still unfinished work */
421 	}
422 
423 	/* Check and enter in LPI mode */
424 	if (!priv->tx_path_in_lpi_mode)
425 		stmmac_set_eee_mode(priv, priv->hw,
426 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
427 	return 0;
428 }
429 
430 /**
431  * stmmac_disable_eee_mode - disable and exit from LPI mode
432  * @priv: driver private structure
433  * Description: this function is to exit and disable EEE in case of
434  * LPI state is true. This is called by the xmit.
435  */
436 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
437 {
438 	if (!priv->eee_sw_timer_en) {
439 		stmmac_lpi_entry_timer_config(priv, 0);
440 		return;
441 	}
442 
443 	stmmac_reset_eee_mode(priv, priv->hw);
444 	del_timer_sync(&priv->eee_ctrl_timer);
445 	priv->tx_path_in_lpi_mode = false;
446 }
447 
448 /**
449  * stmmac_eee_ctrl_timer - EEE TX SW timer.
450  * @t:  timer_list struct containing private info
451  * Description:
452  *  if there is no data transfer and if we are not in LPI state,
453  *  then MAC Transmitter can be moved to LPI state.
454  */
455 static void stmmac_eee_ctrl_timer(struct timer_list *t)
456 {
457 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
458 
459 	if (stmmac_enable_eee_mode(priv))
460 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
461 }
462 
463 /**
464  * stmmac_eee_init - init EEE
465  * @priv: driver private structure
466  * Description:
467  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
468  *  can also manage EEE, this function enable the LPI state and start related
469  *  timer.
470  */
471 bool stmmac_eee_init(struct stmmac_priv *priv)
472 {
473 	int eee_tw_timer = priv->eee_tw_timer;
474 
475 	/* Check if MAC core supports the EEE feature. */
476 	if (!priv->dma_cap.eee)
477 		return false;
478 
479 	mutex_lock(&priv->lock);
480 
481 	/* Check if it needs to be deactivated */
482 	if (!priv->eee_active) {
483 		if (priv->eee_enabled) {
484 			netdev_dbg(priv->dev, "disable EEE\n");
485 			stmmac_lpi_entry_timer_config(priv, 0);
486 			del_timer_sync(&priv->eee_ctrl_timer);
487 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
488 			if (priv->hw->xpcs)
489 				xpcs_config_eee(priv->hw->xpcs,
490 						priv->plat->mult_fact_100ns,
491 						false);
492 		}
493 		mutex_unlock(&priv->lock);
494 		return false;
495 	}
496 
497 	if (priv->eee_active && !priv->eee_enabled) {
498 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
499 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
500 				     eee_tw_timer);
501 		if (priv->hw->xpcs)
502 			xpcs_config_eee(priv->hw->xpcs,
503 					priv->plat->mult_fact_100ns,
504 					true);
505 	}
506 
507 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
508 		del_timer_sync(&priv->eee_ctrl_timer);
509 		priv->tx_path_in_lpi_mode = false;
510 		stmmac_lpi_entry_timer_config(priv, 1);
511 	} else {
512 		stmmac_lpi_entry_timer_config(priv, 0);
513 		mod_timer(&priv->eee_ctrl_timer,
514 			  STMMAC_LPI_T(priv->tx_lpi_timer));
515 	}
516 
517 	mutex_unlock(&priv->lock);
518 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
519 	return true;
520 }
521 
522 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
523  * @priv: driver private structure
524  * @p : descriptor pointer
525  * @skb : the socket buffer
526  * Description :
527  * This function will read timestamp from the descriptor & pass it to stack.
528  * and also perform some sanity checks.
529  */
530 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
531 				   struct dma_desc *p, struct sk_buff *skb)
532 {
533 	struct skb_shared_hwtstamps shhwtstamp;
534 	bool found = false;
535 	u64 ns = 0;
536 
537 	if (!priv->hwts_tx_en)
538 		return;
539 
540 	/* exit if skb doesn't support hw tstamp */
541 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
542 		return;
543 
544 	/* check tx tstamp status */
545 	if (stmmac_get_tx_timestamp_status(priv, p)) {
546 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
547 		found = true;
548 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
549 		found = true;
550 	}
551 
552 	if (found) {
553 		ns -= priv->plat->cdc_error_adj;
554 
555 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
556 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
557 
558 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
559 		/* pass tstamp to stack */
560 		skb_tstamp_tx(skb, &shhwtstamp);
561 	}
562 }
563 
564 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
565  * @priv: driver private structure
566  * @p : descriptor pointer
567  * @np : next descriptor pointer
568  * @skb : the socket buffer
569  * Description :
570  * This function will read received packet's timestamp from the descriptor
571  * and pass it to stack. It also perform some sanity checks.
572  */
573 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
574 				   struct dma_desc *np, struct sk_buff *skb)
575 {
576 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
577 	struct dma_desc *desc = p;
578 	u64 ns = 0;
579 
580 	if (!priv->hwts_rx_en)
581 		return;
582 	/* For GMAC4, the valid timestamp is from CTX next desc. */
583 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
584 		desc = np;
585 
586 	/* Check if timestamp is available */
587 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
588 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
589 
590 		ns -= priv->plat->cdc_error_adj;
591 
592 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
593 		shhwtstamp = skb_hwtstamps(skb);
594 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
595 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
596 	} else  {
597 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
598 	}
599 }
600 
601 /**
602  *  stmmac_hwtstamp_set - control hardware timestamping.
603  *  @dev: device pointer.
604  *  @ifr: An IOCTL specific structure, that can contain a pointer to
605  *  a proprietary structure used to pass information to the driver.
606  *  Description:
607  *  This function configures the MAC to enable/disable both outgoing(TX)
608  *  and incoming(RX) packets time stamping based on user input.
609  *  Return Value:
610  *  0 on success and an appropriate -ve integer on failure.
611  */
612 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
613 {
614 	struct stmmac_priv *priv = netdev_priv(dev);
615 	struct hwtstamp_config config;
616 	u32 ptp_v2 = 0;
617 	u32 tstamp_all = 0;
618 	u32 ptp_over_ipv4_udp = 0;
619 	u32 ptp_over_ipv6_udp = 0;
620 	u32 ptp_over_ethernet = 0;
621 	u32 snap_type_sel = 0;
622 	u32 ts_master_en = 0;
623 	u32 ts_event_en = 0;
624 
625 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
626 		netdev_alert(priv->dev, "No support for HW time stamping\n");
627 		priv->hwts_tx_en = 0;
628 		priv->hwts_rx_en = 0;
629 
630 		return -EOPNOTSUPP;
631 	}
632 
633 	if (copy_from_user(&config, ifr->ifr_data,
634 			   sizeof(config)))
635 		return -EFAULT;
636 
637 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
638 		   __func__, config.flags, config.tx_type, config.rx_filter);
639 
640 	if (config.tx_type != HWTSTAMP_TX_OFF &&
641 	    config.tx_type != HWTSTAMP_TX_ON)
642 		return -ERANGE;
643 
644 	if (priv->adv_ts) {
645 		switch (config.rx_filter) {
646 		case HWTSTAMP_FILTER_NONE:
647 			/* time stamp no incoming packet at all */
648 			config.rx_filter = HWTSTAMP_FILTER_NONE;
649 			break;
650 
651 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
652 			/* PTP v1, UDP, any kind of event packet */
653 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
654 			/* 'xmac' hardware can support Sync, Pdelay_Req and
655 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
656 			 * This leaves Delay_Req timestamps out.
657 			 * Enable all events *and* general purpose message
658 			 * timestamping
659 			 */
660 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
661 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
662 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
663 			break;
664 
665 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
666 			/* PTP v1, UDP, Sync packet */
667 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
668 			/* take time stamp for SYNC messages only */
669 			ts_event_en = PTP_TCR_TSEVNTENA;
670 
671 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
672 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
673 			break;
674 
675 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
676 			/* PTP v1, UDP, Delay_req packet */
677 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
678 			/* take time stamp for Delay_Req messages only */
679 			ts_master_en = PTP_TCR_TSMSTRENA;
680 			ts_event_en = PTP_TCR_TSEVNTENA;
681 
682 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
683 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
684 			break;
685 
686 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
687 			/* PTP v2, UDP, any kind of event packet */
688 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
689 			ptp_v2 = PTP_TCR_TSVER2ENA;
690 			/* take time stamp for all event messages */
691 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
692 
693 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
694 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
695 			break;
696 
697 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
698 			/* PTP v2, UDP, Sync packet */
699 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
700 			ptp_v2 = PTP_TCR_TSVER2ENA;
701 			/* take time stamp for SYNC messages only */
702 			ts_event_en = PTP_TCR_TSEVNTENA;
703 
704 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
705 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
706 			break;
707 
708 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
709 			/* PTP v2, UDP, Delay_req packet */
710 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
711 			ptp_v2 = PTP_TCR_TSVER2ENA;
712 			/* take time stamp for Delay_Req messages only */
713 			ts_master_en = PTP_TCR_TSMSTRENA;
714 			ts_event_en = PTP_TCR_TSEVNTENA;
715 
716 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
717 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
718 			break;
719 
720 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
721 			/* PTP v2/802.AS1 any layer, any kind of event packet */
722 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
723 			ptp_v2 = PTP_TCR_TSVER2ENA;
724 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
725 			if (priv->synopsys_id < DWMAC_CORE_4_10)
726 				ts_event_en = PTP_TCR_TSEVNTENA;
727 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
728 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
729 			ptp_over_ethernet = PTP_TCR_TSIPENA;
730 			break;
731 
732 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
733 			/* PTP v2/802.AS1, any layer, Sync packet */
734 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
735 			ptp_v2 = PTP_TCR_TSVER2ENA;
736 			/* take time stamp for SYNC messages only */
737 			ts_event_en = PTP_TCR_TSEVNTENA;
738 
739 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
740 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
741 			ptp_over_ethernet = PTP_TCR_TSIPENA;
742 			break;
743 
744 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
745 			/* PTP v2/802.AS1, any layer, Delay_req packet */
746 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
747 			ptp_v2 = PTP_TCR_TSVER2ENA;
748 			/* take time stamp for Delay_Req messages only */
749 			ts_master_en = PTP_TCR_TSMSTRENA;
750 			ts_event_en = PTP_TCR_TSEVNTENA;
751 
752 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
753 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
754 			ptp_over_ethernet = PTP_TCR_TSIPENA;
755 			break;
756 
757 		case HWTSTAMP_FILTER_NTP_ALL:
758 		case HWTSTAMP_FILTER_ALL:
759 			/* time stamp any incoming packet */
760 			config.rx_filter = HWTSTAMP_FILTER_ALL;
761 			tstamp_all = PTP_TCR_TSENALL;
762 			break;
763 
764 		default:
765 			return -ERANGE;
766 		}
767 	} else {
768 		switch (config.rx_filter) {
769 		case HWTSTAMP_FILTER_NONE:
770 			config.rx_filter = HWTSTAMP_FILTER_NONE;
771 			break;
772 		default:
773 			/* PTP v1, UDP, any kind of event packet */
774 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
775 			break;
776 		}
777 	}
778 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
779 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
780 
781 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
782 
783 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
784 		priv->systime_flags |= tstamp_all | ptp_v2 |
785 				       ptp_over_ethernet | ptp_over_ipv6_udp |
786 				       ptp_over_ipv4_udp | ts_event_en |
787 				       ts_master_en | snap_type_sel;
788 	}
789 
790 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
791 
792 	memcpy(&priv->tstamp_config, &config, sizeof(config));
793 
794 	return copy_to_user(ifr->ifr_data, &config,
795 			    sizeof(config)) ? -EFAULT : 0;
796 }
797 
798 /**
799  *  stmmac_hwtstamp_get - read hardware timestamping.
800  *  @dev: device pointer.
801  *  @ifr: An IOCTL specific structure, that can contain a pointer to
802  *  a proprietary structure used to pass information to the driver.
803  *  Description:
804  *  This function obtain the current hardware timestamping settings
805  *  as requested.
806  */
807 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
808 {
809 	struct stmmac_priv *priv = netdev_priv(dev);
810 	struct hwtstamp_config *config = &priv->tstamp_config;
811 
812 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
813 		return -EOPNOTSUPP;
814 
815 	return copy_to_user(ifr->ifr_data, config,
816 			    sizeof(*config)) ? -EFAULT : 0;
817 }
818 
819 /**
820  * stmmac_init_tstamp_counter - init hardware timestamping counter
821  * @priv: driver private structure
822  * @systime_flags: timestamping flags
823  * Description:
824  * Initialize hardware counter for packet timestamping.
825  * This is valid as long as the interface is open and not suspended.
826  * Will be rerun after resuming from suspend, case in which the timestamping
827  * flags updated by stmmac_hwtstamp_set() also need to be restored.
828  */
829 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
830 {
831 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
832 	struct timespec64 now;
833 	u32 sec_inc = 0;
834 	u64 temp = 0;
835 
836 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
837 		return -EOPNOTSUPP;
838 
839 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
840 	priv->systime_flags = systime_flags;
841 
842 	/* program Sub Second Increment reg */
843 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
844 					   priv->plat->clk_ptp_rate,
845 					   xmac, &sec_inc);
846 	temp = div_u64(1000000000ULL, sec_inc);
847 
848 	/* Store sub second increment for later use */
849 	priv->sub_second_inc = sec_inc;
850 
851 	/* calculate default added value:
852 	 * formula is :
853 	 * addend = (2^32)/freq_div_ratio;
854 	 * where, freq_div_ratio = 1e9ns/sec_inc
855 	 */
856 	temp = (u64)(temp << 32);
857 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
858 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
859 
860 	/* initialize system time */
861 	ktime_get_real_ts64(&now);
862 
863 	/* lower 32 bits of tv_sec are safe until y2106 */
864 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
865 
866 	return 0;
867 }
868 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
869 
870 /**
871  * stmmac_init_ptp - init PTP
872  * @priv: driver private structure
873  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
874  * This is done by looking at the HW cap. register.
875  * This function also registers the ptp driver.
876  */
877 static int stmmac_init_ptp(struct stmmac_priv *priv)
878 {
879 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
880 	int ret;
881 
882 	if (priv->plat->ptp_clk_freq_config)
883 		priv->plat->ptp_clk_freq_config(priv);
884 
885 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
886 	if (ret)
887 		return ret;
888 
889 	priv->adv_ts = 0;
890 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
891 	if (xmac && priv->dma_cap.atime_stamp)
892 		priv->adv_ts = 1;
893 	/* Dwmac 3.x core with extend_desc can support adv_ts */
894 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
895 		priv->adv_ts = 1;
896 
897 	if (priv->dma_cap.time_stamp)
898 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
899 
900 	if (priv->adv_ts)
901 		netdev_info(priv->dev,
902 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
903 
904 	priv->hwts_tx_en = 0;
905 	priv->hwts_rx_en = 0;
906 
907 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
908 		stmmac_hwtstamp_correct_latency(priv, priv);
909 
910 	return 0;
911 }
912 
913 static void stmmac_release_ptp(struct stmmac_priv *priv)
914 {
915 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
916 	stmmac_ptp_unregister(priv);
917 }
918 
919 /**
920  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
921  *  @priv: driver private structure
922  *  @duplex: duplex passed to the next function
923  *  Description: It is used for configuring the flow control in all queues
924  */
925 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
926 {
927 	u32 tx_cnt = priv->plat->tx_queues_to_use;
928 
929 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
930 			priv->pause, tx_cnt);
931 }
932 
933 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
934 					 phy_interface_t interface)
935 {
936 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
937 
938 	/* Refresh the MAC-specific capabilities */
939 	stmmac_mac_update_caps(priv);
940 
941 	config->mac_capabilities = priv->hw->link.caps;
942 
943 	if (priv->plat->max_speed)
944 		phylink_limit_mac_speed(config, priv->plat->max_speed);
945 
946 	return config->mac_capabilities;
947 }
948 
949 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
950 						 phy_interface_t interface)
951 {
952 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
953 	struct phylink_pcs *pcs;
954 
955 	if (priv->plat->select_pcs) {
956 		pcs = priv->plat->select_pcs(priv, interface);
957 		if (!IS_ERR(pcs))
958 			return pcs;
959 	}
960 
961 	return NULL;
962 }
963 
964 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
965 			      const struct phylink_link_state *state)
966 {
967 	/* Nothing to do, xpcs_config() handles everything */
968 }
969 
970 static void stmmac_mac_link_down(struct phylink_config *config,
971 				 unsigned int mode, phy_interface_t interface)
972 {
973 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
974 
975 	stmmac_mac_set(priv, priv->ioaddr, false);
976 	priv->eee_active = false;
977 	priv->tx_lpi_enabled = false;
978 	priv->eee_enabled = stmmac_eee_init(priv);
979 	stmmac_set_eee_pls(priv, priv->hw, false);
980 
981 	if (stmmac_fpe_supported(priv))
982 		stmmac_fpe_link_state_handle(priv, false);
983 }
984 
985 static void stmmac_mac_link_up(struct phylink_config *config,
986 			       struct phy_device *phy,
987 			       unsigned int mode, phy_interface_t interface,
988 			       int speed, int duplex,
989 			       bool tx_pause, bool rx_pause)
990 {
991 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
992 	u32 old_ctrl, ctrl;
993 
994 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
995 	    priv->plat->serdes_powerup)
996 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
997 
998 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
999 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1000 
1001 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1002 		switch (speed) {
1003 		case SPEED_10000:
1004 			ctrl |= priv->hw->link.xgmii.speed10000;
1005 			break;
1006 		case SPEED_5000:
1007 			ctrl |= priv->hw->link.xgmii.speed5000;
1008 			break;
1009 		case SPEED_2500:
1010 			ctrl |= priv->hw->link.xgmii.speed2500;
1011 			break;
1012 		default:
1013 			return;
1014 		}
1015 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1016 		switch (speed) {
1017 		case SPEED_100000:
1018 			ctrl |= priv->hw->link.xlgmii.speed100000;
1019 			break;
1020 		case SPEED_50000:
1021 			ctrl |= priv->hw->link.xlgmii.speed50000;
1022 			break;
1023 		case SPEED_40000:
1024 			ctrl |= priv->hw->link.xlgmii.speed40000;
1025 			break;
1026 		case SPEED_25000:
1027 			ctrl |= priv->hw->link.xlgmii.speed25000;
1028 			break;
1029 		case SPEED_10000:
1030 			ctrl |= priv->hw->link.xgmii.speed10000;
1031 			break;
1032 		case SPEED_2500:
1033 			ctrl |= priv->hw->link.speed2500;
1034 			break;
1035 		case SPEED_1000:
1036 			ctrl |= priv->hw->link.speed1000;
1037 			break;
1038 		default:
1039 			return;
1040 		}
1041 	} else {
1042 		switch (speed) {
1043 		case SPEED_2500:
1044 			ctrl |= priv->hw->link.speed2500;
1045 			break;
1046 		case SPEED_1000:
1047 			ctrl |= priv->hw->link.speed1000;
1048 			break;
1049 		case SPEED_100:
1050 			ctrl |= priv->hw->link.speed100;
1051 			break;
1052 		case SPEED_10:
1053 			ctrl |= priv->hw->link.speed10;
1054 			break;
1055 		default:
1056 			return;
1057 		}
1058 	}
1059 
1060 	priv->speed = speed;
1061 
1062 	if (priv->plat->fix_mac_speed)
1063 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1064 
1065 	if (!duplex)
1066 		ctrl &= ~priv->hw->link.duplex;
1067 	else
1068 		ctrl |= priv->hw->link.duplex;
1069 
1070 	/* Flow Control operation */
1071 	if (rx_pause && tx_pause)
1072 		priv->flow_ctrl = FLOW_AUTO;
1073 	else if (rx_pause && !tx_pause)
1074 		priv->flow_ctrl = FLOW_RX;
1075 	else if (!rx_pause && tx_pause)
1076 		priv->flow_ctrl = FLOW_TX;
1077 	else
1078 		priv->flow_ctrl = FLOW_OFF;
1079 
1080 	stmmac_mac_flow_ctrl(priv, duplex);
1081 
1082 	if (ctrl != old_ctrl)
1083 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1084 
1085 	stmmac_mac_set(priv, priv->ioaddr, true);
1086 	if (phy && priv->dma_cap.eee) {
1087 		priv->eee_active =
1088 			phy_init_eee(phy, !(priv->plat->flags &
1089 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1090 		priv->eee_enabled = stmmac_eee_init(priv);
1091 		priv->tx_lpi_enabled = priv->eee_enabled;
1092 		stmmac_set_eee_pls(priv, priv->hw, true);
1093 	}
1094 
1095 	if (stmmac_fpe_supported(priv))
1096 		stmmac_fpe_link_state_handle(priv, true);
1097 
1098 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1099 		stmmac_hwtstamp_correct_latency(priv, priv);
1100 }
1101 
1102 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1103 	.mac_get_caps = stmmac_mac_get_caps,
1104 	.mac_select_pcs = stmmac_mac_select_pcs,
1105 	.mac_config = stmmac_mac_config,
1106 	.mac_link_down = stmmac_mac_link_down,
1107 	.mac_link_up = stmmac_mac_link_up,
1108 };
1109 
1110 /**
1111  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1112  * @priv: driver private structure
1113  * Description: this is to verify if the HW supports the PCS.
1114  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1115  * configured for the TBI, RTBI, or SGMII PHY interface.
1116  */
1117 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1118 {
1119 	int interface = priv->plat->mac_interface;
1120 
1121 	if (priv->dma_cap.pcs) {
1122 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1123 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1124 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1125 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1126 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1127 			priv->hw->pcs = STMMAC_PCS_RGMII;
1128 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1129 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1130 			priv->hw->pcs = STMMAC_PCS_SGMII;
1131 		}
1132 	}
1133 }
1134 
1135 /**
1136  * stmmac_init_phy - PHY initialization
1137  * @dev: net device structure
1138  * Description: it initializes the driver's PHY state, and attaches the PHY
1139  * to the mac driver.
1140  *  Return value:
1141  *  0 on success
1142  */
1143 static int stmmac_init_phy(struct net_device *dev)
1144 {
1145 	struct stmmac_priv *priv = netdev_priv(dev);
1146 	struct fwnode_handle *phy_fwnode;
1147 	struct fwnode_handle *fwnode;
1148 	int ret;
1149 
1150 	if (!phylink_expects_phy(priv->phylink))
1151 		return 0;
1152 
1153 	fwnode = priv->plat->port_node;
1154 	if (!fwnode)
1155 		fwnode = dev_fwnode(priv->device);
1156 
1157 	if (fwnode)
1158 		phy_fwnode = fwnode_get_phy_node(fwnode);
1159 	else
1160 		phy_fwnode = NULL;
1161 
1162 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1163 	 * manually parse it
1164 	 */
1165 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1166 		int addr = priv->plat->phy_addr;
1167 		struct phy_device *phydev;
1168 
1169 		if (addr < 0) {
1170 			netdev_err(priv->dev, "no phy found\n");
1171 			return -ENODEV;
1172 		}
1173 
1174 		phydev = mdiobus_get_phy(priv->mii, addr);
1175 		if (!phydev) {
1176 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1177 			return -ENODEV;
1178 		}
1179 
1180 		if (priv->dma_cap.eee)
1181 			phy_support_eee(phydev);
1182 
1183 		ret = phylink_connect_phy(priv->phylink, phydev);
1184 	} else {
1185 		fwnode_handle_put(phy_fwnode);
1186 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1187 	}
1188 
1189 	if (!priv->plat->pmt) {
1190 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1191 
1192 		phylink_ethtool_get_wol(priv->phylink, &wol);
1193 		device_set_wakeup_capable(priv->device, !!wol.supported);
1194 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1195 	}
1196 
1197 	return ret;
1198 }
1199 
1200 static int stmmac_phy_setup(struct stmmac_priv *priv)
1201 {
1202 	struct stmmac_mdio_bus_data *mdio_bus_data;
1203 	int mode = priv->plat->phy_interface;
1204 	struct fwnode_handle *fwnode;
1205 	struct phylink *phylink;
1206 
1207 	priv->phylink_config.dev = &priv->dev->dev;
1208 	priv->phylink_config.type = PHYLINK_NETDEV;
1209 	priv->phylink_config.mac_managed_pm = true;
1210 
1211 	/* Stmmac always requires an RX clock for hardware initialization */
1212 	priv->phylink_config.mac_requires_rxc = true;
1213 
1214 	mdio_bus_data = priv->plat->mdio_bus_data;
1215 	if (mdio_bus_data)
1216 		priv->phylink_config.default_an_inband =
1217 			mdio_bus_data->default_an_inband;
1218 
1219 	/* Set the platform/firmware specified interface mode. Note, phylink
1220 	 * deals with the PHY interface mode, not the MAC interface mode.
1221 	 */
1222 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1223 
1224 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1225 	if (priv->hw->xpcs)
1226 		xpcs_get_interfaces(priv->hw->xpcs,
1227 				    priv->phylink_config.supported_interfaces);
1228 
1229 	fwnode = priv->plat->port_node;
1230 	if (!fwnode)
1231 		fwnode = dev_fwnode(priv->device);
1232 
1233 	phylink = phylink_create(&priv->phylink_config, fwnode,
1234 				 mode, &stmmac_phylink_mac_ops);
1235 	if (IS_ERR(phylink))
1236 		return PTR_ERR(phylink);
1237 
1238 	priv->phylink = phylink;
1239 	return 0;
1240 }
1241 
1242 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1243 				    struct stmmac_dma_conf *dma_conf)
1244 {
1245 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1246 	unsigned int desc_size;
1247 	void *head_rx;
1248 	u32 queue;
1249 
1250 	/* Display RX rings */
1251 	for (queue = 0; queue < rx_cnt; queue++) {
1252 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1253 
1254 		pr_info("\tRX Queue %u rings\n", queue);
1255 
1256 		if (priv->extend_desc) {
1257 			head_rx = (void *)rx_q->dma_erx;
1258 			desc_size = sizeof(struct dma_extended_desc);
1259 		} else {
1260 			head_rx = (void *)rx_q->dma_rx;
1261 			desc_size = sizeof(struct dma_desc);
1262 		}
1263 
1264 		/* Display RX ring */
1265 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1266 				    rx_q->dma_rx_phy, desc_size);
1267 	}
1268 }
1269 
1270 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1271 				    struct stmmac_dma_conf *dma_conf)
1272 {
1273 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1274 	unsigned int desc_size;
1275 	void *head_tx;
1276 	u32 queue;
1277 
1278 	/* Display TX rings */
1279 	for (queue = 0; queue < tx_cnt; queue++) {
1280 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1281 
1282 		pr_info("\tTX Queue %d rings\n", queue);
1283 
1284 		if (priv->extend_desc) {
1285 			head_tx = (void *)tx_q->dma_etx;
1286 			desc_size = sizeof(struct dma_extended_desc);
1287 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1288 			head_tx = (void *)tx_q->dma_entx;
1289 			desc_size = sizeof(struct dma_edesc);
1290 		} else {
1291 			head_tx = (void *)tx_q->dma_tx;
1292 			desc_size = sizeof(struct dma_desc);
1293 		}
1294 
1295 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1296 				    tx_q->dma_tx_phy, desc_size);
1297 	}
1298 }
1299 
1300 static void stmmac_display_rings(struct stmmac_priv *priv,
1301 				 struct stmmac_dma_conf *dma_conf)
1302 {
1303 	/* Display RX ring */
1304 	stmmac_display_rx_rings(priv, dma_conf);
1305 
1306 	/* Display TX ring */
1307 	stmmac_display_tx_rings(priv, dma_conf);
1308 }
1309 
1310 static int stmmac_set_bfsize(int mtu, int bufsize)
1311 {
1312 	int ret = bufsize;
1313 
1314 	if (mtu >= BUF_SIZE_8KiB)
1315 		ret = BUF_SIZE_16KiB;
1316 	else if (mtu >= BUF_SIZE_4KiB)
1317 		ret = BUF_SIZE_8KiB;
1318 	else if (mtu >= BUF_SIZE_2KiB)
1319 		ret = BUF_SIZE_4KiB;
1320 	else if (mtu > DEFAULT_BUFSIZE)
1321 		ret = BUF_SIZE_2KiB;
1322 	else
1323 		ret = DEFAULT_BUFSIZE;
1324 
1325 	return ret;
1326 }
1327 
1328 /**
1329  * stmmac_clear_rx_descriptors - clear RX descriptors
1330  * @priv: driver private structure
1331  * @dma_conf: structure to take the dma data
1332  * @queue: RX queue index
1333  * Description: this function is called to clear the RX descriptors
1334  * in case of both basic and extended descriptors are used.
1335  */
1336 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1337 					struct stmmac_dma_conf *dma_conf,
1338 					u32 queue)
1339 {
1340 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1341 	int i;
1342 
1343 	/* Clear the RX descriptors */
1344 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1345 		if (priv->extend_desc)
1346 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1347 					priv->use_riwt, priv->mode,
1348 					(i == dma_conf->dma_rx_size - 1),
1349 					dma_conf->dma_buf_sz);
1350 		else
1351 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1352 					priv->use_riwt, priv->mode,
1353 					(i == dma_conf->dma_rx_size - 1),
1354 					dma_conf->dma_buf_sz);
1355 }
1356 
1357 /**
1358  * stmmac_clear_tx_descriptors - clear tx descriptors
1359  * @priv: driver private structure
1360  * @dma_conf: structure to take the dma data
1361  * @queue: TX queue index.
1362  * Description: this function is called to clear the TX descriptors
1363  * in case of both basic and extended descriptors are used.
1364  */
1365 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1366 					struct stmmac_dma_conf *dma_conf,
1367 					u32 queue)
1368 {
1369 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1370 	int i;
1371 
1372 	/* Clear the TX descriptors */
1373 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1374 		int last = (i == (dma_conf->dma_tx_size - 1));
1375 		struct dma_desc *p;
1376 
1377 		if (priv->extend_desc)
1378 			p = &tx_q->dma_etx[i].basic;
1379 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1380 			p = &tx_q->dma_entx[i].basic;
1381 		else
1382 			p = &tx_q->dma_tx[i];
1383 
1384 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1385 	}
1386 }
1387 
1388 /**
1389  * stmmac_clear_descriptors - clear descriptors
1390  * @priv: driver private structure
1391  * @dma_conf: structure to take the dma data
1392  * Description: this function is called to clear the TX and RX descriptors
1393  * in case of both basic and extended descriptors are used.
1394  */
1395 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1396 				     struct stmmac_dma_conf *dma_conf)
1397 {
1398 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1399 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1400 	u32 queue;
1401 
1402 	/* Clear the RX descriptors */
1403 	for (queue = 0; queue < rx_queue_cnt; queue++)
1404 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1405 
1406 	/* Clear the TX descriptors */
1407 	for (queue = 0; queue < tx_queue_cnt; queue++)
1408 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1409 }
1410 
1411 /**
1412  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1413  * @priv: driver private structure
1414  * @dma_conf: structure to take the dma data
1415  * @p: descriptor pointer
1416  * @i: descriptor index
1417  * @flags: gfp flag
1418  * @queue: RX queue index
1419  * Description: this function is called to allocate a receive buffer, perform
1420  * the DMA mapping and init the descriptor.
1421  */
1422 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1423 				  struct stmmac_dma_conf *dma_conf,
1424 				  struct dma_desc *p,
1425 				  int i, gfp_t flags, u32 queue)
1426 {
1427 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1428 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1429 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1430 
1431 	if (priv->dma_cap.host_dma_width <= 32)
1432 		gfp |= GFP_DMA32;
1433 
1434 	if (!buf->page) {
1435 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1436 		if (!buf->page)
1437 			return -ENOMEM;
1438 		buf->page_offset = stmmac_rx_offset(priv);
1439 	}
1440 
1441 	if (priv->sph && !buf->sec_page) {
1442 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1443 		if (!buf->sec_page)
1444 			return -ENOMEM;
1445 
1446 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1447 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1448 	} else {
1449 		buf->sec_page = NULL;
1450 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1451 	}
1452 
1453 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1454 
1455 	stmmac_set_desc_addr(priv, p, buf->addr);
1456 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1457 		stmmac_init_desc3(priv, p);
1458 
1459 	return 0;
1460 }
1461 
1462 /**
1463  * stmmac_free_rx_buffer - free RX dma buffers
1464  * @priv: private structure
1465  * @rx_q: RX queue
1466  * @i: buffer index.
1467  */
1468 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1469 				  struct stmmac_rx_queue *rx_q,
1470 				  int i)
1471 {
1472 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1473 
1474 	if (buf->page)
1475 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1476 	buf->page = NULL;
1477 
1478 	if (buf->sec_page)
1479 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1480 	buf->sec_page = NULL;
1481 }
1482 
1483 /**
1484  * stmmac_free_tx_buffer - free RX dma buffers
1485  * @priv: private structure
1486  * @dma_conf: structure to take the dma data
1487  * @queue: RX queue index
1488  * @i: buffer index.
1489  */
1490 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1491 				  struct stmmac_dma_conf *dma_conf,
1492 				  u32 queue, int i)
1493 {
1494 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1495 
1496 	if (tx_q->tx_skbuff_dma[i].buf &&
1497 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1498 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1499 			dma_unmap_page(priv->device,
1500 				       tx_q->tx_skbuff_dma[i].buf,
1501 				       tx_q->tx_skbuff_dma[i].len,
1502 				       DMA_TO_DEVICE);
1503 		else
1504 			dma_unmap_single(priv->device,
1505 					 tx_q->tx_skbuff_dma[i].buf,
1506 					 tx_q->tx_skbuff_dma[i].len,
1507 					 DMA_TO_DEVICE);
1508 	}
1509 
1510 	if (tx_q->xdpf[i] &&
1511 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1512 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1513 		xdp_return_frame(tx_q->xdpf[i]);
1514 		tx_q->xdpf[i] = NULL;
1515 	}
1516 
1517 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1518 		tx_q->xsk_frames_done++;
1519 
1520 	if (tx_q->tx_skbuff[i] &&
1521 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1522 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1523 		tx_q->tx_skbuff[i] = NULL;
1524 	}
1525 
1526 	tx_q->tx_skbuff_dma[i].buf = 0;
1527 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1528 }
1529 
1530 /**
1531  * dma_free_rx_skbufs - free RX dma buffers
1532  * @priv: private structure
1533  * @dma_conf: structure to take the dma data
1534  * @queue: RX queue index
1535  */
1536 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1537 			       struct stmmac_dma_conf *dma_conf,
1538 			       u32 queue)
1539 {
1540 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1541 	int i;
1542 
1543 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1544 		stmmac_free_rx_buffer(priv, rx_q, i);
1545 }
1546 
1547 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1548 				   struct stmmac_dma_conf *dma_conf,
1549 				   u32 queue, gfp_t flags)
1550 {
1551 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1552 	int i;
1553 
1554 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1555 		struct dma_desc *p;
1556 		int ret;
1557 
1558 		if (priv->extend_desc)
1559 			p = &((rx_q->dma_erx + i)->basic);
1560 		else
1561 			p = rx_q->dma_rx + i;
1562 
1563 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1564 					     queue);
1565 		if (ret)
1566 			return ret;
1567 
1568 		rx_q->buf_alloc_num++;
1569 	}
1570 
1571 	return 0;
1572 }
1573 
1574 /**
1575  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1576  * @priv: private structure
1577  * @dma_conf: structure to take the dma data
1578  * @queue: RX queue index
1579  */
1580 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1581 				struct stmmac_dma_conf *dma_conf,
1582 				u32 queue)
1583 {
1584 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1585 	int i;
1586 
1587 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1588 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1589 
1590 		if (!buf->xdp)
1591 			continue;
1592 
1593 		xsk_buff_free(buf->xdp);
1594 		buf->xdp = NULL;
1595 	}
1596 }
1597 
1598 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1599 				      struct stmmac_dma_conf *dma_conf,
1600 				      u32 queue)
1601 {
1602 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1603 	int i;
1604 
1605 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1606 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1607 	 * use this macro to make sure no size violations.
1608 	 */
1609 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1610 
1611 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1612 		struct stmmac_rx_buffer *buf;
1613 		dma_addr_t dma_addr;
1614 		struct dma_desc *p;
1615 
1616 		if (priv->extend_desc)
1617 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1618 		else
1619 			p = rx_q->dma_rx + i;
1620 
1621 		buf = &rx_q->buf_pool[i];
1622 
1623 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1624 		if (!buf->xdp)
1625 			return -ENOMEM;
1626 
1627 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1628 		stmmac_set_desc_addr(priv, p, dma_addr);
1629 		rx_q->buf_alloc_num++;
1630 	}
1631 
1632 	return 0;
1633 }
1634 
1635 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1636 {
1637 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1638 		return NULL;
1639 
1640 	return xsk_get_pool_from_qid(priv->dev, queue);
1641 }
1642 
1643 /**
1644  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1645  * @priv: driver private structure
1646  * @dma_conf: structure to take the dma data
1647  * @queue: RX queue index
1648  * @flags: gfp flag.
1649  * Description: this function initializes the DMA RX descriptors
1650  * and allocates the socket buffers. It supports the chained and ring
1651  * modes.
1652  */
1653 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1654 				    struct stmmac_dma_conf *dma_conf,
1655 				    u32 queue, gfp_t flags)
1656 {
1657 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1658 	int ret;
1659 
1660 	netif_dbg(priv, probe, priv->dev,
1661 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1662 		  (u32)rx_q->dma_rx_phy);
1663 
1664 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1665 
1666 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1667 
1668 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1669 
1670 	if (rx_q->xsk_pool) {
1671 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1672 						   MEM_TYPE_XSK_BUFF_POOL,
1673 						   NULL));
1674 		netdev_info(priv->dev,
1675 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1676 			    rx_q->queue_index);
1677 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1678 	} else {
1679 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1680 						   MEM_TYPE_PAGE_POOL,
1681 						   rx_q->page_pool));
1682 		netdev_info(priv->dev,
1683 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1684 			    rx_q->queue_index);
1685 	}
1686 
1687 	if (rx_q->xsk_pool) {
1688 		/* RX XDP ZC buffer pool may not be populated, e.g.
1689 		 * xdpsock TX-only.
1690 		 */
1691 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1692 	} else {
1693 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1694 		if (ret < 0)
1695 			return -ENOMEM;
1696 	}
1697 
1698 	/* Setup the chained descriptor addresses */
1699 	if (priv->mode == STMMAC_CHAIN_MODE) {
1700 		if (priv->extend_desc)
1701 			stmmac_mode_init(priv, rx_q->dma_erx,
1702 					 rx_q->dma_rx_phy,
1703 					 dma_conf->dma_rx_size, 1);
1704 		else
1705 			stmmac_mode_init(priv, rx_q->dma_rx,
1706 					 rx_q->dma_rx_phy,
1707 					 dma_conf->dma_rx_size, 0);
1708 	}
1709 
1710 	return 0;
1711 }
1712 
1713 static int init_dma_rx_desc_rings(struct net_device *dev,
1714 				  struct stmmac_dma_conf *dma_conf,
1715 				  gfp_t flags)
1716 {
1717 	struct stmmac_priv *priv = netdev_priv(dev);
1718 	u32 rx_count = priv->plat->rx_queues_to_use;
1719 	int queue;
1720 	int ret;
1721 
1722 	/* RX INITIALIZATION */
1723 	netif_dbg(priv, probe, priv->dev,
1724 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1725 
1726 	for (queue = 0; queue < rx_count; queue++) {
1727 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1728 		if (ret)
1729 			goto err_init_rx_buffers;
1730 	}
1731 
1732 	return 0;
1733 
1734 err_init_rx_buffers:
1735 	while (queue >= 0) {
1736 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1737 
1738 		if (rx_q->xsk_pool)
1739 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1740 		else
1741 			dma_free_rx_skbufs(priv, dma_conf, queue);
1742 
1743 		rx_q->buf_alloc_num = 0;
1744 		rx_q->xsk_pool = NULL;
1745 
1746 		queue--;
1747 	}
1748 
1749 	return ret;
1750 }
1751 
1752 /**
1753  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1754  * @priv: driver private structure
1755  * @dma_conf: structure to take the dma data
1756  * @queue: TX queue index
1757  * Description: this function initializes the DMA TX descriptors
1758  * and allocates the socket buffers. It supports the chained and ring
1759  * modes.
1760  */
1761 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1762 				    struct stmmac_dma_conf *dma_conf,
1763 				    u32 queue)
1764 {
1765 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1766 	int i;
1767 
1768 	netif_dbg(priv, probe, priv->dev,
1769 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1770 		  (u32)tx_q->dma_tx_phy);
1771 
1772 	/* Setup the chained descriptor addresses */
1773 	if (priv->mode == STMMAC_CHAIN_MODE) {
1774 		if (priv->extend_desc)
1775 			stmmac_mode_init(priv, tx_q->dma_etx,
1776 					 tx_q->dma_tx_phy,
1777 					 dma_conf->dma_tx_size, 1);
1778 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1779 			stmmac_mode_init(priv, tx_q->dma_tx,
1780 					 tx_q->dma_tx_phy,
1781 					 dma_conf->dma_tx_size, 0);
1782 	}
1783 
1784 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1785 
1786 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1787 		struct dma_desc *p;
1788 
1789 		if (priv->extend_desc)
1790 			p = &((tx_q->dma_etx + i)->basic);
1791 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1792 			p = &((tx_q->dma_entx + i)->basic);
1793 		else
1794 			p = tx_q->dma_tx + i;
1795 
1796 		stmmac_clear_desc(priv, p);
1797 
1798 		tx_q->tx_skbuff_dma[i].buf = 0;
1799 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1800 		tx_q->tx_skbuff_dma[i].len = 0;
1801 		tx_q->tx_skbuff_dma[i].last_segment = false;
1802 		tx_q->tx_skbuff[i] = NULL;
1803 	}
1804 
1805 	return 0;
1806 }
1807 
1808 static int init_dma_tx_desc_rings(struct net_device *dev,
1809 				  struct stmmac_dma_conf *dma_conf)
1810 {
1811 	struct stmmac_priv *priv = netdev_priv(dev);
1812 	u32 tx_queue_cnt;
1813 	u32 queue;
1814 
1815 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1816 
1817 	for (queue = 0; queue < tx_queue_cnt; queue++)
1818 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1819 
1820 	return 0;
1821 }
1822 
1823 /**
1824  * init_dma_desc_rings - init the RX/TX descriptor rings
1825  * @dev: net device structure
1826  * @dma_conf: structure to take the dma data
1827  * @flags: gfp flag.
1828  * Description: this function initializes the DMA RX/TX descriptors
1829  * and allocates the socket buffers. It supports the chained and ring
1830  * modes.
1831  */
1832 static int init_dma_desc_rings(struct net_device *dev,
1833 			       struct stmmac_dma_conf *dma_conf,
1834 			       gfp_t flags)
1835 {
1836 	struct stmmac_priv *priv = netdev_priv(dev);
1837 	int ret;
1838 
1839 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1840 	if (ret)
1841 		return ret;
1842 
1843 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1844 
1845 	stmmac_clear_descriptors(priv, dma_conf);
1846 
1847 	if (netif_msg_hw(priv))
1848 		stmmac_display_rings(priv, dma_conf);
1849 
1850 	return ret;
1851 }
1852 
1853 /**
1854  * dma_free_tx_skbufs - free TX dma buffers
1855  * @priv: private structure
1856  * @dma_conf: structure to take the dma data
1857  * @queue: TX queue index
1858  */
1859 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1860 			       struct stmmac_dma_conf *dma_conf,
1861 			       u32 queue)
1862 {
1863 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1864 	int i;
1865 
1866 	tx_q->xsk_frames_done = 0;
1867 
1868 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1869 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1870 
1871 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1872 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1873 		tx_q->xsk_frames_done = 0;
1874 		tx_q->xsk_pool = NULL;
1875 	}
1876 }
1877 
1878 /**
1879  * stmmac_free_tx_skbufs - free TX skb buffers
1880  * @priv: private structure
1881  */
1882 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1883 {
1884 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1885 	u32 queue;
1886 
1887 	for (queue = 0; queue < tx_queue_cnt; queue++)
1888 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1889 }
1890 
1891 /**
1892  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1893  * @priv: private structure
1894  * @dma_conf: structure to take the dma data
1895  * @queue: RX queue index
1896  */
1897 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1898 					 struct stmmac_dma_conf *dma_conf,
1899 					 u32 queue)
1900 {
1901 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1902 
1903 	/* Release the DMA RX socket buffers */
1904 	if (rx_q->xsk_pool)
1905 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1906 	else
1907 		dma_free_rx_skbufs(priv, dma_conf, queue);
1908 
1909 	rx_q->buf_alloc_num = 0;
1910 	rx_q->xsk_pool = NULL;
1911 
1912 	/* Free DMA regions of consistent memory previously allocated */
1913 	if (!priv->extend_desc)
1914 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1915 				  sizeof(struct dma_desc),
1916 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1917 	else
1918 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1919 				  sizeof(struct dma_extended_desc),
1920 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1921 
1922 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1923 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1924 
1925 	kfree(rx_q->buf_pool);
1926 	if (rx_q->page_pool)
1927 		page_pool_destroy(rx_q->page_pool);
1928 }
1929 
1930 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1931 				       struct stmmac_dma_conf *dma_conf)
1932 {
1933 	u32 rx_count = priv->plat->rx_queues_to_use;
1934 	u32 queue;
1935 
1936 	/* Free RX queue resources */
1937 	for (queue = 0; queue < rx_count; queue++)
1938 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1939 }
1940 
1941 /**
1942  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1943  * @priv: private structure
1944  * @dma_conf: structure to take the dma data
1945  * @queue: TX queue index
1946  */
1947 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1948 					 struct stmmac_dma_conf *dma_conf,
1949 					 u32 queue)
1950 {
1951 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1952 	size_t size;
1953 	void *addr;
1954 
1955 	/* Release the DMA TX socket buffers */
1956 	dma_free_tx_skbufs(priv, dma_conf, queue);
1957 
1958 	if (priv->extend_desc) {
1959 		size = sizeof(struct dma_extended_desc);
1960 		addr = tx_q->dma_etx;
1961 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1962 		size = sizeof(struct dma_edesc);
1963 		addr = tx_q->dma_entx;
1964 	} else {
1965 		size = sizeof(struct dma_desc);
1966 		addr = tx_q->dma_tx;
1967 	}
1968 
1969 	size *= dma_conf->dma_tx_size;
1970 
1971 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1972 
1973 	kfree(tx_q->tx_skbuff_dma);
1974 	kfree(tx_q->tx_skbuff);
1975 }
1976 
1977 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1978 				       struct stmmac_dma_conf *dma_conf)
1979 {
1980 	u32 tx_count = priv->plat->tx_queues_to_use;
1981 	u32 queue;
1982 
1983 	/* Free TX queue resources */
1984 	for (queue = 0; queue < tx_count; queue++)
1985 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1986 }
1987 
1988 /**
1989  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1990  * @priv: private structure
1991  * @dma_conf: structure to take the dma data
1992  * @queue: RX queue index
1993  * Description: according to which descriptor can be used (extend or basic)
1994  * this function allocates the resources for TX and RX paths. In case of
1995  * reception, for example, it pre-allocated the RX socket buffer in order to
1996  * allow zero-copy mechanism.
1997  */
1998 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
1999 					 struct stmmac_dma_conf *dma_conf,
2000 					 u32 queue)
2001 {
2002 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2003 	struct stmmac_channel *ch = &priv->channel[queue];
2004 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2005 	struct page_pool_params pp_params = { 0 };
2006 	unsigned int num_pages;
2007 	unsigned int napi_id;
2008 	int ret;
2009 
2010 	rx_q->queue_index = queue;
2011 	rx_q->priv_data = priv;
2012 
2013 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2014 	pp_params.pool_size = dma_conf->dma_rx_size;
2015 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2016 	pp_params.order = ilog2(num_pages);
2017 	pp_params.nid = dev_to_node(priv->device);
2018 	pp_params.dev = priv->device;
2019 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2020 	pp_params.offset = stmmac_rx_offset(priv);
2021 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2022 
2023 	rx_q->page_pool = page_pool_create(&pp_params);
2024 	if (IS_ERR(rx_q->page_pool)) {
2025 		ret = PTR_ERR(rx_q->page_pool);
2026 		rx_q->page_pool = NULL;
2027 		return ret;
2028 	}
2029 
2030 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2031 				 sizeof(*rx_q->buf_pool),
2032 				 GFP_KERNEL);
2033 	if (!rx_q->buf_pool)
2034 		return -ENOMEM;
2035 
2036 	if (priv->extend_desc) {
2037 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2038 						   dma_conf->dma_rx_size *
2039 						   sizeof(struct dma_extended_desc),
2040 						   &rx_q->dma_rx_phy,
2041 						   GFP_KERNEL);
2042 		if (!rx_q->dma_erx)
2043 			return -ENOMEM;
2044 
2045 	} else {
2046 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2047 						  dma_conf->dma_rx_size *
2048 						  sizeof(struct dma_desc),
2049 						  &rx_q->dma_rx_phy,
2050 						  GFP_KERNEL);
2051 		if (!rx_q->dma_rx)
2052 			return -ENOMEM;
2053 	}
2054 
2055 	if (stmmac_xdp_is_enabled(priv) &&
2056 	    test_bit(queue, priv->af_xdp_zc_qps))
2057 		napi_id = ch->rxtx_napi.napi_id;
2058 	else
2059 		napi_id = ch->rx_napi.napi_id;
2060 
2061 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2062 			       rx_q->queue_index,
2063 			       napi_id);
2064 	if (ret) {
2065 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2066 		return -EINVAL;
2067 	}
2068 
2069 	return 0;
2070 }
2071 
2072 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2073 				       struct stmmac_dma_conf *dma_conf)
2074 {
2075 	u32 rx_count = priv->plat->rx_queues_to_use;
2076 	u32 queue;
2077 	int ret;
2078 
2079 	/* RX queues buffers and DMA */
2080 	for (queue = 0; queue < rx_count; queue++) {
2081 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2082 		if (ret)
2083 			goto err_dma;
2084 	}
2085 
2086 	return 0;
2087 
2088 err_dma:
2089 	free_dma_rx_desc_resources(priv, dma_conf);
2090 
2091 	return ret;
2092 }
2093 
2094 /**
2095  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2096  * @priv: private structure
2097  * @dma_conf: structure to take the dma data
2098  * @queue: TX queue index
2099  * Description: according to which descriptor can be used (extend or basic)
2100  * this function allocates the resources for TX and RX paths. In case of
2101  * reception, for example, it pre-allocated the RX socket buffer in order to
2102  * allow zero-copy mechanism.
2103  */
2104 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2105 					 struct stmmac_dma_conf *dma_conf,
2106 					 u32 queue)
2107 {
2108 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2109 	size_t size;
2110 	void *addr;
2111 
2112 	tx_q->queue_index = queue;
2113 	tx_q->priv_data = priv;
2114 
2115 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2116 				      sizeof(*tx_q->tx_skbuff_dma),
2117 				      GFP_KERNEL);
2118 	if (!tx_q->tx_skbuff_dma)
2119 		return -ENOMEM;
2120 
2121 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2122 				  sizeof(struct sk_buff *),
2123 				  GFP_KERNEL);
2124 	if (!tx_q->tx_skbuff)
2125 		return -ENOMEM;
2126 
2127 	if (priv->extend_desc)
2128 		size = sizeof(struct dma_extended_desc);
2129 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2130 		size = sizeof(struct dma_edesc);
2131 	else
2132 		size = sizeof(struct dma_desc);
2133 
2134 	size *= dma_conf->dma_tx_size;
2135 
2136 	addr = dma_alloc_coherent(priv->device, size,
2137 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2138 	if (!addr)
2139 		return -ENOMEM;
2140 
2141 	if (priv->extend_desc)
2142 		tx_q->dma_etx = addr;
2143 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2144 		tx_q->dma_entx = addr;
2145 	else
2146 		tx_q->dma_tx = addr;
2147 
2148 	return 0;
2149 }
2150 
2151 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2152 				       struct stmmac_dma_conf *dma_conf)
2153 {
2154 	u32 tx_count = priv->plat->tx_queues_to_use;
2155 	u32 queue;
2156 	int ret;
2157 
2158 	/* TX queues buffers and DMA */
2159 	for (queue = 0; queue < tx_count; queue++) {
2160 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2161 		if (ret)
2162 			goto err_dma;
2163 	}
2164 
2165 	return 0;
2166 
2167 err_dma:
2168 	free_dma_tx_desc_resources(priv, dma_conf);
2169 	return ret;
2170 }
2171 
2172 /**
2173  * alloc_dma_desc_resources - alloc TX/RX resources.
2174  * @priv: private structure
2175  * @dma_conf: structure to take the dma data
2176  * Description: according to which descriptor can be used (extend or basic)
2177  * this function allocates the resources for TX and RX paths. In case of
2178  * reception, for example, it pre-allocated the RX socket buffer in order to
2179  * allow zero-copy mechanism.
2180  */
2181 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2182 				    struct stmmac_dma_conf *dma_conf)
2183 {
2184 	/* RX Allocation */
2185 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2186 
2187 	if (ret)
2188 		return ret;
2189 
2190 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2191 
2192 	return ret;
2193 }
2194 
2195 /**
2196  * free_dma_desc_resources - free dma desc resources
2197  * @priv: private structure
2198  * @dma_conf: structure to take the dma data
2199  */
2200 static void free_dma_desc_resources(struct stmmac_priv *priv,
2201 				    struct stmmac_dma_conf *dma_conf)
2202 {
2203 	/* Release the DMA TX socket buffers */
2204 	free_dma_tx_desc_resources(priv, dma_conf);
2205 
2206 	/* Release the DMA RX socket buffers later
2207 	 * to ensure all pending XDP_TX buffers are returned.
2208 	 */
2209 	free_dma_rx_desc_resources(priv, dma_conf);
2210 }
2211 
2212 /**
2213  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2214  *  @priv: driver private structure
2215  *  Description: It is used for enabling the rx queues in the MAC
2216  */
2217 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2218 {
2219 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2220 	int queue;
2221 	u8 mode;
2222 
2223 	for (queue = 0; queue < rx_queues_count; queue++) {
2224 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2225 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2226 	}
2227 }
2228 
2229 /**
2230  * stmmac_start_rx_dma - start RX DMA channel
2231  * @priv: driver private structure
2232  * @chan: RX channel index
2233  * Description:
2234  * This starts a RX DMA channel
2235  */
2236 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2237 {
2238 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2239 	stmmac_start_rx(priv, priv->ioaddr, chan);
2240 }
2241 
2242 /**
2243  * stmmac_start_tx_dma - start TX DMA channel
2244  * @priv: driver private structure
2245  * @chan: TX channel index
2246  * Description:
2247  * This starts a TX DMA channel
2248  */
2249 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2250 {
2251 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2252 	stmmac_start_tx(priv, priv->ioaddr, chan);
2253 }
2254 
2255 /**
2256  * stmmac_stop_rx_dma - stop RX DMA channel
2257  * @priv: driver private structure
2258  * @chan: RX channel index
2259  * Description:
2260  * This stops a RX DMA channel
2261  */
2262 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2263 {
2264 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2265 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2266 }
2267 
2268 /**
2269  * stmmac_stop_tx_dma - stop TX DMA channel
2270  * @priv: driver private structure
2271  * @chan: TX channel index
2272  * Description:
2273  * This stops a TX DMA channel
2274  */
2275 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2276 {
2277 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2278 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2279 }
2280 
2281 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2282 {
2283 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2284 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2285 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2286 	u32 chan;
2287 
2288 	for (chan = 0; chan < dma_csr_ch; chan++) {
2289 		struct stmmac_channel *ch = &priv->channel[chan];
2290 		unsigned long flags;
2291 
2292 		spin_lock_irqsave(&ch->lock, flags);
2293 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2294 		spin_unlock_irqrestore(&ch->lock, flags);
2295 	}
2296 }
2297 
2298 /**
2299  * stmmac_start_all_dma - start all RX and TX DMA channels
2300  * @priv: driver private structure
2301  * Description:
2302  * This starts all the RX and TX DMA channels
2303  */
2304 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2305 {
2306 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308 	u32 chan = 0;
2309 
2310 	for (chan = 0; chan < rx_channels_count; chan++)
2311 		stmmac_start_rx_dma(priv, chan);
2312 
2313 	for (chan = 0; chan < tx_channels_count; chan++)
2314 		stmmac_start_tx_dma(priv, chan);
2315 }
2316 
2317 /**
2318  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2319  * @priv: driver private structure
2320  * Description:
2321  * This stops the RX and TX DMA channels
2322  */
2323 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2324 {
2325 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2326 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2327 	u32 chan = 0;
2328 
2329 	for (chan = 0; chan < rx_channels_count; chan++)
2330 		stmmac_stop_rx_dma(priv, chan);
2331 
2332 	for (chan = 0; chan < tx_channels_count; chan++)
2333 		stmmac_stop_tx_dma(priv, chan);
2334 }
2335 
2336 /**
2337  *  stmmac_dma_operation_mode - HW DMA operation mode
2338  *  @priv: driver private structure
2339  *  Description: it is used for configuring the DMA operation mode register in
2340  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2341  */
2342 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2343 {
2344 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2345 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2346 	int rxfifosz = priv->plat->rx_fifo_size;
2347 	int txfifosz = priv->plat->tx_fifo_size;
2348 	u32 txmode = 0;
2349 	u32 rxmode = 0;
2350 	u32 chan = 0;
2351 	u8 qmode = 0;
2352 
2353 	if (rxfifosz == 0)
2354 		rxfifosz = priv->dma_cap.rx_fifo_size;
2355 	if (txfifosz == 0)
2356 		txfifosz = priv->dma_cap.tx_fifo_size;
2357 
2358 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2359 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2360 		rxfifosz /= rx_channels_count;
2361 		txfifosz /= tx_channels_count;
2362 	}
2363 
2364 	if (priv->plat->force_thresh_dma_mode) {
2365 		txmode = tc;
2366 		rxmode = tc;
2367 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2368 		/*
2369 		 * In case of GMAC, SF mode can be enabled
2370 		 * to perform the TX COE in HW. This depends on:
2371 		 * 1) TX COE if actually supported
2372 		 * 2) There is no bugged Jumbo frame support
2373 		 *    that needs to not insert csum in the TDES.
2374 		 */
2375 		txmode = SF_DMA_MODE;
2376 		rxmode = SF_DMA_MODE;
2377 		priv->xstats.threshold = SF_DMA_MODE;
2378 	} else {
2379 		txmode = tc;
2380 		rxmode = SF_DMA_MODE;
2381 	}
2382 
2383 	/* configure all channels */
2384 	for (chan = 0; chan < rx_channels_count; chan++) {
2385 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2386 		u32 buf_size;
2387 
2388 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2389 
2390 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2391 				rxfifosz, qmode);
2392 
2393 		if (rx_q->xsk_pool) {
2394 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2395 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2396 					      buf_size,
2397 					      chan);
2398 		} else {
2399 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2400 					      priv->dma_conf.dma_buf_sz,
2401 					      chan);
2402 		}
2403 	}
2404 
2405 	for (chan = 0; chan < tx_channels_count; chan++) {
2406 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2407 
2408 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2409 				txfifosz, qmode);
2410 	}
2411 }
2412 
2413 static void stmmac_xsk_request_timestamp(void *_priv)
2414 {
2415 	struct stmmac_metadata_request *meta_req = _priv;
2416 
2417 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2418 	*meta_req->set_ic = true;
2419 }
2420 
2421 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2422 {
2423 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2424 	struct stmmac_priv *priv = tx_compl->priv;
2425 	struct dma_desc *desc = tx_compl->desc;
2426 	bool found = false;
2427 	u64 ns = 0;
2428 
2429 	if (!priv->hwts_tx_en)
2430 		return 0;
2431 
2432 	/* check tx tstamp status */
2433 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2434 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2435 		found = true;
2436 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2437 		found = true;
2438 	}
2439 
2440 	if (found) {
2441 		ns -= priv->plat->cdc_error_adj;
2442 		return ns_to_ktime(ns);
2443 	}
2444 
2445 	return 0;
2446 }
2447 
2448 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2449 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2450 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2451 };
2452 
2453 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2454 {
2455 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2456 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2457 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2458 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2459 	unsigned int entry = tx_q->cur_tx;
2460 	struct dma_desc *tx_desc = NULL;
2461 	struct xdp_desc xdp_desc;
2462 	bool work_done = true;
2463 	u32 tx_set_ic_bit = 0;
2464 
2465 	/* Avoids TX time-out as we are sharing with slow path */
2466 	txq_trans_cond_update(nq);
2467 
2468 	budget = min(budget, stmmac_tx_avail(priv, queue));
2469 
2470 	while (budget-- > 0) {
2471 		struct stmmac_metadata_request meta_req;
2472 		struct xsk_tx_metadata *meta = NULL;
2473 		dma_addr_t dma_addr;
2474 		bool set_ic;
2475 
2476 		/* We are sharing with slow path and stop XSK TX desc submission when
2477 		 * available TX ring is less than threshold.
2478 		 */
2479 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2480 		    !netif_carrier_ok(priv->dev)) {
2481 			work_done = false;
2482 			break;
2483 		}
2484 
2485 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2486 			break;
2487 
2488 		if (priv->est && priv->est->enable &&
2489 		    priv->est->max_sdu[queue] &&
2490 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2491 			priv->xstats.max_sdu_txq_drop[queue]++;
2492 			continue;
2493 		}
2494 
2495 		if (likely(priv->extend_desc))
2496 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2497 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2498 			tx_desc = &tx_q->dma_entx[entry].basic;
2499 		else
2500 			tx_desc = tx_q->dma_tx + entry;
2501 
2502 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2503 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2504 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2505 
2506 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2507 
2508 		/* To return XDP buffer to XSK pool, we simple call
2509 		 * xsk_tx_completed(), so we don't need to fill up
2510 		 * 'buf' and 'xdpf'.
2511 		 */
2512 		tx_q->tx_skbuff_dma[entry].buf = 0;
2513 		tx_q->xdpf[entry] = NULL;
2514 
2515 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2516 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2517 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2518 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2519 
2520 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2521 
2522 		tx_q->tx_count_frames++;
2523 
2524 		if (!priv->tx_coal_frames[queue])
2525 			set_ic = false;
2526 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2527 			set_ic = true;
2528 		else
2529 			set_ic = false;
2530 
2531 		meta_req.priv = priv;
2532 		meta_req.tx_desc = tx_desc;
2533 		meta_req.set_ic = &set_ic;
2534 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2535 					&meta_req);
2536 		if (set_ic) {
2537 			tx_q->tx_count_frames = 0;
2538 			stmmac_set_tx_ic(priv, tx_desc);
2539 			tx_set_ic_bit++;
2540 		}
2541 
2542 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2543 				       true, priv->mode, true, true,
2544 				       xdp_desc.len);
2545 
2546 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2547 
2548 		xsk_tx_metadata_to_compl(meta,
2549 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2550 
2551 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2552 		entry = tx_q->cur_tx;
2553 	}
2554 	u64_stats_update_begin(&txq_stats->napi_syncp);
2555 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2556 	u64_stats_update_end(&txq_stats->napi_syncp);
2557 
2558 	if (tx_desc) {
2559 		stmmac_flush_tx_descriptors(priv, queue);
2560 		xsk_tx_release(pool);
2561 	}
2562 
2563 	/* Return true if all of the 3 conditions are met
2564 	 *  a) TX Budget is still available
2565 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2566 	 *     pending XSK TX for transmission)
2567 	 */
2568 	return !!budget && work_done;
2569 }
2570 
2571 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2572 {
2573 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2574 		tc += 64;
2575 
2576 		if (priv->plat->force_thresh_dma_mode)
2577 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2578 		else
2579 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2580 						      chan);
2581 
2582 		priv->xstats.threshold = tc;
2583 	}
2584 }
2585 
2586 /**
2587  * stmmac_tx_clean - to manage the transmission completion
2588  * @priv: driver private structure
2589  * @budget: napi budget limiting this functions packet handling
2590  * @queue: TX queue index
2591  * @pending_packets: signal to arm the TX coal timer
2592  * Description: it reclaims the transmit resources after transmission completes.
2593  * If some packets still needs to be handled, due to TX coalesce, set
2594  * pending_packets to true to make NAPI arm the TX coal timer.
2595  */
2596 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2597 			   bool *pending_packets)
2598 {
2599 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2600 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2601 	unsigned int bytes_compl = 0, pkts_compl = 0;
2602 	unsigned int entry, xmits = 0, count = 0;
2603 	u32 tx_packets = 0, tx_errors = 0;
2604 
2605 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2606 
2607 	tx_q->xsk_frames_done = 0;
2608 
2609 	entry = tx_q->dirty_tx;
2610 
2611 	/* Try to clean all TX complete frame in 1 shot */
2612 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2613 		struct xdp_frame *xdpf;
2614 		struct sk_buff *skb;
2615 		struct dma_desc *p;
2616 		int status;
2617 
2618 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2619 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2620 			xdpf = tx_q->xdpf[entry];
2621 			skb = NULL;
2622 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2623 			xdpf = NULL;
2624 			skb = tx_q->tx_skbuff[entry];
2625 		} else {
2626 			xdpf = NULL;
2627 			skb = NULL;
2628 		}
2629 
2630 		if (priv->extend_desc)
2631 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2632 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2633 			p = &tx_q->dma_entx[entry].basic;
2634 		else
2635 			p = tx_q->dma_tx + entry;
2636 
2637 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2638 		/* Check if the descriptor is owned by the DMA */
2639 		if (unlikely(status & tx_dma_own))
2640 			break;
2641 
2642 		count++;
2643 
2644 		/* Make sure descriptor fields are read after reading
2645 		 * the own bit.
2646 		 */
2647 		dma_rmb();
2648 
2649 		/* Just consider the last segment and ...*/
2650 		if (likely(!(status & tx_not_ls))) {
2651 			/* ... verify the status error condition */
2652 			if (unlikely(status & tx_err)) {
2653 				tx_errors++;
2654 				if (unlikely(status & tx_err_bump_tc))
2655 					stmmac_bump_dma_threshold(priv, queue);
2656 			} else {
2657 				tx_packets++;
2658 			}
2659 			if (skb) {
2660 				stmmac_get_tx_hwtstamp(priv, p, skb);
2661 			} else if (tx_q->xsk_pool &&
2662 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2663 				struct stmmac_xsk_tx_complete tx_compl = {
2664 					.priv = priv,
2665 					.desc = p,
2666 				};
2667 
2668 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2669 							 &stmmac_xsk_tx_metadata_ops,
2670 							 &tx_compl);
2671 			}
2672 		}
2673 
2674 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2675 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2676 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2677 				dma_unmap_page(priv->device,
2678 					       tx_q->tx_skbuff_dma[entry].buf,
2679 					       tx_q->tx_skbuff_dma[entry].len,
2680 					       DMA_TO_DEVICE);
2681 			else
2682 				dma_unmap_single(priv->device,
2683 						 tx_q->tx_skbuff_dma[entry].buf,
2684 						 tx_q->tx_skbuff_dma[entry].len,
2685 						 DMA_TO_DEVICE);
2686 			tx_q->tx_skbuff_dma[entry].buf = 0;
2687 			tx_q->tx_skbuff_dma[entry].len = 0;
2688 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2689 		}
2690 
2691 		stmmac_clean_desc3(priv, tx_q, p);
2692 
2693 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2694 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2695 
2696 		if (xdpf &&
2697 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2698 			xdp_return_frame_rx_napi(xdpf);
2699 			tx_q->xdpf[entry] = NULL;
2700 		}
2701 
2702 		if (xdpf &&
2703 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2704 			xdp_return_frame(xdpf);
2705 			tx_q->xdpf[entry] = NULL;
2706 		}
2707 
2708 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2709 			tx_q->xsk_frames_done++;
2710 
2711 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2712 			if (likely(skb)) {
2713 				pkts_compl++;
2714 				bytes_compl += skb->len;
2715 				dev_consume_skb_any(skb);
2716 				tx_q->tx_skbuff[entry] = NULL;
2717 			}
2718 		}
2719 
2720 		stmmac_release_tx_desc(priv, p, priv->mode);
2721 
2722 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2723 	}
2724 	tx_q->dirty_tx = entry;
2725 
2726 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2727 				  pkts_compl, bytes_compl);
2728 
2729 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2730 								queue))) &&
2731 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2732 
2733 		netif_dbg(priv, tx_done, priv->dev,
2734 			  "%s: restart transmit\n", __func__);
2735 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2736 	}
2737 
2738 	if (tx_q->xsk_pool) {
2739 		bool work_done;
2740 
2741 		if (tx_q->xsk_frames_done)
2742 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2743 
2744 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2745 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2746 
2747 		/* For XSK TX, we try to send as many as possible.
2748 		 * If XSK work done (XSK TX desc empty and budget still
2749 		 * available), return "budget - 1" to reenable TX IRQ.
2750 		 * Else, return "budget" to make NAPI continue polling.
2751 		 */
2752 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2753 					       STMMAC_XSK_TX_BUDGET_MAX);
2754 		if (work_done)
2755 			xmits = budget - 1;
2756 		else
2757 			xmits = budget;
2758 	}
2759 
2760 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2761 	    priv->eee_sw_timer_en) {
2762 		if (stmmac_enable_eee_mode(priv))
2763 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2764 	}
2765 
2766 	/* We still have pending packets, let's call for a new scheduling */
2767 	if (tx_q->dirty_tx != tx_q->cur_tx)
2768 		*pending_packets = true;
2769 
2770 	u64_stats_update_begin(&txq_stats->napi_syncp);
2771 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2772 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2773 	u64_stats_inc(&txq_stats->napi.tx_clean);
2774 	u64_stats_update_end(&txq_stats->napi_syncp);
2775 
2776 	priv->xstats.tx_errors += tx_errors;
2777 
2778 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2779 
2780 	/* Combine decisions from TX clean and XSK TX */
2781 	return max(count, xmits);
2782 }
2783 
2784 /**
2785  * stmmac_tx_err - to manage the tx error
2786  * @priv: driver private structure
2787  * @chan: channel index
2788  * Description: it cleans the descriptors and restarts the transmission
2789  * in case of transmission errors.
2790  */
2791 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2792 {
2793 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2794 
2795 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2796 
2797 	stmmac_stop_tx_dma(priv, chan);
2798 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2799 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2800 	stmmac_reset_tx_queue(priv, chan);
2801 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2802 			    tx_q->dma_tx_phy, chan);
2803 	stmmac_start_tx_dma(priv, chan);
2804 
2805 	priv->xstats.tx_errors++;
2806 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2807 }
2808 
2809 /**
2810  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2811  *  @priv: driver private structure
2812  *  @txmode: TX operating mode
2813  *  @rxmode: RX operating mode
2814  *  @chan: channel index
2815  *  Description: it is used for configuring of the DMA operation mode in
2816  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2817  *  mode.
2818  */
2819 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2820 					  u32 rxmode, u32 chan)
2821 {
2822 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2823 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2824 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2825 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2826 	int rxfifosz = priv->plat->rx_fifo_size;
2827 	int txfifosz = priv->plat->tx_fifo_size;
2828 
2829 	if (rxfifosz == 0)
2830 		rxfifosz = priv->dma_cap.rx_fifo_size;
2831 	if (txfifosz == 0)
2832 		txfifosz = priv->dma_cap.tx_fifo_size;
2833 
2834 	/* Adjust for real per queue fifo size */
2835 	rxfifosz /= rx_channels_count;
2836 	txfifosz /= tx_channels_count;
2837 
2838 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2839 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2840 }
2841 
2842 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2843 {
2844 	int ret;
2845 
2846 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2847 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2848 	if (ret && (ret != -EINVAL)) {
2849 		stmmac_global_err(priv);
2850 		return true;
2851 	}
2852 
2853 	return false;
2854 }
2855 
2856 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2857 {
2858 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2859 						 &priv->xstats, chan, dir);
2860 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2861 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2862 	struct stmmac_channel *ch = &priv->channel[chan];
2863 	struct napi_struct *rx_napi;
2864 	struct napi_struct *tx_napi;
2865 	unsigned long flags;
2866 
2867 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2868 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2869 
2870 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2871 		if (napi_schedule_prep(rx_napi)) {
2872 			spin_lock_irqsave(&ch->lock, flags);
2873 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2874 			spin_unlock_irqrestore(&ch->lock, flags);
2875 			__napi_schedule(rx_napi);
2876 		}
2877 	}
2878 
2879 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2880 		if (napi_schedule_prep(tx_napi)) {
2881 			spin_lock_irqsave(&ch->lock, flags);
2882 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2883 			spin_unlock_irqrestore(&ch->lock, flags);
2884 			__napi_schedule(tx_napi);
2885 		}
2886 	}
2887 
2888 	return status;
2889 }
2890 
2891 /**
2892  * stmmac_dma_interrupt - DMA ISR
2893  * @priv: driver private structure
2894  * Description: this is the DMA ISR. It is called by the main ISR.
2895  * It calls the dwmac dma routine and schedule poll method in case of some
2896  * work can be done.
2897  */
2898 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2899 {
2900 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2901 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2902 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2903 				tx_channel_count : rx_channel_count;
2904 	u32 chan;
2905 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2906 
2907 	/* Make sure we never check beyond our status buffer. */
2908 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2909 		channels_to_check = ARRAY_SIZE(status);
2910 
2911 	for (chan = 0; chan < channels_to_check; chan++)
2912 		status[chan] = stmmac_napi_check(priv, chan,
2913 						 DMA_DIR_RXTX);
2914 
2915 	for (chan = 0; chan < tx_channel_count; chan++) {
2916 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2917 			/* Try to bump up the dma threshold on this failure */
2918 			stmmac_bump_dma_threshold(priv, chan);
2919 		} else if (unlikely(status[chan] == tx_hard_error)) {
2920 			stmmac_tx_err(priv, chan);
2921 		}
2922 	}
2923 }
2924 
2925 /**
2926  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2927  * @priv: driver private structure
2928  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2929  */
2930 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2931 {
2932 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2933 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2934 
2935 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2936 
2937 	if (priv->dma_cap.rmon) {
2938 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2939 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2940 	} else
2941 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2942 }
2943 
2944 /**
2945  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2946  * @priv: driver private structure
2947  * Description:
2948  *  new GMAC chip generations have a new register to indicate the
2949  *  presence of the optional feature/functions.
2950  *  This can be also used to override the value passed through the
2951  *  platform and necessary for old MAC10/100 and GMAC chips.
2952  */
2953 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2954 {
2955 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2956 }
2957 
2958 /**
2959  * stmmac_check_ether_addr - check if the MAC addr is valid
2960  * @priv: driver private structure
2961  * Description:
2962  * it is to verify if the MAC address is valid, in case of failures it
2963  * generates a random MAC address
2964  */
2965 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2966 {
2967 	u8 addr[ETH_ALEN];
2968 
2969 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2970 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2971 		if (is_valid_ether_addr(addr))
2972 			eth_hw_addr_set(priv->dev, addr);
2973 		else
2974 			eth_hw_addr_random(priv->dev);
2975 		dev_info(priv->device, "device MAC address %pM\n",
2976 			 priv->dev->dev_addr);
2977 	}
2978 }
2979 
2980 /**
2981  * stmmac_init_dma_engine - DMA init.
2982  * @priv: driver private structure
2983  * Description:
2984  * It inits the DMA invoking the specific MAC/GMAC callback.
2985  * Some DMA parameters can be passed from the platform;
2986  * in case of these are not passed a default is kept for the MAC or GMAC.
2987  */
2988 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2989 {
2990 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2991 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2992 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2993 	struct stmmac_rx_queue *rx_q;
2994 	struct stmmac_tx_queue *tx_q;
2995 	u32 chan = 0;
2996 	int ret = 0;
2997 
2998 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2999 		dev_err(priv->device, "Invalid DMA configuration\n");
3000 		return -EINVAL;
3001 	}
3002 
3003 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3004 		priv->plat->dma_cfg->atds = 1;
3005 
3006 	ret = stmmac_reset(priv, priv->ioaddr);
3007 	if (ret) {
3008 		dev_err(priv->device, "Failed to reset the dma\n");
3009 		return ret;
3010 	}
3011 
3012 	/* DMA Configuration */
3013 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3014 
3015 	if (priv->plat->axi)
3016 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3017 
3018 	/* DMA CSR Channel configuration */
3019 	for (chan = 0; chan < dma_csr_ch; chan++) {
3020 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3021 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3022 	}
3023 
3024 	/* DMA RX Channel Configuration */
3025 	for (chan = 0; chan < rx_channels_count; chan++) {
3026 		rx_q = &priv->dma_conf.rx_queue[chan];
3027 
3028 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3029 				    rx_q->dma_rx_phy, chan);
3030 
3031 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3032 				     (rx_q->buf_alloc_num *
3033 				      sizeof(struct dma_desc));
3034 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3035 				       rx_q->rx_tail_addr, chan);
3036 	}
3037 
3038 	/* DMA TX Channel Configuration */
3039 	for (chan = 0; chan < tx_channels_count; chan++) {
3040 		tx_q = &priv->dma_conf.tx_queue[chan];
3041 
3042 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3043 				    tx_q->dma_tx_phy, chan);
3044 
3045 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3046 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3047 				       tx_q->tx_tail_addr, chan);
3048 	}
3049 
3050 	return ret;
3051 }
3052 
3053 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3054 {
3055 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3056 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3057 	struct stmmac_channel *ch;
3058 	struct napi_struct *napi;
3059 
3060 	if (!tx_coal_timer)
3061 		return;
3062 
3063 	ch = &priv->channel[tx_q->queue_index];
3064 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3065 
3066 	/* Arm timer only if napi is not already scheduled.
3067 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3068 	 * again in the next scheduled napi.
3069 	 */
3070 	if (unlikely(!napi_is_scheduled(napi)))
3071 		hrtimer_start(&tx_q->txtimer,
3072 			      STMMAC_COAL_TIMER(tx_coal_timer),
3073 			      HRTIMER_MODE_REL);
3074 	else
3075 		hrtimer_try_to_cancel(&tx_q->txtimer);
3076 }
3077 
3078 /**
3079  * stmmac_tx_timer - mitigation sw timer for tx.
3080  * @t: data pointer
3081  * Description:
3082  * This is the timer handler to directly invoke the stmmac_tx_clean.
3083  */
3084 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3085 {
3086 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3087 	struct stmmac_priv *priv = tx_q->priv_data;
3088 	struct stmmac_channel *ch;
3089 	struct napi_struct *napi;
3090 
3091 	ch = &priv->channel[tx_q->queue_index];
3092 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3093 
3094 	if (likely(napi_schedule_prep(napi))) {
3095 		unsigned long flags;
3096 
3097 		spin_lock_irqsave(&ch->lock, flags);
3098 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3099 		spin_unlock_irqrestore(&ch->lock, flags);
3100 		__napi_schedule(napi);
3101 	}
3102 
3103 	return HRTIMER_NORESTART;
3104 }
3105 
3106 /**
3107  * stmmac_init_coalesce - init mitigation options.
3108  * @priv: driver private structure
3109  * Description:
3110  * This inits the coalesce parameters: i.e. timer rate,
3111  * timer handler and default threshold used for enabling the
3112  * interrupt on completion bit.
3113  */
3114 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3115 {
3116 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3117 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3118 	u32 chan;
3119 
3120 	for (chan = 0; chan < tx_channel_count; chan++) {
3121 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3122 
3123 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3124 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3125 
3126 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3127 		tx_q->txtimer.function = stmmac_tx_timer;
3128 	}
3129 
3130 	for (chan = 0; chan < rx_channel_count; chan++)
3131 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3132 }
3133 
3134 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3135 {
3136 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3137 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3138 	u32 chan;
3139 
3140 	/* set TX ring length */
3141 	for (chan = 0; chan < tx_channels_count; chan++)
3142 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3143 				       (priv->dma_conf.dma_tx_size - 1), chan);
3144 
3145 	/* set RX ring length */
3146 	for (chan = 0; chan < rx_channels_count; chan++)
3147 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3148 				       (priv->dma_conf.dma_rx_size - 1), chan);
3149 }
3150 
3151 /**
3152  *  stmmac_set_tx_queue_weight - Set TX queue weight
3153  *  @priv: driver private structure
3154  *  Description: It is used for setting TX queues weight
3155  */
3156 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3157 {
3158 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3159 	u32 weight;
3160 	u32 queue;
3161 
3162 	for (queue = 0; queue < tx_queues_count; queue++) {
3163 		weight = priv->plat->tx_queues_cfg[queue].weight;
3164 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3165 	}
3166 }
3167 
3168 /**
3169  *  stmmac_configure_cbs - Configure CBS in TX queue
3170  *  @priv: driver private structure
3171  *  Description: It is used for configuring CBS in AVB TX queues
3172  */
3173 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3174 {
3175 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3176 	u32 mode_to_use;
3177 	u32 queue;
3178 
3179 	/* queue 0 is reserved for legacy traffic */
3180 	for (queue = 1; queue < tx_queues_count; queue++) {
3181 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3182 		if (mode_to_use == MTL_QUEUE_DCB)
3183 			continue;
3184 
3185 		stmmac_config_cbs(priv, priv->hw,
3186 				priv->plat->tx_queues_cfg[queue].send_slope,
3187 				priv->plat->tx_queues_cfg[queue].idle_slope,
3188 				priv->plat->tx_queues_cfg[queue].high_credit,
3189 				priv->plat->tx_queues_cfg[queue].low_credit,
3190 				queue);
3191 	}
3192 }
3193 
3194 /**
3195  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3196  *  @priv: driver private structure
3197  *  Description: It is used for mapping RX queues to RX dma channels
3198  */
3199 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3200 {
3201 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3202 	u32 queue;
3203 	u32 chan;
3204 
3205 	for (queue = 0; queue < rx_queues_count; queue++) {
3206 		chan = priv->plat->rx_queues_cfg[queue].chan;
3207 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3208 	}
3209 }
3210 
3211 /**
3212  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3213  *  @priv: driver private structure
3214  *  Description: It is used for configuring the RX Queue Priority
3215  */
3216 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3217 {
3218 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3219 	u32 queue;
3220 	u32 prio;
3221 
3222 	for (queue = 0; queue < rx_queues_count; queue++) {
3223 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3224 			continue;
3225 
3226 		prio = priv->plat->rx_queues_cfg[queue].prio;
3227 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3228 	}
3229 }
3230 
3231 /**
3232  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3233  *  @priv: driver private structure
3234  *  Description: It is used for configuring the TX Queue Priority
3235  */
3236 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3237 {
3238 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3239 	u32 queue;
3240 	u32 prio;
3241 
3242 	for (queue = 0; queue < tx_queues_count; queue++) {
3243 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3244 			continue;
3245 
3246 		prio = priv->plat->tx_queues_cfg[queue].prio;
3247 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3248 	}
3249 }
3250 
3251 /**
3252  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3253  *  @priv: driver private structure
3254  *  Description: It is used for configuring the RX queue routing
3255  */
3256 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3257 {
3258 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3259 	u32 queue;
3260 	u8 packet;
3261 
3262 	for (queue = 0; queue < rx_queues_count; queue++) {
3263 		/* no specific packet type routing specified for the queue */
3264 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3265 			continue;
3266 
3267 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3268 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3269 	}
3270 }
3271 
3272 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3273 {
3274 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3275 		priv->rss.enable = false;
3276 		return;
3277 	}
3278 
3279 	if (priv->dev->features & NETIF_F_RXHASH)
3280 		priv->rss.enable = true;
3281 	else
3282 		priv->rss.enable = false;
3283 
3284 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3285 			     priv->plat->rx_queues_to_use);
3286 }
3287 
3288 /**
3289  *  stmmac_mtl_configuration - Configure MTL
3290  *  @priv: driver private structure
3291  *  Description: It is used for configurring MTL
3292  */
3293 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3294 {
3295 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3296 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3297 
3298 	if (tx_queues_count > 1)
3299 		stmmac_set_tx_queue_weight(priv);
3300 
3301 	/* Configure MTL RX algorithms */
3302 	if (rx_queues_count > 1)
3303 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3304 				priv->plat->rx_sched_algorithm);
3305 
3306 	/* Configure MTL TX algorithms */
3307 	if (tx_queues_count > 1)
3308 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3309 				priv->plat->tx_sched_algorithm);
3310 
3311 	/* Configure CBS in AVB TX queues */
3312 	if (tx_queues_count > 1)
3313 		stmmac_configure_cbs(priv);
3314 
3315 	/* Map RX MTL to DMA channels */
3316 	stmmac_rx_queue_dma_chan_map(priv);
3317 
3318 	/* Enable MAC RX Queues */
3319 	stmmac_mac_enable_rx_queues(priv);
3320 
3321 	/* Set RX priorities */
3322 	if (rx_queues_count > 1)
3323 		stmmac_mac_config_rx_queues_prio(priv);
3324 
3325 	/* Set TX priorities */
3326 	if (tx_queues_count > 1)
3327 		stmmac_mac_config_tx_queues_prio(priv);
3328 
3329 	/* Set RX routing */
3330 	if (rx_queues_count > 1)
3331 		stmmac_mac_config_rx_queues_routing(priv);
3332 
3333 	/* Receive Side Scaling */
3334 	if (rx_queues_count > 1)
3335 		stmmac_mac_config_rss(priv);
3336 }
3337 
3338 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3339 {
3340 	if (priv->dma_cap.asp) {
3341 		netdev_info(priv->dev, "Enabling Safety Features\n");
3342 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3343 					  priv->plat->safety_feat_cfg);
3344 	} else {
3345 		netdev_info(priv->dev, "No Safety Features support found\n");
3346 	}
3347 }
3348 
3349 /**
3350  * stmmac_hw_setup - setup mac in a usable state.
3351  *  @dev : pointer to the device structure.
3352  *  @ptp_register: register PTP if set
3353  *  Description:
3354  *  this is the main function to setup the HW in a usable state because the
3355  *  dma engine is reset, the core registers are configured (e.g. AXI,
3356  *  Checksum features, timers). The DMA is ready to start receiving and
3357  *  transmitting.
3358  *  Return value:
3359  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3360  *  file on failure.
3361  */
3362 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3363 {
3364 	struct stmmac_priv *priv = netdev_priv(dev);
3365 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3366 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3367 	bool sph_en;
3368 	u32 chan;
3369 	int ret;
3370 
3371 	/* Make sure RX clock is enabled */
3372 	if (priv->hw->phylink_pcs)
3373 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3374 
3375 	/* DMA initialization and SW reset */
3376 	ret = stmmac_init_dma_engine(priv);
3377 	if (ret < 0) {
3378 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3379 			   __func__);
3380 		return ret;
3381 	}
3382 
3383 	/* Copy the MAC addr into the HW  */
3384 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3385 
3386 	/* PS and related bits will be programmed according to the speed */
3387 	if (priv->hw->pcs) {
3388 		int speed = priv->plat->mac_port_sel_speed;
3389 
3390 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3391 		    (speed == SPEED_1000)) {
3392 			priv->hw->ps = speed;
3393 		} else {
3394 			dev_warn(priv->device, "invalid port speed\n");
3395 			priv->hw->ps = 0;
3396 		}
3397 	}
3398 
3399 	/* Initialize the MAC Core */
3400 	stmmac_core_init(priv, priv->hw, dev);
3401 
3402 	/* Initialize MTL*/
3403 	stmmac_mtl_configuration(priv);
3404 
3405 	/* Initialize Safety Features */
3406 	stmmac_safety_feat_configuration(priv);
3407 
3408 	ret = stmmac_rx_ipc(priv, priv->hw);
3409 	if (!ret) {
3410 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3411 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3412 		priv->hw->rx_csum = 0;
3413 	}
3414 
3415 	/* Enable the MAC Rx/Tx */
3416 	stmmac_mac_set(priv, priv->ioaddr, true);
3417 
3418 	/* Set the HW DMA mode and the COE */
3419 	stmmac_dma_operation_mode(priv);
3420 
3421 	stmmac_mmc_setup(priv);
3422 
3423 	if (ptp_register) {
3424 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3425 		if (ret < 0)
3426 			netdev_warn(priv->dev,
3427 				    "failed to enable PTP reference clock: %pe\n",
3428 				    ERR_PTR(ret));
3429 	}
3430 
3431 	ret = stmmac_init_ptp(priv);
3432 	if (ret == -EOPNOTSUPP)
3433 		netdev_info(priv->dev, "PTP not supported by HW\n");
3434 	else if (ret)
3435 		netdev_warn(priv->dev, "PTP init failed\n");
3436 	else if (ptp_register)
3437 		stmmac_ptp_register(priv);
3438 
3439 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3440 
3441 	/* Convert the timer from msec to usec */
3442 	if (!priv->tx_lpi_timer)
3443 		priv->tx_lpi_timer = eee_timer * 1000;
3444 
3445 	if (priv->use_riwt) {
3446 		u32 queue;
3447 
3448 		for (queue = 0; queue < rx_cnt; queue++) {
3449 			if (!priv->rx_riwt[queue])
3450 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3451 
3452 			stmmac_rx_watchdog(priv, priv->ioaddr,
3453 					   priv->rx_riwt[queue], queue);
3454 		}
3455 	}
3456 
3457 	if (priv->hw->pcs)
3458 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3459 
3460 	/* set TX and RX rings length */
3461 	stmmac_set_rings_length(priv);
3462 
3463 	/* Enable TSO */
3464 	if (priv->tso) {
3465 		for (chan = 0; chan < tx_cnt; chan++) {
3466 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3467 
3468 			/* TSO and TBS cannot co-exist */
3469 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3470 				continue;
3471 
3472 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3473 		}
3474 	}
3475 
3476 	/* Enable Split Header */
3477 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3478 	for (chan = 0; chan < rx_cnt; chan++)
3479 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3480 
3481 
3482 	/* VLAN Tag Insertion */
3483 	if (priv->dma_cap.vlins)
3484 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3485 
3486 	/* TBS */
3487 	for (chan = 0; chan < tx_cnt; chan++) {
3488 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3489 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3490 
3491 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3492 	}
3493 
3494 	/* Configure real RX and TX queues */
3495 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3496 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3497 
3498 	/* Start the ball rolling... */
3499 	stmmac_start_all_dma(priv);
3500 
3501 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3502 
3503 	return 0;
3504 }
3505 
3506 static void stmmac_hw_teardown(struct net_device *dev)
3507 {
3508 	struct stmmac_priv *priv = netdev_priv(dev);
3509 
3510 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3511 }
3512 
3513 static void stmmac_free_irq(struct net_device *dev,
3514 			    enum request_irq_err irq_err, int irq_idx)
3515 {
3516 	struct stmmac_priv *priv = netdev_priv(dev);
3517 	int j;
3518 
3519 	switch (irq_err) {
3520 	case REQ_IRQ_ERR_ALL:
3521 		irq_idx = priv->plat->tx_queues_to_use;
3522 		fallthrough;
3523 	case REQ_IRQ_ERR_TX:
3524 		for (j = irq_idx - 1; j >= 0; j--) {
3525 			if (priv->tx_irq[j] > 0) {
3526 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3527 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3528 			}
3529 		}
3530 		irq_idx = priv->plat->rx_queues_to_use;
3531 		fallthrough;
3532 	case REQ_IRQ_ERR_RX:
3533 		for (j = irq_idx - 1; j >= 0; j--) {
3534 			if (priv->rx_irq[j] > 0) {
3535 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3536 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3537 			}
3538 		}
3539 
3540 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3541 			free_irq(priv->sfty_ue_irq, dev);
3542 		fallthrough;
3543 	case REQ_IRQ_ERR_SFTY_UE:
3544 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3545 			free_irq(priv->sfty_ce_irq, dev);
3546 		fallthrough;
3547 	case REQ_IRQ_ERR_SFTY_CE:
3548 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3549 			free_irq(priv->lpi_irq, dev);
3550 		fallthrough;
3551 	case REQ_IRQ_ERR_LPI:
3552 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3553 			free_irq(priv->wol_irq, dev);
3554 		fallthrough;
3555 	case REQ_IRQ_ERR_SFTY:
3556 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3557 			free_irq(priv->sfty_irq, dev);
3558 		fallthrough;
3559 	case REQ_IRQ_ERR_WOL:
3560 		free_irq(dev->irq, dev);
3561 		fallthrough;
3562 	case REQ_IRQ_ERR_MAC:
3563 	case REQ_IRQ_ERR_NO:
3564 		/* If MAC IRQ request error, no more IRQ to free */
3565 		break;
3566 	}
3567 }
3568 
3569 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3570 {
3571 	struct stmmac_priv *priv = netdev_priv(dev);
3572 	enum request_irq_err irq_err;
3573 	cpumask_t cpu_mask;
3574 	int irq_idx = 0;
3575 	char *int_name;
3576 	int ret;
3577 	int i;
3578 
3579 	/* For common interrupt */
3580 	int_name = priv->int_name_mac;
3581 	sprintf(int_name, "%s:%s", dev->name, "mac");
3582 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3583 			  0, int_name, dev);
3584 	if (unlikely(ret < 0)) {
3585 		netdev_err(priv->dev,
3586 			   "%s: alloc mac MSI %d (error: %d)\n",
3587 			   __func__, dev->irq, ret);
3588 		irq_err = REQ_IRQ_ERR_MAC;
3589 		goto irq_error;
3590 	}
3591 
3592 	/* Request the Wake IRQ in case of another line
3593 	 * is used for WoL
3594 	 */
3595 	priv->wol_irq_disabled = true;
3596 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3597 		int_name = priv->int_name_wol;
3598 		sprintf(int_name, "%s:%s", dev->name, "wol");
3599 		ret = request_irq(priv->wol_irq,
3600 				  stmmac_mac_interrupt,
3601 				  0, int_name, dev);
3602 		if (unlikely(ret < 0)) {
3603 			netdev_err(priv->dev,
3604 				   "%s: alloc wol MSI %d (error: %d)\n",
3605 				   __func__, priv->wol_irq, ret);
3606 			irq_err = REQ_IRQ_ERR_WOL;
3607 			goto irq_error;
3608 		}
3609 	}
3610 
3611 	/* Request the LPI IRQ in case of another line
3612 	 * is used for LPI
3613 	 */
3614 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3615 		int_name = priv->int_name_lpi;
3616 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3617 		ret = request_irq(priv->lpi_irq,
3618 				  stmmac_mac_interrupt,
3619 				  0, int_name, dev);
3620 		if (unlikely(ret < 0)) {
3621 			netdev_err(priv->dev,
3622 				   "%s: alloc lpi MSI %d (error: %d)\n",
3623 				   __func__, priv->lpi_irq, ret);
3624 			irq_err = REQ_IRQ_ERR_LPI;
3625 			goto irq_error;
3626 		}
3627 	}
3628 
3629 	/* Request the common Safety Feature Correctible/Uncorrectible
3630 	 * Error line in case of another line is used
3631 	 */
3632 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3633 		int_name = priv->int_name_sfty;
3634 		sprintf(int_name, "%s:%s", dev->name, "safety");
3635 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3636 				  0, int_name, dev);
3637 		if (unlikely(ret < 0)) {
3638 			netdev_err(priv->dev,
3639 				   "%s: alloc sfty MSI %d (error: %d)\n",
3640 				   __func__, priv->sfty_irq, ret);
3641 			irq_err = REQ_IRQ_ERR_SFTY;
3642 			goto irq_error;
3643 		}
3644 	}
3645 
3646 	/* Request the Safety Feature Correctible Error line in
3647 	 * case of another line is used
3648 	 */
3649 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3650 		int_name = priv->int_name_sfty_ce;
3651 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3652 		ret = request_irq(priv->sfty_ce_irq,
3653 				  stmmac_safety_interrupt,
3654 				  0, int_name, dev);
3655 		if (unlikely(ret < 0)) {
3656 			netdev_err(priv->dev,
3657 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3658 				   __func__, priv->sfty_ce_irq, ret);
3659 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3660 			goto irq_error;
3661 		}
3662 	}
3663 
3664 	/* Request the Safety Feature Uncorrectible Error line in
3665 	 * case of another line is used
3666 	 */
3667 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3668 		int_name = priv->int_name_sfty_ue;
3669 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3670 		ret = request_irq(priv->sfty_ue_irq,
3671 				  stmmac_safety_interrupt,
3672 				  0, int_name, dev);
3673 		if (unlikely(ret < 0)) {
3674 			netdev_err(priv->dev,
3675 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3676 				   __func__, priv->sfty_ue_irq, ret);
3677 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3678 			goto irq_error;
3679 		}
3680 	}
3681 
3682 	/* Request Rx MSI irq */
3683 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3684 		if (i >= MTL_MAX_RX_QUEUES)
3685 			break;
3686 		if (priv->rx_irq[i] == 0)
3687 			continue;
3688 
3689 		int_name = priv->int_name_rx_irq[i];
3690 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3691 		ret = request_irq(priv->rx_irq[i],
3692 				  stmmac_msi_intr_rx,
3693 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3694 		if (unlikely(ret < 0)) {
3695 			netdev_err(priv->dev,
3696 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3697 				   __func__, i, priv->rx_irq[i], ret);
3698 			irq_err = REQ_IRQ_ERR_RX;
3699 			irq_idx = i;
3700 			goto irq_error;
3701 		}
3702 		cpumask_clear(&cpu_mask);
3703 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3704 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3705 	}
3706 
3707 	/* Request Tx MSI irq */
3708 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3709 		if (i >= MTL_MAX_TX_QUEUES)
3710 			break;
3711 		if (priv->tx_irq[i] == 0)
3712 			continue;
3713 
3714 		int_name = priv->int_name_tx_irq[i];
3715 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3716 		ret = request_irq(priv->tx_irq[i],
3717 				  stmmac_msi_intr_tx,
3718 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3719 		if (unlikely(ret < 0)) {
3720 			netdev_err(priv->dev,
3721 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3722 				   __func__, i, priv->tx_irq[i], ret);
3723 			irq_err = REQ_IRQ_ERR_TX;
3724 			irq_idx = i;
3725 			goto irq_error;
3726 		}
3727 		cpumask_clear(&cpu_mask);
3728 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3729 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3730 	}
3731 
3732 	return 0;
3733 
3734 irq_error:
3735 	stmmac_free_irq(dev, irq_err, irq_idx);
3736 	return ret;
3737 }
3738 
3739 static int stmmac_request_irq_single(struct net_device *dev)
3740 {
3741 	struct stmmac_priv *priv = netdev_priv(dev);
3742 	enum request_irq_err irq_err;
3743 	int ret;
3744 
3745 	ret = request_irq(dev->irq, stmmac_interrupt,
3746 			  IRQF_SHARED, dev->name, dev);
3747 	if (unlikely(ret < 0)) {
3748 		netdev_err(priv->dev,
3749 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3750 			   __func__, dev->irq, ret);
3751 		irq_err = REQ_IRQ_ERR_MAC;
3752 		goto irq_error;
3753 	}
3754 
3755 	/* Request the Wake IRQ in case of another line
3756 	 * is used for WoL
3757 	 */
3758 	priv->wol_irq_disabled = true;
3759 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3760 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3761 				  IRQF_SHARED, dev->name, dev);
3762 		if (unlikely(ret < 0)) {
3763 			netdev_err(priv->dev,
3764 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3765 				   __func__, priv->wol_irq, ret);
3766 			irq_err = REQ_IRQ_ERR_WOL;
3767 			goto irq_error;
3768 		}
3769 	}
3770 
3771 	/* Request the IRQ lines */
3772 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3773 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3774 				  IRQF_SHARED, dev->name, dev);
3775 		if (unlikely(ret < 0)) {
3776 			netdev_err(priv->dev,
3777 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3778 				   __func__, priv->lpi_irq, ret);
3779 			irq_err = REQ_IRQ_ERR_LPI;
3780 			goto irq_error;
3781 		}
3782 	}
3783 
3784 	/* Request the common Safety Feature Correctible/Uncorrectible
3785 	 * Error line in case of another line is used
3786 	 */
3787 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3788 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3789 				  IRQF_SHARED, dev->name, dev);
3790 		if (unlikely(ret < 0)) {
3791 			netdev_err(priv->dev,
3792 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3793 				   __func__, priv->sfty_irq, ret);
3794 			irq_err = REQ_IRQ_ERR_SFTY;
3795 			goto irq_error;
3796 		}
3797 	}
3798 
3799 	return 0;
3800 
3801 irq_error:
3802 	stmmac_free_irq(dev, irq_err, 0);
3803 	return ret;
3804 }
3805 
3806 static int stmmac_request_irq(struct net_device *dev)
3807 {
3808 	struct stmmac_priv *priv = netdev_priv(dev);
3809 	int ret;
3810 
3811 	/* Request the IRQ lines */
3812 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3813 		ret = stmmac_request_irq_multi_msi(dev);
3814 	else
3815 		ret = stmmac_request_irq_single(dev);
3816 
3817 	return ret;
3818 }
3819 
3820 /**
3821  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3822  *  @priv: driver private structure
3823  *  @mtu: MTU to setup the dma queue and buf with
3824  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3825  *  Allocate the Tx/Rx DMA queue and init them.
3826  *  Return value:
3827  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3828  */
3829 static struct stmmac_dma_conf *
3830 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3831 {
3832 	struct stmmac_dma_conf *dma_conf;
3833 	int chan, bfsize, ret;
3834 
3835 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3836 	if (!dma_conf) {
3837 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3838 			   __func__);
3839 		return ERR_PTR(-ENOMEM);
3840 	}
3841 
3842 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3843 	if (bfsize < 0)
3844 		bfsize = 0;
3845 
3846 	if (bfsize < BUF_SIZE_16KiB)
3847 		bfsize = stmmac_set_bfsize(mtu, 0);
3848 
3849 	dma_conf->dma_buf_sz = bfsize;
3850 	/* Chose the tx/rx size from the already defined one in the
3851 	 * priv struct. (if defined)
3852 	 */
3853 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3854 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3855 
3856 	if (!dma_conf->dma_tx_size)
3857 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3858 	if (!dma_conf->dma_rx_size)
3859 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3860 
3861 	/* Earlier check for TBS */
3862 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3863 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3864 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3865 
3866 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3867 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3868 	}
3869 
3870 	ret = alloc_dma_desc_resources(priv, dma_conf);
3871 	if (ret < 0) {
3872 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3873 			   __func__);
3874 		goto alloc_error;
3875 	}
3876 
3877 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3878 	if (ret < 0) {
3879 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3880 			   __func__);
3881 		goto init_error;
3882 	}
3883 
3884 	return dma_conf;
3885 
3886 init_error:
3887 	free_dma_desc_resources(priv, dma_conf);
3888 alloc_error:
3889 	kfree(dma_conf);
3890 	return ERR_PTR(ret);
3891 }
3892 
3893 /**
3894  *  __stmmac_open - open entry point of the driver
3895  *  @dev : pointer to the device structure.
3896  *  @dma_conf :  structure to take the dma data
3897  *  Description:
3898  *  This function is the open entry point of the driver.
3899  *  Return value:
3900  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3901  *  file on failure.
3902  */
3903 static int __stmmac_open(struct net_device *dev,
3904 			 struct stmmac_dma_conf *dma_conf)
3905 {
3906 	struct stmmac_priv *priv = netdev_priv(dev);
3907 	int mode = priv->plat->phy_interface;
3908 	u32 chan;
3909 	int ret;
3910 
3911 	ret = pm_runtime_resume_and_get(priv->device);
3912 	if (ret < 0)
3913 		return ret;
3914 
3915 	if ((!priv->hw->xpcs ||
3916 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3917 		ret = stmmac_init_phy(dev);
3918 		if (ret) {
3919 			netdev_err(priv->dev,
3920 				   "%s: Cannot attach to PHY (error: %d)\n",
3921 				   __func__, ret);
3922 			goto init_phy_error;
3923 		}
3924 	}
3925 
3926 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3927 
3928 	buf_sz = dma_conf->dma_buf_sz;
3929 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3930 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3931 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3932 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3933 
3934 	stmmac_reset_queues_param(priv);
3935 
3936 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3937 	    priv->plat->serdes_powerup) {
3938 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3939 		if (ret < 0) {
3940 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3941 				   __func__);
3942 			goto init_error;
3943 		}
3944 	}
3945 
3946 	ret = stmmac_hw_setup(dev, true);
3947 	if (ret < 0) {
3948 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3949 		goto init_error;
3950 	}
3951 
3952 	stmmac_init_coalesce(priv);
3953 
3954 	phylink_start(priv->phylink);
3955 	/* We may have called phylink_speed_down before */
3956 	phylink_speed_up(priv->phylink);
3957 
3958 	ret = stmmac_request_irq(dev);
3959 	if (ret)
3960 		goto irq_error;
3961 
3962 	stmmac_enable_all_queues(priv);
3963 	netif_tx_start_all_queues(priv->dev);
3964 	stmmac_enable_all_dma_irq(priv);
3965 
3966 	return 0;
3967 
3968 irq_error:
3969 	phylink_stop(priv->phylink);
3970 
3971 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3972 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3973 
3974 	stmmac_hw_teardown(dev);
3975 init_error:
3976 	phylink_disconnect_phy(priv->phylink);
3977 init_phy_error:
3978 	pm_runtime_put(priv->device);
3979 	return ret;
3980 }
3981 
3982 static int stmmac_open(struct net_device *dev)
3983 {
3984 	struct stmmac_priv *priv = netdev_priv(dev);
3985 	struct stmmac_dma_conf *dma_conf;
3986 	int ret;
3987 
3988 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3989 	if (IS_ERR(dma_conf))
3990 		return PTR_ERR(dma_conf);
3991 
3992 	ret = __stmmac_open(dev, dma_conf);
3993 	if (ret)
3994 		free_dma_desc_resources(priv, dma_conf);
3995 
3996 	kfree(dma_conf);
3997 	return ret;
3998 }
3999 
4000 /**
4001  *  stmmac_release - close entry point of the driver
4002  *  @dev : device pointer.
4003  *  Description:
4004  *  This is the stop entry point of the driver.
4005  */
4006 static int stmmac_release(struct net_device *dev)
4007 {
4008 	struct stmmac_priv *priv = netdev_priv(dev);
4009 	u32 chan;
4010 
4011 	if (device_may_wakeup(priv->device))
4012 		phylink_speed_down(priv->phylink, false);
4013 	/* Stop and disconnect the PHY */
4014 	phylink_stop(priv->phylink);
4015 	phylink_disconnect_phy(priv->phylink);
4016 
4017 	stmmac_disable_all_queues(priv);
4018 
4019 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4020 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4021 
4022 	netif_tx_disable(dev);
4023 
4024 	/* Free the IRQ lines */
4025 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4026 
4027 	if (priv->eee_enabled) {
4028 		priv->tx_path_in_lpi_mode = false;
4029 		del_timer_sync(&priv->eee_ctrl_timer);
4030 	}
4031 
4032 	/* Stop TX/RX DMA and clear the descriptors */
4033 	stmmac_stop_all_dma(priv);
4034 
4035 	/* Release and free the Rx/Tx resources */
4036 	free_dma_desc_resources(priv, &priv->dma_conf);
4037 
4038 	/* Disable the MAC Rx/Tx */
4039 	stmmac_mac_set(priv, priv->ioaddr, false);
4040 
4041 	/* Powerdown Serdes if there is */
4042 	if (priv->plat->serdes_powerdown)
4043 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4044 
4045 	stmmac_release_ptp(priv);
4046 
4047 	if (stmmac_fpe_supported(priv))
4048 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4049 
4050 	pm_runtime_put(priv->device);
4051 
4052 	return 0;
4053 }
4054 
4055 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4056 			       struct stmmac_tx_queue *tx_q)
4057 {
4058 	u16 tag = 0x0, inner_tag = 0x0;
4059 	u32 inner_type = 0x0;
4060 	struct dma_desc *p;
4061 
4062 	if (!priv->dma_cap.vlins)
4063 		return false;
4064 	if (!skb_vlan_tag_present(skb))
4065 		return false;
4066 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4067 		inner_tag = skb_vlan_tag_get(skb);
4068 		inner_type = STMMAC_VLAN_INSERT;
4069 	}
4070 
4071 	tag = skb_vlan_tag_get(skb);
4072 
4073 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4074 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4075 	else
4076 		p = &tx_q->dma_tx[tx_q->cur_tx];
4077 
4078 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4079 		return false;
4080 
4081 	stmmac_set_tx_owner(priv, p);
4082 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4083 	return true;
4084 }
4085 
4086 /**
4087  *  stmmac_tso_allocator - close entry point of the driver
4088  *  @priv: driver private structure
4089  *  @des: buffer start address
4090  *  @total_len: total length to fill in descriptors
4091  *  @last_segment: condition for the last descriptor
4092  *  @queue: TX queue index
4093  *  Description:
4094  *  This function fills descriptor and request new descriptors according to
4095  *  buffer length to fill
4096  */
4097 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4098 				 int total_len, bool last_segment, u32 queue)
4099 {
4100 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4101 	struct dma_desc *desc;
4102 	u32 buff_size;
4103 	int tmp_len;
4104 
4105 	tmp_len = total_len;
4106 
4107 	while (tmp_len > 0) {
4108 		dma_addr_t curr_addr;
4109 
4110 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4111 						priv->dma_conf.dma_tx_size);
4112 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4113 
4114 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4115 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4116 		else
4117 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4118 
4119 		curr_addr = des + (total_len - tmp_len);
4120 		if (priv->dma_cap.addr64 <= 32)
4121 			desc->des0 = cpu_to_le32(curr_addr);
4122 		else
4123 			stmmac_set_desc_addr(priv, desc, curr_addr);
4124 
4125 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4126 			    TSO_MAX_BUFF_SIZE : tmp_len;
4127 
4128 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4129 				0, 1,
4130 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4131 				0, 0);
4132 
4133 		tmp_len -= TSO_MAX_BUFF_SIZE;
4134 	}
4135 }
4136 
4137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4138 {
4139 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4140 	int desc_size;
4141 
4142 	if (likely(priv->extend_desc))
4143 		desc_size = sizeof(struct dma_extended_desc);
4144 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4145 		desc_size = sizeof(struct dma_edesc);
4146 	else
4147 		desc_size = sizeof(struct dma_desc);
4148 
4149 	/* The own bit must be the latest setting done when prepare the
4150 	 * descriptor and then barrier is needed to make sure that
4151 	 * all is coherent before granting the DMA engine.
4152 	 */
4153 	wmb();
4154 
4155 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4156 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4157 }
4158 
4159 /**
4160  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4161  *  @skb : the socket buffer
4162  *  @dev : device pointer
4163  *  Description: this is the transmit function that is called on TSO frames
4164  *  (support available on GMAC4 and newer chips).
4165  *  Diagram below show the ring programming in case of TSO frames:
4166  *
4167  *  First Descriptor
4168  *   --------
4169  *   | DES0 |---> buffer1 = L2/L3/L4 header
4170  *   | DES1 |---> TCP Payload (can continue on next descr...)
4171  *   | DES2 |---> buffer 1 and 2 len
4172  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4173  *   --------
4174  *	|
4175  *     ...
4176  *	|
4177  *   --------
4178  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4179  *   | DES1 | --|
4180  *   | DES2 | --> buffer 1 and 2 len
4181  *   | DES3 |
4182  *   --------
4183  *
4184  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4185  */
4186 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4187 {
4188 	struct dma_desc *desc, *first, *mss_desc = NULL;
4189 	struct stmmac_priv *priv = netdev_priv(dev);
4190 	int tmp_pay_len = 0, first_tx, nfrags;
4191 	unsigned int first_entry, tx_packets;
4192 	struct stmmac_txq_stats *txq_stats;
4193 	struct stmmac_tx_queue *tx_q;
4194 	u32 pay_len, mss, queue;
4195 	u8 proto_hdr_len, hdr;
4196 	dma_addr_t des;
4197 	bool set_ic;
4198 	int i;
4199 
4200 	/* Always insert VLAN tag to SKB payload for TSO frames.
4201 	 *
4202 	 * Never insert VLAN tag by HW, since segments splited by
4203 	 * TSO engine will be un-tagged by mistake.
4204 	 */
4205 	if (skb_vlan_tag_present(skb)) {
4206 		skb = __vlan_hwaccel_push_inside(skb);
4207 		if (unlikely(!skb)) {
4208 			priv->xstats.tx_dropped++;
4209 			return NETDEV_TX_OK;
4210 		}
4211 	}
4212 
4213 	nfrags = skb_shinfo(skb)->nr_frags;
4214 	queue = skb_get_queue_mapping(skb);
4215 
4216 	tx_q = &priv->dma_conf.tx_queue[queue];
4217 	txq_stats = &priv->xstats.txq_stats[queue];
4218 	first_tx = tx_q->cur_tx;
4219 
4220 	/* Compute header lengths */
4221 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4222 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4223 		hdr = sizeof(struct udphdr);
4224 	} else {
4225 		proto_hdr_len = skb_tcp_all_headers(skb);
4226 		hdr = tcp_hdrlen(skb);
4227 	}
4228 
4229 	/* Desc availability based on threshold should be enough safe */
4230 	if (unlikely(stmmac_tx_avail(priv, queue) <
4231 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4232 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4233 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4234 								queue));
4235 			/* This is a hard error, log it. */
4236 			netdev_err(priv->dev,
4237 				   "%s: Tx Ring full when queue awake\n",
4238 				   __func__);
4239 		}
4240 		return NETDEV_TX_BUSY;
4241 	}
4242 
4243 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4244 
4245 	mss = skb_shinfo(skb)->gso_size;
4246 
4247 	/* set new MSS value if needed */
4248 	if (mss != tx_q->mss) {
4249 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4250 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4251 		else
4252 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4253 
4254 		stmmac_set_mss(priv, mss_desc, mss);
4255 		tx_q->mss = mss;
4256 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4257 						priv->dma_conf.dma_tx_size);
4258 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4259 	}
4260 
4261 	if (netif_msg_tx_queued(priv)) {
4262 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4263 			__func__, hdr, proto_hdr_len, pay_len, mss);
4264 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4265 			skb->data_len);
4266 	}
4267 
4268 	first_entry = tx_q->cur_tx;
4269 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4270 
4271 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4272 		desc = &tx_q->dma_entx[first_entry].basic;
4273 	else
4274 		desc = &tx_q->dma_tx[first_entry];
4275 	first = desc;
4276 
4277 	/* first descriptor: fill Headers on Buf1 */
4278 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4279 			     DMA_TO_DEVICE);
4280 	if (dma_mapping_error(priv->device, des))
4281 		goto dma_map_err;
4282 
4283 	if (priv->dma_cap.addr64 <= 32) {
4284 		first->des0 = cpu_to_le32(des);
4285 
4286 		/* Fill start of payload in buff2 of first descriptor */
4287 		if (pay_len)
4288 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4289 
4290 		/* If needed take extra descriptors to fill the remaining payload */
4291 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4292 	} else {
4293 		stmmac_set_desc_addr(priv, first, des);
4294 		tmp_pay_len = pay_len;
4295 		des += proto_hdr_len;
4296 		pay_len = 0;
4297 	}
4298 
4299 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4300 
4301 	/* In case two or more DMA transmit descriptors are allocated for this
4302 	 * non-paged SKB data, the DMA buffer address should be saved to
4303 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4304 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4305 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4306 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4307 	 * sooner or later.
4308 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4309 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4310 	 * this DMA buffer right after the DMA engine completely finishes the
4311 	 * full buffer transmission.
4312 	 */
4313 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4314 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4315 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4316 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4317 
4318 	/* Prepare fragments */
4319 	for (i = 0; i < nfrags; i++) {
4320 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4321 
4322 		des = skb_frag_dma_map(priv->device, frag, 0,
4323 				       skb_frag_size(frag),
4324 				       DMA_TO_DEVICE);
4325 		if (dma_mapping_error(priv->device, des))
4326 			goto dma_map_err;
4327 
4328 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4329 				     (i == nfrags - 1), queue);
4330 
4331 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4332 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4333 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4334 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4335 	}
4336 
4337 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4338 
4339 	/* Only the last descriptor gets to point to the skb. */
4340 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4341 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4342 
4343 	/* Manage tx mitigation */
4344 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4345 	tx_q->tx_count_frames += tx_packets;
4346 
4347 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4348 		set_ic = true;
4349 	else if (!priv->tx_coal_frames[queue])
4350 		set_ic = false;
4351 	else if (tx_packets > priv->tx_coal_frames[queue])
4352 		set_ic = true;
4353 	else if ((tx_q->tx_count_frames %
4354 		  priv->tx_coal_frames[queue]) < tx_packets)
4355 		set_ic = true;
4356 	else
4357 		set_ic = false;
4358 
4359 	if (set_ic) {
4360 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4361 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4362 		else
4363 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4364 
4365 		tx_q->tx_count_frames = 0;
4366 		stmmac_set_tx_ic(priv, desc);
4367 	}
4368 
4369 	/* We've used all descriptors we need for this skb, however,
4370 	 * advance cur_tx so that it references a fresh descriptor.
4371 	 * ndo_start_xmit will fill this descriptor the next time it's
4372 	 * called and stmmac_tx_clean may clean up to this descriptor.
4373 	 */
4374 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4375 
4376 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4377 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4378 			  __func__);
4379 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4380 	}
4381 
4382 	u64_stats_update_begin(&txq_stats->q_syncp);
4383 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4384 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4385 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4386 	if (set_ic)
4387 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4388 	u64_stats_update_end(&txq_stats->q_syncp);
4389 
4390 	if (priv->sarc_type)
4391 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4392 
4393 	skb_tx_timestamp(skb);
4394 
4395 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4396 		     priv->hwts_tx_en)) {
4397 		/* declare that device is doing timestamping */
4398 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4399 		stmmac_enable_tx_timestamp(priv, first);
4400 	}
4401 
4402 	/* Complete the first descriptor before granting the DMA */
4403 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4404 			proto_hdr_len,
4405 			pay_len,
4406 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4407 			hdr / 4, (skb->len - proto_hdr_len));
4408 
4409 	/* If context desc is used to change MSS */
4410 	if (mss_desc) {
4411 		/* Make sure that first descriptor has been completely
4412 		 * written, including its own bit. This is because MSS is
4413 		 * actually before first descriptor, so we need to make
4414 		 * sure that MSS's own bit is the last thing written.
4415 		 */
4416 		dma_wmb();
4417 		stmmac_set_tx_owner(priv, mss_desc);
4418 	}
4419 
4420 	if (netif_msg_pktdata(priv)) {
4421 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4422 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4423 			tx_q->cur_tx, first, nfrags);
4424 		pr_info(">>> frame to be transmitted: ");
4425 		print_pkt(skb->data, skb_headlen(skb));
4426 	}
4427 
4428 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4429 
4430 	stmmac_flush_tx_descriptors(priv, queue);
4431 	stmmac_tx_timer_arm(priv, queue);
4432 
4433 	return NETDEV_TX_OK;
4434 
4435 dma_map_err:
4436 	dev_err(priv->device, "Tx dma map failed\n");
4437 	dev_kfree_skb(skb);
4438 	priv->xstats.tx_dropped++;
4439 	return NETDEV_TX_OK;
4440 }
4441 
4442 /**
4443  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4444  * @skb: socket buffer to check
4445  *
4446  * Check if a packet has an ethertype that will trigger the IP header checks
4447  * and IP/TCP checksum engine of the stmmac core.
4448  *
4449  * Return: true if the ethertype can trigger the checksum engine, false
4450  * otherwise
4451  */
4452 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4453 {
4454 	int depth = 0;
4455 	__be16 proto;
4456 
4457 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4458 				    &depth);
4459 
4460 	return (depth <= ETH_HLEN) &&
4461 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4462 }
4463 
4464 /**
4465  *  stmmac_xmit - Tx entry point of the driver
4466  *  @skb : the socket buffer
4467  *  @dev : device pointer
4468  *  Description : this is the tx entry point of the driver.
4469  *  It programs the chain or the ring and supports oversized frames
4470  *  and SG feature.
4471  */
4472 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4473 {
4474 	unsigned int first_entry, tx_packets, enh_desc;
4475 	struct stmmac_priv *priv = netdev_priv(dev);
4476 	unsigned int nopaged_len = skb_headlen(skb);
4477 	int i, csum_insertion = 0, is_jumbo = 0;
4478 	u32 queue = skb_get_queue_mapping(skb);
4479 	int nfrags = skb_shinfo(skb)->nr_frags;
4480 	int gso = skb_shinfo(skb)->gso_type;
4481 	struct stmmac_txq_stats *txq_stats;
4482 	struct dma_edesc *tbs_desc = NULL;
4483 	struct dma_desc *desc, *first;
4484 	struct stmmac_tx_queue *tx_q;
4485 	bool has_vlan, set_ic;
4486 	int entry, first_tx;
4487 	dma_addr_t des;
4488 
4489 	tx_q = &priv->dma_conf.tx_queue[queue];
4490 	txq_stats = &priv->xstats.txq_stats[queue];
4491 	first_tx = tx_q->cur_tx;
4492 
4493 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4494 		stmmac_disable_eee_mode(priv);
4495 
4496 	/* Manage oversized TCP frames for GMAC4 device */
4497 	if (skb_is_gso(skb) && priv->tso) {
4498 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4499 			return stmmac_tso_xmit(skb, dev);
4500 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4501 			return stmmac_tso_xmit(skb, dev);
4502 	}
4503 
4504 	if (priv->est && priv->est->enable &&
4505 	    priv->est->max_sdu[queue] &&
4506 	    skb->len > priv->est->max_sdu[queue]){
4507 		priv->xstats.max_sdu_txq_drop[queue]++;
4508 		goto max_sdu_err;
4509 	}
4510 
4511 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4512 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4513 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4514 								queue));
4515 			/* This is a hard error, log it. */
4516 			netdev_err(priv->dev,
4517 				   "%s: Tx Ring full when queue awake\n",
4518 				   __func__);
4519 		}
4520 		return NETDEV_TX_BUSY;
4521 	}
4522 
4523 	/* Check if VLAN can be inserted by HW */
4524 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4525 
4526 	entry = tx_q->cur_tx;
4527 	first_entry = entry;
4528 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4529 
4530 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4531 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4532 	 * queues. In that case, checksum offloading for those queues that don't
4533 	 * support tx coe needs to fallback to software checksum calculation.
4534 	 *
4535 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4536 	 * also have to be checksummed in software.
4537 	 */
4538 	if (csum_insertion &&
4539 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4540 	     !stmmac_has_ip_ethertype(skb))) {
4541 		if (unlikely(skb_checksum_help(skb)))
4542 			goto dma_map_err;
4543 		csum_insertion = !csum_insertion;
4544 	}
4545 
4546 	if (likely(priv->extend_desc))
4547 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4548 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4549 		desc = &tx_q->dma_entx[entry].basic;
4550 	else
4551 		desc = tx_q->dma_tx + entry;
4552 
4553 	first = desc;
4554 
4555 	if (has_vlan)
4556 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4557 
4558 	enh_desc = priv->plat->enh_desc;
4559 	/* To program the descriptors according to the size of the frame */
4560 	if (enh_desc)
4561 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4562 
4563 	if (unlikely(is_jumbo)) {
4564 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4565 		if (unlikely(entry < 0) && (entry != -EINVAL))
4566 			goto dma_map_err;
4567 	}
4568 
4569 	for (i = 0; i < nfrags; i++) {
4570 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4571 		int len = skb_frag_size(frag);
4572 		bool last_segment = (i == (nfrags - 1));
4573 
4574 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4575 		WARN_ON(tx_q->tx_skbuff[entry]);
4576 
4577 		if (likely(priv->extend_desc))
4578 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4579 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4580 			desc = &tx_q->dma_entx[entry].basic;
4581 		else
4582 			desc = tx_q->dma_tx + entry;
4583 
4584 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4585 				       DMA_TO_DEVICE);
4586 		if (dma_mapping_error(priv->device, des))
4587 			goto dma_map_err; /* should reuse desc w/o issues */
4588 
4589 		tx_q->tx_skbuff_dma[entry].buf = des;
4590 
4591 		stmmac_set_desc_addr(priv, desc, des);
4592 
4593 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4594 		tx_q->tx_skbuff_dma[entry].len = len;
4595 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4596 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4597 
4598 		/* Prepare the descriptor and set the own bit too */
4599 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4600 				priv->mode, 1, last_segment, skb->len);
4601 	}
4602 
4603 	/* Only the last descriptor gets to point to the skb. */
4604 	tx_q->tx_skbuff[entry] = skb;
4605 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4606 
4607 	/* According to the coalesce parameter the IC bit for the latest
4608 	 * segment is reset and the timer re-started to clean the tx status.
4609 	 * This approach takes care about the fragments: desc is the first
4610 	 * element in case of no SG.
4611 	 */
4612 	tx_packets = (entry + 1) - first_tx;
4613 	tx_q->tx_count_frames += tx_packets;
4614 
4615 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4616 		set_ic = true;
4617 	else if (!priv->tx_coal_frames[queue])
4618 		set_ic = false;
4619 	else if (tx_packets > priv->tx_coal_frames[queue])
4620 		set_ic = true;
4621 	else if ((tx_q->tx_count_frames %
4622 		  priv->tx_coal_frames[queue]) < tx_packets)
4623 		set_ic = true;
4624 	else
4625 		set_ic = false;
4626 
4627 	if (set_ic) {
4628 		if (likely(priv->extend_desc))
4629 			desc = &tx_q->dma_etx[entry].basic;
4630 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4631 			desc = &tx_q->dma_entx[entry].basic;
4632 		else
4633 			desc = &tx_q->dma_tx[entry];
4634 
4635 		tx_q->tx_count_frames = 0;
4636 		stmmac_set_tx_ic(priv, desc);
4637 	}
4638 
4639 	/* We've used all descriptors we need for this skb, however,
4640 	 * advance cur_tx so that it references a fresh descriptor.
4641 	 * ndo_start_xmit will fill this descriptor the next time it's
4642 	 * called and stmmac_tx_clean may clean up to this descriptor.
4643 	 */
4644 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4645 	tx_q->cur_tx = entry;
4646 
4647 	if (netif_msg_pktdata(priv)) {
4648 		netdev_dbg(priv->dev,
4649 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4650 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4651 			   entry, first, nfrags);
4652 
4653 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4654 		print_pkt(skb->data, skb->len);
4655 	}
4656 
4657 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4658 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4659 			  __func__);
4660 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4661 	}
4662 
4663 	u64_stats_update_begin(&txq_stats->q_syncp);
4664 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4665 	if (set_ic)
4666 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4667 	u64_stats_update_end(&txq_stats->q_syncp);
4668 
4669 	if (priv->sarc_type)
4670 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4671 
4672 	skb_tx_timestamp(skb);
4673 
4674 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4675 	 * problems because all the descriptors are actually ready to be
4676 	 * passed to the DMA engine.
4677 	 */
4678 	if (likely(!is_jumbo)) {
4679 		bool last_segment = (nfrags == 0);
4680 
4681 		des = dma_map_single(priv->device, skb->data,
4682 				     nopaged_len, DMA_TO_DEVICE);
4683 		if (dma_mapping_error(priv->device, des))
4684 			goto dma_map_err;
4685 
4686 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4687 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4688 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4689 
4690 		stmmac_set_desc_addr(priv, first, des);
4691 
4692 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4693 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4694 
4695 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4696 			     priv->hwts_tx_en)) {
4697 			/* declare that device is doing timestamping */
4698 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4699 			stmmac_enable_tx_timestamp(priv, first);
4700 		}
4701 
4702 		/* Prepare the first descriptor setting the OWN bit too */
4703 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4704 				csum_insertion, priv->mode, 0, last_segment,
4705 				skb->len);
4706 	}
4707 
4708 	if (tx_q->tbs & STMMAC_TBS_EN) {
4709 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4710 
4711 		tbs_desc = &tx_q->dma_entx[first_entry];
4712 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4713 	}
4714 
4715 	stmmac_set_tx_owner(priv, first);
4716 
4717 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4718 
4719 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4720 
4721 	stmmac_flush_tx_descriptors(priv, queue);
4722 	stmmac_tx_timer_arm(priv, queue);
4723 
4724 	return NETDEV_TX_OK;
4725 
4726 dma_map_err:
4727 	netdev_err(priv->dev, "Tx DMA map failed\n");
4728 max_sdu_err:
4729 	dev_kfree_skb(skb);
4730 	priv->xstats.tx_dropped++;
4731 	return NETDEV_TX_OK;
4732 }
4733 
4734 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4735 {
4736 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4737 	__be16 vlan_proto = veth->h_vlan_proto;
4738 	u16 vlanid;
4739 
4740 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4741 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4742 	    (vlan_proto == htons(ETH_P_8021AD) &&
4743 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4744 		/* pop the vlan tag */
4745 		vlanid = ntohs(veth->h_vlan_TCI);
4746 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4747 		skb_pull(skb, VLAN_HLEN);
4748 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4749 	}
4750 }
4751 
4752 /**
4753  * stmmac_rx_refill - refill used skb preallocated buffers
4754  * @priv: driver private structure
4755  * @queue: RX queue index
4756  * Description : this is to reallocate the skb for the reception process
4757  * that is based on zero-copy.
4758  */
4759 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4760 {
4761 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4762 	int dirty = stmmac_rx_dirty(priv, queue);
4763 	unsigned int entry = rx_q->dirty_rx;
4764 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4765 
4766 	if (priv->dma_cap.host_dma_width <= 32)
4767 		gfp |= GFP_DMA32;
4768 
4769 	while (dirty-- > 0) {
4770 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4771 		struct dma_desc *p;
4772 		bool use_rx_wd;
4773 
4774 		if (priv->extend_desc)
4775 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4776 		else
4777 			p = rx_q->dma_rx + entry;
4778 
4779 		if (!buf->page) {
4780 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4781 			if (!buf->page)
4782 				break;
4783 		}
4784 
4785 		if (priv->sph && !buf->sec_page) {
4786 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4787 			if (!buf->sec_page)
4788 				break;
4789 
4790 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4791 		}
4792 
4793 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4794 
4795 		stmmac_set_desc_addr(priv, p, buf->addr);
4796 		if (priv->sph)
4797 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4798 		else
4799 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4800 		stmmac_refill_desc3(priv, rx_q, p);
4801 
4802 		rx_q->rx_count_frames++;
4803 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4804 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4805 			rx_q->rx_count_frames = 0;
4806 
4807 		use_rx_wd = !priv->rx_coal_frames[queue];
4808 		use_rx_wd |= rx_q->rx_count_frames > 0;
4809 		if (!priv->use_riwt)
4810 			use_rx_wd = false;
4811 
4812 		dma_wmb();
4813 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4814 
4815 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4816 	}
4817 	rx_q->dirty_rx = entry;
4818 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4819 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4820 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4821 }
4822 
4823 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4824 				       struct dma_desc *p,
4825 				       int status, unsigned int len)
4826 {
4827 	unsigned int plen = 0, hlen = 0;
4828 	int coe = priv->hw->rx_csum;
4829 
4830 	/* Not first descriptor, buffer is always zero */
4831 	if (priv->sph && len)
4832 		return 0;
4833 
4834 	/* First descriptor, get split header length */
4835 	stmmac_get_rx_header_len(priv, p, &hlen);
4836 	if (priv->sph && hlen) {
4837 		priv->xstats.rx_split_hdr_pkt_n++;
4838 		return hlen;
4839 	}
4840 
4841 	/* First descriptor, not last descriptor and not split header */
4842 	if (status & rx_not_ls)
4843 		return priv->dma_conf.dma_buf_sz;
4844 
4845 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4846 
4847 	/* First descriptor and last descriptor and not split header */
4848 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4849 }
4850 
4851 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4852 				       struct dma_desc *p,
4853 				       int status, unsigned int len)
4854 {
4855 	int coe = priv->hw->rx_csum;
4856 	unsigned int plen = 0;
4857 
4858 	/* Not split header, buffer is not available */
4859 	if (!priv->sph)
4860 		return 0;
4861 
4862 	/* Not last descriptor */
4863 	if (status & rx_not_ls)
4864 		return priv->dma_conf.dma_buf_sz;
4865 
4866 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4867 
4868 	/* Last descriptor */
4869 	return plen - len;
4870 }
4871 
4872 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4873 				struct xdp_frame *xdpf, bool dma_map)
4874 {
4875 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4876 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4877 	unsigned int entry = tx_q->cur_tx;
4878 	struct dma_desc *tx_desc;
4879 	dma_addr_t dma_addr;
4880 	bool set_ic;
4881 
4882 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4883 		return STMMAC_XDP_CONSUMED;
4884 
4885 	if (priv->est && priv->est->enable &&
4886 	    priv->est->max_sdu[queue] &&
4887 	    xdpf->len > priv->est->max_sdu[queue]) {
4888 		priv->xstats.max_sdu_txq_drop[queue]++;
4889 		return STMMAC_XDP_CONSUMED;
4890 	}
4891 
4892 	if (likely(priv->extend_desc))
4893 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4894 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4895 		tx_desc = &tx_q->dma_entx[entry].basic;
4896 	else
4897 		tx_desc = tx_q->dma_tx + entry;
4898 
4899 	if (dma_map) {
4900 		dma_addr = dma_map_single(priv->device, xdpf->data,
4901 					  xdpf->len, DMA_TO_DEVICE);
4902 		if (dma_mapping_error(priv->device, dma_addr))
4903 			return STMMAC_XDP_CONSUMED;
4904 
4905 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4906 	} else {
4907 		struct page *page = virt_to_page(xdpf->data);
4908 
4909 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4910 			   xdpf->headroom;
4911 		dma_sync_single_for_device(priv->device, dma_addr,
4912 					   xdpf->len, DMA_BIDIRECTIONAL);
4913 
4914 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4915 	}
4916 
4917 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4918 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4919 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4920 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4921 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4922 
4923 	tx_q->xdpf[entry] = xdpf;
4924 
4925 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4926 
4927 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4928 			       true, priv->mode, true, true,
4929 			       xdpf->len);
4930 
4931 	tx_q->tx_count_frames++;
4932 
4933 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4934 		set_ic = true;
4935 	else
4936 		set_ic = false;
4937 
4938 	if (set_ic) {
4939 		tx_q->tx_count_frames = 0;
4940 		stmmac_set_tx_ic(priv, tx_desc);
4941 		u64_stats_update_begin(&txq_stats->q_syncp);
4942 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4943 		u64_stats_update_end(&txq_stats->q_syncp);
4944 	}
4945 
4946 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4947 
4948 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4949 	tx_q->cur_tx = entry;
4950 
4951 	return STMMAC_XDP_TX;
4952 }
4953 
4954 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4955 				   int cpu)
4956 {
4957 	int index = cpu;
4958 
4959 	if (unlikely(index < 0))
4960 		index = 0;
4961 
4962 	while (index >= priv->plat->tx_queues_to_use)
4963 		index -= priv->plat->tx_queues_to_use;
4964 
4965 	return index;
4966 }
4967 
4968 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4969 				struct xdp_buff *xdp)
4970 {
4971 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4972 	int cpu = smp_processor_id();
4973 	struct netdev_queue *nq;
4974 	int queue;
4975 	int res;
4976 
4977 	if (unlikely(!xdpf))
4978 		return STMMAC_XDP_CONSUMED;
4979 
4980 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4981 	nq = netdev_get_tx_queue(priv->dev, queue);
4982 
4983 	__netif_tx_lock(nq, cpu);
4984 	/* Avoids TX time-out as we are sharing with slow path */
4985 	txq_trans_cond_update(nq);
4986 
4987 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4988 	if (res == STMMAC_XDP_TX)
4989 		stmmac_flush_tx_descriptors(priv, queue);
4990 
4991 	__netif_tx_unlock(nq);
4992 
4993 	return res;
4994 }
4995 
4996 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4997 				 struct bpf_prog *prog,
4998 				 struct xdp_buff *xdp)
4999 {
5000 	u32 act;
5001 	int res;
5002 
5003 	act = bpf_prog_run_xdp(prog, xdp);
5004 	switch (act) {
5005 	case XDP_PASS:
5006 		res = STMMAC_XDP_PASS;
5007 		break;
5008 	case XDP_TX:
5009 		res = stmmac_xdp_xmit_back(priv, xdp);
5010 		break;
5011 	case XDP_REDIRECT:
5012 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5013 			res = STMMAC_XDP_CONSUMED;
5014 		else
5015 			res = STMMAC_XDP_REDIRECT;
5016 		break;
5017 	default:
5018 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5019 		fallthrough;
5020 	case XDP_ABORTED:
5021 		trace_xdp_exception(priv->dev, prog, act);
5022 		fallthrough;
5023 	case XDP_DROP:
5024 		res = STMMAC_XDP_CONSUMED;
5025 		break;
5026 	}
5027 
5028 	return res;
5029 }
5030 
5031 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5032 					   struct xdp_buff *xdp)
5033 {
5034 	struct bpf_prog *prog;
5035 	int res;
5036 
5037 	prog = READ_ONCE(priv->xdp_prog);
5038 	if (!prog) {
5039 		res = STMMAC_XDP_PASS;
5040 		goto out;
5041 	}
5042 
5043 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5044 out:
5045 	return ERR_PTR(-res);
5046 }
5047 
5048 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5049 				   int xdp_status)
5050 {
5051 	int cpu = smp_processor_id();
5052 	int queue;
5053 
5054 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5055 
5056 	if (xdp_status & STMMAC_XDP_TX)
5057 		stmmac_tx_timer_arm(priv, queue);
5058 
5059 	if (xdp_status & STMMAC_XDP_REDIRECT)
5060 		xdp_do_flush();
5061 }
5062 
5063 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5064 					       struct xdp_buff *xdp)
5065 {
5066 	unsigned int metasize = xdp->data - xdp->data_meta;
5067 	unsigned int datasize = xdp->data_end - xdp->data;
5068 	struct sk_buff *skb;
5069 
5070 	skb = napi_alloc_skb(&ch->rxtx_napi,
5071 			     xdp->data_end - xdp->data_hard_start);
5072 	if (unlikely(!skb))
5073 		return NULL;
5074 
5075 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5076 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5077 	if (metasize)
5078 		skb_metadata_set(skb, metasize);
5079 
5080 	return skb;
5081 }
5082 
5083 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5084 				   struct dma_desc *p, struct dma_desc *np,
5085 				   struct xdp_buff *xdp)
5086 {
5087 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5088 	struct stmmac_channel *ch = &priv->channel[queue];
5089 	unsigned int len = xdp->data_end - xdp->data;
5090 	enum pkt_hash_types hash_type;
5091 	int coe = priv->hw->rx_csum;
5092 	struct sk_buff *skb;
5093 	u32 hash;
5094 
5095 	skb = stmmac_construct_skb_zc(ch, xdp);
5096 	if (!skb) {
5097 		priv->xstats.rx_dropped++;
5098 		return;
5099 	}
5100 
5101 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5102 	if (priv->hw->hw_vlan_en)
5103 		/* MAC level stripping. */
5104 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5105 	else
5106 		/* Driver level stripping. */
5107 		stmmac_rx_vlan(priv->dev, skb);
5108 	skb->protocol = eth_type_trans(skb, priv->dev);
5109 
5110 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5111 		skb_checksum_none_assert(skb);
5112 	else
5113 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5114 
5115 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5116 		skb_set_hash(skb, hash, hash_type);
5117 
5118 	skb_record_rx_queue(skb, queue);
5119 	napi_gro_receive(&ch->rxtx_napi, skb);
5120 
5121 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5122 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5123 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5124 	u64_stats_update_end(&rxq_stats->napi_syncp);
5125 }
5126 
5127 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5128 {
5129 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5130 	unsigned int entry = rx_q->dirty_rx;
5131 	struct dma_desc *rx_desc = NULL;
5132 	bool ret = true;
5133 
5134 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5135 
5136 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5137 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5138 		dma_addr_t dma_addr;
5139 		bool use_rx_wd;
5140 
5141 		if (!buf->xdp) {
5142 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5143 			if (!buf->xdp) {
5144 				ret = false;
5145 				break;
5146 			}
5147 		}
5148 
5149 		if (priv->extend_desc)
5150 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5151 		else
5152 			rx_desc = rx_q->dma_rx + entry;
5153 
5154 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5155 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5156 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5157 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5158 
5159 		rx_q->rx_count_frames++;
5160 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5161 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5162 			rx_q->rx_count_frames = 0;
5163 
5164 		use_rx_wd = !priv->rx_coal_frames[queue];
5165 		use_rx_wd |= rx_q->rx_count_frames > 0;
5166 		if (!priv->use_riwt)
5167 			use_rx_wd = false;
5168 
5169 		dma_wmb();
5170 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5171 
5172 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5173 	}
5174 
5175 	if (rx_desc) {
5176 		rx_q->dirty_rx = entry;
5177 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5178 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5179 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5180 	}
5181 
5182 	return ret;
5183 }
5184 
5185 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5186 {
5187 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5188 	 * to represent incoming packet, whereas cb field in the same structure
5189 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5190 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5191 	 */
5192 	return (struct stmmac_xdp_buff *)xdp;
5193 }
5194 
5195 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5196 {
5197 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5198 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5199 	unsigned int count = 0, error = 0, len = 0;
5200 	int dirty = stmmac_rx_dirty(priv, queue);
5201 	unsigned int next_entry = rx_q->cur_rx;
5202 	u32 rx_errors = 0, rx_dropped = 0;
5203 	unsigned int desc_size;
5204 	struct bpf_prog *prog;
5205 	bool failure = false;
5206 	int xdp_status = 0;
5207 	int status = 0;
5208 
5209 	if (netif_msg_rx_status(priv)) {
5210 		void *rx_head;
5211 
5212 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5213 		if (priv->extend_desc) {
5214 			rx_head = (void *)rx_q->dma_erx;
5215 			desc_size = sizeof(struct dma_extended_desc);
5216 		} else {
5217 			rx_head = (void *)rx_q->dma_rx;
5218 			desc_size = sizeof(struct dma_desc);
5219 		}
5220 
5221 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5222 				    rx_q->dma_rx_phy, desc_size);
5223 	}
5224 	while (count < limit) {
5225 		struct stmmac_rx_buffer *buf;
5226 		struct stmmac_xdp_buff *ctx;
5227 		unsigned int buf1_len = 0;
5228 		struct dma_desc *np, *p;
5229 		int entry;
5230 		int res;
5231 
5232 		if (!count && rx_q->state_saved) {
5233 			error = rx_q->state.error;
5234 			len = rx_q->state.len;
5235 		} else {
5236 			rx_q->state_saved = false;
5237 			error = 0;
5238 			len = 0;
5239 		}
5240 
5241 		if (count >= limit)
5242 			break;
5243 
5244 read_again:
5245 		buf1_len = 0;
5246 		entry = next_entry;
5247 		buf = &rx_q->buf_pool[entry];
5248 
5249 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5250 			failure = failure ||
5251 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5252 			dirty = 0;
5253 		}
5254 
5255 		if (priv->extend_desc)
5256 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5257 		else
5258 			p = rx_q->dma_rx + entry;
5259 
5260 		/* read the status of the incoming frame */
5261 		status = stmmac_rx_status(priv, &priv->xstats, p);
5262 		/* check if managed by the DMA otherwise go ahead */
5263 		if (unlikely(status & dma_own))
5264 			break;
5265 
5266 		/* Prefetch the next RX descriptor */
5267 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5268 						priv->dma_conf.dma_rx_size);
5269 		next_entry = rx_q->cur_rx;
5270 
5271 		if (priv->extend_desc)
5272 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5273 		else
5274 			np = rx_q->dma_rx + next_entry;
5275 
5276 		prefetch(np);
5277 
5278 		/* Ensure a valid XSK buffer before proceed */
5279 		if (!buf->xdp)
5280 			break;
5281 
5282 		if (priv->extend_desc)
5283 			stmmac_rx_extended_status(priv, &priv->xstats,
5284 						  rx_q->dma_erx + entry);
5285 		if (unlikely(status == discard_frame)) {
5286 			xsk_buff_free(buf->xdp);
5287 			buf->xdp = NULL;
5288 			dirty++;
5289 			error = 1;
5290 			if (!priv->hwts_rx_en)
5291 				rx_errors++;
5292 		}
5293 
5294 		if (unlikely(error && (status & rx_not_ls)))
5295 			goto read_again;
5296 		if (unlikely(error)) {
5297 			count++;
5298 			continue;
5299 		}
5300 
5301 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5302 		if (likely(status & rx_not_ls)) {
5303 			xsk_buff_free(buf->xdp);
5304 			buf->xdp = NULL;
5305 			dirty++;
5306 			count++;
5307 			goto read_again;
5308 		}
5309 
5310 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5311 		ctx->priv = priv;
5312 		ctx->desc = p;
5313 		ctx->ndesc = np;
5314 
5315 		/* XDP ZC Frame only support primary buffers for now */
5316 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5317 		len += buf1_len;
5318 
5319 		/* ACS is disabled; strip manually. */
5320 		if (likely(!(status & rx_not_ls))) {
5321 			buf1_len -= ETH_FCS_LEN;
5322 			len -= ETH_FCS_LEN;
5323 		}
5324 
5325 		/* RX buffer is good and fit into a XSK pool buffer */
5326 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5327 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5328 
5329 		prog = READ_ONCE(priv->xdp_prog);
5330 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5331 
5332 		switch (res) {
5333 		case STMMAC_XDP_PASS:
5334 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5335 			xsk_buff_free(buf->xdp);
5336 			break;
5337 		case STMMAC_XDP_CONSUMED:
5338 			xsk_buff_free(buf->xdp);
5339 			rx_dropped++;
5340 			break;
5341 		case STMMAC_XDP_TX:
5342 		case STMMAC_XDP_REDIRECT:
5343 			xdp_status |= res;
5344 			break;
5345 		}
5346 
5347 		buf->xdp = NULL;
5348 		dirty++;
5349 		count++;
5350 	}
5351 
5352 	if (status & rx_not_ls) {
5353 		rx_q->state_saved = true;
5354 		rx_q->state.error = error;
5355 		rx_q->state.len = len;
5356 	}
5357 
5358 	stmmac_finalize_xdp_rx(priv, xdp_status);
5359 
5360 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5361 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5362 	u64_stats_update_end(&rxq_stats->napi_syncp);
5363 
5364 	priv->xstats.rx_dropped += rx_dropped;
5365 	priv->xstats.rx_errors += rx_errors;
5366 
5367 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5368 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5369 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5370 		else
5371 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5372 
5373 		return (int)count;
5374 	}
5375 
5376 	return failure ? limit : (int)count;
5377 }
5378 
5379 /**
5380  * stmmac_rx - manage the receive process
5381  * @priv: driver private structure
5382  * @limit: napi bugget
5383  * @queue: RX queue index.
5384  * Description :  this the function called by the napi poll method.
5385  * It gets all the frames inside the ring.
5386  */
5387 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5388 {
5389 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5390 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5391 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5392 	struct stmmac_channel *ch = &priv->channel[queue];
5393 	unsigned int count = 0, error = 0, len = 0;
5394 	int status = 0, coe = priv->hw->rx_csum;
5395 	unsigned int next_entry = rx_q->cur_rx;
5396 	enum dma_data_direction dma_dir;
5397 	unsigned int desc_size;
5398 	struct sk_buff *skb = NULL;
5399 	struct stmmac_xdp_buff ctx;
5400 	int xdp_status = 0;
5401 	int buf_sz;
5402 
5403 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5404 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5405 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5406 
5407 	if (netif_msg_rx_status(priv)) {
5408 		void *rx_head;
5409 
5410 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5411 		if (priv->extend_desc) {
5412 			rx_head = (void *)rx_q->dma_erx;
5413 			desc_size = sizeof(struct dma_extended_desc);
5414 		} else {
5415 			rx_head = (void *)rx_q->dma_rx;
5416 			desc_size = sizeof(struct dma_desc);
5417 		}
5418 
5419 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5420 				    rx_q->dma_rx_phy, desc_size);
5421 	}
5422 	while (count < limit) {
5423 		unsigned int buf1_len = 0, buf2_len = 0;
5424 		enum pkt_hash_types hash_type;
5425 		struct stmmac_rx_buffer *buf;
5426 		struct dma_desc *np, *p;
5427 		int entry;
5428 		u32 hash;
5429 
5430 		if (!count && rx_q->state_saved) {
5431 			skb = rx_q->state.skb;
5432 			error = rx_q->state.error;
5433 			len = rx_q->state.len;
5434 		} else {
5435 			rx_q->state_saved = false;
5436 			skb = NULL;
5437 			error = 0;
5438 			len = 0;
5439 		}
5440 
5441 read_again:
5442 		if (count >= limit)
5443 			break;
5444 
5445 		buf1_len = 0;
5446 		buf2_len = 0;
5447 		entry = next_entry;
5448 		buf = &rx_q->buf_pool[entry];
5449 
5450 		if (priv->extend_desc)
5451 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5452 		else
5453 			p = rx_q->dma_rx + entry;
5454 
5455 		/* read the status of the incoming frame */
5456 		status = stmmac_rx_status(priv, &priv->xstats, p);
5457 		/* check if managed by the DMA otherwise go ahead */
5458 		if (unlikely(status & dma_own))
5459 			break;
5460 
5461 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5462 						priv->dma_conf.dma_rx_size);
5463 		next_entry = rx_q->cur_rx;
5464 
5465 		if (priv->extend_desc)
5466 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5467 		else
5468 			np = rx_q->dma_rx + next_entry;
5469 
5470 		prefetch(np);
5471 
5472 		if (priv->extend_desc)
5473 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5474 		if (unlikely(status == discard_frame)) {
5475 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5476 			buf->page = NULL;
5477 			error = 1;
5478 			if (!priv->hwts_rx_en)
5479 				rx_errors++;
5480 		}
5481 
5482 		if (unlikely(error && (status & rx_not_ls)))
5483 			goto read_again;
5484 		if (unlikely(error)) {
5485 			dev_kfree_skb(skb);
5486 			skb = NULL;
5487 			count++;
5488 			continue;
5489 		}
5490 
5491 		/* Buffer is good. Go on. */
5492 
5493 		prefetch(page_address(buf->page) + buf->page_offset);
5494 		if (buf->sec_page)
5495 			prefetch(page_address(buf->sec_page));
5496 
5497 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5498 		len += buf1_len;
5499 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5500 		len += buf2_len;
5501 
5502 		/* ACS is disabled; strip manually. */
5503 		if (likely(!(status & rx_not_ls))) {
5504 			if (buf2_len) {
5505 				buf2_len -= ETH_FCS_LEN;
5506 				len -= ETH_FCS_LEN;
5507 			} else if (buf1_len) {
5508 				buf1_len -= ETH_FCS_LEN;
5509 				len -= ETH_FCS_LEN;
5510 			}
5511 		}
5512 
5513 		if (!skb) {
5514 			unsigned int pre_len, sync_len;
5515 
5516 			dma_sync_single_for_cpu(priv->device, buf->addr,
5517 						buf1_len, dma_dir);
5518 
5519 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5520 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5521 					 buf->page_offset, buf1_len, true);
5522 
5523 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5524 				  buf->page_offset;
5525 
5526 			ctx.priv = priv;
5527 			ctx.desc = p;
5528 			ctx.ndesc = np;
5529 
5530 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5531 			/* Due xdp_adjust_tail: DMA sync for_device
5532 			 * cover max len CPU touch
5533 			 */
5534 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5535 				   buf->page_offset;
5536 			sync_len = max(sync_len, pre_len);
5537 
5538 			/* For Not XDP_PASS verdict */
5539 			if (IS_ERR(skb)) {
5540 				unsigned int xdp_res = -PTR_ERR(skb);
5541 
5542 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5543 					page_pool_put_page(rx_q->page_pool,
5544 							   virt_to_head_page(ctx.xdp.data),
5545 							   sync_len, true);
5546 					buf->page = NULL;
5547 					rx_dropped++;
5548 
5549 					/* Clear skb as it was set as
5550 					 * status by XDP program.
5551 					 */
5552 					skb = NULL;
5553 
5554 					if (unlikely((status & rx_not_ls)))
5555 						goto read_again;
5556 
5557 					count++;
5558 					continue;
5559 				} else if (xdp_res & (STMMAC_XDP_TX |
5560 						      STMMAC_XDP_REDIRECT)) {
5561 					xdp_status |= xdp_res;
5562 					buf->page = NULL;
5563 					skb = NULL;
5564 					count++;
5565 					continue;
5566 				}
5567 			}
5568 		}
5569 
5570 		if (!skb) {
5571 			/* XDP program may expand or reduce tail */
5572 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5573 
5574 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5575 			if (!skb) {
5576 				rx_dropped++;
5577 				count++;
5578 				goto drain_data;
5579 			}
5580 
5581 			/* XDP program may adjust header */
5582 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5583 			skb_put(skb, buf1_len);
5584 
5585 			/* Data payload copied into SKB, page ready for recycle */
5586 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5587 			buf->page = NULL;
5588 		} else if (buf1_len) {
5589 			dma_sync_single_for_cpu(priv->device, buf->addr,
5590 						buf1_len, dma_dir);
5591 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5592 					buf->page, buf->page_offset, buf1_len,
5593 					priv->dma_conf.dma_buf_sz);
5594 
5595 			/* Data payload appended into SKB */
5596 			skb_mark_for_recycle(skb);
5597 			buf->page = NULL;
5598 		}
5599 
5600 		if (buf2_len) {
5601 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5602 						buf2_len, dma_dir);
5603 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5604 					buf->sec_page, 0, buf2_len,
5605 					priv->dma_conf.dma_buf_sz);
5606 
5607 			/* Data payload appended into SKB */
5608 			skb_mark_for_recycle(skb);
5609 			buf->sec_page = NULL;
5610 		}
5611 
5612 drain_data:
5613 		if (likely(status & rx_not_ls))
5614 			goto read_again;
5615 		if (!skb)
5616 			continue;
5617 
5618 		/* Got entire packet into SKB. Finish it. */
5619 
5620 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5621 
5622 		if (priv->hw->hw_vlan_en)
5623 			/* MAC level stripping. */
5624 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5625 		else
5626 			/* Driver level stripping. */
5627 			stmmac_rx_vlan(priv->dev, skb);
5628 
5629 		skb->protocol = eth_type_trans(skb, priv->dev);
5630 
5631 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5632 			skb_checksum_none_assert(skb);
5633 		else
5634 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5635 
5636 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5637 			skb_set_hash(skb, hash, hash_type);
5638 
5639 		skb_record_rx_queue(skb, queue);
5640 		napi_gro_receive(&ch->rx_napi, skb);
5641 		skb = NULL;
5642 
5643 		rx_packets++;
5644 		rx_bytes += len;
5645 		count++;
5646 	}
5647 
5648 	if (status & rx_not_ls || skb) {
5649 		rx_q->state_saved = true;
5650 		rx_q->state.skb = skb;
5651 		rx_q->state.error = error;
5652 		rx_q->state.len = len;
5653 	}
5654 
5655 	stmmac_finalize_xdp_rx(priv, xdp_status);
5656 
5657 	stmmac_rx_refill(priv, queue);
5658 
5659 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5660 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5661 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5662 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5663 	u64_stats_update_end(&rxq_stats->napi_syncp);
5664 
5665 	priv->xstats.rx_dropped += rx_dropped;
5666 	priv->xstats.rx_errors += rx_errors;
5667 
5668 	return count;
5669 }
5670 
5671 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5672 {
5673 	struct stmmac_channel *ch =
5674 		container_of(napi, struct stmmac_channel, rx_napi);
5675 	struct stmmac_priv *priv = ch->priv_data;
5676 	struct stmmac_rxq_stats *rxq_stats;
5677 	u32 chan = ch->index;
5678 	int work_done;
5679 
5680 	rxq_stats = &priv->xstats.rxq_stats[chan];
5681 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5682 	u64_stats_inc(&rxq_stats->napi.poll);
5683 	u64_stats_update_end(&rxq_stats->napi_syncp);
5684 
5685 	work_done = stmmac_rx(priv, budget, chan);
5686 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5687 		unsigned long flags;
5688 
5689 		spin_lock_irqsave(&ch->lock, flags);
5690 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5691 		spin_unlock_irqrestore(&ch->lock, flags);
5692 	}
5693 
5694 	return work_done;
5695 }
5696 
5697 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5698 {
5699 	struct stmmac_channel *ch =
5700 		container_of(napi, struct stmmac_channel, tx_napi);
5701 	struct stmmac_priv *priv = ch->priv_data;
5702 	struct stmmac_txq_stats *txq_stats;
5703 	bool pending_packets = false;
5704 	u32 chan = ch->index;
5705 	int work_done;
5706 
5707 	txq_stats = &priv->xstats.txq_stats[chan];
5708 	u64_stats_update_begin(&txq_stats->napi_syncp);
5709 	u64_stats_inc(&txq_stats->napi.poll);
5710 	u64_stats_update_end(&txq_stats->napi_syncp);
5711 
5712 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5713 	work_done = min(work_done, budget);
5714 
5715 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5716 		unsigned long flags;
5717 
5718 		spin_lock_irqsave(&ch->lock, flags);
5719 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5720 		spin_unlock_irqrestore(&ch->lock, flags);
5721 	}
5722 
5723 	/* TX still have packet to handle, check if we need to arm tx timer */
5724 	if (pending_packets)
5725 		stmmac_tx_timer_arm(priv, chan);
5726 
5727 	return work_done;
5728 }
5729 
5730 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5731 {
5732 	struct stmmac_channel *ch =
5733 		container_of(napi, struct stmmac_channel, rxtx_napi);
5734 	struct stmmac_priv *priv = ch->priv_data;
5735 	bool tx_pending_packets = false;
5736 	int rx_done, tx_done, rxtx_done;
5737 	struct stmmac_rxq_stats *rxq_stats;
5738 	struct stmmac_txq_stats *txq_stats;
5739 	u32 chan = ch->index;
5740 
5741 	rxq_stats = &priv->xstats.rxq_stats[chan];
5742 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5743 	u64_stats_inc(&rxq_stats->napi.poll);
5744 	u64_stats_update_end(&rxq_stats->napi_syncp);
5745 
5746 	txq_stats = &priv->xstats.txq_stats[chan];
5747 	u64_stats_update_begin(&txq_stats->napi_syncp);
5748 	u64_stats_inc(&txq_stats->napi.poll);
5749 	u64_stats_update_end(&txq_stats->napi_syncp);
5750 
5751 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5752 	tx_done = min(tx_done, budget);
5753 
5754 	rx_done = stmmac_rx_zc(priv, budget, chan);
5755 
5756 	rxtx_done = max(tx_done, rx_done);
5757 
5758 	/* If either TX or RX work is not complete, return budget
5759 	 * and keep pooling
5760 	 */
5761 	if (rxtx_done >= budget)
5762 		return budget;
5763 
5764 	/* all work done, exit the polling mode */
5765 	if (napi_complete_done(napi, rxtx_done)) {
5766 		unsigned long flags;
5767 
5768 		spin_lock_irqsave(&ch->lock, flags);
5769 		/* Both RX and TX work done are compelte,
5770 		 * so enable both RX & TX IRQs.
5771 		 */
5772 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5773 		spin_unlock_irqrestore(&ch->lock, flags);
5774 	}
5775 
5776 	/* TX still have packet to handle, check if we need to arm tx timer */
5777 	if (tx_pending_packets)
5778 		stmmac_tx_timer_arm(priv, chan);
5779 
5780 	return min(rxtx_done, budget - 1);
5781 }
5782 
5783 /**
5784  *  stmmac_tx_timeout
5785  *  @dev : Pointer to net device structure
5786  *  @txqueue: the index of the hanging transmit queue
5787  *  Description: this function is called when a packet transmission fails to
5788  *   complete within a reasonable time. The driver will mark the error in the
5789  *   netdev structure and arrange for the device to be reset to a sane state
5790  *   in order to transmit a new packet.
5791  */
5792 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5793 {
5794 	struct stmmac_priv *priv = netdev_priv(dev);
5795 
5796 	stmmac_global_err(priv);
5797 }
5798 
5799 /**
5800  *  stmmac_set_rx_mode - entry point for multicast addressing
5801  *  @dev : pointer to the device structure
5802  *  Description:
5803  *  This function is a driver entry point which gets called by the kernel
5804  *  whenever multicast addresses must be enabled/disabled.
5805  *  Return value:
5806  *  void.
5807  */
5808 static void stmmac_set_rx_mode(struct net_device *dev)
5809 {
5810 	struct stmmac_priv *priv = netdev_priv(dev);
5811 
5812 	stmmac_set_filter(priv, priv->hw, dev);
5813 }
5814 
5815 /**
5816  *  stmmac_change_mtu - entry point to change MTU size for the device.
5817  *  @dev : device pointer.
5818  *  @new_mtu : the new MTU size for the device.
5819  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5820  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5821  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5822  *  Return value:
5823  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5824  *  file on failure.
5825  */
5826 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5827 {
5828 	struct stmmac_priv *priv = netdev_priv(dev);
5829 	int txfifosz = priv->plat->tx_fifo_size;
5830 	struct stmmac_dma_conf *dma_conf;
5831 	const int mtu = new_mtu;
5832 	int ret;
5833 
5834 	if (txfifosz == 0)
5835 		txfifosz = priv->dma_cap.tx_fifo_size;
5836 
5837 	txfifosz /= priv->plat->tx_queues_to_use;
5838 
5839 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5840 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5841 		return -EINVAL;
5842 	}
5843 
5844 	new_mtu = STMMAC_ALIGN(new_mtu);
5845 
5846 	/* If condition true, FIFO is too small or MTU too large */
5847 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5848 		return -EINVAL;
5849 
5850 	if (netif_running(dev)) {
5851 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5852 		/* Try to allocate the new DMA conf with the new mtu */
5853 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5854 		if (IS_ERR(dma_conf)) {
5855 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5856 				   mtu);
5857 			return PTR_ERR(dma_conf);
5858 		}
5859 
5860 		stmmac_release(dev);
5861 
5862 		ret = __stmmac_open(dev, dma_conf);
5863 		if (ret) {
5864 			free_dma_desc_resources(priv, dma_conf);
5865 			kfree(dma_conf);
5866 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5867 			return ret;
5868 		}
5869 
5870 		kfree(dma_conf);
5871 
5872 		stmmac_set_rx_mode(dev);
5873 	}
5874 
5875 	WRITE_ONCE(dev->mtu, mtu);
5876 	netdev_update_features(dev);
5877 
5878 	return 0;
5879 }
5880 
5881 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5882 					     netdev_features_t features)
5883 {
5884 	struct stmmac_priv *priv = netdev_priv(dev);
5885 
5886 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5887 		features &= ~NETIF_F_RXCSUM;
5888 
5889 	if (!priv->plat->tx_coe)
5890 		features &= ~NETIF_F_CSUM_MASK;
5891 
5892 	/* Some GMAC devices have a bugged Jumbo frame support that
5893 	 * needs to have the Tx COE disabled for oversized frames
5894 	 * (due to limited buffer sizes). In this case we disable
5895 	 * the TX csum insertion in the TDES and not use SF.
5896 	 */
5897 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5898 		features &= ~NETIF_F_CSUM_MASK;
5899 
5900 	/* Disable tso if asked by ethtool */
5901 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5902 		if (features & NETIF_F_TSO)
5903 			priv->tso = true;
5904 		else
5905 			priv->tso = false;
5906 	}
5907 
5908 	return features;
5909 }
5910 
5911 static int stmmac_set_features(struct net_device *netdev,
5912 			       netdev_features_t features)
5913 {
5914 	struct stmmac_priv *priv = netdev_priv(netdev);
5915 
5916 	/* Keep the COE Type in case of csum is supporting */
5917 	if (features & NETIF_F_RXCSUM)
5918 		priv->hw->rx_csum = priv->plat->rx_coe;
5919 	else
5920 		priv->hw->rx_csum = 0;
5921 	/* No check needed because rx_coe has been set before and it will be
5922 	 * fixed in case of issue.
5923 	 */
5924 	stmmac_rx_ipc(priv, priv->hw);
5925 
5926 	if (priv->sph_cap) {
5927 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5928 		u32 chan;
5929 
5930 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5931 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5932 	}
5933 
5934 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5935 		priv->hw->hw_vlan_en = true;
5936 	else
5937 		priv->hw->hw_vlan_en = false;
5938 
5939 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5940 
5941 	return 0;
5942 }
5943 
5944 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5945 {
5946 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5947 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5948 	u32 queues_count;
5949 	u32 queue;
5950 	bool xmac;
5951 
5952 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5953 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5954 
5955 	if (priv->irq_wake)
5956 		pm_wakeup_event(priv->device, 0);
5957 
5958 	if (priv->dma_cap.estsel)
5959 		stmmac_est_irq_status(priv, priv, priv->dev,
5960 				      &priv->xstats, tx_cnt);
5961 
5962 	if (stmmac_fpe_supported(priv))
5963 		stmmac_fpe_irq_status(priv);
5964 
5965 	/* To handle GMAC own interrupts */
5966 	if ((priv->plat->has_gmac) || xmac) {
5967 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5968 
5969 		if (unlikely(status)) {
5970 			/* For LPI we need to save the tx status */
5971 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5972 				priv->tx_path_in_lpi_mode = true;
5973 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5974 				priv->tx_path_in_lpi_mode = false;
5975 		}
5976 
5977 		for (queue = 0; queue < queues_count; queue++)
5978 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
5979 
5980 		/* PCS link status */
5981 		if (priv->hw->pcs &&
5982 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5983 			if (priv->xstats.pcs_link)
5984 				netif_carrier_on(priv->dev);
5985 			else
5986 				netif_carrier_off(priv->dev);
5987 		}
5988 
5989 		stmmac_timestamp_interrupt(priv, priv);
5990 	}
5991 }
5992 
5993 /**
5994  *  stmmac_interrupt - main ISR
5995  *  @irq: interrupt number.
5996  *  @dev_id: to pass the net device pointer.
5997  *  Description: this is the main driver interrupt service routine.
5998  *  It can call:
5999  *  o DMA service routine (to manage incoming frame reception and transmission
6000  *    status)
6001  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6002  *    interrupts.
6003  */
6004 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6005 {
6006 	struct net_device *dev = (struct net_device *)dev_id;
6007 	struct stmmac_priv *priv = netdev_priv(dev);
6008 
6009 	/* Check if adapter is up */
6010 	if (test_bit(STMMAC_DOWN, &priv->state))
6011 		return IRQ_HANDLED;
6012 
6013 	/* Check ASP error if it isn't delivered via an individual IRQ */
6014 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6015 		return IRQ_HANDLED;
6016 
6017 	/* To handle Common interrupts */
6018 	stmmac_common_interrupt(priv);
6019 
6020 	/* To handle DMA interrupts */
6021 	stmmac_dma_interrupt(priv);
6022 
6023 	return IRQ_HANDLED;
6024 }
6025 
6026 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6027 {
6028 	struct net_device *dev = (struct net_device *)dev_id;
6029 	struct stmmac_priv *priv = netdev_priv(dev);
6030 
6031 	/* Check if adapter is up */
6032 	if (test_bit(STMMAC_DOWN, &priv->state))
6033 		return IRQ_HANDLED;
6034 
6035 	/* To handle Common interrupts */
6036 	stmmac_common_interrupt(priv);
6037 
6038 	return IRQ_HANDLED;
6039 }
6040 
6041 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6042 {
6043 	struct net_device *dev = (struct net_device *)dev_id;
6044 	struct stmmac_priv *priv = netdev_priv(dev);
6045 
6046 	/* Check if adapter is up */
6047 	if (test_bit(STMMAC_DOWN, &priv->state))
6048 		return IRQ_HANDLED;
6049 
6050 	/* Check if a fatal error happened */
6051 	stmmac_safety_feat_interrupt(priv);
6052 
6053 	return IRQ_HANDLED;
6054 }
6055 
6056 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6057 {
6058 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6059 	struct stmmac_dma_conf *dma_conf;
6060 	int chan = tx_q->queue_index;
6061 	struct stmmac_priv *priv;
6062 	int status;
6063 
6064 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6065 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6066 
6067 	/* Check if adapter is up */
6068 	if (test_bit(STMMAC_DOWN, &priv->state))
6069 		return IRQ_HANDLED;
6070 
6071 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6072 
6073 	if (unlikely(status & tx_hard_error_bump_tc)) {
6074 		/* Try to bump up the dma threshold on this failure */
6075 		stmmac_bump_dma_threshold(priv, chan);
6076 	} else if (unlikely(status == tx_hard_error)) {
6077 		stmmac_tx_err(priv, chan);
6078 	}
6079 
6080 	return IRQ_HANDLED;
6081 }
6082 
6083 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6084 {
6085 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6086 	struct stmmac_dma_conf *dma_conf;
6087 	int chan = rx_q->queue_index;
6088 	struct stmmac_priv *priv;
6089 
6090 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6091 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6092 
6093 	/* Check if adapter is up */
6094 	if (test_bit(STMMAC_DOWN, &priv->state))
6095 		return IRQ_HANDLED;
6096 
6097 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6098 
6099 	return IRQ_HANDLED;
6100 }
6101 
6102 /**
6103  *  stmmac_ioctl - Entry point for the Ioctl
6104  *  @dev: Device pointer.
6105  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6106  *  a proprietary structure used to pass information to the driver.
6107  *  @cmd: IOCTL command
6108  *  Description:
6109  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6110  */
6111 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6112 {
6113 	struct stmmac_priv *priv = netdev_priv (dev);
6114 	int ret = -EOPNOTSUPP;
6115 
6116 	if (!netif_running(dev))
6117 		return -EINVAL;
6118 
6119 	switch (cmd) {
6120 	case SIOCGMIIPHY:
6121 	case SIOCGMIIREG:
6122 	case SIOCSMIIREG:
6123 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6124 		break;
6125 	case SIOCSHWTSTAMP:
6126 		ret = stmmac_hwtstamp_set(dev, rq);
6127 		break;
6128 	case SIOCGHWTSTAMP:
6129 		ret = stmmac_hwtstamp_get(dev, rq);
6130 		break;
6131 	default:
6132 		break;
6133 	}
6134 
6135 	return ret;
6136 }
6137 
6138 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6139 				    void *cb_priv)
6140 {
6141 	struct stmmac_priv *priv = cb_priv;
6142 	int ret = -EOPNOTSUPP;
6143 
6144 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6145 		return ret;
6146 
6147 	__stmmac_disable_all_queues(priv);
6148 
6149 	switch (type) {
6150 	case TC_SETUP_CLSU32:
6151 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6152 		break;
6153 	case TC_SETUP_CLSFLOWER:
6154 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6155 		break;
6156 	default:
6157 		break;
6158 	}
6159 
6160 	stmmac_enable_all_queues(priv);
6161 	return ret;
6162 }
6163 
6164 static LIST_HEAD(stmmac_block_cb_list);
6165 
6166 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6167 			   void *type_data)
6168 {
6169 	struct stmmac_priv *priv = netdev_priv(ndev);
6170 
6171 	switch (type) {
6172 	case TC_QUERY_CAPS:
6173 		return stmmac_tc_query_caps(priv, priv, type_data);
6174 	case TC_SETUP_QDISC_MQPRIO:
6175 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6176 	case TC_SETUP_BLOCK:
6177 		return flow_block_cb_setup_simple(type_data,
6178 						  &stmmac_block_cb_list,
6179 						  stmmac_setup_tc_block_cb,
6180 						  priv, priv, true);
6181 	case TC_SETUP_QDISC_CBS:
6182 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6183 	case TC_SETUP_QDISC_TAPRIO:
6184 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6185 	case TC_SETUP_QDISC_ETF:
6186 		return stmmac_tc_setup_etf(priv, priv, type_data);
6187 	default:
6188 		return -EOPNOTSUPP;
6189 	}
6190 }
6191 
6192 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6193 			       struct net_device *sb_dev)
6194 {
6195 	int gso = skb_shinfo(skb)->gso_type;
6196 
6197 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6198 		/*
6199 		 * There is no way to determine the number of TSO/USO
6200 		 * capable Queues. Let's use always the Queue 0
6201 		 * because if TSO/USO is supported then at least this
6202 		 * one will be capable.
6203 		 */
6204 		return 0;
6205 	}
6206 
6207 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6208 }
6209 
6210 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6211 {
6212 	struct stmmac_priv *priv = netdev_priv(ndev);
6213 	int ret = 0;
6214 
6215 	ret = pm_runtime_resume_and_get(priv->device);
6216 	if (ret < 0)
6217 		return ret;
6218 
6219 	ret = eth_mac_addr(ndev, addr);
6220 	if (ret)
6221 		goto set_mac_error;
6222 
6223 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6224 
6225 set_mac_error:
6226 	pm_runtime_put(priv->device);
6227 
6228 	return ret;
6229 }
6230 
6231 #ifdef CONFIG_DEBUG_FS
6232 static struct dentry *stmmac_fs_dir;
6233 
6234 static void sysfs_display_ring(void *head, int size, int extend_desc,
6235 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6236 {
6237 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6238 	struct dma_desc *p = (struct dma_desc *)head;
6239 	unsigned int desc_size;
6240 	dma_addr_t dma_addr;
6241 	int i;
6242 
6243 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6244 	for (i = 0; i < size; i++) {
6245 		dma_addr = dma_phy_addr + i * desc_size;
6246 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6247 				i, &dma_addr,
6248 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6249 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6250 		if (extend_desc)
6251 			p = &(++ep)->basic;
6252 		else
6253 			p++;
6254 	}
6255 }
6256 
6257 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6258 {
6259 	struct net_device *dev = seq->private;
6260 	struct stmmac_priv *priv = netdev_priv(dev);
6261 	u32 rx_count = priv->plat->rx_queues_to_use;
6262 	u32 tx_count = priv->plat->tx_queues_to_use;
6263 	u32 queue;
6264 
6265 	if ((dev->flags & IFF_UP) == 0)
6266 		return 0;
6267 
6268 	for (queue = 0; queue < rx_count; queue++) {
6269 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6270 
6271 		seq_printf(seq, "RX Queue %d:\n", queue);
6272 
6273 		if (priv->extend_desc) {
6274 			seq_printf(seq, "Extended descriptor ring:\n");
6275 			sysfs_display_ring((void *)rx_q->dma_erx,
6276 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6277 		} else {
6278 			seq_printf(seq, "Descriptor ring:\n");
6279 			sysfs_display_ring((void *)rx_q->dma_rx,
6280 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6281 		}
6282 	}
6283 
6284 	for (queue = 0; queue < tx_count; queue++) {
6285 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6286 
6287 		seq_printf(seq, "TX Queue %d:\n", queue);
6288 
6289 		if (priv->extend_desc) {
6290 			seq_printf(seq, "Extended descriptor ring:\n");
6291 			sysfs_display_ring((void *)tx_q->dma_etx,
6292 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6293 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6294 			seq_printf(seq, "Descriptor ring:\n");
6295 			sysfs_display_ring((void *)tx_q->dma_tx,
6296 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6297 		}
6298 	}
6299 
6300 	return 0;
6301 }
6302 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6303 
6304 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6305 {
6306 	static const char * const dwxgmac_timestamp_source[] = {
6307 		"None",
6308 		"Internal",
6309 		"External",
6310 		"Both",
6311 	};
6312 	static const char * const dwxgmac_safety_feature_desc[] = {
6313 		"No",
6314 		"All Safety Features with ECC and Parity",
6315 		"All Safety Features without ECC or Parity",
6316 		"All Safety Features with Parity Only",
6317 		"ECC Only",
6318 		"UNDEFINED",
6319 		"UNDEFINED",
6320 		"UNDEFINED",
6321 	};
6322 	struct net_device *dev = seq->private;
6323 	struct stmmac_priv *priv = netdev_priv(dev);
6324 
6325 	if (!priv->hw_cap_support) {
6326 		seq_printf(seq, "DMA HW features not supported\n");
6327 		return 0;
6328 	}
6329 
6330 	seq_printf(seq, "==============================\n");
6331 	seq_printf(seq, "\tDMA HW features\n");
6332 	seq_printf(seq, "==============================\n");
6333 
6334 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6335 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6336 	seq_printf(seq, "\t1000 Mbps: %s\n",
6337 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6338 	seq_printf(seq, "\tHalf duplex: %s\n",
6339 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6340 	if (priv->plat->has_xgmac) {
6341 		seq_printf(seq,
6342 			   "\tNumber of Additional MAC address registers: %d\n",
6343 			   priv->dma_cap.multi_addr);
6344 	} else {
6345 		seq_printf(seq, "\tHash Filter: %s\n",
6346 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6347 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6348 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6349 	}
6350 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6351 		   (priv->dma_cap.pcs) ? "Y" : "N");
6352 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6353 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6354 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6355 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6356 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6357 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6358 	seq_printf(seq, "\tRMON module: %s\n",
6359 		   (priv->dma_cap.rmon) ? "Y" : "N");
6360 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6361 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6362 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6363 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6364 	if (priv->plat->has_xgmac)
6365 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6366 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6367 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6368 		   (priv->dma_cap.eee) ? "Y" : "N");
6369 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6370 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6371 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6372 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6373 	    priv->plat->has_xgmac) {
6374 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6375 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6376 	} else {
6377 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6378 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6379 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6380 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6381 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6382 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6383 	}
6384 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6385 		   priv->dma_cap.number_rx_channel);
6386 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6387 		   priv->dma_cap.number_tx_channel);
6388 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6389 		   priv->dma_cap.number_rx_queues);
6390 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6391 		   priv->dma_cap.number_tx_queues);
6392 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6393 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6394 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6395 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6396 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6397 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6398 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6399 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6400 		   priv->dma_cap.pps_out_num);
6401 	seq_printf(seq, "\tSafety Features: %s\n",
6402 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6403 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6404 		   priv->dma_cap.frpsel ? "Y" : "N");
6405 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6406 		   priv->dma_cap.host_dma_width);
6407 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6408 		   priv->dma_cap.rssen ? "Y" : "N");
6409 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6410 		   priv->dma_cap.vlhash ? "Y" : "N");
6411 	seq_printf(seq, "\tSplit Header: %s\n",
6412 		   priv->dma_cap.sphen ? "Y" : "N");
6413 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6414 		   priv->dma_cap.vlins ? "Y" : "N");
6415 	seq_printf(seq, "\tDouble VLAN: %s\n",
6416 		   priv->dma_cap.dvlan ? "Y" : "N");
6417 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6418 		   priv->dma_cap.l3l4fnum);
6419 	seq_printf(seq, "\tARP Offloading: %s\n",
6420 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6421 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6422 		   priv->dma_cap.estsel ? "Y" : "N");
6423 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6424 		   priv->dma_cap.fpesel ? "Y" : "N");
6425 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6426 		   priv->dma_cap.tbssel ? "Y" : "N");
6427 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6428 		   priv->dma_cap.tbs_ch_num);
6429 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6430 		   priv->dma_cap.sgfsel ? "Y" : "N");
6431 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6432 		   BIT(priv->dma_cap.ttsfd) >> 1);
6433 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6434 		   priv->dma_cap.numtc);
6435 	seq_printf(seq, "\tDCB Feature: %s\n",
6436 		   priv->dma_cap.dcben ? "Y" : "N");
6437 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6438 		   priv->dma_cap.advthword ? "Y" : "N");
6439 	seq_printf(seq, "\tPTP Offload: %s\n",
6440 		   priv->dma_cap.ptoen ? "Y" : "N");
6441 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6442 		   priv->dma_cap.osten ? "Y" : "N");
6443 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6444 		   priv->dma_cap.pfcen ? "Y" : "N");
6445 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6446 		   BIT(priv->dma_cap.frpes) << 6);
6447 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6448 		   BIT(priv->dma_cap.frpbs) << 6);
6449 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6450 		   priv->dma_cap.frppipe_num);
6451 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6452 		   priv->dma_cap.nrvf_num ?
6453 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6454 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6455 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6456 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6457 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6458 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6459 		   priv->dma_cap.cbtisel ? "Y" : "N");
6460 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6461 		   priv->dma_cap.aux_snapshot_n);
6462 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6463 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6464 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6465 		   priv->dma_cap.edma ? "Y" : "N");
6466 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6467 		   priv->dma_cap.ediffc ? "Y" : "N");
6468 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6469 		   priv->dma_cap.vxn ? "Y" : "N");
6470 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6471 		   priv->dma_cap.dbgmem ? "Y" : "N");
6472 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6473 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6474 	return 0;
6475 }
6476 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6477 
6478 /* Use network device events to rename debugfs file entries.
6479  */
6480 static int stmmac_device_event(struct notifier_block *unused,
6481 			       unsigned long event, void *ptr)
6482 {
6483 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6484 	struct stmmac_priv *priv = netdev_priv(dev);
6485 
6486 	if (dev->netdev_ops != &stmmac_netdev_ops)
6487 		goto done;
6488 
6489 	switch (event) {
6490 	case NETDEV_CHANGENAME:
6491 		if (priv->dbgfs_dir)
6492 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6493 							 priv->dbgfs_dir,
6494 							 stmmac_fs_dir,
6495 							 dev->name);
6496 		break;
6497 	}
6498 done:
6499 	return NOTIFY_DONE;
6500 }
6501 
6502 static struct notifier_block stmmac_notifier = {
6503 	.notifier_call = stmmac_device_event,
6504 };
6505 
6506 static void stmmac_init_fs(struct net_device *dev)
6507 {
6508 	struct stmmac_priv *priv = netdev_priv(dev);
6509 
6510 	rtnl_lock();
6511 
6512 	/* Create per netdev entries */
6513 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6514 
6515 	/* Entry to report DMA RX/TX rings */
6516 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6517 			    &stmmac_rings_status_fops);
6518 
6519 	/* Entry to report the DMA HW features */
6520 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6521 			    &stmmac_dma_cap_fops);
6522 
6523 	rtnl_unlock();
6524 }
6525 
6526 static void stmmac_exit_fs(struct net_device *dev)
6527 {
6528 	struct stmmac_priv *priv = netdev_priv(dev);
6529 
6530 	debugfs_remove_recursive(priv->dbgfs_dir);
6531 }
6532 #endif /* CONFIG_DEBUG_FS */
6533 
6534 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6535 {
6536 	unsigned char *data = (unsigned char *)&vid_le;
6537 	unsigned char data_byte = 0;
6538 	u32 crc = ~0x0;
6539 	u32 temp = 0;
6540 	int i, bits;
6541 
6542 	bits = get_bitmask_order(VLAN_VID_MASK);
6543 	for (i = 0; i < bits; i++) {
6544 		if ((i % 8) == 0)
6545 			data_byte = data[i / 8];
6546 
6547 		temp = ((crc & 1) ^ data_byte) & 1;
6548 		crc >>= 1;
6549 		data_byte >>= 1;
6550 
6551 		if (temp)
6552 			crc ^= 0xedb88320;
6553 	}
6554 
6555 	return crc;
6556 }
6557 
6558 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6559 {
6560 	u32 crc, hash = 0;
6561 	u16 pmatch = 0;
6562 	int count = 0;
6563 	u16 vid = 0;
6564 
6565 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6566 		__le16 vid_le = cpu_to_le16(vid);
6567 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6568 		hash |= (1 << crc);
6569 		count++;
6570 	}
6571 
6572 	if (!priv->dma_cap.vlhash) {
6573 		if (count > 2) /* VID = 0 always passes filter */
6574 			return -EOPNOTSUPP;
6575 
6576 		pmatch = vid;
6577 		hash = 0;
6578 	}
6579 
6580 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6581 }
6582 
6583 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6584 {
6585 	struct stmmac_priv *priv = netdev_priv(ndev);
6586 	bool is_double = false;
6587 	int ret;
6588 
6589 	ret = pm_runtime_resume_and_get(priv->device);
6590 	if (ret < 0)
6591 		return ret;
6592 
6593 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6594 		is_double = true;
6595 
6596 	set_bit(vid, priv->active_vlans);
6597 	ret = stmmac_vlan_update(priv, is_double);
6598 	if (ret) {
6599 		clear_bit(vid, priv->active_vlans);
6600 		goto err_pm_put;
6601 	}
6602 
6603 	if (priv->hw->num_vlan) {
6604 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6605 		if (ret)
6606 			goto err_pm_put;
6607 	}
6608 err_pm_put:
6609 	pm_runtime_put(priv->device);
6610 
6611 	return ret;
6612 }
6613 
6614 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6615 {
6616 	struct stmmac_priv *priv = netdev_priv(ndev);
6617 	bool is_double = false;
6618 	int ret;
6619 
6620 	ret = pm_runtime_resume_and_get(priv->device);
6621 	if (ret < 0)
6622 		return ret;
6623 
6624 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6625 		is_double = true;
6626 
6627 	clear_bit(vid, priv->active_vlans);
6628 
6629 	if (priv->hw->num_vlan) {
6630 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6631 		if (ret)
6632 			goto del_vlan_error;
6633 	}
6634 
6635 	ret = stmmac_vlan_update(priv, is_double);
6636 
6637 del_vlan_error:
6638 	pm_runtime_put(priv->device);
6639 
6640 	return ret;
6641 }
6642 
6643 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6644 {
6645 	struct stmmac_priv *priv = netdev_priv(dev);
6646 
6647 	switch (bpf->command) {
6648 	case XDP_SETUP_PROG:
6649 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6650 	case XDP_SETUP_XSK_POOL:
6651 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6652 					     bpf->xsk.queue_id);
6653 	default:
6654 		return -EOPNOTSUPP;
6655 	}
6656 }
6657 
6658 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6659 			   struct xdp_frame **frames, u32 flags)
6660 {
6661 	struct stmmac_priv *priv = netdev_priv(dev);
6662 	int cpu = smp_processor_id();
6663 	struct netdev_queue *nq;
6664 	int i, nxmit = 0;
6665 	int queue;
6666 
6667 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6668 		return -ENETDOWN;
6669 
6670 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6671 		return -EINVAL;
6672 
6673 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6674 	nq = netdev_get_tx_queue(priv->dev, queue);
6675 
6676 	__netif_tx_lock(nq, cpu);
6677 	/* Avoids TX time-out as we are sharing with slow path */
6678 	txq_trans_cond_update(nq);
6679 
6680 	for (i = 0; i < num_frames; i++) {
6681 		int res;
6682 
6683 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6684 		if (res == STMMAC_XDP_CONSUMED)
6685 			break;
6686 
6687 		nxmit++;
6688 	}
6689 
6690 	if (flags & XDP_XMIT_FLUSH) {
6691 		stmmac_flush_tx_descriptors(priv, queue);
6692 		stmmac_tx_timer_arm(priv, queue);
6693 	}
6694 
6695 	__netif_tx_unlock(nq);
6696 
6697 	return nxmit;
6698 }
6699 
6700 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6701 {
6702 	struct stmmac_channel *ch = &priv->channel[queue];
6703 	unsigned long flags;
6704 
6705 	spin_lock_irqsave(&ch->lock, flags);
6706 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6707 	spin_unlock_irqrestore(&ch->lock, flags);
6708 
6709 	stmmac_stop_rx_dma(priv, queue);
6710 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6711 }
6712 
6713 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6714 {
6715 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6716 	struct stmmac_channel *ch = &priv->channel[queue];
6717 	unsigned long flags;
6718 	u32 buf_size;
6719 	int ret;
6720 
6721 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6722 	if (ret) {
6723 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6724 		return;
6725 	}
6726 
6727 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6728 	if (ret) {
6729 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6730 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6731 		return;
6732 	}
6733 
6734 	stmmac_reset_rx_queue(priv, queue);
6735 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6736 
6737 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6738 			    rx_q->dma_rx_phy, rx_q->queue_index);
6739 
6740 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6741 			     sizeof(struct dma_desc));
6742 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6743 			       rx_q->rx_tail_addr, rx_q->queue_index);
6744 
6745 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6746 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6747 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6748 				      buf_size,
6749 				      rx_q->queue_index);
6750 	} else {
6751 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6752 				      priv->dma_conf.dma_buf_sz,
6753 				      rx_q->queue_index);
6754 	}
6755 
6756 	stmmac_start_rx_dma(priv, queue);
6757 
6758 	spin_lock_irqsave(&ch->lock, flags);
6759 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6760 	spin_unlock_irqrestore(&ch->lock, flags);
6761 }
6762 
6763 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6764 {
6765 	struct stmmac_channel *ch = &priv->channel[queue];
6766 	unsigned long flags;
6767 
6768 	spin_lock_irqsave(&ch->lock, flags);
6769 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6770 	spin_unlock_irqrestore(&ch->lock, flags);
6771 
6772 	stmmac_stop_tx_dma(priv, queue);
6773 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6774 }
6775 
6776 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6777 {
6778 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6779 	struct stmmac_channel *ch = &priv->channel[queue];
6780 	unsigned long flags;
6781 	int ret;
6782 
6783 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6784 	if (ret) {
6785 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6786 		return;
6787 	}
6788 
6789 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6790 	if (ret) {
6791 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6792 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6793 		return;
6794 	}
6795 
6796 	stmmac_reset_tx_queue(priv, queue);
6797 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6798 
6799 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6800 			    tx_q->dma_tx_phy, tx_q->queue_index);
6801 
6802 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6803 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6804 
6805 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6806 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6807 			       tx_q->tx_tail_addr, tx_q->queue_index);
6808 
6809 	stmmac_start_tx_dma(priv, queue);
6810 
6811 	spin_lock_irqsave(&ch->lock, flags);
6812 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6813 	spin_unlock_irqrestore(&ch->lock, flags);
6814 }
6815 
6816 void stmmac_xdp_release(struct net_device *dev)
6817 {
6818 	struct stmmac_priv *priv = netdev_priv(dev);
6819 	u32 chan;
6820 
6821 	/* Ensure tx function is not running */
6822 	netif_tx_disable(dev);
6823 
6824 	/* Disable NAPI process */
6825 	stmmac_disable_all_queues(priv);
6826 
6827 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6828 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6829 
6830 	/* Free the IRQ lines */
6831 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6832 
6833 	/* Stop TX/RX DMA channels */
6834 	stmmac_stop_all_dma(priv);
6835 
6836 	/* Release and free the Rx/Tx resources */
6837 	free_dma_desc_resources(priv, &priv->dma_conf);
6838 
6839 	/* Disable the MAC Rx/Tx */
6840 	stmmac_mac_set(priv, priv->ioaddr, false);
6841 
6842 	/* set trans_start so we don't get spurious
6843 	 * watchdogs during reset
6844 	 */
6845 	netif_trans_update(dev);
6846 	netif_carrier_off(dev);
6847 }
6848 
6849 int stmmac_xdp_open(struct net_device *dev)
6850 {
6851 	struct stmmac_priv *priv = netdev_priv(dev);
6852 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6853 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6854 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6855 	struct stmmac_rx_queue *rx_q;
6856 	struct stmmac_tx_queue *tx_q;
6857 	u32 buf_size;
6858 	bool sph_en;
6859 	u32 chan;
6860 	int ret;
6861 
6862 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6863 	if (ret < 0) {
6864 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6865 			   __func__);
6866 		goto dma_desc_error;
6867 	}
6868 
6869 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6870 	if (ret < 0) {
6871 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6872 			   __func__);
6873 		goto init_error;
6874 	}
6875 
6876 	stmmac_reset_queues_param(priv);
6877 
6878 	/* DMA CSR Channel configuration */
6879 	for (chan = 0; chan < dma_csr_ch; chan++) {
6880 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6881 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6882 	}
6883 
6884 	/* Adjust Split header */
6885 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6886 
6887 	/* DMA RX Channel Configuration */
6888 	for (chan = 0; chan < rx_cnt; chan++) {
6889 		rx_q = &priv->dma_conf.rx_queue[chan];
6890 
6891 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6892 				    rx_q->dma_rx_phy, chan);
6893 
6894 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6895 				     (rx_q->buf_alloc_num *
6896 				      sizeof(struct dma_desc));
6897 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6898 				       rx_q->rx_tail_addr, chan);
6899 
6900 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6901 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6902 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6903 					      buf_size,
6904 					      rx_q->queue_index);
6905 		} else {
6906 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6907 					      priv->dma_conf.dma_buf_sz,
6908 					      rx_q->queue_index);
6909 		}
6910 
6911 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6912 	}
6913 
6914 	/* DMA TX Channel Configuration */
6915 	for (chan = 0; chan < tx_cnt; chan++) {
6916 		tx_q = &priv->dma_conf.tx_queue[chan];
6917 
6918 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6919 				    tx_q->dma_tx_phy, chan);
6920 
6921 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6922 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6923 				       tx_q->tx_tail_addr, chan);
6924 
6925 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6926 		tx_q->txtimer.function = stmmac_tx_timer;
6927 	}
6928 
6929 	/* Enable the MAC Rx/Tx */
6930 	stmmac_mac_set(priv, priv->ioaddr, true);
6931 
6932 	/* Start Rx & Tx DMA Channels */
6933 	stmmac_start_all_dma(priv);
6934 
6935 	ret = stmmac_request_irq(dev);
6936 	if (ret)
6937 		goto irq_error;
6938 
6939 	/* Enable NAPI process*/
6940 	stmmac_enable_all_queues(priv);
6941 	netif_carrier_on(dev);
6942 	netif_tx_start_all_queues(dev);
6943 	stmmac_enable_all_dma_irq(priv);
6944 
6945 	return 0;
6946 
6947 irq_error:
6948 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6949 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6950 
6951 	stmmac_hw_teardown(dev);
6952 init_error:
6953 	free_dma_desc_resources(priv, &priv->dma_conf);
6954 dma_desc_error:
6955 	return ret;
6956 }
6957 
6958 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6959 {
6960 	struct stmmac_priv *priv = netdev_priv(dev);
6961 	struct stmmac_rx_queue *rx_q;
6962 	struct stmmac_tx_queue *tx_q;
6963 	struct stmmac_channel *ch;
6964 
6965 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6966 	    !netif_carrier_ok(priv->dev))
6967 		return -ENETDOWN;
6968 
6969 	if (!stmmac_xdp_is_enabled(priv))
6970 		return -EINVAL;
6971 
6972 	if (queue >= priv->plat->rx_queues_to_use ||
6973 	    queue >= priv->plat->tx_queues_to_use)
6974 		return -EINVAL;
6975 
6976 	rx_q = &priv->dma_conf.rx_queue[queue];
6977 	tx_q = &priv->dma_conf.tx_queue[queue];
6978 	ch = &priv->channel[queue];
6979 
6980 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6981 		return -EINVAL;
6982 
6983 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6984 		/* EQoS does not have per-DMA channel SW interrupt,
6985 		 * so we schedule RX Napi straight-away.
6986 		 */
6987 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6988 			__napi_schedule(&ch->rxtx_napi);
6989 	}
6990 
6991 	return 0;
6992 }
6993 
6994 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6995 {
6996 	struct stmmac_priv *priv = netdev_priv(dev);
6997 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6998 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6999 	unsigned int start;
7000 	int q;
7001 
7002 	for (q = 0; q < tx_cnt; q++) {
7003 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7004 		u64 tx_packets;
7005 		u64 tx_bytes;
7006 
7007 		do {
7008 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7009 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7010 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7011 		do {
7012 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7013 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7014 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7015 
7016 		stats->tx_packets += tx_packets;
7017 		stats->tx_bytes += tx_bytes;
7018 	}
7019 
7020 	for (q = 0; q < rx_cnt; q++) {
7021 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7022 		u64 rx_packets;
7023 		u64 rx_bytes;
7024 
7025 		do {
7026 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7027 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7028 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7029 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7030 
7031 		stats->rx_packets += rx_packets;
7032 		stats->rx_bytes += rx_bytes;
7033 	}
7034 
7035 	stats->rx_dropped = priv->xstats.rx_dropped;
7036 	stats->rx_errors = priv->xstats.rx_errors;
7037 	stats->tx_dropped = priv->xstats.tx_dropped;
7038 	stats->tx_errors = priv->xstats.tx_errors;
7039 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7040 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7041 	stats->rx_length_errors = priv->xstats.rx_length;
7042 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7043 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7044 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7045 }
7046 
7047 static const struct net_device_ops stmmac_netdev_ops = {
7048 	.ndo_open = stmmac_open,
7049 	.ndo_start_xmit = stmmac_xmit,
7050 	.ndo_stop = stmmac_release,
7051 	.ndo_change_mtu = stmmac_change_mtu,
7052 	.ndo_fix_features = stmmac_fix_features,
7053 	.ndo_set_features = stmmac_set_features,
7054 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7055 	.ndo_tx_timeout = stmmac_tx_timeout,
7056 	.ndo_eth_ioctl = stmmac_ioctl,
7057 	.ndo_get_stats64 = stmmac_get_stats64,
7058 	.ndo_setup_tc = stmmac_setup_tc,
7059 	.ndo_select_queue = stmmac_select_queue,
7060 	.ndo_set_mac_address = stmmac_set_mac_address,
7061 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7062 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7063 	.ndo_bpf = stmmac_bpf,
7064 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7065 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7066 };
7067 
7068 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7069 {
7070 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7071 		return;
7072 	if (test_bit(STMMAC_DOWN, &priv->state))
7073 		return;
7074 
7075 	netdev_err(priv->dev, "Reset adapter.\n");
7076 
7077 	rtnl_lock();
7078 	netif_trans_update(priv->dev);
7079 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7080 		usleep_range(1000, 2000);
7081 
7082 	set_bit(STMMAC_DOWN, &priv->state);
7083 	dev_close(priv->dev);
7084 	dev_open(priv->dev, NULL);
7085 	clear_bit(STMMAC_DOWN, &priv->state);
7086 	clear_bit(STMMAC_RESETING, &priv->state);
7087 	rtnl_unlock();
7088 }
7089 
7090 static void stmmac_service_task(struct work_struct *work)
7091 {
7092 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7093 			service_task);
7094 
7095 	stmmac_reset_subtask(priv);
7096 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7097 }
7098 
7099 /**
7100  *  stmmac_hw_init - Init the MAC device
7101  *  @priv: driver private structure
7102  *  Description: this function is to configure the MAC device according to
7103  *  some platform parameters or the HW capability register. It prepares the
7104  *  driver to use either ring or chain modes and to setup either enhanced or
7105  *  normal descriptors.
7106  */
7107 static int stmmac_hw_init(struct stmmac_priv *priv)
7108 {
7109 	int ret;
7110 
7111 	/* dwmac-sun8i only work in chain mode */
7112 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7113 		chain_mode = 1;
7114 	priv->chain_mode = chain_mode;
7115 
7116 	/* Initialize HW Interface */
7117 	ret = stmmac_hwif_init(priv);
7118 	if (ret)
7119 		return ret;
7120 
7121 	/* Get the HW capability (new GMAC newer than 3.50a) */
7122 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7123 	if (priv->hw_cap_support) {
7124 		dev_info(priv->device, "DMA HW capability register supported\n");
7125 
7126 		/* We can override some gmac/dma configuration fields: e.g.
7127 		 * enh_desc, tx_coe (e.g. that are passed through the
7128 		 * platform) with the values from the HW capability
7129 		 * register (if supported).
7130 		 */
7131 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7132 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7133 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7134 		priv->hw->pmt = priv->plat->pmt;
7135 		if (priv->dma_cap.hash_tb_sz) {
7136 			priv->hw->multicast_filter_bins =
7137 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7138 			priv->hw->mcast_bits_log2 =
7139 					ilog2(priv->hw->multicast_filter_bins);
7140 		}
7141 
7142 		/* TXCOE doesn't work in thresh DMA mode */
7143 		if (priv->plat->force_thresh_dma_mode)
7144 			priv->plat->tx_coe = 0;
7145 		else
7146 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7147 
7148 		/* In case of GMAC4 rx_coe is from HW cap register. */
7149 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7150 
7151 		if (priv->dma_cap.rx_coe_type2)
7152 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7153 		else if (priv->dma_cap.rx_coe_type1)
7154 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7155 
7156 	} else {
7157 		dev_info(priv->device, "No HW DMA feature register supported\n");
7158 	}
7159 
7160 	if (priv->plat->rx_coe) {
7161 		priv->hw->rx_csum = priv->plat->rx_coe;
7162 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7163 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7164 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7165 	}
7166 	if (priv->plat->tx_coe)
7167 		dev_info(priv->device, "TX Checksum insertion supported\n");
7168 
7169 	if (priv->plat->pmt) {
7170 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7171 		device_set_wakeup_capable(priv->device, 1);
7172 	}
7173 
7174 	if (priv->dma_cap.tsoen)
7175 		dev_info(priv->device, "TSO supported\n");
7176 
7177 	priv->hw->vlan_fail_q_en =
7178 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7179 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7180 
7181 	/* Run HW quirks, if any */
7182 	if (priv->hwif_quirks) {
7183 		ret = priv->hwif_quirks(priv);
7184 		if (ret)
7185 			return ret;
7186 	}
7187 
7188 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7189 	 * In some case, for example on bugged HW this feature
7190 	 * has to be disable and this can be done by passing the
7191 	 * riwt_off field from the platform.
7192 	 */
7193 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7194 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7195 		priv->use_riwt = 1;
7196 		dev_info(priv->device,
7197 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7198 	}
7199 
7200 	return 0;
7201 }
7202 
7203 static void stmmac_napi_add(struct net_device *dev)
7204 {
7205 	struct stmmac_priv *priv = netdev_priv(dev);
7206 	u32 queue, maxq;
7207 
7208 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7209 
7210 	for (queue = 0; queue < maxq; queue++) {
7211 		struct stmmac_channel *ch = &priv->channel[queue];
7212 
7213 		ch->priv_data = priv;
7214 		ch->index = queue;
7215 		spin_lock_init(&ch->lock);
7216 
7217 		if (queue < priv->plat->rx_queues_to_use) {
7218 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7219 		}
7220 		if (queue < priv->plat->tx_queues_to_use) {
7221 			netif_napi_add_tx(dev, &ch->tx_napi,
7222 					  stmmac_napi_poll_tx);
7223 		}
7224 		if (queue < priv->plat->rx_queues_to_use &&
7225 		    queue < priv->plat->tx_queues_to_use) {
7226 			netif_napi_add(dev, &ch->rxtx_napi,
7227 				       stmmac_napi_poll_rxtx);
7228 		}
7229 	}
7230 }
7231 
7232 static void stmmac_napi_del(struct net_device *dev)
7233 {
7234 	struct stmmac_priv *priv = netdev_priv(dev);
7235 	u32 queue, maxq;
7236 
7237 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7238 
7239 	for (queue = 0; queue < maxq; queue++) {
7240 		struct stmmac_channel *ch = &priv->channel[queue];
7241 
7242 		if (queue < priv->plat->rx_queues_to_use)
7243 			netif_napi_del(&ch->rx_napi);
7244 		if (queue < priv->plat->tx_queues_to_use)
7245 			netif_napi_del(&ch->tx_napi);
7246 		if (queue < priv->plat->rx_queues_to_use &&
7247 		    queue < priv->plat->tx_queues_to_use) {
7248 			netif_napi_del(&ch->rxtx_napi);
7249 		}
7250 	}
7251 }
7252 
7253 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7254 {
7255 	struct stmmac_priv *priv = netdev_priv(dev);
7256 	int ret = 0, i;
7257 
7258 	if (netif_running(dev))
7259 		stmmac_release(dev);
7260 
7261 	stmmac_napi_del(dev);
7262 
7263 	priv->plat->rx_queues_to_use = rx_cnt;
7264 	priv->plat->tx_queues_to_use = tx_cnt;
7265 	if (!netif_is_rxfh_configured(dev))
7266 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7267 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7268 									rx_cnt);
7269 
7270 	stmmac_napi_add(dev);
7271 
7272 	if (netif_running(dev))
7273 		ret = stmmac_open(dev);
7274 
7275 	return ret;
7276 }
7277 
7278 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7279 {
7280 	struct stmmac_priv *priv = netdev_priv(dev);
7281 	int ret = 0;
7282 
7283 	if (netif_running(dev))
7284 		stmmac_release(dev);
7285 
7286 	priv->dma_conf.dma_rx_size = rx_size;
7287 	priv->dma_conf.dma_tx_size = tx_size;
7288 
7289 	if (netif_running(dev))
7290 		ret = stmmac_open(dev);
7291 
7292 	return ret;
7293 }
7294 
7295 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7296 {
7297 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7298 	struct dma_desc *desc_contains_ts = ctx->desc;
7299 	struct stmmac_priv *priv = ctx->priv;
7300 	struct dma_desc *ndesc = ctx->ndesc;
7301 	struct dma_desc *desc = ctx->desc;
7302 	u64 ns = 0;
7303 
7304 	if (!priv->hwts_rx_en)
7305 		return -ENODATA;
7306 
7307 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7308 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7309 		desc_contains_ts = ndesc;
7310 
7311 	/* Check if timestamp is available */
7312 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7313 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7314 		ns -= priv->plat->cdc_error_adj;
7315 		*timestamp = ns_to_ktime(ns);
7316 		return 0;
7317 	}
7318 
7319 	return -ENODATA;
7320 }
7321 
7322 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7323 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7324 };
7325 
7326 /**
7327  * stmmac_dvr_probe
7328  * @device: device pointer
7329  * @plat_dat: platform data pointer
7330  * @res: stmmac resource pointer
7331  * Description: this is the main probe function used to
7332  * call the alloc_etherdev, allocate the priv structure.
7333  * Return:
7334  * returns 0 on success, otherwise errno.
7335  */
7336 int stmmac_dvr_probe(struct device *device,
7337 		     struct plat_stmmacenet_data *plat_dat,
7338 		     struct stmmac_resources *res)
7339 {
7340 	struct net_device *ndev = NULL;
7341 	struct stmmac_priv *priv;
7342 	u32 rxq;
7343 	int i, ret = 0;
7344 
7345 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7346 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7347 	if (!ndev)
7348 		return -ENOMEM;
7349 
7350 	SET_NETDEV_DEV(ndev, device);
7351 
7352 	priv = netdev_priv(ndev);
7353 	priv->device = device;
7354 	priv->dev = ndev;
7355 
7356 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7357 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7358 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7359 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7360 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7361 	}
7362 
7363 	priv->xstats.pcpu_stats =
7364 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7365 	if (!priv->xstats.pcpu_stats)
7366 		return -ENOMEM;
7367 
7368 	stmmac_set_ethtool_ops(ndev);
7369 	priv->pause = pause;
7370 	priv->plat = plat_dat;
7371 	priv->ioaddr = res->addr;
7372 	priv->dev->base_addr = (unsigned long)res->addr;
7373 	priv->plat->dma_cfg->multi_msi_en =
7374 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7375 
7376 	priv->dev->irq = res->irq;
7377 	priv->wol_irq = res->wol_irq;
7378 	priv->lpi_irq = res->lpi_irq;
7379 	priv->sfty_irq = res->sfty_irq;
7380 	priv->sfty_ce_irq = res->sfty_ce_irq;
7381 	priv->sfty_ue_irq = res->sfty_ue_irq;
7382 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7383 		priv->rx_irq[i] = res->rx_irq[i];
7384 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7385 		priv->tx_irq[i] = res->tx_irq[i];
7386 
7387 	if (!is_zero_ether_addr(res->mac))
7388 		eth_hw_addr_set(priv->dev, res->mac);
7389 
7390 	dev_set_drvdata(device, priv->dev);
7391 
7392 	/* Verify driver arguments */
7393 	stmmac_verify_args();
7394 
7395 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7396 	if (!priv->af_xdp_zc_qps)
7397 		return -ENOMEM;
7398 
7399 	/* Allocate workqueue */
7400 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7401 	if (!priv->wq) {
7402 		dev_err(priv->device, "failed to create workqueue\n");
7403 		ret = -ENOMEM;
7404 		goto error_wq_init;
7405 	}
7406 
7407 	INIT_WORK(&priv->service_task, stmmac_service_task);
7408 
7409 	/* Override with kernel parameters if supplied XXX CRS XXX
7410 	 * this needs to have multiple instances
7411 	 */
7412 	if ((phyaddr >= 0) && (phyaddr <= 31))
7413 		priv->plat->phy_addr = phyaddr;
7414 
7415 	if (priv->plat->stmmac_rst) {
7416 		ret = reset_control_assert(priv->plat->stmmac_rst);
7417 		reset_control_deassert(priv->plat->stmmac_rst);
7418 		/* Some reset controllers have only reset callback instead of
7419 		 * assert + deassert callbacks pair.
7420 		 */
7421 		if (ret == -ENOTSUPP)
7422 			reset_control_reset(priv->plat->stmmac_rst);
7423 	}
7424 
7425 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7426 	if (ret == -ENOTSUPP)
7427 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7428 			ERR_PTR(ret));
7429 
7430 	/* Wait a bit for the reset to take effect */
7431 	udelay(10);
7432 
7433 	/* Init MAC and get the capabilities */
7434 	ret = stmmac_hw_init(priv);
7435 	if (ret)
7436 		goto error_hw_init;
7437 
7438 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7439 	 */
7440 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7441 		priv->plat->dma_cfg->dche = false;
7442 
7443 	stmmac_check_ether_addr(priv);
7444 
7445 	ndev->netdev_ops = &stmmac_netdev_ops;
7446 
7447 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7448 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7449 
7450 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7451 			    NETIF_F_RXCSUM;
7452 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7453 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7454 
7455 	ret = stmmac_tc_init(priv, priv);
7456 	if (!ret) {
7457 		ndev->hw_features |= NETIF_F_HW_TC;
7458 	}
7459 
7460 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7461 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7462 		if (priv->plat->has_gmac4)
7463 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7464 		priv->tso = true;
7465 		dev_info(priv->device, "TSO feature enabled\n");
7466 	}
7467 
7468 	if (priv->dma_cap.sphen &&
7469 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7470 		ndev->hw_features |= NETIF_F_GRO;
7471 		priv->sph_cap = true;
7472 		priv->sph = priv->sph_cap;
7473 		dev_info(priv->device, "SPH feature enabled\n");
7474 	}
7475 
7476 	/* Ideally our host DMA address width is the same as for the
7477 	 * device. However, it may differ and then we have to use our
7478 	 * host DMA width for allocation and the device DMA width for
7479 	 * register handling.
7480 	 */
7481 	if (priv->plat->host_dma_width)
7482 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7483 	else
7484 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7485 
7486 	if (priv->dma_cap.host_dma_width) {
7487 		ret = dma_set_mask_and_coherent(device,
7488 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7489 		if (!ret) {
7490 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7491 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7492 
7493 			/*
7494 			 * If more than 32 bits can be addressed, make sure to
7495 			 * enable enhanced addressing mode.
7496 			 */
7497 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7498 				priv->plat->dma_cfg->eame = true;
7499 		} else {
7500 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7501 			if (ret) {
7502 				dev_err(priv->device, "Failed to set DMA Mask\n");
7503 				goto error_hw_init;
7504 			}
7505 
7506 			priv->dma_cap.host_dma_width = 32;
7507 		}
7508 	}
7509 
7510 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7511 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7512 #ifdef STMMAC_VLAN_TAG_USED
7513 	/* Both mac100 and gmac support receive VLAN tag detection */
7514 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7515 	if (priv->plat->has_gmac4) {
7516 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7517 		priv->hw->hw_vlan_en = true;
7518 	}
7519 	if (priv->dma_cap.vlhash) {
7520 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7521 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7522 	}
7523 	if (priv->dma_cap.vlins) {
7524 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7525 		if (priv->dma_cap.dvlan)
7526 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7527 	}
7528 #endif
7529 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7530 
7531 	priv->xstats.threshold = tc;
7532 
7533 	/* Initialize RSS */
7534 	rxq = priv->plat->rx_queues_to_use;
7535 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7536 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7537 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7538 
7539 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7540 		ndev->features |= NETIF_F_RXHASH;
7541 
7542 	ndev->vlan_features |= ndev->features;
7543 
7544 	/* MTU range: 46 - hw-specific max */
7545 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7546 	if (priv->plat->has_xgmac)
7547 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7548 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7549 		ndev->max_mtu = JUMBO_LEN;
7550 	else
7551 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7552 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7553 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7554 	 */
7555 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7556 	    (priv->plat->maxmtu >= ndev->min_mtu))
7557 		ndev->max_mtu = priv->plat->maxmtu;
7558 	else if (priv->plat->maxmtu < ndev->min_mtu)
7559 		dev_warn(priv->device,
7560 			 "%s: warning: maxmtu having invalid value (%d)\n",
7561 			 __func__, priv->plat->maxmtu);
7562 
7563 	if (flow_ctrl)
7564 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7565 
7566 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7567 
7568 	/* Setup channels NAPI */
7569 	stmmac_napi_add(ndev);
7570 
7571 	mutex_init(&priv->lock);
7572 
7573 	stmmac_fpe_init(priv);
7574 
7575 	/* If a specific clk_csr value is passed from the platform
7576 	 * this means that the CSR Clock Range selection cannot be
7577 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7578 	 * set the MDC clock dynamically according to the csr actual
7579 	 * clock input.
7580 	 */
7581 	if (priv->plat->clk_csr >= 0)
7582 		priv->clk_csr = priv->plat->clk_csr;
7583 	else
7584 		stmmac_clk_csr_set(priv);
7585 
7586 	stmmac_check_pcs_mode(priv);
7587 
7588 	pm_runtime_get_noresume(device);
7589 	pm_runtime_set_active(device);
7590 	if (!pm_runtime_enabled(device))
7591 		pm_runtime_enable(device);
7592 
7593 	ret = stmmac_mdio_register(ndev);
7594 	if (ret < 0) {
7595 		dev_err_probe(priv->device, ret,
7596 			      "MDIO bus (id: %d) registration failed\n",
7597 			      priv->plat->bus_id);
7598 		goto error_mdio_register;
7599 	}
7600 
7601 	if (priv->plat->speed_mode_2500)
7602 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7603 
7604 	ret = stmmac_pcs_setup(ndev);
7605 	if (ret)
7606 		goto error_pcs_setup;
7607 
7608 	ret = stmmac_phy_setup(priv);
7609 	if (ret) {
7610 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7611 		goto error_phy_setup;
7612 	}
7613 
7614 	ret = register_netdev(ndev);
7615 	if (ret) {
7616 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7617 			__func__, ret);
7618 		goto error_netdev_register;
7619 	}
7620 
7621 #ifdef CONFIG_DEBUG_FS
7622 	stmmac_init_fs(ndev);
7623 #endif
7624 
7625 	if (priv->plat->dump_debug_regs)
7626 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7627 
7628 	/* Let pm_runtime_put() disable the clocks.
7629 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7630 	 */
7631 	pm_runtime_put(device);
7632 
7633 	return ret;
7634 
7635 error_netdev_register:
7636 	phylink_destroy(priv->phylink);
7637 error_phy_setup:
7638 	stmmac_pcs_clean(ndev);
7639 error_pcs_setup:
7640 	stmmac_mdio_unregister(ndev);
7641 error_mdio_register:
7642 	stmmac_napi_del(ndev);
7643 error_hw_init:
7644 	destroy_workqueue(priv->wq);
7645 error_wq_init:
7646 	bitmap_free(priv->af_xdp_zc_qps);
7647 
7648 	return ret;
7649 }
7650 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7651 
7652 /**
7653  * stmmac_dvr_remove
7654  * @dev: device pointer
7655  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7656  * changes the link status, releases the DMA descriptor rings.
7657  */
7658 void stmmac_dvr_remove(struct device *dev)
7659 {
7660 	struct net_device *ndev = dev_get_drvdata(dev);
7661 	struct stmmac_priv *priv = netdev_priv(ndev);
7662 
7663 	netdev_info(priv->dev, "%s: removing driver", __func__);
7664 
7665 	pm_runtime_get_sync(dev);
7666 
7667 	stmmac_stop_all_dma(priv);
7668 	stmmac_mac_set(priv, priv->ioaddr, false);
7669 	unregister_netdev(ndev);
7670 
7671 #ifdef CONFIG_DEBUG_FS
7672 	stmmac_exit_fs(ndev);
7673 #endif
7674 	phylink_destroy(priv->phylink);
7675 	if (priv->plat->stmmac_rst)
7676 		reset_control_assert(priv->plat->stmmac_rst);
7677 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7678 
7679 	stmmac_pcs_clean(ndev);
7680 	stmmac_mdio_unregister(ndev);
7681 
7682 	destroy_workqueue(priv->wq);
7683 	mutex_destroy(&priv->lock);
7684 	bitmap_free(priv->af_xdp_zc_qps);
7685 
7686 	pm_runtime_disable(dev);
7687 	pm_runtime_put_noidle(dev);
7688 }
7689 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7690 
7691 /**
7692  * stmmac_suspend - suspend callback
7693  * @dev: device pointer
7694  * Description: this is the function to suspend the device and it is called
7695  * by the platform driver to stop the network queue, release the resources,
7696  * program the PMT register (for WoL), clean and release driver resources.
7697  */
7698 int stmmac_suspend(struct device *dev)
7699 {
7700 	struct net_device *ndev = dev_get_drvdata(dev);
7701 	struct stmmac_priv *priv = netdev_priv(ndev);
7702 	u32 chan;
7703 
7704 	if (!ndev || !netif_running(ndev))
7705 		return 0;
7706 
7707 	mutex_lock(&priv->lock);
7708 
7709 	netif_device_detach(ndev);
7710 
7711 	stmmac_disable_all_queues(priv);
7712 
7713 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7714 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7715 
7716 	if (priv->eee_enabled) {
7717 		priv->tx_path_in_lpi_mode = false;
7718 		del_timer_sync(&priv->eee_ctrl_timer);
7719 	}
7720 
7721 	/* Stop TX/RX DMA */
7722 	stmmac_stop_all_dma(priv);
7723 
7724 	if (priv->plat->serdes_powerdown)
7725 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7726 
7727 	/* Enable Power down mode by programming the PMT regs */
7728 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7729 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7730 		priv->irq_wake = 1;
7731 	} else {
7732 		stmmac_mac_set(priv, priv->ioaddr, false);
7733 		pinctrl_pm_select_sleep_state(priv->device);
7734 	}
7735 
7736 	mutex_unlock(&priv->lock);
7737 
7738 	rtnl_lock();
7739 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7740 		phylink_suspend(priv->phylink, true);
7741 	} else {
7742 		if (device_may_wakeup(priv->device))
7743 			phylink_speed_down(priv->phylink, false);
7744 		phylink_suspend(priv->phylink, false);
7745 	}
7746 	rtnl_unlock();
7747 
7748 	if (stmmac_fpe_supported(priv))
7749 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7750 
7751 	priv->speed = SPEED_UNKNOWN;
7752 	return 0;
7753 }
7754 EXPORT_SYMBOL_GPL(stmmac_suspend);
7755 
7756 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7757 {
7758 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7759 
7760 	rx_q->cur_rx = 0;
7761 	rx_q->dirty_rx = 0;
7762 }
7763 
7764 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7765 {
7766 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7767 
7768 	tx_q->cur_tx = 0;
7769 	tx_q->dirty_tx = 0;
7770 	tx_q->mss = 0;
7771 
7772 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7773 }
7774 
7775 /**
7776  * stmmac_reset_queues_param - reset queue parameters
7777  * @priv: device pointer
7778  */
7779 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7780 {
7781 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7782 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7783 	u32 queue;
7784 
7785 	for (queue = 0; queue < rx_cnt; queue++)
7786 		stmmac_reset_rx_queue(priv, queue);
7787 
7788 	for (queue = 0; queue < tx_cnt; queue++)
7789 		stmmac_reset_tx_queue(priv, queue);
7790 }
7791 
7792 /**
7793  * stmmac_resume - resume callback
7794  * @dev: device pointer
7795  * Description: when resume this function is invoked to setup the DMA and CORE
7796  * in a usable state.
7797  */
7798 int stmmac_resume(struct device *dev)
7799 {
7800 	struct net_device *ndev = dev_get_drvdata(dev);
7801 	struct stmmac_priv *priv = netdev_priv(ndev);
7802 	int ret;
7803 
7804 	if (!netif_running(ndev))
7805 		return 0;
7806 
7807 	/* Power Down bit, into the PM register, is cleared
7808 	 * automatically as soon as a magic packet or a Wake-up frame
7809 	 * is received. Anyway, it's better to manually clear
7810 	 * this bit because it can generate problems while resuming
7811 	 * from another devices (e.g. serial console).
7812 	 */
7813 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7814 		mutex_lock(&priv->lock);
7815 		stmmac_pmt(priv, priv->hw, 0);
7816 		mutex_unlock(&priv->lock);
7817 		priv->irq_wake = 0;
7818 	} else {
7819 		pinctrl_pm_select_default_state(priv->device);
7820 		/* reset the phy so that it's ready */
7821 		if (priv->mii)
7822 			stmmac_mdio_reset(priv->mii);
7823 	}
7824 
7825 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7826 	    priv->plat->serdes_powerup) {
7827 		ret = priv->plat->serdes_powerup(ndev,
7828 						 priv->plat->bsp_priv);
7829 
7830 		if (ret < 0)
7831 			return ret;
7832 	}
7833 
7834 	rtnl_lock();
7835 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7836 		phylink_resume(priv->phylink);
7837 	} else {
7838 		phylink_resume(priv->phylink);
7839 		if (device_may_wakeup(priv->device))
7840 			phylink_speed_up(priv->phylink);
7841 	}
7842 	rtnl_unlock();
7843 
7844 	rtnl_lock();
7845 	mutex_lock(&priv->lock);
7846 
7847 	stmmac_reset_queues_param(priv);
7848 
7849 	stmmac_free_tx_skbufs(priv);
7850 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7851 
7852 	stmmac_hw_setup(ndev, false);
7853 	stmmac_init_coalesce(priv);
7854 	stmmac_set_rx_mode(ndev);
7855 
7856 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7857 
7858 	stmmac_enable_all_queues(priv);
7859 	stmmac_enable_all_dma_irq(priv);
7860 
7861 	mutex_unlock(&priv->lock);
7862 	rtnl_unlock();
7863 
7864 	netif_device_attach(ndev);
7865 
7866 	return 0;
7867 }
7868 EXPORT_SYMBOL_GPL(stmmac_resume);
7869 
7870 #ifndef MODULE
7871 static int __init stmmac_cmdline_opt(char *str)
7872 {
7873 	char *opt;
7874 
7875 	if (!str || !*str)
7876 		return 1;
7877 	while ((opt = strsep(&str, ",")) != NULL) {
7878 		if (!strncmp(opt, "debug:", 6)) {
7879 			if (kstrtoint(opt + 6, 0, &debug))
7880 				goto err;
7881 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7882 			if (kstrtoint(opt + 8, 0, &phyaddr))
7883 				goto err;
7884 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7885 			if (kstrtoint(opt + 7, 0, &buf_sz))
7886 				goto err;
7887 		} else if (!strncmp(opt, "tc:", 3)) {
7888 			if (kstrtoint(opt + 3, 0, &tc))
7889 				goto err;
7890 		} else if (!strncmp(opt, "watchdog:", 9)) {
7891 			if (kstrtoint(opt + 9, 0, &watchdog))
7892 				goto err;
7893 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7894 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7895 				goto err;
7896 		} else if (!strncmp(opt, "pause:", 6)) {
7897 			if (kstrtoint(opt + 6, 0, &pause))
7898 				goto err;
7899 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7900 			if (kstrtoint(opt + 10, 0, &eee_timer))
7901 				goto err;
7902 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7903 			if (kstrtoint(opt + 11, 0, &chain_mode))
7904 				goto err;
7905 		}
7906 	}
7907 	return 1;
7908 
7909 err:
7910 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7911 	return 1;
7912 }
7913 
7914 __setup("stmmaceth=", stmmac_cmdline_opt);
7915 #endif /* MODULE */
7916 
7917 static int __init stmmac_init(void)
7918 {
7919 #ifdef CONFIG_DEBUG_FS
7920 	/* Create debugfs main directory if it doesn't exist yet */
7921 	if (!stmmac_fs_dir)
7922 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7923 	register_netdevice_notifier(&stmmac_notifier);
7924 #endif
7925 
7926 	return 0;
7927 }
7928 
7929 static void __exit stmmac_exit(void)
7930 {
7931 #ifdef CONFIG_DEBUG_FS
7932 	unregister_netdevice_notifier(&stmmac_notifier);
7933 	debugfs_remove_recursive(stmmac_fs_dir);
7934 #endif
7935 }
7936 
7937 module_init(stmmac_init)
7938 module_exit(stmmac_exit)
7939 
7940 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7941 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7942 MODULE_LICENSE("GPL");
7943