xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 1cc3462159babb69c84c39cb1b4e262aef3ea325)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54 
55 /* As long as the interface is active, we keep the timestamping counter enabled
56  * with fine resolution and binary rollover. This avoids non-monotonic behavior
57  * (clock jumps) when changing timestamping settings at runtime.
58  */
59 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 				 PTP_TCR_TSCTRLSSR)
61 
62 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
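
/* Worked example for STMMAC_ALIGN() above (illustrative numbers only): with
 * 64-byte cachelines, STMMAC_ALIGN(1000) = ALIGN(ALIGN(1000, 64), 16) = 1024.
 */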
64 
65 /* Module parameters */
66 #define TX_TIMEO	5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70 
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74 
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78 
79 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = 0xdead;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 /* This is unused */
105 #define	DEFAULT_BUFSIZE	1536
106 static int buf_sz = DEFAULT_BUFSIZE;
107 module_param(buf_sz, int, 0644);
108 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
109 
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
112 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113 
114 #define STMMAC_DEFAULT_LPI_TIMER	1000
115 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, uint, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119 
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
121  * but allows the user to force the use of chain mode instead of the ring.
122  */
123 static int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
126 
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
139 					  u32 rxmode, u32 chan);
140 
141 #ifdef CONFIG_DEBUG_FS
142 static const struct net_device_ops stmmac_netdev_ops;
143 static void stmmac_init_fs(struct net_device *dev);
144 static void stmmac_exit_fs(struct net_device *dev);
145 #endif
146 
147 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
148 
149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
150 {
151 	int ret = 0;
152 
153 	if (enabled) {
154 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
155 		if (ret)
156 			return ret;
157 		ret = clk_prepare_enable(priv->plat->pclk);
158 		if (ret) {
159 			clk_disable_unprepare(priv->plat->stmmac_clk);
160 			return ret;
161 		}
162 		if (priv->plat->clks_config) {
163 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
164 			if (ret) {
165 				clk_disable_unprepare(priv->plat->stmmac_clk);
166 				clk_disable_unprepare(priv->plat->pclk);
167 				return ret;
168 			}
169 		}
170 	} else {
171 		clk_disable_unprepare(priv->plat->stmmac_clk);
172 		clk_disable_unprepare(priv->plat->pclk);
173 		if (priv->plat->clks_config)
174 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
175 	}
176 
177 	return ret;
178 }
179 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
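
/* Illustrative usage sketch, not a call site in this file: a caller (e.g. a
 * glue driver or a suspend/resume path) would typically bracket register
 * access with the helper above, roughly:
 *
 *	ret = stmmac_bus_clks_config(priv, true);
 *	if (ret)
 *		return ret;
 *	...access the MAC registers...
 *	stmmac_bus_clks_config(priv, false);
 */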
180 
181 /**
182  * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
183  * @bsp_priv: BSP private data structure (unused)
184  * @clk_tx_i: the transmit clock
185  * @interface: the selected interface mode
186  * @speed: the speed that the MAC will be operating at
187  *
188  * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
189  * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
190  * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
191  * the plat_data->set_clk_tx_rate method directly, call it via their own
192  * implementation, or implement their own method should they have more
193  * complex requirements. It is intended to only be used in this method.
194  *
195  * plat_data->clk_tx_i must be filled in.
196  */
197 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
198 			   phy_interface_t interface, int speed)
199 {
200 	long rate = rgmii_clock(speed);
201 
202 	/* Silently ignore unsupported speeds as rgmii_clock() only
203 	 * supports 10, 100 and 1000Mbps. We do not want to spit
204 	 * errors for 2500 and higher speeds here.
205 	 */
206 	if (rate < 0)
207 		return 0;
208 
209 	return clk_set_rate(clk_tx_i, rate);
210 }
211 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
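
/* Illustrative sketch of the platform hookup described in the kernel-doc
 * above; "plat_dat", "pdev" and the "tx" clock name are hypothetical and not
 * taken from a real glue driver:
 *
 *	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
 *	if (IS_ERR(plat_dat->clk_tx_i))
 *		return PTR_ERR(plat_dat->clk_tx_i);
 *	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
 *
 * stmmac_mac_link_up() below then invokes the method with the negotiated
 * speed, which selects 2.5/25/125 MHz via rgmii_clock().
 */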
212 
213 /**
214  * stmmac_verify_args - verify the driver parameters.
215  * Description: it checks the driver parameters and set a default in case of
216  * errors.
217  */
218 static void stmmac_verify_args(void)
219 {
220 	if (unlikely(watchdog < 0))
221 		watchdog = TX_TIMEO;
222 	if (unlikely((pause < 0) || (pause > 0xffff)))
223 		pause = PAUSE_TIME;
224 
225 	if (flow_ctrl != 0xdead)
226 		pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
227 }
228 
229 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
230 {
231 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
232 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
233 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
234 	u32 queue;
235 
236 	for (queue = 0; queue < maxq; queue++) {
237 		struct stmmac_channel *ch = &priv->channel[queue];
238 
239 		if (stmmac_xdp_is_enabled(priv) &&
240 		    test_bit(queue, priv->af_xdp_zc_qps)) {
241 			napi_disable(&ch->rxtx_napi);
242 			continue;
243 		}
244 
245 		if (queue < rx_queues_cnt)
246 			napi_disable(&ch->rx_napi);
247 		if (queue < tx_queues_cnt)
248 			napi_disable(&ch->tx_napi);
249 	}
250 }
251 
252 /**
253  * stmmac_disable_all_queues - Disable all queues
254  * @priv: driver private structure
255  */
256 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
257 {
258 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
259 	struct stmmac_rx_queue *rx_q;
260 	u32 queue;
261 
262 	/* synchronize_rcu() needed for pending XDP buffers to drain */
263 	for (queue = 0; queue < rx_queues_cnt; queue++) {
264 		rx_q = &priv->dma_conf.rx_queue[queue];
265 		if (rx_q->xsk_pool) {
266 			synchronize_rcu();
267 			break;
268 		}
269 	}
270 
271 	__stmmac_disable_all_queues(priv);
272 }
273 
274 /**
275  * stmmac_enable_all_queues - Enable all queues
276  * @priv: driver private structure
277  */
278 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
279 {
280 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
281 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
282 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
283 	u32 queue;
284 
285 	for (queue = 0; queue < maxq; queue++) {
286 		struct stmmac_channel *ch = &priv->channel[queue];
287 
288 		if (stmmac_xdp_is_enabled(priv) &&
289 		    test_bit(queue, priv->af_xdp_zc_qps)) {
290 			napi_enable(&ch->rxtx_napi);
291 			continue;
292 		}
293 
294 		if (queue < rx_queues_cnt)
295 			napi_enable(&ch->rx_napi);
296 		if (queue < tx_queues_cnt)
297 			napi_enable(&ch->tx_napi);
298 	}
299 }
300 
301 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
302 {
303 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
304 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
305 		queue_work(priv->wq, &priv->service_task);
306 }
307 
308 static void stmmac_global_err(struct stmmac_priv *priv)
309 {
310 	netif_carrier_off(priv->dev);
311 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
312 	stmmac_service_event_schedule(priv);
313 }
314 
315 /**
316  * stmmac_clk_csr_set - dynamically set the MDC clock
317  * @priv: driver private structure
318  * Description: this is to dynamically set the MDC clock according to the csr
319  * clock input.
320  * Note:
321  *	If a specific clk_csr value is passed from the platform
322  *	this means that the CSR Clock Range selection cannot be
323  *	changed at run-time and it is fixed (as reported in the driver
324  *	documentation). Otherwise, the driver will try to set the MDC
325  *	clock dynamically according to the actual clock input.
326  */
327 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
328 {
329 	unsigned long clk_rate;
330 
331 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
332 
333 	/* The platform-provided default clk_csr is assumed valid for all
334 	 * cases except the ones handled below.
335 	 * For clock rates higher than the IEEE 802.3 specified frequency
336 	 * range we cannot estimate the proper divider, since the frequency
337 	 * of clk_csr_i is not known, so the default divider is left
338 	 * unchanged.
339 	 */
340 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
341 		if (clk_rate < CSR_F_35M)
342 			priv->clk_csr = STMMAC_CSR_20_35M;
343 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
344 			priv->clk_csr = STMMAC_CSR_35_60M;
345 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
346 			priv->clk_csr = STMMAC_CSR_60_100M;
347 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
348 			priv->clk_csr = STMMAC_CSR_100_150M;
349 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
350 			priv->clk_csr = STMMAC_CSR_150_250M;
351 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
352 			priv->clk_csr = STMMAC_CSR_250_300M;
353 		else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
354 			priv->clk_csr = STMMAC_CSR_300_500M;
355 		else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
356 			priv->clk_csr = STMMAC_CSR_500_800M;
357 	}
358 
359 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
360 		if (clk_rate > 160000000)
361 			priv->clk_csr = 0x03;
362 		else if (clk_rate > 80000000)
363 			priv->clk_csr = 0x02;
364 		else if (clk_rate > 40000000)
365 			priv->clk_csr = 0x01;
366 		else
367 			priv->clk_csr = 0;
368 	}
369 
370 	if (priv->plat->has_xgmac) {
371 		if (clk_rate > 400000000)
372 			priv->clk_csr = 0x5;
373 		else if (clk_rate > 350000000)
374 			priv->clk_csr = 0x4;
375 		else if (clk_rate > 300000000)
376 			priv->clk_csr = 0x3;
377 		else if (clk_rate > 250000000)
378 			priv->clk_csr = 0x2;
379 		else if (clk_rate > 150000000)
380 			priv->clk_csr = 0x1;
381 		else
382 			priv->clk_csr = 0x0;
383 	}
384 }
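
/* Worked example (illustrative): with a 125 MHz stmmac_clk and no fixed
 * clk_csr provided by the platform, the if/else ladder above selects
 * STMMAC_CSR_100_150M, i.e. the MDC divider range for a 100-150 MHz CSR
 * clock.
 */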
385 
386 static void print_pkt(unsigned char *buf, int len)
387 {
388 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
389 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
390 }
391 
392 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
393 {
394 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
395 	u32 avail;
396 
397 	if (tx_q->dirty_tx > tx_q->cur_tx)
398 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
399 	else
400 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
401 
402 	return avail;
403 }
404 
405 /**
406  * stmmac_rx_dirty - Get RX queue dirty
407  * @priv: driver private structure
408  * @queue: RX queue index
409  */
410 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
411 {
412 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
413 	u32 dirty;
414 
415 	if (rx_q->dirty_rx <= rx_q->cur_rx)
416 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
417 	else
418 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
419 
420 	return dirty;
421 }
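
/* Worked example for the two ring helpers above (illustrative numbers): with
 * dma_tx_size = 512, cur_tx = 500 and dirty_tx = 10, stmmac_tx_avail()
 * returns 512 - 500 + 10 - 1 = 21 free slots; with dma_rx_size = 512,
 * cur_rx = 5 and dirty_rx = 500, stmmac_rx_dirty() returns
 * 512 - 500 + 5 = 17 descriptors waiting to be refilled.
 */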
422 
423 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
424 {
425 	u32 tx_cnt = priv->plat->tx_queues_to_use;
426 	u32 queue;
427 
428 	/* check if all TX queues have the work finished */
429 	for (queue = 0; queue < tx_cnt; queue++) {
430 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
431 
432 		if (tx_q->dirty_tx != tx_q->cur_tx)
433 			return true; /* still unfinished work */
434 	}
435 
436 	return false;
437 }
438 
439 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
440 {
441 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
442 }
443 
444 /**
445  * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
446  * @priv: driver private structure
447  * Description: this function checks whether the TX path is idle and, if so,
448  * enters LPI mode (EEE).
449  */
450 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
451 {
452 	if (stmmac_eee_tx_busy(priv)) {
453 		stmmac_restart_sw_lpi_timer(priv);
454 		return;
455 	}
456 
457 	/* Check and enter LPI mode */
458 	if (!priv->tx_path_in_lpi_mode)
459 		stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
460 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING,
461 			0);
462 }
463 
464 /**
465  * stmmac_stop_sw_lpi - stop transmitting LPI
466  * @priv: driver private structure
467  * Description: When using software-controlled LPI, stop transmitting LPI state.
468  */
469 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
470 {
471 	del_timer_sync(&priv->eee_ctrl_timer);
472 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
473 	priv->tx_path_in_lpi_mode = false;
474 }
475 
476 /**
477  * stmmac_eee_ctrl_timer - EEE TX SW timer.
478  * @t:  timer_list struct containing private info
479  * Description:
480  *  if there is no data transfer and if we are not in LPI state,
481  *  then the MAC transmitter can be moved to the LPI state.
482  */
483 static void stmmac_eee_ctrl_timer(struct timer_list *t)
484 {
485 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
486 
487 	stmmac_try_to_start_sw_lpi(priv);
488 }
489 
490 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
491  * @priv: driver private structure
492  * @p : descriptor pointer
493  * @skb : the socket buffer
494  * Description :
495  * This function will read the timestamp from the descriptor, pass it to the
496  * stack and also perform some sanity checks.
497  */
498 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
499 				   struct dma_desc *p, struct sk_buff *skb)
500 {
501 	struct skb_shared_hwtstamps shhwtstamp;
502 	bool found = false;
503 	u64 ns = 0;
504 
505 	if (!priv->hwts_tx_en)
506 		return;
507 
508 	/* exit if skb doesn't support hw tstamp */
509 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
510 		return;
511 
512 	/* check tx tstamp status */
513 	if (stmmac_get_tx_timestamp_status(priv, p)) {
514 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
515 		found = true;
516 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
517 		found = true;
518 	}
519 
520 	if (found) {
521 		ns -= priv->plat->cdc_error_adj;
522 
523 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
524 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
525 
526 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
527 		/* pass tstamp to stack */
528 		skb_tstamp_tx(skb, &shhwtstamp);
529 	}
530 }
531 
532 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
533  * @priv: driver private structure
534  * @p : descriptor pointer
535  * @np : next descriptor pointer
536  * @skb : the socket buffer
537  * Description :
538  * This function will read the received packet's timestamp from the descriptor
539  * and pass it to the stack. It also performs some sanity checks.
540  */
541 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
542 				   struct dma_desc *np, struct sk_buff *skb)
543 {
544 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
545 	struct dma_desc *desc = p;
546 	u64 ns = 0;
547 
548 	if (!priv->hwts_rx_en)
549 		return;
550 	/* For GMAC4, the valid timestamp is from CTX next desc. */
551 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
552 		desc = np;
553 
554 	/* Check if timestamp is available */
555 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
556 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
557 
558 		ns -= priv->plat->cdc_error_adj;
559 
560 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
561 		shhwtstamp = skb_hwtstamps(skb);
562 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
563 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
564 	} else  {
565 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
566 	}
567 }
568 
569 /**
570  *  stmmac_hwtstamp_set - control hardware timestamping.
571  *  @dev: device pointer.
572  *  @ifr: An IOCTL-specific structure that can contain a pointer to
573  *  a proprietary structure used to pass information to the driver.
574  *  Description:
575  *  This function configures the MAC to enable/disable both outgoing (TX)
576  *  and incoming (RX) packet timestamping based on user input.
577  *  Return Value:
578  *  0 on success and an appropriate -ve integer on failure.
579  */
580 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
581 {
582 	struct stmmac_priv *priv = netdev_priv(dev);
583 	struct hwtstamp_config config;
584 	u32 ptp_v2 = 0;
585 	u32 tstamp_all = 0;
586 	u32 ptp_over_ipv4_udp = 0;
587 	u32 ptp_over_ipv6_udp = 0;
588 	u32 ptp_over_ethernet = 0;
589 	u32 snap_type_sel = 0;
590 	u32 ts_master_en = 0;
591 	u32 ts_event_en = 0;
592 
593 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
594 		netdev_alert(priv->dev, "No support for HW time stamping\n");
595 		priv->hwts_tx_en = 0;
596 		priv->hwts_rx_en = 0;
597 
598 		return -EOPNOTSUPP;
599 	}
600 
601 	if (copy_from_user(&config, ifr->ifr_data,
602 			   sizeof(config)))
603 		return -EFAULT;
604 
605 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
606 		   __func__, config.flags, config.tx_type, config.rx_filter);
607 
608 	if (config.tx_type != HWTSTAMP_TX_OFF &&
609 	    config.tx_type != HWTSTAMP_TX_ON)
610 		return -ERANGE;
611 
612 	if (priv->adv_ts) {
613 		switch (config.rx_filter) {
614 		case HWTSTAMP_FILTER_NONE:
615 			/* time stamp no incoming packet at all */
616 			config.rx_filter = HWTSTAMP_FILTER_NONE;
617 			break;
618 
619 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
620 			/* PTP v1, UDP, any kind of event packet */
621 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
622 			/* 'xmac' hardware can support Sync, Pdelay_Req and
623 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
624 			 * This leaves Delay_Req timestamps out.
625 			 * Enable all events *and* general purpose message
626 			 * timestamping.
627 			 */
628 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
629 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
630 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
631 			break;
632 
633 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
634 			/* PTP v1, UDP, Sync packet */
635 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
636 			/* take time stamp for SYNC messages only */
637 			ts_event_en = PTP_TCR_TSEVNTENA;
638 
639 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
640 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
641 			break;
642 
643 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
644 			/* PTP v1, UDP, Delay_req packet */
645 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
646 			/* take time stamp for Delay_Req messages only */
647 			ts_master_en = PTP_TCR_TSMSTRENA;
648 			ts_event_en = PTP_TCR_TSEVNTENA;
649 
650 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
651 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
652 			break;
653 
654 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
655 			/* PTP v2, UDP, any kind of event packet */
656 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
657 			ptp_v2 = PTP_TCR_TSVER2ENA;
658 			/* take time stamp for all event messages */
659 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
660 
661 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
662 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
663 			break;
664 
665 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
666 			/* PTP v2, UDP, Sync packet */
667 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
668 			ptp_v2 = PTP_TCR_TSVER2ENA;
669 			/* take time stamp for SYNC messages only */
670 			ts_event_en = PTP_TCR_TSEVNTENA;
671 
672 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
673 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
674 			break;
675 
676 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
677 			/* PTP v2, UDP, Delay_req packet */
678 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
679 			ptp_v2 = PTP_TCR_TSVER2ENA;
680 			/* take time stamp for Delay_Req messages only */
681 			ts_master_en = PTP_TCR_TSMSTRENA;
682 			ts_event_en = PTP_TCR_TSEVNTENA;
683 
684 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
685 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
686 			break;
687 
688 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
689 			/* PTP v2/802.1AS, any layer, any kind of event packet */
690 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
691 			ptp_v2 = PTP_TCR_TSVER2ENA;
692 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
693 			if (priv->synopsys_id < DWMAC_CORE_4_10)
694 				ts_event_en = PTP_TCR_TSEVNTENA;
695 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
696 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
697 			ptp_over_ethernet = PTP_TCR_TSIPENA;
698 			break;
699 
700 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
701 			/* PTP v2/802.1AS, any layer, Sync packet */
702 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
703 			ptp_v2 = PTP_TCR_TSVER2ENA;
704 			/* take time stamp for SYNC messages only */
705 			ts_event_en = PTP_TCR_TSEVNTENA;
706 
707 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
708 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
709 			ptp_over_ethernet = PTP_TCR_TSIPENA;
710 			break;
711 
712 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
713 			/* PTP v2/802.1AS, any layer, Delay_req packet */
714 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
715 			ptp_v2 = PTP_TCR_TSVER2ENA;
716 			/* take time stamp for Delay_Req messages only */
717 			ts_master_en = PTP_TCR_TSMSTRENA;
718 			ts_event_en = PTP_TCR_TSEVNTENA;
719 
720 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
721 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
722 			ptp_over_ethernet = PTP_TCR_TSIPENA;
723 			break;
724 
725 		case HWTSTAMP_FILTER_NTP_ALL:
726 		case HWTSTAMP_FILTER_ALL:
727 			/* time stamp any incoming packet */
728 			config.rx_filter = HWTSTAMP_FILTER_ALL;
729 			tstamp_all = PTP_TCR_TSENALL;
730 			break;
731 
732 		default:
733 			return -ERANGE;
734 		}
735 	} else {
736 		switch (config.rx_filter) {
737 		case HWTSTAMP_FILTER_NONE:
738 			config.rx_filter = HWTSTAMP_FILTER_NONE;
739 			break;
740 		default:
741 			/* PTP v1, UDP, any kind of event packet */
742 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
743 			break;
744 		}
745 	}
746 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
747 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
748 
749 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
750 
751 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
752 		priv->systime_flags |= tstamp_all | ptp_v2 |
753 				       ptp_over_ethernet | ptp_over_ipv6_udp |
754 				       ptp_over_ipv4_udp | ts_event_en |
755 				       ts_master_en | snap_type_sel;
756 	}
757 
758 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
759 
760 	memcpy(&priv->tstamp_config, &config, sizeof(config));
761 
762 	return copy_to_user(ifr->ifr_data, &config,
763 			    sizeof(config)) ? -EFAULT : 0;
764 }
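
/* Illustrative userspace counterpart of the handler above (sketch only, not
 * part of the driver; the socket fd and "eth0" are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The handler programs PTP_TCR accordingly and copies the (possibly adjusted)
 * config back to userspace.
 */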
765 
766 /**
767  *  stmmac_hwtstamp_get - read hardware timestamping.
768  *  @dev: device pointer.
769  *  @ifr: An IOCTL-specific structure that can contain a pointer to
770  *  a proprietary structure used to pass information to the driver.
771  *  Description:
772  *  This function obtains the current hardware timestamping settings
773  *  as requested.
774  */
775 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
776 {
777 	struct stmmac_priv *priv = netdev_priv(dev);
778 	struct hwtstamp_config *config = &priv->tstamp_config;
779 
780 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
781 		return -EOPNOTSUPP;
782 
783 	return copy_to_user(ifr->ifr_data, config,
784 			    sizeof(*config)) ? -EFAULT : 0;
785 }
786 
787 /**
788  * stmmac_init_tstamp_counter - init hardware timestamping counter
789  * @priv: driver private structure
790  * @systime_flags: timestamping flags
791  * Description:
792  * Initialize hardware counter for packet timestamping.
793  * This is valid as long as the interface is open and not suspended.
794  * It will be rerun after resuming from suspend, in which case the timestamping
795  * flags updated by stmmac_hwtstamp_set() also need to be restored.
796  */
797 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
798 {
799 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
800 	struct timespec64 now;
801 	u32 sec_inc = 0;
802 	u64 temp = 0;
803 
804 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
805 		return -EOPNOTSUPP;
806 
807 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
808 	priv->systime_flags = systime_flags;
809 
810 	/* program Sub Second Increment reg */
811 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
812 					   priv->plat->clk_ptp_rate,
813 					   xmac, &sec_inc);
814 	temp = div_u64(1000000000ULL, sec_inc);
815 
816 	/* Store sub second increment for later use */
817 	priv->sub_second_inc = sec_inc;
818 
819 	/* calculate the default addend value:
820 	 * addend = (2^32)/freq_div_ratio,
821 	 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc), i.e. the
822 	 * ratio of the PTP reference clock to the target counter frequency.
823 	 */
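	/* Worked example with purely illustrative numbers: if
	 * config_sub_second_increment() picked sec_inc = 40 (ns) and
	 * clk_ptp_rate = 50 MHz, then temp = 1e9 / 40 = 25000000 and
	 * addend = (25000000 << 32) / 50000000 = 2^31, i.e. the 32-bit
	 * accumulator wraps once every two PTP clock cycles.
	 */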
824 	temp = (u64)(temp << 32);
825 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
826 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
827 
828 	/* initialize system time */
829 	ktime_get_real_ts64(&now);
830 
831 	/* lower 32 bits of tv_sec are safe until y2106 */
832 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
833 
834 	return 0;
835 }
836 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
837 
838 /**
839  * stmmac_init_ptp - init PTP
840  * @priv: driver private structure
841  * Description: this verifies whether the HW supports PTPv1 or PTPv2.
842  * This is done by looking at the HW cap. register.
843  * This function also registers the ptp driver.
844  */
845 static int stmmac_init_ptp(struct stmmac_priv *priv)
846 {
847 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
848 	int ret;
849 
850 	if (priv->plat->ptp_clk_freq_config)
851 		priv->plat->ptp_clk_freq_config(priv);
852 
853 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
854 	if (ret)
855 		return ret;
856 
857 	priv->adv_ts = 0;
858 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
859 	if (xmac && priv->dma_cap.atime_stamp)
860 		priv->adv_ts = 1;
861 	/* Dwmac 3.x core with extend_desc can support adv_ts */
862 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
863 		priv->adv_ts = 1;
864 
865 	if (priv->dma_cap.time_stamp)
866 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
867 
868 	if (priv->adv_ts)
869 		netdev_info(priv->dev,
870 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
871 
872 	priv->hwts_tx_en = 0;
873 	priv->hwts_rx_en = 0;
874 
875 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
876 		stmmac_hwtstamp_correct_latency(priv, priv);
877 
878 	return 0;
879 }
880 
881 static void stmmac_release_ptp(struct stmmac_priv *priv)
882 {
883 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
884 	stmmac_ptp_unregister(priv);
885 }
886 
887 /**
888  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
889  *  @priv: driver private structure
890  *  @duplex: duplex passed to the next function
891  *  @flow_ctrl: desired flow control modes
892  *  Description: It is used for configuring the flow control in all queues
893  */
894 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
895 				 unsigned int flow_ctrl)
896 {
897 	u32 tx_cnt = priv->plat->tx_queues_to_use;
898 
899 	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
900 			 tx_cnt);
901 }
902 
903 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
904 					 phy_interface_t interface)
905 {
906 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
907 
908 	/* Refresh the MAC-specific capabilities */
909 	stmmac_mac_update_caps(priv);
910 
911 	config->mac_capabilities = priv->hw->link.caps;
912 
913 	if (priv->plat->max_speed)
914 		phylink_limit_mac_speed(config, priv->plat->max_speed);
915 
916 	return config->mac_capabilities;
917 }
918 
919 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
920 						 phy_interface_t interface)
921 {
922 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
923 	struct phylink_pcs *pcs;
924 
925 	if (priv->plat->select_pcs) {
926 		pcs = priv->plat->select_pcs(priv, interface);
927 		if (!IS_ERR(pcs))
928 			return pcs;
929 	}
930 
931 	return NULL;
932 }
933 
934 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
935 			      const struct phylink_link_state *state)
936 {
937 	/* Nothing to do, xpcs_config() handles everything */
938 }
939 
940 static void stmmac_mac_link_down(struct phylink_config *config,
941 				 unsigned int mode, phy_interface_t interface)
942 {
943 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
944 
945 	stmmac_mac_set(priv, priv->ioaddr, false);
946 	if (priv->dma_cap.eee)
947 		stmmac_set_eee_pls(priv, priv->hw, false);
948 
949 	if (stmmac_fpe_supported(priv))
950 		stmmac_fpe_link_state_handle(priv, false);
951 }
952 
953 static void stmmac_mac_link_up(struct phylink_config *config,
954 			       struct phy_device *phy,
955 			       unsigned int mode, phy_interface_t interface,
956 			       int speed, int duplex,
957 			       bool tx_pause, bool rx_pause)
958 {
959 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
960 	unsigned int flow_ctrl;
961 	u32 old_ctrl, ctrl;
962 	int ret;
963 
964 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
965 	    priv->plat->serdes_powerup)
966 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
967 
968 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
969 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
970 
971 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
972 		switch (speed) {
973 		case SPEED_10000:
974 			ctrl |= priv->hw->link.xgmii.speed10000;
975 			break;
976 		case SPEED_5000:
977 			ctrl |= priv->hw->link.xgmii.speed5000;
978 			break;
979 		case SPEED_2500:
980 			ctrl |= priv->hw->link.xgmii.speed2500;
981 			break;
982 		default:
983 			return;
984 		}
985 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
986 		switch (speed) {
987 		case SPEED_100000:
988 			ctrl |= priv->hw->link.xlgmii.speed100000;
989 			break;
990 		case SPEED_50000:
991 			ctrl |= priv->hw->link.xlgmii.speed50000;
992 			break;
993 		case SPEED_40000:
994 			ctrl |= priv->hw->link.xlgmii.speed40000;
995 			break;
996 		case SPEED_25000:
997 			ctrl |= priv->hw->link.xlgmii.speed25000;
998 			break;
999 		case SPEED_10000:
1000 			ctrl |= priv->hw->link.xgmii.speed10000;
1001 			break;
1002 		case SPEED_2500:
1003 			ctrl |= priv->hw->link.speed2500;
1004 			break;
1005 		case SPEED_1000:
1006 			ctrl |= priv->hw->link.speed1000;
1007 			break;
1008 		default:
1009 			return;
1010 		}
1011 	} else {
1012 		switch (speed) {
1013 		case SPEED_2500:
1014 			ctrl |= priv->hw->link.speed2500;
1015 			break;
1016 		case SPEED_1000:
1017 			ctrl |= priv->hw->link.speed1000;
1018 			break;
1019 		case SPEED_100:
1020 			ctrl |= priv->hw->link.speed100;
1021 			break;
1022 		case SPEED_10:
1023 			ctrl |= priv->hw->link.speed10;
1024 			break;
1025 		default:
1026 			return;
1027 		}
1028 	}
1029 
1030 	priv->speed = speed;
1031 
1032 	if (priv->plat->fix_mac_speed)
1033 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1034 
1035 	if (!duplex)
1036 		ctrl &= ~priv->hw->link.duplex;
1037 	else
1038 		ctrl |= priv->hw->link.duplex;
1039 
1040 	/* Flow Control operation */
1041 	if (rx_pause && tx_pause)
1042 		flow_ctrl = FLOW_AUTO;
1043 	else if (rx_pause && !tx_pause)
1044 		flow_ctrl = FLOW_RX;
1045 	else if (!rx_pause && tx_pause)
1046 		flow_ctrl = FLOW_TX;
1047 	else
1048 		flow_ctrl = FLOW_OFF;
1049 
1050 	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
1051 
1052 	if (ctrl != old_ctrl)
1053 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1054 
1055 	if (priv->plat->set_clk_tx_rate) {
1056 		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
1057 						priv->plat->clk_tx_i,
1058 						interface, speed);
1059 		if (ret < 0)
1060 			netdev_err(priv->dev,
1061 				   "failed to configure transmit clock for %dMbps: %pe\n",
1062 				   speed, ERR_PTR(ret));
1063 	}
1064 
1065 	stmmac_mac_set(priv, priv->ioaddr, true);
1066 	if (priv->dma_cap.eee)
1067 		stmmac_set_eee_pls(priv, priv->hw, true);
1068 
1069 	if (stmmac_fpe_supported(priv))
1070 		stmmac_fpe_link_state_handle(priv, true);
1071 
1072 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1073 		stmmac_hwtstamp_correct_latency(priv, priv);
1074 }
1075 
1076 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1077 {
1078 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1079 
1080 	priv->eee_active = false;
1081 
1082 	mutex_lock(&priv->lock);
1083 
1084 	priv->eee_enabled = false;
1085 
1086 	netdev_dbg(priv->dev, "disable EEE\n");
1087 	priv->eee_sw_timer_en = false;
1088 	del_timer_sync(&priv->eee_ctrl_timer);
1089 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1090 	priv->tx_path_in_lpi_mode = false;
1091 
1092 	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1093 	mutex_unlock(&priv->lock);
1094 }
1095 
1096 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1097 				    bool tx_clk_stop)
1098 {
1099 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1100 	int ret;
1101 
1102 	priv->tx_lpi_timer = timer;
1103 	priv->eee_active = true;
1104 
1105 	mutex_lock(&priv->lock);
1106 
1107 	priv->eee_enabled = true;
1108 
1109 	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1110 			     STMMAC_DEFAULT_TWT_LS);
1111 
1112 	/* Try to configure the hardware timer. */
1113 	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1114 				  priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING,
1115 				  priv->tx_lpi_timer);
1116 
1117 	if (ret) {
1118 		/* Hardware timer mode not supported, or value out of range.
1119 		 * Fall back to using software LPI mode
1120 		 */
1121 		priv->eee_sw_timer_en = true;
1122 		stmmac_restart_sw_lpi_timer(priv);
1123 	}
1124 
1125 	mutex_unlock(&priv->lock);
1126 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1127 
1128 	return 0;
1129 }
1130 
1131 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
1132 			     phy_interface_t interface)
1133 {
1134 	struct net_device *ndev = to_net_dev(config->dev);
1135 	struct stmmac_priv *priv = netdev_priv(ndev);
1136 
1137 	if (priv->plat->mac_finish)
1138 		priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
1139 
1140 	return 0;
1141 }
1142 
1143 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1144 	.mac_get_caps = stmmac_mac_get_caps,
1145 	.mac_select_pcs = stmmac_mac_select_pcs,
1146 	.mac_config = stmmac_mac_config,
1147 	.mac_link_down = stmmac_mac_link_down,
1148 	.mac_link_up = stmmac_mac_link_up,
1149 	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1150 	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1151 	.mac_finish = stmmac_mac_finish,
1152 };
1153 
1154 /**
1155  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1156  * @priv: driver private structure
1157  * Description: this verifies whether the HW supports the Physical Coding
1158  * Sublayer (PCS), an interface that can be used when the MAC is
1159  * configured for the TBI, RTBI, or SGMII PHY interface.
1160  */
1161 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1162 {
1163 	int interface = priv->plat->mac_interface;
1164 
1165 	if (priv->dma_cap.pcs) {
1166 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1167 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1168 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1169 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1170 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1171 			priv->hw->pcs = STMMAC_PCS_RGMII;
1172 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1173 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1174 			priv->hw->pcs = STMMAC_PCS_SGMII;
1175 		}
1176 	}
1177 }
1178 
1179 /**
1180  * stmmac_init_phy - PHY initialization
1181  * @dev: net device structure
1182  * Description: it initializes the driver's PHY state, and attaches the PHY
1183  * to the MAC driver.
1184  *  Return value:
1185  *  0 on success
1186  */
1187 static int stmmac_init_phy(struct net_device *dev)
1188 {
1189 	struct stmmac_priv *priv = netdev_priv(dev);
1190 	struct fwnode_handle *phy_fwnode;
1191 	struct fwnode_handle *fwnode;
1192 	int ret;
1193 
1194 	if (!phylink_expects_phy(priv->phylink))
1195 		return 0;
1196 
1197 	fwnode = priv->plat->port_node;
1198 	if (!fwnode)
1199 		fwnode = dev_fwnode(priv->device);
1200 
1201 	if (fwnode)
1202 		phy_fwnode = fwnode_get_phy_node(fwnode);
1203 	else
1204 		phy_fwnode = NULL;
1205 
1206 	/* Some DT bindings do not set up the PHY handle. Let's try to
1207 	 * manually parse it.
1208 	 */
1209 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1210 		int addr = priv->plat->phy_addr;
1211 		struct phy_device *phydev;
1212 
1213 		if (addr < 0) {
1214 			netdev_err(priv->dev, "no phy found\n");
1215 			return -ENODEV;
1216 		}
1217 
1218 		phydev = mdiobus_get_phy(priv->mii, addr);
1219 		if (!phydev) {
1220 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1221 			return -ENODEV;
1222 		}
1223 
1224 		ret = phylink_connect_phy(priv->phylink, phydev);
1225 	} else {
1226 		fwnode_handle_put(phy_fwnode);
1227 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1228 	}
1229 
1230 	if (ret == 0) {
1231 		struct ethtool_keee eee;
1232 
1233 		/* Configure phylib's copy of the LPI timer. Normally,
1234 		 * phylink_config.lpi_timer_default would do this, but there is
1235 		 * a chance that userspace could change the eee_timer setting
1236 		 * via sysfs before the first open. Thus, preserve existing
1237 		 * behaviour.
1238 		 */
1239 		if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1240 			eee.tx_lpi_timer = priv->tx_lpi_timer;
1241 			phylink_ethtool_set_eee(priv->phylink, &eee);
1242 		}
1243 	}
1244 
1245 	if (!priv->plat->pmt) {
1246 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1247 
1248 		phylink_ethtool_get_wol(priv->phylink, &wol);
1249 		device_set_wakeup_capable(priv->device, !!wol.supported);
1250 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1251 	}
1252 
1253 	return ret;
1254 }
1255 
1256 static int stmmac_phy_setup(struct stmmac_priv *priv)
1257 {
1258 	struct stmmac_mdio_bus_data *mdio_bus_data;
1259 	int mode = priv->plat->phy_interface;
1260 	struct fwnode_handle *fwnode;
1261 	struct phylink_pcs *pcs;
1262 	struct phylink *phylink;
1263 
1264 	priv->phylink_config.dev = &priv->dev->dev;
1265 	priv->phylink_config.type = PHYLINK_NETDEV;
1266 	priv->phylink_config.mac_managed_pm = true;
1267 
1268 	/* Stmmac always requires an RX clock for hardware initialization */
1269 	priv->phylink_config.mac_requires_rxc = true;
1270 
1271 	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1272 		priv->phylink_config.eee_rx_clk_stop_enable = true;
1273 
1274 	mdio_bus_data = priv->plat->mdio_bus_data;
1275 	if (mdio_bus_data)
1276 		priv->phylink_config.default_an_inband =
1277 			mdio_bus_data->default_an_inband;
1278 
1279 	/* Set the platform/firmware specified interface mode. Note, phylink
1280 	 * deals with the PHY interface mode, not the MAC interface mode.
1281 	 */
1282 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1283 
1284 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1285 	if (priv->hw->xpcs)
1286 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1287 	else
1288 		pcs = priv->hw->phylink_pcs;
1289 
1290 	if (pcs)
1291 		phy_interface_or(priv->phylink_config.supported_interfaces,
1292 				 priv->phylink_config.supported_interfaces,
1293 				 pcs->supported_interfaces);
1294 
1295 	if (priv->dma_cap.eee) {
1296 		/* Assume all supported interfaces also support LPI */
1297 		memcpy(priv->phylink_config.lpi_interfaces,
1298 		       priv->phylink_config.supported_interfaces,
1299 		       sizeof(priv->phylink_config.lpi_interfaces));
1300 
1301 		/* All full duplex speeds above 100Mbps are supported */
1302 		priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) |
1303 							MAC_100FD;
1304 		priv->phylink_config.lpi_timer_default = eee_timer * 1000;
1305 		priv->phylink_config.eee_enabled_default = true;
1306 	}
1307 
1308 	fwnode = priv->plat->port_node;
1309 	if (!fwnode)
1310 		fwnode = dev_fwnode(priv->device);
1311 
1312 	phylink = phylink_create(&priv->phylink_config, fwnode,
1313 				 mode, &stmmac_phylink_mac_ops);
1314 	if (IS_ERR(phylink))
1315 		return PTR_ERR(phylink);
1316 
1317 	priv->phylink = phylink;
1318 	return 0;
1319 }
1320 
1321 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1322 				    struct stmmac_dma_conf *dma_conf)
1323 {
1324 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1325 	unsigned int desc_size;
1326 	void *head_rx;
1327 	u32 queue;
1328 
1329 	/* Display RX rings */
1330 	for (queue = 0; queue < rx_cnt; queue++) {
1331 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1332 
1333 		pr_info("\tRX Queue %u rings\n", queue);
1334 
1335 		if (priv->extend_desc) {
1336 			head_rx = (void *)rx_q->dma_erx;
1337 			desc_size = sizeof(struct dma_extended_desc);
1338 		} else {
1339 			head_rx = (void *)rx_q->dma_rx;
1340 			desc_size = sizeof(struct dma_desc);
1341 		}
1342 
1343 		/* Display RX ring */
1344 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1345 				    rx_q->dma_rx_phy, desc_size);
1346 	}
1347 }
1348 
1349 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1350 				    struct stmmac_dma_conf *dma_conf)
1351 {
1352 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1353 	unsigned int desc_size;
1354 	void *head_tx;
1355 	u32 queue;
1356 
1357 	/* Display TX rings */
1358 	for (queue = 0; queue < tx_cnt; queue++) {
1359 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1360 
1361 		pr_info("\tTX Queue %d rings\n", queue);
1362 
1363 		if (priv->extend_desc) {
1364 			head_tx = (void *)tx_q->dma_etx;
1365 			desc_size = sizeof(struct dma_extended_desc);
1366 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1367 			head_tx = (void *)tx_q->dma_entx;
1368 			desc_size = sizeof(struct dma_edesc);
1369 		} else {
1370 			head_tx = (void *)tx_q->dma_tx;
1371 			desc_size = sizeof(struct dma_desc);
1372 		}
1373 
1374 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1375 				    tx_q->dma_tx_phy, desc_size);
1376 	}
1377 }
1378 
1379 static void stmmac_display_rings(struct stmmac_priv *priv,
1380 				 struct stmmac_dma_conf *dma_conf)
1381 {
1382 	/* Display RX ring */
1383 	stmmac_display_rx_rings(priv, dma_conf);
1384 
1385 	/* Display TX ring */
1386 	stmmac_display_tx_rings(priv, dma_conf);
1387 }
1388 
1389 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1390 {
1391 	if (stmmac_xdp_is_enabled(priv))
1392 		return XDP_PACKET_HEADROOM;
1393 
1394 	return NET_SKB_PAD;
1395 }
1396 
1397 static int stmmac_set_bfsize(int mtu, int bufsize)
1398 {
1399 	int ret = bufsize;
1400 
1401 	if (mtu >= BUF_SIZE_8KiB)
1402 		ret = BUF_SIZE_16KiB;
1403 	else if (mtu >= BUF_SIZE_4KiB)
1404 		ret = BUF_SIZE_8KiB;
1405 	else if (mtu >= BUF_SIZE_2KiB)
1406 		ret = BUF_SIZE_4KiB;
1407 	else if (mtu > DEFAULT_BUFSIZE)
1408 		ret = BUF_SIZE_2KiB;
1409 	else
1410 		ret = DEFAULT_BUFSIZE;
1411 
1412 	return ret;
1413 }
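
/* Illustrative mapping for the helper above: a default 1500-byte MTU keeps
 * DEFAULT_BUFSIZE (1536), a 3000-byte MTU selects BUF_SIZE_4KiB, and a
 * 9000-byte jumbo MTU selects BUF_SIZE_16KiB.
 */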
1414 
1415 /**
1416  * stmmac_clear_rx_descriptors - clear RX descriptors
1417  * @priv: driver private structure
1418  * @dma_conf: structure to take the dma data
1419  * @queue: RX queue index
1420  * Description: this function is called to clear the RX descriptors,
1421  * whether basic or extended descriptors are used.
1422  */
1423 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1424 					struct stmmac_dma_conf *dma_conf,
1425 					u32 queue)
1426 {
1427 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1428 	int i;
1429 
1430 	/* Clear the RX descriptors */
1431 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1432 		if (priv->extend_desc)
1433 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1434 					priv->use_riwt, priv->mode,
1435 					(i == dma_conf->dma_rx_size - 1),
1436 					dma_conf->dma_buf_sz);
1437 		else
1438 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1439 					priv->use_riwt, priv->mode,
1440 					(i == dma_conf->dma_rx_size - 1),
1441 					dma_conf->dma_buf_sz);
1442 }
1443 
1444 /**
1445  * stmmac_clear_tx_descriptors - clear tx descriptors
1446  * @priv: driver private structure
1447  * @dma_conf: structure to take the dma data
1448  * @queue: TX queue index.
1449  * Description: this function is called to clear the TX descriptors,
1450  * whether basic or extended descriptors are used.
1451  */
1452 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1453 					struct stmmac_dma_conf *dma_conf,
1454 					u32 queue)
1455 {
1456 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1457 	int i;
1458 
1459 	/* Clear the TX descriptors */
1460 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1461 		int last = (i == (dma_conf->dma_tx_size - 1));
1462 		struct dma_desc *p;
1463 
1464 		if (priv->extend_desc)
1465 			p = &tx_q->dma_etx[i].basic;
1466 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1467 			p = &tx_q->dma_entx[i].basic;
1468 		else
1469 			p = &tx_q->dma_tx[i];
1470 
1471 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1472 	}
1473 }
1474 
1475 /**
1476  * stmmac_clear_descriptors - clear descriptors
1477  * @priv: driver private structure
1478  * @dma_conf: structure to take the dma data
1479  * Description: this function is called to clear the TX and RX descriptors,
1480  * whether basic or extended descriptors are used.
1481  */
1482 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1483 				     struct stmmac_dma_conf *dma_conf)
1484 {
1485 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1486 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1487 	u32 queue;
1488 
1489 	/* Clear the RX descriptors */
1490 	for (queue = 0; queue < rx_queue_cnt; queue++)
1491 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1492 
1493 	/* Clear the TX descriptors */
1494 	for (queue = 0; queue < tx_queue_cnt; queue++)
1495 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1496 }
1497 
1498 /**
1499  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1500  * @priv: driver private structure
1501  * @dma_conf: structure to take the dma data
1502  * @p: descriptor pointer
1503  * @i: descriptor index
1504  * @flags: gfp flag
1505  * @queue: RX queue index
1506  * Description: this function is called to allocate a receive buffer, perform
1507  * the DMA mapping and init the descriptor.
1508  */
1509 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1510 				  struct stmmac_dma_conf *dma_conf,
1511 				  struct dma_desc *p,
1512 				  int i, gfp_t flags, u32 queue)
1513 {
1514 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1515 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1516 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1517 
1518 	if (priv->dma_cap.host_dma_width <= 32)
1519 		gfp |= GFP_DMA32;
1520 
1521 	if (!buf->page) {
1522 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1523 		if (!buf->page)
1524 			return -ENOMEM;
1525 		buf->page_offset = stmmac_rx_offset(priv);
1526 	}
1527 
1528 	if (priv->sph && !buf->sec_page) {
1529 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1530 		if (!buf->sec_page)
1531 			return -ENOMEM;
1532 
1533 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1534 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1535 	} else {
1536 		buf->sec_page = NULL;
1537 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1538 	}
1539 
1540 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1541 
1542 	stmmac_set_desc_addr(priv, p, buf->addr);
1543 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1544 		stmmac_init_desc3(priv, p);
1545 
1546 	return 0;
1547 }
1548 
1549 /**
1550  * stmmac_free_rx_buffer - free RX dma buffers
1551  * @priv: private structure
1552  * @rx_q: RX queue
1553  * @i: buffer index.
1554  */
1555 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1556 				  struct stmmac_rx_queue *rx_q,
1557 				  int i)
1558 {
1559 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1560 
1561 	if (buf->page)
1562 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1563 	buf->page = NULL;
1564 
1565 	if (buf->sec_page)
1566 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1567 	buf->sec_page = NULL;
1568 }
1569 
1570 /**
1571  * stmmac_free_tx_buffer - free TX dma buffers
1572  * @priv: private structure
1573  * @dma_conf: structure to take the dma data
1574  * @queue: TX queue index
1575  * @i: buffer index.
1576  */
1577 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1578 				  struct stmmac_dma_conf *dma_conf,
1579 				  u32 queue, int i)
1580 {
1581 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1582 
1583 	if (tx_q->tx_skbuff_dma[i].buf &&
1584 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1585 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1586 			dma_unmap_page(priv->device,
1587 				       tx_q->tx_skbuff_dma[i].buf,
1588 				       tx_q->tx_skbuff_dma[i].len,
1589 				       DMA_TO_DEVICE);
1590 		else
1591 			dma_unmap_single(priv->device,
1592 					 tx_q->tx_skbuff_dma[i].buf,
1593 					 tx_q->tx_skbuff_dma[i].len,
1594 					 DMA_TO_DEVICE);
1595 	}
1596 
1597 	if (tx_q->xdpf[i] &&
1598 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1599 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1600 		xdp_return_frame(tx_q->xdpf[i]);
1601 		tx_q->xdpf[i] = NULL;
1602 	}
1603 
1604 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1605 		tx_q->xsk_frames_done++;
1606 
1607 	if (tx_q->tx_skbuff[i] &&
1608 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1609 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1610 		tx_q->tx_skbuff[i] = NULL;
1611 	}
1612 
1613 	tx_q->tx_skbuff_dma[i].buf = 0;
1614 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1615 }
1616 
1617 /**
1618  * dma_free_rx_skbufs - free RX dma buffers
1619  * @priv: private structure
1620  * @dma_conf: structure to take the dma data
1621  * @queue: RX queue index
1622  */
1623 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1624 			       struct stmmac_dma_conf *dma_conf,
1625 			       u32 queue)
1626 {
1627 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1628 	int i;
1629 
1630 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1631 		stmmac_free_rx_buffer(priv, rx_q, i);
1632 }
1633 
1634 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1635 				   struct stmmac_dma_conf *dma_conf,
1636 				   u32 queue, gfp_t flags)
1637 {
1638 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1639 	int i;
1640 
1641 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1642 		struct dma_desc *p;
1643 		int ret;
1644 
1645 		if (priv->extend_desc)
1646 			p = &((rx_q->dma_erx + i)->basic);
1647 		else
1648 			p = rx_q->dma_rx + i;
1649 
1650 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1651 					     queue);
1652 		if (ret)
1653 			return ret;
1654 
1655 		rx_q->buf_alloc_num++;
1656 	}
1657 
1658 	return 0;
1659 }
1660 
1661 /**
1662  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1663  * @priv: private structure
1664  * @dma_conf: structure to take the dma data
1665  * @queue: RX queue index
1666  */
1667 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1668 				struct stmmac_dma_conf *dma_conf,
1669 				u32 queue)
1670 {
1671 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1672 	int i;
1673 
1674 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1675 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1676 
1677 		if (!buf->xdp)
1678 			continue;
1679 
1680 		xsk_buff_free(buf->xdp);
1681 		buf->xdp = NULL;
1682 	}
1683 }
1684 
1685 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1686 				      struct stmmac_dma_conf *dma_conf,
1687 				      u32 queue)
1688 {
1689 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1690 	int i;
1691 
1692 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1693 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1694 	 * use this macro to make sure there are no size violations.
1695 	 */
1696 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1697 
1698 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1699 		struct stmmac_rx_buffer *buf;
1700 		dma_addr_t dma_addr;
1701 		struct dma_desc *p;
1702 
1703 		if (priv->extend_desc)
1704 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1705 		else
1706 			p = rx_q->dma_rx + i;
1707 
1708 		buf = &rx_q->buf_pool[i];
1709 
1710 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1711 		if (!buf->xdp)
1712 			return -ENOMEM;
1713 
1714 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1715 		stmmac_set_desc_addr(priv, p, dma_addr);
1716 		rx_q->buf_alloc_num++;
1717 	}
1718 
1719 	return 0;
1720 }
1721 
1722 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1723 {
1724 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1725 		return NULL;
1726 
1727 	return xsk_get_pool_from_qid(priv->dev, queue);
1728 }
1729 
1730 /**
1731  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1732  * @priv: driver private structure
1733  * @dma_conf: structure to take the dma data
1734  * @queue: RX queue index
1735  * @flags: gfp flag.
1736  * Description: this function initializes the DMA RX descriptors
1737  * and allocates the socket buffers. It supports the chained and ring
1738  * modes.
1739  */
1740 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1741 				    struct stmmac_dma_conf *dma_conf,
1742 				    u32 queue, gfp_t flags)
1743 {
1744 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1745 	int ret;
1746 
1747 	netif_dbg(priv, probe, priv->dev,
1748 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1749 		  (u32)rx_q->dma_rx_phy);
1750 
1751 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1752 
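	/* Drop any memory model previously registered for this RxQ; it is
	 * re-registered below as either XSK_BUFF_POOL or PAGE_POOL depending
	 * on whether an XSK pool is attached to the queue.
	 */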
1753 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1754 
1755 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1756 
1757 	if (rx_q->xsk_pool) {
1758 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1759 						   MEM_TYPE_XSK_BUFF_POOL,
1760 						   NULL));
1761 		netdev_info(priv->dev,
1762 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1763 			    rx_q->queue_index);
1764 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1765 	} else {
1766 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1767 						   MEM_TYPE_PAGE_POOL,
1768 						   rx_q->page_pool));
1769 		netdev_info(priv->dev,
1770 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1771 			    rx_q->queue_index);
1772 	}
1773 
1774 	if (rx_q->xsk_pool) {
1775 		/* RX XDP ZC buffer pool may not be populated, e.g.
1776 		 * xdpsock TX-only.
1777 		 */
1778 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1779 	} else {
1780 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1781 		if (ret < 0)
1782 			return -ENOMEM;
1783 	}
1784 
1785 	/* Setup the chained descriptor addresses */
1786 	if (priv->mode == STMMAC_CHAIN_MODE) {
1787 		if (priv->extend_desc)
1788 			stmmac_mode_init(priv, rx_q->dma_erx,
1789 					 rx_q->dma_rx_phy,
1790 					 dma_conf->dma_rx_size, 1);
1791 		else
1792 			stmmac_mode_init(priv, rx_q->dma_rx,
1793 					 rx_q->dma_rx_phy,
1794 					 dma_conf->dma_rx_size, 0);
1795 	}
1796 
1797 	return 0;
1798 }
1799 
1800 static int init_dma_rx_desc_rings(struct net_device *dev,
1801 				  struct stmmac_dma_conf *dma_conf,
1802 				  gfp_t flags)
1803 {
1804 	struct stmmac_priv *priv = netdev_priv(dev);
1805 	u32 rx_count = priv->plat->rx_queues_to_use;
1806 	int queue;
1807 	int ret;
1808 
1809 	/* RX INITIALIZATION */
1810 	netif_dbg(priv, probe, priv->dev,
1811 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1812 
1813 	for (queue = 0; queue < rx_count; queue++) {
1814 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1815 		if (ret)
1816 			goto err_init_rx_buffers;
1817 	}
1818 
1819 	return 0;
1820 
1821 err_init_rx_buffers:
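	/* Roll back: release the RX buffers of the queue that failed as well
	 * as those of every queue initialized before it.
	 */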
1822 	while (queue >= 0) {
1823 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1824 
1825 		if (rx_q->xsk_pool)
1826 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1827 		else
1828 			dma_free_rx_skbufs(priv, dma_conf, queue);
1829 
1830 		rx_q->buf_alloc_num = 0;
1831 		rx_q->xsk_pool = NULL;
1832 
1833 		queue--;
1834 	}
1835 
1836 	return ret;
1837 }
1838 
1839 /**
1840  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1841  * @priv: driver private structure
1842  * @dma_conf: structure to take the dma data
1843  * @queue: TX queue index
1844  * Description: this function initializes the DMA TX descriptors
1845  * and allocates the socket buffers. It supports the chained and ring
1846  * modes.
1847  */
1848 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1849 				    struct stmmac_dma_conf *dma_conf,
1850 				    u32 queue)
1851 {
1852 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1853 	int i;
1854 
1855 	netif_dbg(priv, probe, priv->dev,
1856 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1857 		  (u32)tx_q->dma_tx_phy);
1858 
1859 	/* Setup the chained descriptor addresses */
1860 	if (priv->mode == STMMAC_CHAIN_MODE) {
1861 		if (priv->extend_desc)
1862 			stmmac_mode_init(priv, tx_q->dma_etx,
1863 					 tx_q->dma_tx_phy,
1864 					 dma_conf->dma_tx_size, 1);
1865 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1866 			stmmac_mode_init(priv, tx_q->dma_tx,
1867 					 tx_q->dma_tx_phy,
1868 					 dma_conf->dma_tx_size, 0);
1869 	}
1870 
1871 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1872 
1873 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1874 		struct dma_desc *p;
1875 
1876 		if (priv->extend_desc)
1877 			p = &((tx_q->dma_etx + i)->basic);
1878 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1879 			p = &((tx_q->dma_entx + i)->basic);
1880 		else
1881 			p = tx_q->dma_tx + i;
1882 
1883 		stmmac_clear_desc(priv, p);
1884 
1885 		tx_q->tx_skbuff_dma[i].buf = 0;
1886 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1887 		tx_q->tx_skbuff_dma[i].len = 0;
1888 		tx_q->tx_skbuff_dma[i].last_segment = false;
1889 		tx_q->tx_skbuff[i] = NULL;
1890 	}
1891 
1892 	return 0;
1893 }
1894 
1895 static int init_dma_tx_desc_rings(struct net_device *dev,
1896 				  struct stmmac_dma_conf *dma_conf)
1897 {
1898 	struct stmmac_priv *priv = netdev_priv(dev);
1899 	u32 tx_queue_cnt;
1900 	u32 queue;
1901 
1902 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1903 
1904 	for (queue = 0; queue < tx_queue_cnt; queue++)
1905 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1906 
1907 	return 0;
1908 }
1909 
1910 /**
1911  * init_dma_desc_rings - init the RX/TX descriptor rings
1912  * @dev: net device structure
1913  * @dma_conf: structure to take the dma data
1914  * @flags: gfp flag.
1915  * Description: this function initializes the DMA RX/TX descriptors
1916  * and allocates the socket buffers. It supports the chained and ring
1917  * modes.
1918  */
1919 static int init_dma_desc_rings(struct net_device *dev,
1920 			       struct stmmac_dma_conf *dma_conf,
1921 			       gfp_t flags)
1922 {
1923 	struct stmmac_priv *priv = netdev_priv(dev);
1924 	int ret;
1925 
1926 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1927 	if (ret)
1928 		return ret;
1929 
1930 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1931 
1932 	stmmac_clear_descriptors(priv, dma_conf);
1933 
1934 	if (netif_msg_hw(priv))
1935 		stmmac_display_rings(priv, dma_conf);
1936 
1937 	return ret;
1938 }
1939 
1940 /**
1941  * dma_free_tx_skbufs - free TX dma buffers
1942  * @priv: private structure
1943  * @dma_conf: structure to take the dma data
1944  * @queue: TX queue index
1945  */
1946 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1947 			       struct stmmac_dma_conf *dma_conf,
1948 			       u32 queue)
1949 {
1950 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1951 	int i;
1952 
1953 	tx_q->xsk_frames_done = 0;
1954 
1955 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1956 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1957 
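	/* Report any XSK frames freed above as completed to the pool before
	 * detaching the pool from the queue.
	 */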
1958 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1959 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1960 		tx_q->xsk_frames_done = 0;
1961 		tx_q->xsk_pool = NULL;
1962 	}
1963 }
1964 
1965 /**
1966  * stmmac_free_tx_skbufs - free TX skb buffers
1967  * @priv: private structure
1968  */
1969 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1970 {
1971 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1972 	u32 queue;
1973 
1974 	for (queue = 0; queue < tx_queue_cnt; queue++)
1975 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1976 }
1977 
1978 /**
1979  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1980  * @priv: private structure
1981  * @dma_conf: structure to take the dma data
1982  * @queue: RX queue index
1983  */
1984 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1985 					 struct stmmac_dma_conf *dma_conf,
1986 					 u32 queue)
1987 {
1988 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1989 
1990 	/* Release the DMA RX socket buffers */
1991 	if (rx_q->xsk_pool)
1992 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1993 	else
1994 		dma_free_rx_skbufs(priv, dma_conf, queue);
1995 
1996 	rx_q->buf_alloc_num = 0;
1997 	rx_q->xsk_pool = NULL;
1998 
1999 	/* Free DMA regions of consistent memory previously allocated */
2000 	if (!priv->extend_desc)
2001 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2002 				  sizeof(struct dma_desc),
2003 				  rx_q->dma_rx, rx_q->dma_rx_phy);
2004 	else
2005 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2006 				  sizeof(struct dma_extended_desc),
2007 				  rx_q->dma_erx, rx_q->dma_rx_phy);
2008 
2009 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
2010 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
2011 
2012 	kfree(rx_q->buf_pool);
2013 	if (rx_q->page_pool)
2014 		page_pool_destroy(rx_q->page_pool);
2015 }
2016 
2017 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2018 				       struct stmmac_dma_conf *dma_conf)
2019 {
2020 	u32 rx_count = priv->plat->rx_queues_to_use;
2021 	u32 queue;
2022 
2023 	/* Free RX queue resources */
2024 	for (queue = 0; queue < rx_count; queue++)
2025 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
2026 }
2027 
2028 /**
2029  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2030  * @priv: private structure
2031  * @dma_conf: structure to take the dma data
2032  * @queue: TX queue index
2033  */
2034 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2035 					 struct stmmac_dma_conf *dma_conf,
2036 					 u32 queue)
2037 {
2038 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2039 	size_t size;
2040 	void *addr;
2041 
2042 	/* Release the DMA TX socket buffers */
2043 	dma_free_tx_skbufs(priv, dma_conf, queue);
2044 
2045 	if (priv->extend_desc) {
2046 		size = sizeof(struct dma_extended_desc);
2047 		addr = tx_q->dma_etx;
2048 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2049 		size = sizeof(struct dma_edesc);
2050 		addr = tx_q->dma_entx;
2051 	} else {
2052 		size = sizeof(struct dma_desc);
2053 		addr = tx_q->dma_tx;
2054 	}
2055 
2056 	size *= dma_conf->dma_tx_size;
2057 
2058 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2059 
2060 	kfree(tx_q->tx_skbuff_dma);
2061 	kfree(tx_q->tx_skbuff);
2062 }
2063 
2064 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2065 				       struct stmmac_dma_conf *dma_conf)
2066 {
2067 	u32 tx_count = priv->plat->tx_queues_to_use;
2068 	u32 queue;
2069 
2070 	/* Free TX queue resources */
2071 	for (queue = 0; queue < tx_count; queue++)
2072 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2073 }
2074 
2075 /**
2076  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2077  * @priv: private structure
2078  * @dma_conf: structure to take the dma data
2079  * @queue: RX queue index
2080  * Description: depending on which descriptor type is in use (extended or
2081  * basic), this function allocates the resources for the RX path of the
2082  * given queue. It also pre-allocates the RX socket buffers in order to
2083  * allow the zero-copy mechanism.
2084  */
2085 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2086 					 struct stmmac_dma_conf *dma_conf,
2087 					 u32 queue)
2088 {
2089 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2090 	struct stmmac_channel *ch = &priv->channel[queue];
2091 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2092 	struct page_pool_params pp_params = { 0 };
2093 	unsigned int dma_buf_sz_pad, num_pages;
2094 	unsigned int napi_id;
2095 	int ret;
2096 
2097 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2098 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2099 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2100 
2101 	rx_q->queue_index = queue;
2102 	rx_q->priv_data = priv;
2103 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2104 
2105 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2106 	pp_params.pool_size = dma_conf->dma_rx_size;
2107 	pp_params.order = order_base_2(num_pages);
2108 	pp_params.nid = dev_to_node(priv->device);
2109 	pp_params.dev = priv->device;
2110 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2111 	pp_params.offset = stmmac_rx_offset(priv);
2112 	pp_params.max_len = dma_conf->dma_buf_sz;
2113 
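	/* With Split Header enabled, start the payload DMA at offset 0 and
	 * let max_len also cover the headroom area, so the whole buffer is
	 * usable for received data.
	 */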
2114 	if (priv->sph) {
2115 		pp_params.offset = 0;
2116 		pp_params.max_len += stmmac_rx_offset(priv);
2117 	}
2118 
2119 	rx_q->page_pool = page_pool_create(&pp_params);
2120 	if (IS_ERR(rx_q->page_pool)) {
2121 		ret = PTR_ERR(rx_q->page_pool);
2122 		rx_q->page_pool = NULL;
2123 		return ret;
2124 	}
2125 
2126 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2127 				 sizeof(*rx_q->buf_pool),
2128 				 GFP_KERNEL);
2129 	if (!rx_q->buf_pool)
2130 		return -ENOMEM;
2131 
2132 	if (priv->extend_desc) {
2133 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2134 						   dma_conf->dma_rx_size *
2135 						   sizeof(struct dma_extended_desc),
2136 						   &rx_q->dma_rx_phy,
2137 						   GFP_KERNEL);
2138 		if (!rx_q->dma_erx)
2139 			return -ENOMEM;
2140 
2141 	} else {
2142 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2143 						  dma_conf->dma_rx_size *
2144 						  sizeof(struct dma_desc),
2145 						  &rx_q->dma_rx_phy,
2146 						  GFP_KERNEL);
2147 		if (!rx_q->dma_rx)
2148 			return -ENOMEM;
2149 	}
2150 
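	/* XDP zero-copy queues are serviced by the combined rx/tx NAPI, so
	 * register the XDP RxQ info against that NAPI; otherwise use the
	 * regular RX NAPI.
	 */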
2151 	if (stmmac_xdp_is_enabled(priv) &&
2152 	    test_bit(queue, priv->af_xdp_zc_qps))
2153 		napi_id = ch->rxtx_napi.napi_id;
2154 	else
2155 		napi_id = ch->rx_napi.napi_id;
2156 
2157 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2158 			       rx_q->queue_index,
2159 			       napi_id);
2160 	if (ret) {
2161 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2162 		return -EINVAL;
2163 	}
2164 
2165 	return 0;
2166 }
2167 
2168 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2169 				       struct stmmac_dma_conf *dma_conf)
2170 {
2171 	u32 rx_count = priv->plat->rx_queues_to_use;
2172 	u32 queue;
2173 	int ret;
2174 
2175 	/* RX queues buffers and DMA */
2176 	for (queue = 0; queue < rx_count; queue++) {
2177 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2178 		if (ret)
2179 			goto err_dma;
2180 	}
2181 
2182 	return 0;
2183 
2184 err_dma:
2185 	free_dma_rx_desc_resources(priv, dma_conf);
2186 
2187 	return ret;
2188 }
2189 
2190 /**
2191  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2192  * @priv: private structure
2193  * @dma_conf: structure to take the dma data
2194  * @queue: TX queue index
2195  * Description: depending on which descriptor type is in use (extended or
2196  * basic), this function allocates the descriptor ring and the per-entry
2197  * bookkeeping arrays (tx_skbuff, tx_skbuff_dma) for the TX path of the
2198  * given queue.
2199  */
2200 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2201 					 struct stmmac_dma_conf *dma_conf,
2202 					 u32 queue)
2203 {
2204 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2205 	size_t size;
2206 	void *addr;
2207 
2208 	tx_q->queue_index = queue;
2209 	tx_q->priv_data = priv;
2210 
2211 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2212 				      sizeof(*tx_q->tx_skbuff_dma),
2213 				      GFP_KERNEL);
2214 	if (!tx_q->tx_skbuff_dma)
2215 		return -ENOMEM;
2216 
2217 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2218 				  sizeof(struct sk_buff *),
2219 				  GFP_KERNEL);
2220 	if (!tx_q->tx_skbuff)
2221 		return -ENOMEM;
2222 
2223 	if (priv->extend_desc)
2224 		size = sizeof(struct dma_extended_desc);
2225 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2226 		size = sizeof(struct dma_edesc);
2227 	else
2228 		size = sizeof(struct dma_desc);
2229 
2230 	size *= dma_conf->dma_tx_size;
2231 
2232 	addr = dma_alloc_coherent(priv->device, size,
2233 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2234 	if (!addr)
2235 		return -ENOMEM;
2236 
2237 	if (priv->extend_desc)
2238 		tx_q->dma_etx = addr;
2239 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2240 		tx_q->dma_entx = addr;
2241 	else
2242 		tx_q->dma_tx = addr;
2243 
2244 	return 0;
2245 }
2246 
2247 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2248 				       struct stmmac_dma_conf *dma_conf)
2249 {
2250 	u32 tx_count = priv->plat->tx_queues_to_use;
2251 	u32 queue;
2252 	int ret;
2253 
2254 	/* TX queues buffers and DMA */
2255 	for (queue = 0; queue < tx_count; queue++) {
2256 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2257 		if (ret)
2258 			goto err_dma;
2259 	}
2260 
2261 	return 0;
2262 
2263 err_dma:
2264 	free_dma_tx_desc_resources(priv, dma_conf);
2265 	return ret;
2266 }
2267 
2268 /**
2269  * alloc_dma_desc_resources - alloc TX/RX resources.
2270  * @priv: private structure
2271  * @dma_conf: structure to take the dma data
2272  * Description: depending on which descriptor type is in use (extended or
2273  * basic), this function allocates the resources for the TX and RX paths.
2274  * In case of reception, for example, it pre-allocates the RX socket
2275  * buffers in order to allow the zero-copy mechanism.
2276  */
2277 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2278 				    struct stmmac_dma_conf *dma_conf)
2279 {
2280 	/* RX Allocation */
2281 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2282 
2283 	if (ret)
2284 		return ret;
2285 
2286 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2287 
2288 	return ret;
2289 }
2290 
2291 /**
2292  * free_dma_desc_resources - free dma desc resources
2293  * @priv: private structure
2294  * @dma_conf: structure to take the dma data
2295  */
2296 static void free_dma_desc_resources(struct stmmac_priv *priv,
2297 				    struct stmmac_dma_conf *dma_conf)
2298 {
2299 	/* Release the DMA TX socket buffers */
2300 	free_dma_tx_desc_resources(priv, dma_conf);
2301 
2302 	/* Release the DMA RX socket buffers later
2303 	 * to ensure all pending XDP_TX buffers are returned.
2304 	 */
2305 	free_dma_rx_desc_resources(priv, dma_conf);
2306 }
2307 
2308 /**
2309  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2310  *  @priv: driver private structure
2311  *  Description: It is used for enabling the rx queues in the MAC
2312  */
2313 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2314 {
2315 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2316 	int queue;
2317 	u8 mode;
2318 
2319 	for (queue = 0; queue < rx_queues_count; queue++) {
2320 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2321 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2322 	}
2323 }
2324 
2325 /**
2326  * stmmac_start_rx_dma - start RX DMA channel
2327  * @priv: driver private structure
2328  * @chan: RX channel index
2329  * Description:
2330  * This starts a RX DMA channel
2331  */
2332 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2333 {
2334 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2335 	stmmac_start_rx(priv, priv->ioaddr, chan);
2336 }
2337 
2338 /**
2339  * stmmac_start_tx_dma - start TX DMA channel
2340  * @priv: driver private structure
2341  * @chan: TX channel index
2342  * Description:
2343  * This starts a TX DMA channel
2344  */
2345 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2346 {
2347 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2348 	stmmac_start_tx(priv, priv->ioaddr, chan);
2349 }
2350 
2351 /**
2352  * stmmac_stop_rx_dma - stop RX DMA channel
2353  * @priv: driver private structure
2354  * @chan: RX channel index
2355  * Description:
2356  * This stops a RX DMA channel
2357  */
2358 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2359 {
2360 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2361 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2362 }
2363 
2364 /**
2365  * stmmac_stop_tx_dma - stop TX DMA channel
2366  * @priv: driver private structure
2367  * @chan: TX channel index
2368  * Description:
2369  * This stops a TX DMA channel
2370  */
2371 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2372 {
2373 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2374 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2375 }
2376 
2377 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2378 {
2379 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2380 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2381 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2382 	u32 chan;
2383 
2384 	for (chan = 0; chan < dma_csr_ch; chan++) {
2385 		struct stmmac_channel *ch = &priv->channel[chan];
2386 		unsigned long flags;
2387 
2388 		spin_lock_irqsave(&ch->lock, flags);
2389 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2390 		spin_unlock_irqrestore(&ch->lock, flags);
2391 	}
2392 }
2393 
2394 /**
2395  * stmmac_start_all_dma - start all RX and TX DMA channels
2396  * @priv: driver private structure
2397  * Description:
2398  * This starts all the RX and TX DMA channels
2399  */
2400 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2401 {
2402 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2403 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2404 	u32 chan = 0;
2405 
2406 	for (chan = 0; chan < rx_channels_count; chan++)
2407 		stmmac_start_rx_dma(priv, chan);
2408 
2409 	for (chan = 0; chan < tx_channels_count; chan++)
2410 		stmmac_start_tx_dma(priv, chan);
2411 }
2412 
2413 /**
2414  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2415  * @priv: driver private structure
2416  * Description:
2417  * This stops the RX and TX DMA channels
2418  */
2419 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2420 {
2421 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2422 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2423 	u32 chan = 0;
2424 
2425 	for (chan = 0; chan < rx_channels_count; chan++)
2426 		stmmac_stop_rx_dma(priv, chan);
2427 
2428 	for (chan = 0; chan < tx_channels_count; chan++)
2429 		stmmac_stop_tx_dma(priv, chan);
2430 }
2431 
2432 /**
2433  *  stmmac_dma_operation_mode - HW DMA operation mode
2434  *  @priv: driver private structure
2435  *  Description: it is used for configuring the DMA operation mode register in
2436  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2437  */
2438 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2439 {
2440 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2441 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2442 	int rxfifosz = priv->plat->rx_fifo_size;
2443 	int txfifosz = priv->plat->tx_fifo_size;
2444 	u32 txmode = 0;
2445 	u32 rxmode = 0;
2446 	u32 chan = 0;
2447 	u8 qmode = 0;
2448 
2449 	if (rxfifosz == 0)
2450 		rxfifosz = priv->dma_cap.rx_fifo_size;
2451 	if (txfifosz == 0)
2452 		txfifosz = priv->dma_cap.tx_fifo_size;
2453 
2454 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2455 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2456 		rxfifosz /= rx_channels_count;
2457 		txfifosz /= tx_channels_count;
2458 	}
2459 
2460 	if (priv->plat->force_thresh_dma_mode) {
2461 		txmode = tc;
2462 		rxmode = tc;
2463 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2464 		/*
2465 		 * In case of GMAC, SF mode can be enabled
2466 		 * to perform the TX COE in HW. This depends on:
2467 		 * 1) TX COE being actually supported;
2468 		 * 2) there being no buggy Jumbo frame support
2469 		 *    that requires not inserting the csum in the TDES.
2470 		 */
2471 		txmode = SF_DMA_MODE;
2472 		rxmode = SF_DMA_MODE;
2473 		priv->xstats.threshold = SF_DMA_MODE;
2474 	} else {
2475 		txmode = tc;
2476 		rxmode = SF_DMA_MODE;
2477 	}
2478 
2479 	/* configure all channels */
2480 	for (chan = 0; chan < rx_channels_count; chan++) {
2481 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2482 		u32 buf_size;
2483 
2484 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2485 
2486 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2487 				rxfifosz, qmode);
2488 
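		/* XSK zero-copy buffers have their own frame size, so program
		 * the DMA buffer size from the pool instead of dma_buf_sz.
		 */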
2489 		if (rx_q->xsk_pool) {
2490 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2491 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2492 					      buf_size,
2493 					      chan);
2494 		} else {
2495 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2496 					      priv->dma_conf.dma_buf_sz,
2497 					      chan);
2498 		}
2499 	}
2500 
2501 	for (chan = 0; chan < tx_channels_count; chan++) {
2502 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2503 
2504 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2505 				txfifosz, qmode);
2506 	}
2507 }
2508 
2509 static void stmmac_xsk_request_timestamp(void *_priv)
2510 {
2511 	struct stmmac_metadata_request *meta_req = _priv;
2512 
2513 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2514 	*meta_req->set_ic = true;
2515 }
2516 
2517 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2518 {
2519 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2520 	struct stmmac_priv *priv = tx_compl->priv;
2521 	struct dma_desc *desc = tx_compl->desc;
2522 	bool found = false;
2523 	u64 ns = 0;
2524 
2525 	if (!priv->hwts_tx_en)
2526 		return 0;
2527 
2528 	/* check tx tstamp status */
2529 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2530 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2531 		found = true;
2532 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2533 		found = true;
2534 	}
2535 
2536 	if (found) {
2537 		ns -= priv->plat->cdc_error_adj;
2538 		return ns_to_ktime(ns);
2539 	}
2540 
2541 	return 0;
2542 }
2543 
2544 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2545 {
2546 	struct timespec64 ts = ns_to_timespec64(launch_time);
2547 	struct stmmac_metadata_request *meta_req = _priv;
2548 
2549 	if (meta_req->tbs & STMMAC_TBS_EN)
2550 		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2551 				    ts.tv_nsec);
2552 }
2553 
2554 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2555 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2556 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2557 	.tmo_request_launch_time	= stmmac_xsk_request_launch_time,
2558 };
2559 
2560 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2561 {
2562 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2563 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2564 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2565 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2566 	unsigned int entry = tx_q->cur_tx;
2567 	struct dma_desc *tx_desc = NULL;
2568 	struct xdp_desc xdp_desc;
2569 	bool work_done = true;
2570 	u32 tx_set_ic_bit = 0;
2571 
2572 	/* Avoids TX time-out as we are sharing with slow path */
2573 	txq_trans_cond_update(nq);
2574 
2575 	budget = min(budget, stmmac_tx_avail(priv, queue));
2576 
2577 	while (budget-- > 0) {
2578 		struct stmmac_metadata_request meta_req;
2579 		struct xsk_tx_metadata *meta = NULL;
2580 		dma_addr_t dma_addr;
2581 		bool set_ic;
2582 
2583 		/* We share the ring with the slow path, so stop XSK TX desc
2584 		 * submission when available TX ring space drops below the threshold.
2585 		 */
2586 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2587 		    !netif_carrier_ok(priv->dev)) {
2588 			work_done = false;
2589 			break;
2590 		}
2591 
2592 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2593 			break;
2594 
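		/* Honour the per-queue maximum SDU configured for EST:
		 * oversized descriptors are counted as drops and skipped.
		 */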
2595 		if (priv->est && priv->est->enable &&
2596 		    priv->est->max_sdu[queue] &&
2597 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2598 			priv->xstats.max_sdu_txq_drop[queue]++;
2599 			continue;
2600 		}
2601 
2602 		if (likely(priv->extend_desc))
2603 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2604 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2605 			tx_desc = &tx_q->dma_entx[entry].basic;
2606 		else
2607 			tx_desc = tx_q->dma_tx + entry;
2608 
2609 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2610 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2611 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2612 
2613 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2614 
2615 		/* To return the XDP buffer to the XSK pool, we simply call
2616 		 * xsk_tx_completed(), so we don't need to fill up
2617 		 * 'buf' and 'xdpf'.
2618 		 */
2619 		tx_q->tx_skbuff_dma[entry].buf = 0;
2620 		tx_q->xdpf[entry] = NULL;
2621 
2622 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2623 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2624 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2625 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2626 
2627 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2628 
2629 		tx_q->tx_count_frames++;
2630 
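		/* Set the IC (Interrupt on Completion) bit only once every
		 * tx_coal_frames descriptors to keep TX completion
		 * interrupts coalesced.
		 */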
2631 		if (!priv->tx_coal_frames[queue])
2632 			set_ic = false;
2633 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2634 			set_ic = true;
2635 		else
2636 			set_ic = false;
2637 
2638 		meta_req.priv = priv;
2639 		meta_req.tx_desc = tx_desc;
2640 		meta_req.set_ic = &set_ic;
2641 		meta_req.tbs = tx_q->tbs;
2642 		meta_req.edesc = &tx_q->dma_entx[entry];
2643 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2644 					&meta_req);
2645 		if (set_ic) {
2646 			tx_q->tx_count_frames = 0;
2647 			stmmac_set_tx_ic(priv, tx_desc);
2648 			tx_set_ic_bit++;
2649 		}
2650 
2651 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2652 				       true, priv->mode, true, true,
2653 				       xdp_desc.len);
2654 
2655 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2656 
2657 		xsk_tx_metadata_to_compl(meta,
2658 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2659 
2660 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2661 		entry = tx_q->cur_tx;
2662 	}
2663 	u64_stats_update_begin(&txq_stats->napi_syncp);
2664 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2665 	u64_stats_update_end(&txq_stats->napi_syncp);
2666 
2667 	if (tx_desc) {
2668 		stmmac_flush_tx_descriptors(priv, queue);
2669 		xsk_tx_release(pool);
2670 	}
2671 
2672 	/* Return true only if both of the following conditions are met:
2673 	 *  a) TX budget is still available;
2674 	 *  b) work_done is true, i.e. the XSK TX desc peek found no more
2675 	 *     pending XSK TX frames for transmission.
2676 	 */
2677 	return !!budget && work_done;
2678 }
2679 
2680 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2681 {
2682 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2683 		tc += 64;
2684 
2685 		if (priv->plat->force_thresh_dma_mode)
2686 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2687 		else
2688 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2689 						      chan);
2690 
2691 		priv->xstats.threshold = tc;
2692 	}
2693 }
2694 
2695 /**
2696  * stmmac_tx_clean - to manage the transmission completion
2697  * @priv: driver private structure
2698  * @budget: napi budget limiting this function's packet handling
2699  * @queue: TX queue index
2700  * @pending_packets: signal to arm the TX coal timer
2701  * Description: it reclaims the transmit resources after transmission completes.
2702  * If some packets still need to be handled, due to TX coalescing, set
2703  * pending_packets to true to make NAPI arm the TX coal timer.
2704  */
2705 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2706 			   bool *pending_packets)
2707 {
2708 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2709 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2710 	unsigned int bytes_compl = 0, pkts_compl = 0;
2711 	unsigned int entry, xmits = 0, count = 0;
2712 	u32 tx_packets = 0, tx_errors = 0;
2713 
2714 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2715 
2716 	tx_q->xsk_frames_done = 0;
2717 
2718 	entry = tx_q->dirty_tx;
2719 
2720 	/* Try to clean all TX completed frames in one shot */
2721 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2722 		struct xdp_frame *xdpf;
2723 		struct sk_buff *skb;
2724 		struct dma_desc *p;
2725 		int status;
2726 
2727 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2728 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2729 			xdpf = tx_q->xdpf[entry];
2730 			skb = NULL;
2731 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2732 			xdpf = NULL;
2733 			skb = tx_q->tx_skbuff[entry];
2734 		} else {
2735 			xdpf = NULL;
2736 			skb = NULL;
2737 		}
2738 
2739 		if (priv->extend_desc)
2740 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2741 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2742 			p = &tx_q->dma_entx[entry].basic;
2743 		else
2744 			p = tx_q->dma_tx + entry;
2745 
2746 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2747 		/* Check if the descriptor is owned by the DMA */
2748 		if (unlikely(status & tx_dma_own))
2749 			break;
2750 
2751 		count++;
2752 
2753 		/* Make sure descriptor fields are read after reading
2754 		 * the own bit.
2755 		 */
2756 		dma_rmb();
2757 
2758 		/* Just consider the last segment and ...*/
2759 		if (likely(!(status & tx_not_ls))) {
2760 			/* ... verify the status error condition */
2761 			if (unlikely(status & tx_err)) {
2762 				tx_errors++;
2763 				if (unlikely(status & tx_err_bump_tc))
2764 					stmmac_bump_dma_threshold(priv, queue);
2765 			} else {
2766 				tx_packets++;
2767 			}
2768 			if (skb) {
2769 				stmmac_get_tx_hwtstamp(priv, p, skb);
2770 			} else if (tx_q->xsk_pool &&
2771 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2772 				struct stmmac_xsk_tx_complete tx_compl = {
2773 					.priv = priv,
2774 					.desc = p,
2775 				};
2776 
2777 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2778 							 &stmmac_xsk_tx_metadata_ops,
2779 							 &tx_compl);
2780 			}
2781 		}
2782 
2783 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2784 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2785 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2786 				dma_unmap_page(priv->device,
2787 					       tx_q->tx_skbuff_dma[entry].buf,
2788 					       tx_q->tx_skbuff_dma[entry].len,
2789 					       DMA_TO_DEVICE);
2790 			else
2791 				dma_unmap_single(priv->device,
2792 						 tx_q->tx_skbuff_dma[entry].buf,
2793 						 tx_q->tx_skbuff_dma[entry].len,
2794 						 DMA_TO_DEVICE);
2795 			tx_q->tx_skbuff_dma[entry].buf = 0;
2796 			tx_q->tx_skbuff_dma[entry].len = 0;
2797 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2798 		}
2799 
2800 		stmmac_clean_desc3(priv, tx_q, p);
2801 
2802 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2803 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2804 
2805 		if (xdpf &&
2806 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2807 			xdp_return_frame_rx_napi(xdpf);
2808 			tx_q->xdpf[entry] = NULL;
2809 		}
2810 
2811 		if (xdpf &&
2812 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2813 			xdp_return_frame(xdpf);
2814 			tx_q->xdpf[entry] = NULL;
2815 		}
2816 
2817 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2818 			tx_q->xsk_frames_done++;
2819 
2820 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2821 			if (likely(skb)) {
2822 				pkts_compl++;
2823 				bytes_compl += skb->len;
2824 				dev_consume_skb_any(skb);
2825 				tx_q->tx_skbuff[entry] = NULL;
2826 			}
2827 		}
2828 
2829 		stmmac_release_tx_desc(priv, p, priv->mode);
2830 
2831 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2832 	}
2833 	tx_q->dirty_tx = entry;
2834 
2835 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2836 				  pkts_compl, bytes_compl);
2837 
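	/* Wake the TX queue if it was stopped and enough descriptors have
	 * been reclaimed.
	 */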
2838 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2839 								queue))) &&
2840 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2841 
2842 		netif_dbg(priv, tx_done, priv->dev,
2843 			  "%s: restart transmit\n", __func__);
2844 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2845 	}
2846 
2847 	if (tx_q->xsk_pool) {
2848 		bool work_done;
2849 
2850 		if (tx_q->xsk_frames_done)
2851 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2852 
2853 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2854 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2855 
2856 		/* For XSK TX, we try to send as many frames as possible.
2857 		 * If the XSK work is done (XSK TX desc ring empty and budget still
2858 		 * available), return "budget - 1" to re-enable the TX IRQ.
2859 		 * Else, return "budget" to make NAPI continue polling.
2860 		 */
2861 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2862 					       STMMAC_XSK_TX_BUDGET_MAX);
2863 		if (work_done)
2864 			xmits = budget - 1;
2865 		else
2866 			xmits = budget;
2867 	}
2868 
2869 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2870 		stmmac_restart_sw_lpi_timer(priv);
2871 
2872 	/* We still have pending packets, let's call for a new scheduling */
2873 	if (tx_q->dirty_tx != tx_q->cur_tx)
2874 		*pending_packets = true;
2875 
2876 	u64_stats_update_begin(&txq_stats->napi_syncp);
2877 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2878 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2879 	u64_stats_inc(&txq_stats->napi.tx_clean);
2880 	u64_stats_update_end(&txq_stats->napi_syncp);
2881 
2882 	priv->xstats.tx_errors += tx_errors;
2883 
2884 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2885 
2886 	/* Combine decisions from TX clean and XSK TX */
2887 	return max(count, xmits);
2888 }
2889 
2890 /**
2891  * stmmac_tx_err - to manage the tx error
2892  * @priv: driver private structure
2893  * @chan: channel index
2894  * Description: it cleans the descriptors and restarts the transmission
2895  * in case of transmission errors.
2896  */
2897 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2898 {
2899 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2900 
2901 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2902 
2903 	stmmac_stop_tx_dma(priv, chan);
2904 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2905 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2906 	stmmac_reset_tx_queue(priv, chan);
2907 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2908 			    tx_q->dma_tx_phy, chan);
2909 	stmmac_start_tx_dma(priv, chan);
2910 
2911 	priv->xstats.tx_errors++;
2912 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2913 }
2914 
2915 /**
2916  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2917  *  @priv: driver private structure
2918  *  @txmode: TX operating mode
2919  *  @rxmode: RX operating mode
2920  *  @chan: channel index
2921  *  Description: it is used for configuring the DMA operation mode at
2922  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2923  *  mode.
2924  */
2925 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2926 					  u32 rxmode, u32 chan)
2927 {
2928 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2929 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2930 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2931 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2932 	int rxfifosz = priv->plat->rx_fifo_size;
2933 	int txfifosz = priv->plat->tx_fifo_size;
2934 
2935 	if (rxfifosz == 0)
2936 		rxfifosz = priv->dma_cap.rx_fifo_size;
2937 	if (txfifosz == 0)
2938 		txfifosz = priv->dma_cap.tx_fifo_size;
2939 
2940 	/* Adjust for real per queue fifo size */
2941 	rxfifosz /= rx_channels_count;
2942 	txfifosz /= tx_channels_count;
2943 
2944 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2945 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2946 }
2947 
2948 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2949 {
2950 	int ret;
2951 
2952 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2953 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2954 	if (ret && (ret != -EINVAL)) {
2955 		stmmac_global_err(priv);
2956 		return true;
2957 	}
2958 
2959 	return false;
2960 }
2961 
2962 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2963 {
2964 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2965 						 &priv->xstats, chan, dir);
2966 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2967 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2968 	struct stmmac_channel *ch = &priv->channel[chan];
2969 	struct napi_struct *rx_napi;
2970 	struct napi_struct *tx_napi;
2971 	unsigned long flags;
2972 
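	/* Queues with an attached XSK pool are serviced by the combined
	 * rx/tx NAPI; the others use the dedicated RX and TX NAPIs.
	 */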
2973 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2974 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2975 
2976 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2977 		if (napi_schedule_prep(rx_napi)) {
2978 			spin_lock_irqsave(&ch->lock, flags);
2979 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2980 			spin_unlock_irqrestore(&ch->lock, flags);
2981 			__napi_schedule(rx_napi);
2982 		}
2983 	}
2984 
2985 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2986 		if (napi_schedule_prep(tx_napi)) {
2987 			spin_lock_irqsave(&ch->lock, flags);
2988 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2989 			spin_unlock_irqrestore(&ch->lock, flags);
2990 			__napi_schedule(tx_napi);
2991 		}
2992 	}
2993 
2994 	return status;
2995 }
2996 
2997 /**
2998  * stmmac_dma_interrupt - DMA ISR
2999  * @priv: driver private structure
3000  * Description: this is the DMA ISR. It is called by the main ISR.
3001  * It invokes the dwmac dma routine and schedules the poll method in case
3002  * some work can be done.
3003  */
3004 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
3005 {
3006 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3007 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3008 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
3009 				tx_channel_count : rx_channel_count;
3010 	u32 chan;
3011 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
3012 
3013 	/* Make sure we never check beyond our status buffer. */
3014 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
3015 		channels_to_check = ARRAY_SIZE(status);
3016 
3017 	for (chan = 0; chan < channels_to_check; chan++)
3018 		status[chan] = stmmac_napi_check(priv, chan,
3019 						 DMA_DIR_RXTX);
3020 
3021 	for (chan = 0; chan < tx_channel_count; chan++) {
3022 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
3023 			/* Try to bump up the dma threshold on this failure */
3024 			stmmac_bump_dma_threshold(priv, chan);
3025 		} else if (unlikely(status[chan] == tx_hard_error)) {
3026 			stmmac_tx_err(priv, chan);
3027 		}
3028 	}
3029 }
3030 
3031 /**
3032  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
3033  * @priv: driver private structure
3034  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
3035  */
3036 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3037 {
3038 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3039 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3040 
3041 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3042 
3043 	if (priv->dma_cap.rmon) {
3044 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3045 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3046 	} else
3047 		netdev_info(priv->dev, "No MAC Management Counters available\n");
3048 }
3049 
3050 /**
3051  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3052  * @priv: driver private structure
3053  * Description:
3054  *  new GMAC chip generations have a register to indicate the
3055  *  presence of optional features/functions.
3056  *  This can also be used to override the value passed through the
3057  *  platform, which is necessary for old MAC10/100 and GMAC chips.
3058  */
3059 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3060 {
3061 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3062 }
3063 
3064 /**
3065  * stmmac_check_ether_addr - check if the MAC addr is valid
3066  * @priv: driver private structure
3067  * Description:
3068  * it verifies whether the MAC address is valid; in case of failure it
3069  * generates a random MAC address
3070  */
3071 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3072 {
3073 	u8 addr[ETH_ALEN];
3074 
3075 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3076 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3077 		if (is_valid_ether_addr(addr))
3078 			eth_hw_addr_set(priv->dev, addr);
3079 		else
3080 			eth_hw_addr_random(priv->dev);
3081 		dev_info(priv->device, "device MAC address %pM\n",
3082 			 priv->dev->dev_addr);
3083 	}
3084 }
3085 
3086 /**
3087  * stmmac_init_dma_engine - DMA init.
3088  * @priv: driver private structure
3089  * Description:
3090  * It initializes the DMA by invoking the specific MAC/GMAC callback.
3091  * Some DMA parameters can be passed from the platform;
3092  * if they are not passed, a default is kept for the MAC or GMAC.
3093  */
3094 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3095 {
3096 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3097 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3098 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3099 	struct stmmac_rx_queue *rx_q;
3100 	struct stmmac_tx_queue *tx_q;
3101 	u32 chan = 0;
3102 	int ret = 0;
3103 
3104 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3105 		netdev_err(priv->dev, "Invalid DMA configuration\n");
3106 		return -EINVAL;
3107 	}
3108 
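	/* Extended descriptors in ring mode need the alternate (larger)
	 * descriptor size to be signalled to the DMA.
	 */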
3109 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3110 		priv->plat->dma_cfg->atds = 1;
3111 
3112 	ret = stmmac_reset(priv, priv->ioaddr);
3113 	if (ret) {
3114 		netdev_err(priv->dev, "Failed to reset the dma\n");
3115 		return ret;
3116 	}
3117 
3118 	/* DMA Configuration */
3119 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3120 
3121 	if (priv->plat->axi)
3122 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3123 
3124 	/* DMA CSR Channel configuration */
3125 	for (chan = 0; chan < dma_csr_ch; chan++) {
3126 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3127 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3128 	}
3129 
3130 	/* DMA RX Channel Configuration */
3131 	for (chan = 0; chan < rx_channels_count; chan++) {
3132 		rx_q = &priv->dma_conf.rx_queue[chan];
3133 
3134 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3135 				    rx_q->dma_rx_phy, chan);
3136 
3137 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3138 				     (rx_q->buf_alloc_num *
3139 				      sizeof(struct dma_desc));
3140 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3141 				       rx_q->rx_tail_addr, chan);
3142 	}
3143 
3144 	/* DMA TX Channel Configuration */
3145 	for (chan = 0; chan < tx_channels_count; chan++) {
3146 		tx_q = &priv->dma_conf.tx_queue[chan];
3147 
3148 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3149 				    tx_q->dma_tx_phy, chan);
3150 
3151 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3152 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3153 				       tx_q->tx_tail_addr, chan);
3154 	}
3155 
3156 	return ret;
3157 }
3158 
3159 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3160 {
3161 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3162 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3163 	struct stmmac_channel *ch;
3164 	struct napi_struct *napi;
3165 
3166 	if (!tx_coal_timer)
3167 		return;
3168 
3169 	ch = &priv->channel[tx_q->queue_index];
3170 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3171 
3172 	/* Arm the timer only if napi is not already scheduled.
3173 	 * If napi is scheduled, try to cancel any pending timer; it will be
3174 	 * armed again by the next scheduled napi.
3175 	 */
3176 	if (unlikely(!napi_is_scheduled(napi)))
3177 		hrtimer_start(&tx_q->txtimer,
3178 			      STMMAC_COAL_TIMER(tx_coal_timer),
3179 			      HRTIMER_MODE_REL);
3180 	else
3181 		hrtimer_try_to_cancel(&tx_q->txtimer);
3182 }
3183 
3184 /**
3185  * stmmac_tx_timer - mitigation sw timer for tx.
3186  * @t: pointer to the TX queue hrtimer
3187  * Description:
3188  * This is the timer handler; it schedules NAPI, which then runs stmmac_tx_clean.
3189  */
3190 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3191 {
3192 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3193 	struct stmmac_priv *priv = tx_q->priv_data;
3194 	struct stmmac_channel *ch;
3195 	struct napi_struct *napi;
3196 
3197 	ch = &priv->channel[tx_q->queue_index];
3198 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3199 
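	/* Disable the TX DMA interrupt for this channel and let NAPI take
	 * over the completion processing.
	 */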
3200 	if (likely(napi_schedule_prep(napi))) {
3201 		unsigned long flags;
3202 
3203 		spin_lock_irqsave(&ch->lock, flags);
3204 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3205 		spin_unlock_irqrestore(&ch->lock, flags);
3206 		__napi_schedule(napi);
3207 	}
3208 
3209 	return HRTIMER_NORESTART;
3210 }
3211 
3212 /**
3213  * stmmac_init_coalesce - init mitigation options.
3214  * @priv: driver private structure
3215  * Description:
3216  * This inits the coalesce parameters: i.e. timer rate,
3217  * timer handler and default threshold used for enabling the
3218  * interrupt on completion bit.
3219  */
3220 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3221 {
3222 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3223 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3224 	u32 chan;
3225 
3226 	for (chan = 0; chan < tx_channel_count; chan++) {
3227 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3228 
3229 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3230 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3231 
3232 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3233 		tx_q->txtimer.function = stmmac_tx_timer;
3234 	}
3235 
3236 	for (chan = 0; chan < rx_channel_count; chan++)
3237 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3238 }
3239 
3240 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3241 {
3242 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3243 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3244 	u32 chan;
3245 
3246 	/* set TX ring length */
3247 	for (chan = 0; chan < tx_channels_count; chan++)
3248 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3249 				       (priv->dma_conf.dma_tx_size - 1), chan);
3250 
3251 	/* set RX ring length */
3252 	for (chan = 0; chan < rx_channels_count; chan++)
3253 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3254 				       (priv->dma_conf.dma_rx_size - 1), chan);
3255 }
3256 
3257 /**
3258  *  stmmac_set_tx_queue_weight - Set TX queue weight
3259  *  @priv: driver private structure
3260  *  Description: It is used for setting the TX queue weights
3261  */
3262 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3263 {
3264 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3265 	u32 weight;
3266 	u32 queue;
3267 
3268 	for (queue = 0; queue < tx_queues_count; queue++) {
3269 		weight = priv->plat->tx_queues_cfg[queue].weight;
3270 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3271 	}
3272 }
3273 
3274 /**
3275  *  stmmac_configure_cbs - Configure CBS in TX queue
3276  *  @priv: driver private structure
3277  *  Description: It is used for configuring CBS in AVB TX queues
3278  */
3279 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3280 {
3281 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3282 	u32 mode_to_use;
3283 	u32 queue;
3284 
3285 	/* queue 0 is reserved for legacy traffic */
3286 	for (queue = 1; queue < tx_queues_count; queue++) {
3287 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3288 		if (mode_to_use == MTL_QUEUE_DCB)
3289 			continue;
3290 
3291 		stmmac_config_cbs(priv, priv->hw,
3292 				priv->plat->tx_queues_cfg[queue].send_slope,
3293 				priv->plat->tx_queues_cfg[queue].idle_slope,
3294 				priv->plat->tx_queues_cfg[queue].high_credit,
3295 				priv->plat->tx_queues_cfg[queue].low_credit,
3296 				queue);
3297 	}
3298 }
3299 
3300 /**
3301  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3302  *  @priv: driver private structure
3303  *  Description: It is used for mapping RX queues to RX dma channels
3304  */
3305 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3306 {
3307 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3308 	u32 queue;
3309 	u32 chan;
3310 
3311 	for (queue = 0; queue < rx_queues_count; queue++) {
3312 		chan = priv->plat->rx_queues_cfg[queue].chan;
3313 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3314 	}
3315 }
3316 
3317 /**
3318  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3319  *  @priv: driver private structure
3320  *  Description: It is used for configuring the RX Queue Priority
3321  */
3322 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3323 {
3324 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3325 	u32 queue;
3326 	u32 prio;
3327 
3328 	for (queue = 0; queue < rx_queues_count; queue++) {
3329 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3330 			continue;
3331 
3332 		prio = priv->plat->rx_queues_cfg[queue].prio;
3333 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3334 	}
3335 }
3336 
3337 /**
3338  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3339  *  @priv: driver private structure
3340  *  Description: It is used for configuring the TX Queue Priority
3341  */
3342 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3343 {
3344 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3345 	u32 queue;
3346 	u32 prio;
3347 
3348 	for (queue = 0; queue < tx_queues_count; queue++) {
3349 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3350 			continue;
3351 
3352 		prio = priv->plat->tx_queues_cfg[queue].prio;
3353 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3354 	}
3355 }
3356 
3357 /**
3358  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3359  *  @priv: driver private structure
3360  *  Description: It is used for configuring the RX queue routing
3361  */
3362 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3363 {
3364 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3365 	u32 queue;
3366 	u8 packet;
3367 
3368 	for (queue = 0; queue < rx_queues_count; queue++) {
3369 		/* no specific packet type routing specified for the queue */
3370 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3371 			continue;
3372 
3373 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3374 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3375 	}
3376 }
3377 
3378 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3379 {
3380 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3381 		priv->rss.enable = false;
3382 		return;
3383 	}
3384 
3385 	if (priv->dev->features & NETIF_F_RXHASH)
3386 		priv->rss.enable = true;
3387 	else
3388 		priv->rss.enable = false;
3389 
3390 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3391 			     priv->plat->rx_queues_to_use);
3392 }
3393 
3394 /**
3395  *  stmmac_mtl_configuration - Configure MTL
3396  *  @priv: driver private structure
3397  *  Description: It is used for configuring MTL
3398  */
3399 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3400 {
3401 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3402 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3403 
3404 	if (tx_queues_count > 1)
3405 		stmmac_set_tx_queue_weight(priv);
3406 
3407 	/* Configure MTL RX algorithms */
3408 	if (rx_queues_count > 1)
3409 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3410 				priv->plat->rx_sched_algorithm);
3411 
3412 	/* Configure MTL TX algorithms */
3413 	if (tx_queues_count > 1)
3414 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3415 				priv->plat->tx_sched_algorithm);
3416 
3417 	/* Configure CBS in AVB TX queues */
3418 	if (tx_queues_count > 1)
3419 		stmmac_configure_cbs(priv);
3420 
3421 	/* Map RX MTL to DMA channels */
3422 	stmmac_rx_queue_dma_chan_map(priv);
3423 
3424 	/* Enable MAC RX Queues */
3425 	stmmac_mac_enable_rx_queues(priv);
3426 
3427 	/* Set RX priorities */
3428 	if (rx_queues_count > 1)
3429 		stmmac_mac_config_rx_queues_prio(priv);
3430 
3431 	/* Set TX priorities */
3432 	if (tx_queues_count > 1)
3433 		stmmac_mac_config_tx_queues_prio(priv);
3434 
3435 	/* Set RX routing */
3436 	if (rx_queues_count > 1)
3437 		stmmac_mac_config_rx_queues_routing(priv);
3438 
3439 	/* Receive Side Scaling */
3440 	if (rx_queues_count > 1)
3441 		stmmac_mac_config_rss(priv);
3442 }
3443 
3444 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3445 {
3446 	if (priv->dma_cap.asp) {
3447 		netdev_info(priv->dev, "Enabling Safety Features\n");
3448 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3449 					  priv->plat->safety_feat_cfg);
3450 	} else {
3451 		netdev_info(priv->dev, "No Safety Features support found\n");
3452 	}
3453 }
3454 
3455 /**
3456  * stmmac_hw_setup - setup mac in a usable state.
3457  *  @dev : pointer to the device structure.
3458  *  @ptp_register: register PTP if set
3459  *  Description:
3460  *  this is the main function to set up the HW in a usable state: the DMA
3461  *  engine is reset, the core registers are configured (e.g. AXI,
3462  *  Checksum features, timers) and the DMA is ready to start receiving
3463  *  and transmitting.
3464  *  Return value:
3465  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3466  *  file on failure.
3467  */
3468 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3469 {
3470 	struct stmmac_priv *priv = netdev_priv(dev);
3471 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3472 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3473 	bool sph_en;
3474 	u32 chan;
3475 	int ret;
3476 
3477 	/* Make sure RX clock is enabled */
3478 	if (priv->hw->phylink_pcs)
3479 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3480 
3481 	/* DMA initialization and SW reset */
3482 	ret = stmmac_init_dma_engine(priv);
3483 	if (ret < 0) {
3484 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3485 			   __func__);
3486 		return ret;
3487 	}
3488 
3489 	/* Copy the MAC addr into the HW  */
3490 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3491 
3492 	/* PS and related bits will be programmed according to the speed */
3493 	if (priv->hw->pcs) {
3494 		int speed = priv->plat->mac_port_sel_speed;
3495 
3496 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3497 		    (speed == SPEED_1000)) {
3498 			priv->hw->ps = speed;
3499 		} else {
3500 			dev_warn(priv->device, "invalid port speed\n");
3501 			priv->hw->ps = 0;
3502 		}
3503 	}
3504 
3505 	/* Initialize the MAC Core */
3506 	stmmac_core_init(priv, priv->hw, dev);
3507 
3508 	/* Initialize MTL */
3509 	stmmac_mtl_configuration(priv);
3510 
3511 	/* Initialize Safety Features */
3512 	stmmac_safety_feat_configuration(priv);
3513 
3514 	ret = stmmac_rx_ipc(priv, priv->hw);
3515 	if (!ret) {
3516 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3517 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3518 		priv->hw->rx_csum = 0;
3519 	}
3520 
3521 	/* Enable the MAC Rx/Tx */
3522 	stmmac_mac_set(priv, priv->ioaddr, true);
3523 
3524 	/* Set the HW DMA mode and the COE */
3525 	stmmac_dma_operation_mode(priv);
3526 
3527 	stmmac_mmc_setup(priv);
3528 
3529 	if (ptp_register) {
3530 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3531 		if (ret < 0)
3532 			netdev_warn(priv->dev,
3533 				    "failed to enable PTP reference clock: %pe\n",
3534 				    ERR_PTR(ret));
3535 	}
3536 
3537 	ret = stmmac_init_ptp(priv);
3538 	if (ret == -EOPNOTSUPP)
3539 		netdev_info(priv->dev, "PTP not supported by HW\n");
3540 	else if (ret)
3541 		netdev_warn(priv->dev, "PTP init failed\n");
3542 	else if (ptp_register)
3543 		stmmac_ptp_register(priv);
3544 
3545 	if (priv->use_riwt) {
3546 		u32 queue;
3547 
3548 		for (queue = 0; queue < rx_cnt; queue++) {
3549 			if (!priv->rx_riwt[queue])
3550 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3551 
3552 			stmmac_rx_watchdog(priv, priv->ioaddr,
3553 					   priv->rx_riwt[queue], queue);
3554 		}
3555 	}
3556 
3557 	if (priv->hw->pcs)
3558 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3559 
3560 	/* set TX and RX rings length */
3561 	stmmac_set_rings_length(priv);
3562 
3563 	/* Enable TSO */
3564 	if (priv->tso) {
3565 		for (chan = 0; chan < tx_cnt; chan++) {
3566 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3567 
3568 			/* TSO and TBS cannot co-exist */
3569 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3570 				continue;
3571 
3572 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3573 		}
3574 	}
3575 
3576 	/* Enable Split Header */
3577 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3578 	for (chan = 0; chan < rx_cnt; chan++)
3579 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3580 
3581 
3582 	/* VLAN Tag Insertion */
3583 	if (priv->dma_cap.vlins)
3584 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3585 
3586 	/* TBS */
3587 	for (chan = 0; chan < tx_cnt; chan++) {
3588 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3589 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3590 
3591 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3592 	}
3593 
3594 	/* Configure real RX and TX queues */
3595 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3596 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3597 
3598 	/* Start the ball rolling... */
3599 	stmmac_start_all_dma(priv);
3600 
3601 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3602 
3603 	return 0;
3604 }
3605 
3606 static void stmmac_hw_teardown(struct net_device *dev)
3607 {
3608 	struct stmmac_priv *priv = netdev_priv(dev);
3609 
3610 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3611 }
3612 
3613 static void stmmac_free_irq(struct net_device *dev,
3614 			    enum request_irq_err irq_err, int irq_idx)
3615 {
3616 	struct stmmac_priv *priv = netdev_priv(dev);
3617 	int j;
3618 
3619 	switch (irq_err) {
3620 	case REQ_IRQ_ERR_ALL:
3621 		irq_idx = priv->plat->tx_queues_to_use;
3622 		fallthrough;
3623 	case REQ_IRQ_ERR_TX:
3624 		for (j = irq_idx - 1; j >= 0; j--) {
3625 			if (priv->tx_irq[j] > 0) {
3626 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3627 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3628 			}
3629 		}
3630 		irq_idx = priv->plat->rx_queues_to_use;
3631 		fallthrough;
3632 	case REQ_IRQ_ERR_RX:
3633 		for (j = irq_idx - 1; j >= 0; j--) {
3634 			if (priv->rx_irq[j] > 0) {
3635 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3636 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3637 			}
3638 		}
3639 
3640 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3641 			free_irq(priv->sfty_ue_irq, dev);
3642 		fallthrough;
3643 	case REQ_IRQ_ERR_SFTY_UE:
3644 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3645 			free_irq(priv->sfty_ce_irq, dev);
3646 		fallthrough;
3647 	case REQ_IRQ_ERR_SFTY_CE:
3648 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3649 			free_irq(priv->lpi_irq, dev);
3650 		fallthrough;
3651 	case REQ_IRQ_ERR_LPI:
3652 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3653 			free_irq(priv->wol_irq, dev);
3654 		fallthrough;
3655 	case REQ_IRQ_ERR_SFTY:
3656 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3657 			free_irq(priv->sfty_irq, dev);
3658 		fallthrough;
3659 	case REQ_IRQ_ERR_WOL:
3660 		free_irq(dev->irq, dev);
3661 		fallthrough;
3662 	case REQ_IRQ_ERR_MAC:
3663 	case REQ_IRQ_ERR_NO:
3664 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3665 		break;
3666 	}
3667 }
3668 
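/* As a rough illustration of the staged unwind above: if, in
 * stmmac_request_irq_multi_msi() below, requesting the third RX MSI vector
 * fails (i == 2), the error path calls
 *
 *	stmmac_free_irq(dev, REQ_IRQ_ERR_RX, 2);
 *
 * which frees rx_irq[1] and rx_irq[0] and then falls through the remaining
 * cases to release the safety, LPI and WoL lines that were requested earlier
 * and finally the MAC interrupt itself, i.e. exactly the IRQs that had been
 * successfully requested before the failure. No TX vectors are touched
 * because none had been requested yet.
 */
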
3669 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3670 {
3671 	struct stmmac_priv *priv = netdev_priv(dev);
3672 	enum request_irq_err irq_err;
3673 	cpumask_t cpu_mask;
3674 	int irq_idx = 0;
3675 	char *int_name;
3676 	int ret;
3677 	int i;
3678 
3679 	/* For common interrupt */
3680 	int_name = priv->int_name_mac;
3681 	sprintf(int_name, "%s:%s", dev->name, "mac");
3682 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3683 			  0, int_name, dev);
3684 	if (unlikely(ret < 0)) {
3685 		netdev_err(priv->dev,
3686 			   "%s: alloc mac MSI %d (error: %d)\n",
3687 			   __func__, dev->irq, ret);
3688 		irq_err = REQ_IRQ_ERR_MAC;
3689 		goto irq_error;
3690 	}
3691 
3692 	/* Request the Wake IRQ in case another line
3693 	 * is used for WoL
3694 	 */
3695 	priv->wol_irq_disabled = true;
3696 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3697 		int_name = priv->int_name_wol;
3698 		sprintf(int_name, "%s:%s", dev->name, "wol");
3699 		ret = request_irq(priv->wol_irq,
3700 				  stmmac_mac_interrupt,
3701 				  0, int_name, dev);
3702 		if (unlikely(ret < 0)) {
3703 			netdev_err(priv->dev,
3704 				   "%s: alloc wol MSI %d (error: %d)\n",
3705 				   __func__, priv->wol_irq, ret);
3706 			irq_err = REQ_IRQ_ERR_WOL;
3707 			goto irq_error;
3708 		}
3709 	}
3710 
3711 	/* Request the LPI IRQ in case another line
3712 	 * is used for LPI
3713 	 */
3714 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3715 		int_name = priv->int_name_lpi;
3716 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3717 		ret = request_irq(priv->lpi_irq,
3718 				  stmmac_mac_interrupt,
3719 				  0, int_name, dev);
3720 		if (unlikely(ret < 0)) {
3721 			netdev_err(priv->dev,
3722 				   "%s: alloc lpi MSI %d (error: %d)\n",
3723 				   __func__, priv->lpi_irq, ret);
3724 			irq_err = REQ_IRQ_ERR_LPI;
3725 			goto irq_error;
3726 		}
3727 	}
3728 
3729 	/* Request the common Safety Feature Correctable/Uncorrectable
3730 	 * Error line in case another line is used
3731 	 */
3732 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3733 		int_name = priv->int_name_sfty;
3734 		sprintf(int_name, "%s:%s", dev->name, "safety");
3735 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3736 				  0, int_name, dev);
3737 		if (unlikely(ret < 0)) {
3738 			netdev_err(priv->dev,
3739 				   "%s: alloc sfty MSI %d (error: %d)\n",
3740 				   __func__, priv->sfty_irq, ret);
3741 			irq_err = REQ_IRQ_ERR_SFTY;
3742 			goto irq_error;
3743 		}
3744 	}
3745 
3746 	/* Request the Safety Feature Correctable Error line in
3747 	 * case another line is used
3748 	 */
3749 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3750 		int_name = priv->int_name_sfty_ce;
3751 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3752 		ret = request_irq(priv->sfty_ce_irq,
3753 				  stmmac_safety_interrupt,
3754 				  0, int_name, dev);
3755 		if (unlikely(ret < 0)) {
3756 			netdev_err(priv->dev,
3757 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3758 				   __func__, priv->sfty_ce_irq, ret);
3759 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3760 			goto irq_error;
3761 		}
3762 	}
3763 
3764 	/* Request the Safety Feature Uncorrectable Error line in
3765 	 * case another line is used
3766 	 */
3767 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3768 		int_name = priv->int_name_sfty_ue;
3769 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3770 		ret = request_irq(priv->sfty_ue_irq,
3771 				  stmmac_safety_interrupt,
3772 				  0, int_name, dev);
3773 		if (unlikely(ret < 0)) {
3774 			netdev_err(priv->dev,
3775 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3776 				   __func__, priv->sfty_ue_irq, ret);
3777 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3778 			goto irq_error;
3779 		}
3780 	}
3781 
3782 	/* Request Rx MSI irq */
3783 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3784 		if (i >= MTL_MAX_RX_QUEUES)
3785 			break;
3786 		if (priv->rx_irq[i] == 0)
3787 			continue;
3788 
3789 		int_name = priv->int_name_rx_irq[i];
3790 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3791 		ret = request_irq(priv->rx_irq[i],
3792 				  stmmac_msi_intr_rx,
3793 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3794 		if (unlikely(ret < 0)) {
3795 			netdev_err(priv->dev,
3796 				   "%s: alloc rx-%d MSI %d (error: %d)\n",
3797 				   __func__, i, priv->rx_irq[i], ret);
3798 			irq_err = REQ_IRQ_ERR_RX;
3799 			irq_idx = i;
3800 			goto irq_error;
3801 		}
3802 		cpumask_clear(&cpu_mask);
3803 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3804 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3805 	}
3806 
3807 	/* Request Tx MSI irq */
3808 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3809 		if (i >= MTL_MAX_TX_QUEUES)
3810 			break;
3811 		if (priv->tx_irq[i] == 0)
3812 			continue;
3813 
3814 		int_name = priv->int_name_tx_irq[i];
3815 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3816 		ret = request_irq(priv->tx_irq[i],
3817 				  stmmac_msi_intr_tx,
3818 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3819 		if (unlikely(ret < 0)) {
3820 			netdev_err(priv->dev,
3821 				   "%s: alloc tx-%d MSI %d (error: %d)\n",
3822 				   __func__, i, priv->tx_irq[i], ret);
3823 			irq_err = REQ_IRQ_ERR_TX;
3824 			irq_idx = i;
3825 			goto irq_error;
3826 		}
3827 		cpumask_clear(&cpu_mask);
3828 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3829 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3830 	}
3831 
3832 	return 0;
3833 
3834 irq_error:
3835 	stmmac_free_irq(dev, irq_err, irq_idx);
3836 	return ret;
3837 }
3838 
3839 static int stmmac_request_irq_single(struct net_device *dev)
3840 {
3841 	struct stmmac_priv *priv = netdev_priv(dev);
3842 	enum request_irq_err irq_err;
3843 	int ret;
3844 
3845 	ret = request_irq(dev->irq, stmmac_interrupt,
3846 			  IRQF_SHARED, dev->name, dev);
3847 	if (unlikely(ret < 0)) {
3848 		netdev_err(priv->dev,
3849 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3850 			   __func__, dev->irq, ret);
3851 		irq_err = REQ_IRQ_ERR_MAC;
3852 		goto irq_error;
3853 	}
3854 
3855 	/* Request the Wake IRQ in case another line
3856 	 * is used for WoL
3857 	 */
3858 	priv->wol_irq_disabled = true;
3859 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3860 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3861 				  IRQF_SHARED, dev->name, dev);
3862 		if (unlikely(ret < 0)) {
3863 			netdev_err(priv->dev,
3864 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3865 				   __func__, priv->wol_irq, ret);
3866 			irq_err = REQ_IRQ_ERR_WOL;
3867 			goto irq_error;
3868 		}
3869 	}
3870 
3871 	/* Request the LPI IRQ in case another line is used for LPI */
3872 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3873 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3874 				  IRQF_SHARED, dev->name, dev);
3875 		if (unlikely(ret < 0)) {
3876 			netdev_err(priv->dev,
3877 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3878 				   __func__, priv->lpi_irq, ret);
3879 			irq_err = REQ_IRQ_ERR_LPI;
3880 			goto irq_error;
3881 		}
3882 	}
3883 
3884 	/* Request the common Safety Feature Correctable/Uncorrectable
3885 	 * Error line in case another line is used
3886 	 */
3887 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3888 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3889 				  IRQF_SHARED, dev->name, dev);
3890 		if (unlikely(ret < 0)) {
3891 			netdev_err(priv->dev,
3892 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3893 				   __func__, priv->sfty_irq, ret);
3894 			irq_err = REQ_IRQ_ERR_SFTY;
3895 			goto irq_error;
3896 		}
3897 	}
3898 
3899 	return 0;
3900 
3901 irq_error:
3902 	stmmac_free_irq(dev, irq_err, 0);
3903 	return ret;
3904 }
3905 
3906 static int stmmac_request_irq(struct net_device *dev)
3907 {
3908 	struct stmmac_priv *priv = netdev_priv(dev);
3909 	int ret;
3910 
3911 	/* Request the IRQ lines */
3912 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3913 		ret = stmmac_request_irq_multi_msi(dev);
3914 	else
3915 		ret = stmmac_request_irq_single(dev);
3916 
3917 	return ret;
3918 }
3919 
3920 /**
3921  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3922  *  @priv: driver private structure
3923  *  @mtu: MTU to setup the dma queue and buf with
3924  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3925  *  Allocate the Tx/Rx DMA queues and initialize them.
3926  *  Return value:
3927  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3928  */
3929 static struct stmmac_dma_conf *
3930 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3931 {
3932 	struct stmmac_dma_conf *dma_conf;
3933 	int chan, bfsize, ret;
3934 
3935 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3936 	if (!dma_conf) {
3937 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3938 			   __func__);
3939 		return ERR_PTR(-ENOMEM);
3940 	}
3941 
3942 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3943 	if (bfsize < 0)
3944 		bfsize = 0;
3945 
3946 	if (bfsize < BUF_SIZE_16KiB)
3947 		bfsize = stmmac_set_bfsize(mtu, 0);
3948 
3949 	dma_conf->dma_buf_sz = bfsize;
3950 	/* Choose the tx/rx size from the one already defined in the
3951 	 * priv struct, if defined.
3952 	 */
3953 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3954 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3955 
3956 	if (!dma_conf->dma_tx_size)
3957 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3958 	if (!dma_conf->dma_rx_size)
3959 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3960 
3961 	/* Earlier check for TBS */
3962 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3963 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3964 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3965 
3966 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3967 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3968 	}
3969 
3970 	ret = alloc_dma_desc_resources(priv, dma_conf);
3971 	if (ret < 0) {
3972 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3973 			   __func__);
3974 		goto alloc_error;
3975 	}
3976 
3977 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3978 	if (ret < 0) {
3979 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3980 			   __func__);
3981 		goto init_error;
3982 	}
3983 
3984 	return dma_conf;
3985 
3986 init_error:
3987 	free_dma_desc_resources(priv, dma_conf);
3988 alloc_error:
3989 	kfree(dma_conf);
3990 	return ERR_PTR(ret);
3991 }
3992 
3993 /**
3994  *  __stmmac_open - open entry point of the driver
3995  *  @dev : pointer to the device structure.
3996  *  @dma_conf :  structure to take the dma data
3997  *  Description:
3998  *  This function is the open entry point of the driver.
3999  *  Return value:
4000  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4001  *  file on failure.
4002  */
4003 static int __stmmac_open(struct net_device *dev,
4004 			 struct stmmac_dma_conf *dma_conf)
4005 {
4006 	struct stmmac_priv *priv = netdev_priv(dev);
4007 	int mode = priv->plat->phy_interface;
4008 	u32 chan;
4009 	int ret;
4010 
4011 	/* Initialise the tx lpi timer, converting from msec to usec */
4012 	if (!priv->tx_lpi_timer)
4013 		priv->tx_lpi_timer = eee_timer * 1000;
4014 
4015 	ret = pm_runtime_resume_and_get(priv->device);
4016 	if (ret < 0)
4017 		return ret;
4018 
4019 	if ((!priv->hw->xpcs ||
4020 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
4021 		ret = stmmac_init_phy(dev);
4022 		if (ret) {
4023 			netdev_err(priv->dev,
4024 				   "%s: Cannot attach to PHY (error: %d)\n",
4025 				   __func__, ret);
4026 			goto init_phy_error;
4027 		}
4028 	}
4029 
4030 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
4031 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
4032 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
4033 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4034 
4035 	stmmac_reset_queues_param(priv);
4036 
4037 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
4038 	    priv->plat->serdes_powerup) {
4039 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4040 		if (ret < 0) {
4041 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4042 				   __func__);
4043 			goto init_error;
4044 		}
4045 	}
4046 
4047 	ret = stmmac_hw_setup(dev, true);
4048 	if (ret < 0) {
4049 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4050 		goto init_error;
4051 	}
4052 
4053 	stmmac_init_coalesce(priv);
4054 
4055 	phylink_start(priv->phylink);
4056 	/* We may have called phylink_speed_down before */
4057 	phylink_speed_up(priv->phylink);
4058 
4059 	ret = stmmac_request_irq(dev);
4060 	if (ret)
4061 		goto irq_error;
4062 
4063 	stmmac_enable_all_queues(priv);
4064 	netif_tx_start_all_queues(priv->dev);
4065 	stmmac_enable_all_dma_irq(priv);
4066 
4067 	return 0;
4068 
4069 irq_error:
4070 	phylink_stop(priv->phylink);
4071 
4072 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4073 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4074 
4075 	stmmac_hw_teardown(dev);
4076 init_error:
4077 	phylink_disconnect_phy(priv->phylink);
4078 init_phy_error:
4079 	pm_runtime_put(priv->device);
4080 	return ret;
4081 }
4082 
4083 static int stmmac_open(struct net_device *dev)
4084 {
4085 	struct stmmac_priv *priv = netdev_priv(dev);
4086 	struct stmmac_dma_conf *dma_conf;
4087 	int ret;
4088 
4089 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4090 	if (IS_ERR(dma_conf))
4091 		return PTR_ERR(dma_conf);
4092 
4093 	ret = __stmmac_open(dev, dma_conf);
4094 	if (ret)
4095 		free_dma_desc_resources(priv, dma_conf);
4096 
4097 	kfree(dma_conf);
4098 	return ret;
4099 }
4100 
4101 /**
4102  *  stmmac_release - close entry point of the driver
4103  *  @dev : device pointer.
4104  *  Description:
4105  *  This is the stop entry point of the driver.
4106  */
4107 static int stmmac_release(struct net_device *dev)
4108 {
4109 	struct stmmac_priv *priv = netdev_priv(dev);
4110 	u32 chan;
4111 
4112 	if (device_may_wakeup(priv->device))
4113 		phylink_speed_down(priv->phylink, false);
4114 	/* Stop and disconnect the PHY */
4115 	phylink_stop(priv->phylink);
4116 	phylink_disconnect_phy(priv->phylink);
4117 
4118 	stmmac_disable_all_queues(priv);
4119 
4120 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4121 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4122 
4123 	netif_tx_disable(dev);
4124 
4125 	/* Free the IRQ lines */
4126 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4127 
4128 	/* Stop TX/RX DMA and clear the descriptors */
4129 	stmmac_stop_all_dma(priv);
4130 
4131 	/* Release and free the Rx/Tx resources */
4132 	free_dma_desc_resources(priv, &priv->dma_conf);
4133 
4134 	/* Disable the MAC Rx/Tx */
4135 	stmmac_mac_set(priv, priv->ioaddr, false);
4136 
4137 	/* Power down the SerDes if there is one */
4138 	if (priv->plat->serdes_powerdown)
4139 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4140 
4141 	stmmac_release_ptp(priv);
4142 
4143 	if (stmmac_fpe_supported(priv))
4144 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4145 
4146 	pm_runtime_put(priv->device);
4147 
4148 	return 0;
4149 }
4150 
4151 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4152 			       struct stmmac_tx_queue *tx_q)
4153 {
4154 	u16 tag = 0x0, inner_tag = 0x0;
4155 	u32 inner_type = 0x0;
4156 	struct dma_desc *p;
4157 
4158 	if (!priv->dma_cap.vlins)
4159 		return false;
4160 	if (!skb_vlan_tag_present(skb))
4161 		return false;
4162 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4163 		inner_tag = skb_vlan_tag_get(skb);
4164 		inner_type = STMMAC_VLAN_INSERT;
4165 	}
4166 
4167 	tag = skb_vlan_tag_get(skb);
4168 
4169 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4170 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4171 	else
4172 		p = &tx_q->dma_tx[tx_q->cur_tx];
4173 
4174 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4175 		return false;
4176 
4177 	stmmac_set_tx_owner(priv, p);
4178 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4179 	return true;
4180 }
4181 
4182 /**
4183  *  stmmac_tso_allocator - Allocate and fill descriptors for a TSO payload
4184  *  @priv: driver private structure
4185  *  @des: buffer start address
4186  *  @total_len: total length to fill in descriptors
4187  *  @last_segment: condition for the last descriptor
4188  *  @queue: TX queue index
4189  *  Description:
4190  *  This function fills a descriptor and requests new descriptors according
4191  *  to the buffer length to fill.
4192  */
4193 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4194 				 int total_len, bool last_segment, u32 queue)
4195 {
4196 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4197 	struct dma_desc *desc;
4198 	u32 buff_size;
4199 	int tmp_len;
4200 
4201 	tmp_len = total_len;
4202 
4203 	while (tmp_len > 0) {
4204 		dma_addr_t curr_addr;
4205 
4206 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4207 						priv->dma_conf.dma_tx_size);
4208 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4209 
4210 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4211 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4212 		else
4213 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4214 
4215 		curr_addr = des + (total_len - tmp_len);
4216 		stmmac_set_desc_addr(priv, desc, curr_addr);
4217 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4218 			    TSO_MAX_BUFF_SIZE : tmp_len;
4219 
4220 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4221 				0, 1,
4222 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4223 				0, 0);
4224 
4225 		tmp_len -= TSO_MAX_BUFF_SIZE;
4226 	}
4227 }
4228 
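/* For example, with TSO_MAX_BUFF_SIZE == SZ_16K - 1 (16383 bytes), a
 * 40000-byte payload passed to stmmac_tso_allocator() is spread over three
 * descriptors carrying 16383, 16383 and 7234 bytes respectively; when
 * @last_segment is set, only the final descriptor is flagged as the last
 * segment because only then is tmp_len <= TSO_MAX_BUFF_SIZE.
 */
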
4229 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4230 {
4231 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4232 	int desc_size;
4233 
4234 	if (likely(priv->extend_desc))
4235 		desc_size = sizeof(struct dma_extended_desc);
4236 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4237 		desc_size = sizeof(struct dma_edesc);
4238 	else
4239 		desc_size = sizeof(struct dma_desc);
4240 
4241 	/* The own bit must be the latest setting done when preparing the
4242 	 * descriptor, and then a barrier is needed to make sure that
4243 	 * all is coherent before granting the DMA engine.
4244 	 */
4245 	wmb();
4246 
4247 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4248 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4249 }
4250 
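/* As a concrete example, assuming the basic four-word descriptor layout
 * (sizeof(struct dma_desc) == 16) and neither extended descriptors nor TBS
 * in use: with cur_tx == 5, stmmac_flush_tx_descriptors() writes
 *
 *	tx_tail_addr = dma_tx_phy + 5 * 16;
 *
 * to the channel tail pointer, so the DMA engine advances through the
 * descriptors prepared before index 5 (at this point cur_tx already
 * references the next free descriptor).
 */
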
4251 /**
4252  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4253  *  @skb : the socket buffer
4254  *  @dev : device pointer
4255  *  Description: this is the transmit function that is called on TSO frames
4256  *  (support available on GMAC4 and newer chips).
4257  *  The diagram below shows the ring programming in the case of TSO frames:
4258  *
4259  *  First Descriptor
4260  *   --------
4261  *   | DES0 |---> buffer1 = L2/L3/L4 header
4262  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4263  *   |      |     width is 32-bit, but we never use it.
4264  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4265  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4266  *   |      |     or 48-bit, and we always use it.
4267  *   | DES2 |---> buffer1 len
4268  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4269  *   --------
4270  *   --------
4271  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4272  *   | DES1 |---> same as the First Descriptor
4273  *   | DES2 |---> buffer1 len
4274  *   | DES3 |
4275  *   --------
4276  *	|
4277  *     ...
4278  *	|
4279  *   --------
4280  *   | DES0 |---> buffer1 = Split TCP Payload
4281  *   | DES1 |---> same as the First Descriptor
4282  *   | DES2 |---> buffer1 len
4283  *   | DES3 |
4284  *   --------
4285  *
4286  * The MSS is fixed while TSO is enabled, so the TDES3 context field is not
4286  * programmed for every frame.
4287  */
4288 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4289 {
4290 	struct dma_desc *desc, *first, *mss_desc = NULL;
4291 	struct stmmac_priv *priv = netdev_priv(dev);
4292 	unsigned int first_entry, tx_packets;
4293 	struct stmmac_txq_stats *txq_stats;
4294 	struct stmmac_tx_queue *tx_q;
4295 	u32 pay_len, mss, queue;
4296 	int i, first_tx, nfrags;
4297 	u8 proto_hdr_len, hdr;
4298 	dma_addr_t des;
4299 	bool set_ic;
4300 
4301 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4302 	 *
4303 	 * Never let the HW insert the VLAN tag, since segments split by the
4304 	 * TSO engine would be un-tagged by mistake.
4305 	 */
4306 	if (skb_vlan_tag_present(skb)) {
4307 		skb = __vlan_hwaccel_push_inside(skb);
4308 		if (unlikely(!skb)) {
4309 			priv->xstats.tx_dropped++;
4310 			return NETDEV_TX_OK;
4311 		}
4312 	}
4313 
4314 	nfrags = skb_shinfo(skb)->nr_frags;
4315 	queue = skb_get_queue_mapping(skb);
4316 
4317 	tx_q = &priv->dma_conf.tx_queue[queue];
4318 	txq_stats = &priv->xstats.txq_stats[queue];
4319 	first_tx = tx_q->cur_tx;
4320 
4321 	/* Compute header lengths */
4322 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4323 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4324 		hdr = sizeof(struct udphdr);
4325 	} else {
4326 		proto_hdr_len = skb_tcp_all_headers(skb);
4327 		hdr = tcp_hdrlen(skb);
4328 	}
4329 
4330 	/* Desc availability based on threshold should be safe enough */
4331 	if (unlikely(stmmac_tx_avail(priv, queue) <
4332 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4333 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4334 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4335 								queue));
4336 			/* This is a hard error, log it. */
4337 			netdev_err(priv->dev,
4338 				   "%s: Tx Ring full when queue awake\n",
4339 				   __func__);
4340 		}
4341 		return NETDEV_TX_BUSY;
4342 	}
4343 
4344 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4345 
4346 	mss = skb_shinfo(skb)->gso_size;
4347 
4348 	/* set new MSS value if needed */
4349 	if (mss != tx_q->mss) {
4350 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4351 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4352 		else
4353 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4354 
4355 		stmmac_set_mss(priv, mss_desc, mss);
4356 		tx_q->mss = mss;
4357 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4358 						priv->dma_conf.dma_tx_size);
4359 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4360 	}
4361 
4362 	if (netif_msg_tx_queued(priv)) {
4363 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4364 			__func__, hdr, proto_hdr_len, pay_len, mss);
4365 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4366 			skb->data_len);
4367 	}
4368 
4369 	first_entry = tx_q->cur_tx;
4370 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4371 
4372 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4373 		desc = &tx_q->dma_entx[first_entry].basic;
4374 	else
4375 		desc = &tx_q->dma_tx[first_entry];
4376 	first = desc;
4377 
4378 	/* first descriptor: fill Headers on Buf1 */
4379 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4380 			     DMA_TO_DEVICE);
4381 	if (dma_mapping_error(priv->device, des))
4382 		goto dma_map_err;
4383 
4384 	stmmac_set_desc_addr(priv, first, des);
4385 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4386 			     (nfrags == 0), queue);
4387 
4388 	/* In case two or more DMA transmit descriptors are allocated for this
4389 	 * non-paged SKB data, the DMA buffer address should be saved to
4390 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4391 	 * and the other tx_q->tx_skbuff_dma[].buf entries should be left NULL
4392 	 * to guarantee that stmmac_tx_clean() does not unmap the entire DMA
4393 	 * buffer too early, since the tail areas of the DMA buffer can be
4394 	 * accessed by the DMA engine sooner or later.
4395 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4396 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4397 	 * this DMA buffer right after the DMA engine completely finishes the
4398 	 * full buffer transmission.
4399 	 */
4400 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4401 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4402 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4403 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4404 
4405 	/* Prepare fragments */
4406 	for (i = 0; i < nfrags; i++) {
4407 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4408 
4409 		des = skb_frag_dma_map(priv->device, frag, 0,
4410 				       skb_frag_size(frag),
4411 				       DMA_TO_DEVICE);
4412 		if (dma_mapping_error(priv->device, des))
4413 			goto dma_map_err;
4414 
4415 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4416 				     (i == nfrags - 1), queue);
4417 
4418 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4419 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4420 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4421 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4422 	}
4423 
4424 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4425 
4426 	/* Only the last descriptor gets to point to the skb. */
4427 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4428 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4429 
4430 	/* Manage tx mitigation */
4431 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4432 	tx_q->tx_count_frames += tx_packets;
4433 
4434 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4435 		set_ic = true;
4436 	else if (!priv->tx_coal_frames[queue])
4437 		set_ic = false;
4438 	else if (tx_packets > priv->tx_coal_frames[queue])
4439 		set_ic = true;
4440 	else if ((tx_q->tx_count_frames %
4441 		  priv->tx_coal_frames[queue]) < tx_packets)
4442 		set_ic = true;
4443 	else
4444 		set_ic = false;
4445 
4446 	if (set_ic) {
4447 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4448 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4449 		else
4450 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4451 
4452 		tx_q->tx_count_frames = 0;
4453 		stmmac_set_tx_ic(priv, desc);
4454 	}
4455 
4456 	/* We've used all descriptors we need for this skb, however,
4457 	 * advance cur_tx so that it references a fresh descriptor.
4458 	 * ndo_start_xmit will fill this descriptor the next time it's
4459 	 * called and stmmac_tx_clean may clean up to this descriptor.
4460 	 */
4461 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4462 
4463 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4464 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4465 			  __func__);
4466 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4467 	}
4468 
4469 	u64_stats_update_begin(&txq_stats->q_syncp);
4470 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4471 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4472 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4473 	if (set_ic)
4474 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4475 	u64_stats_update_end(&txq_stats->q_syncp);
4476 
4477 	if (priv->sarc_type)
4478 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4479 
4480 	skb_tx_timestamp(skb);
4481 
4482 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4483 		     priv->hwts_tx_en)) {
4484 		/* declare that device is doing timestamping */
4485 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4486 		stmmac_enable_tx_timestamp(priv, first);
4487 	}
4488 
4489 	/* Complete the first descriptor before granting the DMA */
4490 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4491 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4492 				   hdr / 4, (skb->len - proto_hdr_len));
4493 
4494 	/* If context desc is used to change MSS */
4495 	if (mss_desc) {
4496 		/* Make sure that the first descriptor has been completely
4497 		 * written, including its own bit. This is because the MSS
4498 		 * descriptor actually comes before the first descriptor, so we
4499 		 * need to make sure that its own bit is the last thing written.
4500 		 */
4501 		dma_wmb();
4502 		stmmac_set_tx_owner(priv, mss_desc);
4503 	}
4504 
4505 	if (netif_msg_pktdata(priv)) {
4506 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4507 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4508 			tx_q->cur_tx, first, nfrags);
4509 		pr_info(">>> frame to be transmitted: ");
4510 		print_pkt(skb->data, skb_headlen(skb));
4511 	}
4512 
4513 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4514 
4515 	stmmac_flush_tx_descriptors(priv, queue);
4516 	stmmac_tx_timer_arm(priv, queue);
4517 
4518 	return NETDEV_TX_OK;
4519 
4520 dma_map_err:
4521 	dev_err(priv->device, "Tx dma map failed\n");
4522 	dev_kfree_skb(skb);
4523 	priv->xstats.tx_dropped++;
4524 	return NETDEV_TX_OK;
4525 }
4526 
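/* A short worked example of the interrupt-coalescing decision above,
 * assuming tx_coal_frames[queue] == 25 and no hardware TX timestamping:
 * if tx_count_frames was 24 before this frame and the frame consumed
 * tx_packets == 2 entries, the new count is 26 and
 *
 *	26 % 25 == 1, which is < 2
 *
 * so set_ic is true: the Interrupt-on-Completion bit is set on the last
 * descriptor of the frame and tx_count_frames is reset to 0. Otherwise the
 * cleanup is left to the TX coalescing timer armed at the end of the
 * function.
 */
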
4527 /**
4528  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4529  * @skb: socket buffer to check
4530  *
4531  * Check if a packet has an ethertype that will trigger the IP header checks
4532  * and IP/TCP checksum engine of the stmmac core.
4533  *
4534  * Return: true if the ethertype can trigger the checksum engine, false
4535  * otherwise
4536  */
4537 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4538 {
4539 	int depth = 0;
4540 	__be16 proto;
4541 
4542 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4543 				    &depth);
4544 
4545 	return (depth <= ETH_HLEN) &&
4546 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4547 }
4548 
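/* For instance, a DSA-tagged frame carries the switch tag protocol as its
 * ethertype rather than ETH_P_IP/ETH_P_IPV6, so this helper returns false
 * and the transmit path below falls back to skb_checksum_help() instead of
 * relying on the checksum engine.
 */
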
4549 /**
4550  *  stmmac_xmit - Tx entry point of the driver
4551  *  @skb : the socket buffer
4552  *  @dev : device pointer
4553  *  Description : this is the tx entry point of the driver.
4554  *  It programs the chain or the ring and supports oversized frames
4555  *  and SG feature.
4556  */
4557 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4558 {
4559 	unsigned int first_entry, tx_packets, enh_desc;
4560 	struct stmmac_priv *priv = netdev_priv(dev);
4561 	unsigned int nopaged_len = skb_headlen(skb);
4562 	int i, csum_insertion = 0, is_jumbo = 0;
4563 	u32 queue = skb_get_queue_mapping(skb);
4564 	int nfrags = skb_shinfo(skb)->nr_frags;
4565 	int gso = skb_shinfo(skb)->gso_type;
4566 	struct stmmac_txq_stats *txq_stats;
4567 	struct dma_edesc *tbs_desc = NULL;
4568 	struct dma_desc *desc, *first;
4569 	struct stmmac_tx_queue *tx_q;
4570 	bool has_vlan, set_ic;
4571 	int entry, first_tx;
4572 	dma_addr_t des;
4573 
4574 	tx_q = &priv->dma_conf.tx_queue[queue];
4575 	txq_stats = &priv->xstats.txq_stats[queue];
4576 	first_tx = tx_q->cur_tx;
4577 
4578 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4579 		stmmac_stop_sw_lpi(priv);
4580 
4581 	/* Manage oversized TCP frames for GMAC4 device */
4582 	if (skb_is_gso(skb) && priv->tso) {
4583 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4584 			return stmmac_tso_xmit(skb, dev);
4585 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4586 			return stmmac_tso_xmit(skb, dev);
4587 	}
4588 
4589 	if (priv->est && priv->est->enable &&
4590 	    priv->est->max_sdu[queue] &&
4591 	    skb->len > priv->est->max_sdu[queue]) {
4592 		priv->xstats.max_sdu_txq_drop[queue]++;
4593 		goto max_sdu_err;
4594 	}
4595 
4596 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4597 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4598 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4599 								queue));
4600 			/* This is a hard error, log it. */
4601 			netdev_err(priv->dev,
4602 				   "%s: Tx Ring full when queue awake\n",
4603 				   __func__);
4604 		}
4605 		return NETDEV_TX_BUSY;
4606 	}
4607 
4608 	/* Check if VLAN can be inserted by HW */
4609 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4610 
4611 	entry = tx_q->cur_tx;
4612 	first_entry = entry;
4613 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4614 
4615 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4616 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4617 	 * queues. In that case, checksum offloading for those queues that don't
4618 	 * support tx coe needs to fall back to software checksum calculation.
4619 	 *
4620 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4621 	 * also have to be checksummed in software.
4622 	 */
4623 	if (csum_insertion &&
4624 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4625 	     !stmmac_has_ip_ethertype(skb))) {
4626 		if (unlikely(skb_checksum_help(skb)))
4627 			goto dma_map_err;
4628 		csum_insertion = !csum_insertion;
4629 	}
4630 
4631 	if (likely(priv->extend_desc))
4632 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4633 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4634 		desc = &tx_q->dma_entx[entry].basic;
4635 	else
4636 		desc = tx_q->dma_tx + entry;
4637 
4638 	first = desc;
4639 
4640 	if (has_vlan)
4641 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4642 
4643 	enh_desc = priv->plat->enh_desc;
4644 	/* To program the descriptors according to the size of the frame */
4645 	if (enh_desc)
4646 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4647 
4648 	if (unlikely(is_jumbo)) {
4649 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4650 		if (unlikely(entry < 0) && (entry != -EINVAL))
4651 			goto dma_map_err;
4652 	}
4653 
4654 	for (i = 0; i < nfrags; i++) {
4655 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4656 		int len = skb_frag_size(frag);
4657 		bool last_segment = (i == (nfrags - 1));
4658 
4659 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4660 		WARN_ON(tx_q->tx_skbuff[entry]);
4661 
4662 		if (likely(priv->extend_desc))
4663 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4664 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4665 			desc = &tx_q->dma_entx[entry].basic;
4666 		else
4667 			desc = tx_q->dma_tx + entry;
4668 
4669 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4670 				       DMA_TO_DEVICE);
4671 		if (dma_mapping_error(priv->device, des))
4672 			goto dma_map_err; /* should reuse desc w/o issues */
4673 
4674 		tx_q->tx_skbuff_dma[entry].buf = des;
4675 
4676 		stmmac_set_desc_addr(priv, desc, des);
4677 
4678 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4679 		tx_q->tx_skbuff_dma[entry].len = len;
4680 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4681 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4682 
4683 		/* Prepare the descriptor and set the own bit too */
4684 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4685 				priv->mode, 1, last_segment, skb->len);
4686 	}
4687 
4688 	/* Only the last descriptor gets to point to the skb. */
4689 	tx_q->tx_skbuff[entry] = skb;
4690 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4691 
4692 	/* According to the coalesce parameter, the IC bit for the latest
4693 	 * segment is reset and the timer re-started to clean the tx status.
4694 	 * This approach takes care of the fragments: desc is the first
4695 	 * element in case of no SG.
4696 	 */
4697 	tx_packets = (entry + 1) - first_tx;
4698 	tx_q->tx_count_frames += tx_packets;
4699 
4700 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4701 		set_ic = true;
4702 	else if (!priv->tx_coal_frames[queue])
4703 		set_ic = false;
4704 	else if (tx_packets > priv->tx_coal_frames[queue])
4705 		set_ic = true;
4706 	else if ((tx_q->tx_count_frames %
4707 		  priv->tx_coal_frames[queue]) < tx_packets)
4708 		set_ic = true;
4709 	else
4710 		set_ic = false;
4711 
4712 	if (set_ic) {
4713 		if (likely(priv->extend_desc))
4714 			desc = &tx_q->dma_etx[entry].basic;
4715 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4716 			desc = &tx_q->dma_entx[entry].basic;
4717 		else
4718 			desc = &tx_q->dma_tx[entry];
4719 
4720 		tx_q->tx_count_frames = 0;
4721 		stmmac_set_tx_ic(priv, desc);
4722 	}
4723 
4724 	/* We've used all descriptors we need for this skb, however,
4725 	 * advance cur_tx so that it references a fresh descriptor.
4726 	 * ndo_start_xmit will fill this descriptor the next time it's
4727 	 * called and stmmac_tx_clean may clean up to this descriptor.
4728 	 */
4729 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4730 	tx_q->cur_tx = entry;
4731 
4732 	if (netif_msg_pktdata(priv)) {
4733 		netdev_dbg(priv->dev,
4734 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4735 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4736 			   entry, first, nfrags);
4737 
4738 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4739 		print_pkt(skb->data, skb->len);
4740 	}
4741 
4742 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4743 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4744 			  __func__);
4745 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4746 	}
4747 
4748 	u64_stats_update_begin(&txq_stats->q_syncp);
4749 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4750 	if (set_ic)
4751 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4752 	u64_stats_update_end(&txq_stats->q_syncp);
4753 
4754 	if (priv->sarc_type)
4755 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4756 
4757 	skb_tx_timestamp(skb);
4758 
4759 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4760 	 * problems because all the descriptors are actually ready to be
4761 	 * passed to the DMA engine.
4762 	 */
4763 	if (likely(!is_jumbo)) {
4764 		bool last_segment = (nfrags == 0);
4765 
4766 		des = dma_map_single(priv->device, skb->data,
4767 				     nopaged_len, DMA_TO_DEVICE);
4768 		if (dma_mapping_error(priv->device, des))
4769 			goto dma_map_err;
4770 
4771 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4772 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4773 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4774 
4775 		stmmac_set_desc_addr(priv, first, des);
4776 
4777 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4778 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4779 
4780 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4781 			     priv->hwts_tx_en)) {
4782 			/* declare that device is doing timestamping */
4783 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4784 			stmmac_enable_tx_timestamp(priv, first);
4785 		}
4786 
4787 		/* Prepare the first descriptor setting the OWN bit too */
4788 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4789 				csum_insertion, priv->mode, 0, last_segment,
4790 				skb->len);
4791 	}
4792 
4793 	if (tx_q->tbs & STMMAC_TBS_EN) {
4794 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4795 
4796 		tbs_desc = &tx_q->dma_entx[first_entry];
4797 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4798 	}
4799 
4800 	stmmac_set_tx_owner(priv, first);
4801 
4802 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4803 
4804 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4805 
4806 	stmmac_flush_tx_descriptors(priv, queue);
4807 	stmmac_tx_timer_arm(priv, queue);
4808 
4809 	return NETDEV_TX_OK;
4810 
4811 dma_map_err:
4812 	netdev_err(priv->dev, "Tx DMA map failed\n");
4813 max_sdu_err:
4814 	dev_kfree_skb(skb);
4815 	priv->xstats.tx_dropped++;
4816 	return NETDEV_TX_OK;
4817 }
4818 
4819 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4820 {
4821 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4822 	__be16 vlan_proto = veth->h_vlan_proto;
4823 	u16 vlanid;
4824 
4825 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4826 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4827 	    (vlan_proto == htons(ETH_P_8021AD) &&
4828 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4829 		/* pop the vlan tag */
4830 		vlanid = ntohs(veth->h_vlan_TCI);
4831 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4832 		skb_pull(skb, VLAN_HLEN);
4833 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4834 	}
4835 }
4836 
4837 /**
4838  * stmmac_rx_refill - refill used skb preallocated buffers
4839  * @priv: driver private structure
4840  * @queue: RX queue index
4841  * Description : this is to reallocate the skb for the reception process
4842  * that is based on zero-copy.
4843  */
4844 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4845 {
4846 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4847 	int dirty = stmmac_rx_dirty(priv, queue);
4848 	unsigned int entry = rx_q->dirty_rx;
4849 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4850 
4851 	if (priv->dma_cap.host_dma_width <= 32)
4852 		gfp |= GFP_DMA32;
4853 
4854 	while (dirty-- > 0) {
4855 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4856 		struct dma_desc *p;
4857 		bool use_rx_wd;
4858 
4859 		if (priv->extend_desc)
4860 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4861 		else
4862 			p = rx_q->dma_rx + entry;
4863 
4864 		if (!buf->page) {
4865 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4866 			if (!buf->page)
4867 				break;
4868 		}
4869 
4870 		if (priv->sph && !buf->sec_page) {
4871 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4872 			if (!buf->sec_page)
4873 				break;
4874 
4875 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4876 		}
4877 
4878 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4879 
4880 		stmmac_set_desc_addr(priv, p, buf->addr);
4881 		if (priv->sph)
4882 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4883 		else
4884 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4885 		stmmac_refill_desc3(priv, rx_q, p);
4886 
4887 		rx_q->rx_count_frames++;
4888 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4889 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4890 			rx_q->rx_count_frames = 0;
4891 
4892 		use_rx_wd = !priv->rx_coal_frames[queue];
4893 		use_rx_wd |= rx_q->rx_count_frames > 0;
4894 		if (!priv->use_riwt)
4895 			use_rx_wd = false;
4896 
4897 		dma_wmb();
4898 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4899 
4900 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4901 	}
4902 	rx_q->dirty_rx = entry;
4903 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4904 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4905 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4906 }
4907 
4908 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4909 				       struct dma_desc *p,
4910 				       int status, unsigned int len)
4911 {
4912 	unsigned int plen = 0, hlen = 0;
4913 	int coe = priv->hw->rx_csum;
4914 
4915 	/* Not first descriptor, buffer is always zero */
4916 	if (priv->sph && len)
4917 		return 0;
4918 
4919 	/* First descriptor, get split header length */
4920 	stmmac_get_rx_header_len(priv, p, &hlen);
4921 	if (priv->sph && hlen) {
4922 		priv->xstats.rx_split_hdr_pkt_n++;
4923 		return hlen;
4924 	}
4925 
4926 	/* First descriptor, not last descriptor and not split header */
4927 	if (status & rx_not_ls)
4928 		return priv->dma_conf.dma_buf_sz;
4929 
4930 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4931 
4932 	/* First descriptor and last descriptor and not split header */
4933 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4934 }
4935 
4936 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4937 				       struct dma_desc *p,
4938 				       int status, unsigned int len)
4939 {
4940 	int coe = priv->hw->rx_csum;
4941 	unsigned int plen = 0;
4942 
4943 	/* Not split header, buffer is not available */
4944 	if (!priv->sph)
4945 		return 0;
4946 
4947 	/* Not last descriptor */
4948 	if (status & rx_not_ls)
4949 		return priv->dma_conf.dma_buf_sz;
4950 
4951 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4952 
4953 	/* Last descriptor */
4954 	return plen - len;
4955 }
4956 
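/* A rough example of how the two helpers above split a frame when Split
 * Header is in use: for a frame whose 64-byte header was placed in buffer 1
 * and that fits in a single (first and last) descriptor,
 * stmmac_rx_buf1_len() returns the 64-byte header length, and
 * stmmac_rx_buf2_len() then returns plen - len, e.g. 1400 - 64 = 1336
 * payload bytes in buffer 2 if the descriptor reports a 1400-byte frame
 * (the accumulated @len is 64 at that point).
 */
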
4957 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4958 				struct xdp_frame *xdpf, bool dma_map)
4959 {
4960 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4961 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4962 	unsigned int entry = tx_q->cur_tx;
4963 	struct dma_desc *tx_desc;
4964 	dma_addr_t dma_addr;
4965 	bool set_ic;
4966 
4967 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4968 		return STMMAC_XDP_CONSUMED;
4969 
4970 	if (priv->est && priv->est->enable &&
4971 	    priv->est->max_sdu[queue] &&
4972 	    xdpf->len > priv->est->max_sdu[queue]) {
4973 		priv->xstats.max_sdu_txq_drop[queue]++;
4974 		return STMMAC_XDP_CONSUMED;
4975 	}
4976 
4977 	if (likely(priv->extend_desc))
4978 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4979 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4980 		tx_desc = &tx_q->dma_entx[entry].basic;
4981 	else
4982 		tx_desc = tx_q->dma_tx + entry;
4983 
4984 	if (dma_map) {
4985 		dma_addr = dma_map_single(priv->device, xdpf->data,
4986 					  xdpf->len, DMA_TO_DEVICE);
4987 		if (dma_mapping_error(priv->device, dma_addr))
4988 			return STMMAC_XDP_CONSUMED;
4989 
4990 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4991 	} else {
4992 		struct page *page = virt_to_page(xdpf->data);
4993 
4994 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4995 			   xdpf->headroom;
4996 		dma_sync_single_for_device(priv->device, dma_addr,
4997 					   xdpf->len, DMA_BIDIRECTIONAL);
4998 
4999 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
5000 	}
5001 
5002 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
5003 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
5004 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
5005 	tx_q->tx_skbuff_dma[entry].last_segment = true;
5006 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
5007 
5008 	tx_q->xdpf[entry] = xdpf;
5009 
5010 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
5011 
5012 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
5013 			       true, priv->mode, true, true,
5014 			       xdpf->len);
5015 
5016 	tx_q->tx_count_frames++;
5017 
5018 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
5019 		set_ic = true;
5020 	else
5021 		set_ic = false;
5022 
5023 	if (set_ic) {
5024 		tx_q->tx_count_frames = 0;
5025 		stmmac_set_tx_ic(priv, tx_desc);
5026 		u64_stats_update_begin(&txq_stats->q_syncp);
5027 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
5028 		u64_stats_update_end(&txq_stats->q_syncp);
5029 	}
5030 
5031 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
5032 
5033 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5034 	tx_q->cur_tx = entry;
5035 
5036 	return STMMAC_XDP_TX;
5037 }
5038 
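/* In the XDP_TX (!dma_map) case above, the buffer already lives in a
 * page-pool page that was DMA-mapped at RX time, so no new mapping is
 * created; the payload offset is recomputed as
 *
 *	dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) + xdpf->headroom;
 *
 * because xdp_convert_buff_to_frame() places the struct xdp_frame metadata
 * at the start of the buffer headroom, and only a cache sync is needed
 * before transmission.
 */
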
5039 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5040 				   int cpu)
5041 {
5042 	int index = cpu;
5043 
5044 	if (unlikely(index < 0))
5045 		index = 0;
5046 
5047 	while (index >= priv->plat->tx_queues_to_use)
5048 		index -= priv->plat->tx_queues_to_use;
5049 
5050 	return index;
5051 }
5052 
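/* The loop above is effectively a modulo mapping: with
 * tx_queues_to_use == 4, CPUs 0..3 map to queues 0..3 and CPU 6 maps to
 * queue 2, i.e.
 *
 *	queue = cpu % tx_queues_to_use;
 *
 * for non-negative cpu values, while negative values are clamped to
 * queue 0.
 */
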
5053 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5054 				struct xdp_buff *xdp)
5055 {
5056 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5057 	int cpu = smp_processor_id();
5058 	struct netdev_queue *nq;
5059 	int queue;
5060 	int res;
5061 
5062 	if (unlikely(!xdpf))
5063 		return STMMAC_XDP_CONSUMED;
5064 
5065 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5066 	nq = netdev_get_tx_queue(priv->dev, queue);
5067 
5068 	__netif_tx_lock(nq, cpu);
5069 	/* Avoids TX time-out as we are sharing with slow path */
5070 	txq_trans_cond_update(nq);
5071 
5072 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5073 	if (res == STMMAC_XDP_TX)
5074 		stmmac_flush_tx_descriptors(priv, queue);
5075 
5076 	__netif_tx_unlock(nq);
5077 
5078 	return res;
5079 }
5080 
5081 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5082 				 struct bpf_prog *prog,
5083 				 struct xdp_buff *xdp)
5084 {
5085 	u32 act;
5086 	int res;
5087 
5088 	act = bpf_prog_run_xdp(prog, xdp);
5089 	switch (act) {
5090 	case XDP_PASS:
5091 		res = STMMAC_XDP_PASS;
5092 		break;
5093 	case XDP_TX:
5094 		res = stmmac_xdp_xmit_back(priv, xdp);
5095 		break;
5096 	case XDP_REDIRECT:
5097 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5098 			res = STMMAC_XDP_CONSUMED;
5099 		else
5100 			res = STMMAC_XDP_REDIRECT;
5101 		break;
5102 	default:
5103 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5104 		fallthrough;
5105 	case XDP_ABORTED:
5106 		trace_xdp_exception(priv->dev, prog, act);
5107 		fallthrough;
5108 	case XDP_DROP:
5109 		res = STMMAC_XDP_CONSUMED;
5110 		break;
5111 	}
5112 
5113 	return res;
5114 }
5115 
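/* To summarize the mapping above: XDP_PASS becomes STMMAC_XDP_PASS,
 * XDP_TX is handed to stmmac_xdp_xmit_back() (STMMAC_XDP_TX on success,
 * STMMAC_XDP_CONSUMED otherwise), XDP_REDIRECT becomes STMMAC_XDP_REDIRECT
 * only when xdp_do_redirect() succeeds, and XDP_ABORTED, XDP_DROP and any
 * unknown action all end up as STMMAC_XDP_CONSUMED.
 */
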
5116 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5117 					   struct xdp_buff *xdp)
5118 {
5119 	struct bpf_prog *prog;
5120 	int res;
5121 
5122 	prog = READ_ONCE(priv->xdp_prog);
5123 	if (!prog) {
5124 		res = STMMAC_XDP_PASS;
5125 		goto out;
5126 	}
5127 
5128 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5129 out:
5130 	return ERR_PTR(-res);
5131 }
5132 
5133 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5134 				   int xdp_status)
5135 {
5136 	int cpu = smp_processor_id();
5137 	int queue;
5138 
5139 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5140 
5141 	if (xdp_status & STMMAC_XDP_TX)
5142 		stmmac_tx_timer_arm(priv, queue);
5143 
5144 	if (xdp_status & STMMAC_XDP_REDIRECT)
5145 		xdp_do_flush();
5146 }
5147 
5148 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5149 					       struct xdp_buff *xdp)
5150 {
5151 	unsigned int metasize = xdp->data - xdp->data_meta;
5152 	unsigned int datasize = xdp->data_end - xdp->data;
5153 	struct sk_buff *skb;
5154 
5155 	skb = napi_alloc_skb(&ch->rxtx_napi,
5156 			     xdp->data_end - xdp->data_hard_start);
5157 	if (unlikely(!skb))
5158 		return NULL;
5159 
5160 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5161 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5162 	if (metasize)
5163 		skb_metadata_set(skb, metasize);
5164 
5165 	return skb;
5166 }
5167 
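/* Complete a zero-copy RX frame destined for the network stack: copy it
 * into an skb, fill in timestamp, VLAN, checksum and RSS hash metadata,
 * then hand it to GRO on the channel's rxtx NAPI context.
 */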
5168 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5169 				   struct dma_desc *p, struct dma_desc *np,
5170 				   struct xdp_buff *xdp)
5171 {
5172 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5173 	struct stmmac_channel *ch = &priv->channel[queue];
5174 	unsigned int len = xdp->data_end - xdp->data;
5175 	enum pkt_hash_types hash_type;
5176 	int coe = priv->hw->rx_csum;
5177 	struct sk_buff *skb;
5178 	u32 hash;
5179 
5180 	skb = stmmac_construct_skb_zc(ch, xdp);
5181 	if (!skb) {
5182 		priv->xstats.rx_dropped++;
5183 		return;
5184 	}
5185 
5186 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5187 	if (priv->hw->hw_vlan_en)
5188 		/* MAC level stripping. */
5189 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5190 	else
5191 		/* Driver level stripping. */
5192 		stmmac_rx_vlan(priv->dev, skb);
5193 	skb->protocol = eth_type_trans(skb, priv->dev);
5194 
5195 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5196 		skb_checksum_none_assert(skb);
5197 	else
5198 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5199 
5200 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5201 		skb_set_hash(skb, hash, hash_type);
5202 
5203 	skb_record_rx_queue(skb, queue);
5204 	napi_gro_receive(&ch->rxtx_napi, skb);
5205 
5206 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5207 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5208 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5209 	u64_stats_update_end(&rxq_stats->napi_syncp);
5210 }
5211 
5212 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5213 {
5214 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5215 	unsigned int entry = rx_q->dirty_rx;
5216 	struct dma_desc *rx_desc = NULL;
5217 	bool ret = true;
5218 
5219 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5220 
5221 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5222 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5223 		dma_addr_t dma_addr;
5224 		bool use_rx_wd;
5225 
5226 		if (!buf->xdp) {
5227 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5228 			if (!buf->xdp) {
5229 				ret = false;
5230 				break;
5231 			}
5232 		}
5233 
5234 		if (priv->extend_desc)
5235 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5236 		else
5237 			rx_desc = rx_q->dma_rx + entry;
5238 
5239 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5240 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5241 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5242 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5243 
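		/* Mirror the watchdog/coalescing bookkeeping of stmmac_rx_refill():
		 * the RX watchdog (RIWT) bit is only requested on a descriptor when
		 * priv->use_riwt is enabled, modulated by the per-queue
		 * rx_coal_frames counter.
		 */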
5244 		rx_q->rx_count_frames++;
5245 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5246 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5247 			rx_q->rx_count_frames = 0;
5248 
5249 		use_rx_wd = !priv->rx_coal_frames[queue];
5250 		use_rx_wd |= rx_q->rx_count_frames > 0;
5251 		if (!priv->use_riwt)
5252 			use_rx_wd = false;
5253 
5254 		dma_wmb();
5255 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5256 
5257 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5258 	}
5259 
5260 	if (rx_desc) {
5261 		rx_q->dirty_rx = entry;
5262 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5263 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5264 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5265 	}
5266 
5267 	return ret;
5268 }
5269 
5270 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5271 {
5272 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5273 	 * to represent incoming packet, whereas cb field in the same structure
5274 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5275 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5276 	 */
5277 	return (struct stmmac_xdp_buff *)xdp;
5278 }
5279 
5280 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5281 {
5282 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5283 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5284 	unsigned int count = 0, error = 0, len = 0;
5285 	int dirty = stmmac_rx_dirty(priv, queue);
5286 	unsigned int next_entry = rx_q->cur_rx;
5287 	u32 rx_errors = 0, rx_dropped = 0;
5288 	unsigned int desc_size;
5289 	struct bpf_prog *prog;
5290 	bool failure = false;
5291 	int xdp_status = 0;
5292 	int status = 0;
5293 
5294 	if (netif_msg_rx_status(priv)) {
5295 		void *rx_head;
5296 
5297 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5298 		if (priv->extend_desc) {
5299 			rx_head = (void *)rx_q->dma_erx;
5300 			desc_size = sizeof(struct dma_extended_desc);
5301 		} else {
5302 			rx_head = (void *)rx_q->dma_rx;
5303 			desc_size = sizeof(struct dma_desc);
5304 		}
5305 
5306 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5307 				    rx_q->dma_rx_phy, desc_size);
5308 	}
5309 	while (count < limit) {
5310 		struct stmmac_rx_buffer *buf;
5311 		struct stmmac_xdp_buff *ctx;
5312 		unsigned int buf1_len = 0;
5313 		struct dma_desc *np, *p;
5314 		int entry;
5315 		int res;
5316 
5317 		if (!count && rx_q->state_saved) {
5318 			error = rx_q->state.error;
5319 			len = rx_q->state.len;
5320 		} else {
5321 			rx_q->state_saved = false;
5322 			error = 0;
5323 			len = 0;
5324 		}
5325 
5326 		if (count >= limit)
5327 			break;
5328 
5329 read_again:
5330 		buf1_len = 0;
5331 		entry = next_entry;
5332 		buf = &rx_q->buf_pool[entry];
5333 
5334 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5335 			failure = failure ||
5336 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5337 			dirty = 0;
5338 		}
5339 
5340 		if (priv->extend_desc)
5341 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5342 		else
5343 			p = rx_q->dma_rx + entry;
5344 
5345 		/* read the status of the incoming frame */
5346 		status = stmmac_rx_status(priv, &priv->xstats, p);
5347 		/* check if still managed by the DMA, otherwise go ahead */
5348 		if (unlikely(status & dma_own))
5349 			break;
5350 
5351 		/* Prefetch the next RX descriptor */
5352 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5353 						priv->dma_conf.dma_rx_size);
5354 		next_entry = rx_q->cur_rx;
5355 
5356 		if (priv->extend_desc)
5357 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5358 		else
5359 			np = rx_q->dma_rx + next_entry;
5360 
5361 		prefetch(np);
5362 
5363 		/* Ensure a valid XSK buffer before proceeding */
5364 		if (!buf->xdp)
5365 			break;
5366 
5367 		if (priv->extend_desc)
5368 			stmmac_rx_extended_status(priv, &priv->xstats,
5369 						  rx_q->dma_erx + entry);
5370 		if (unlikely(status == discard_frame)) {
5371 			xsk_buff_free(buf->xdp);
5372 			buf->xdp = NULL;
5373 			dirty++;
5374 			error = 1;
5375 			if (!priv->hwts_rx_en)
5376 				rx_errors++;
5377 		}
5378 
5379 		if (unlikely(error && (status & rx_not_ls)))
5380 			goto read_again;
5381 		if (unlikely(error)) {
5382 			count++;
5383 			continue;
5384 		}
5385 
5386 		/* The XSK pool expects each RX frame to be 1:1 mapped to an XSK buffer */
5387 		if (likely(status & rx_not_ls)) {
5388 			xsk_buff_free(buf->xdp);
5389 			buf->xdp = NULL;
5390 			dirty++;
5391 			count++;
5392 			goto read_again;
5393 		}
5394 
5395 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5396 		ctx->priv = priv;
5397 		ctx->desc = p;
5398 		ctx->ndesc = np;
5399 
5400 		/* XDP ZC frames only support primary buffers for now */
5401 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5402 		len += buf1_len;
5403 
5404 		/* ACS is disabled; strip manually. */
5405 		if (likely(!(status & rx_not_ls))) {
5406 			buf1_len -= ETH_FCS_LEN;
5407 			len -= ETH_FCS_LEN;
5408 		}
5409 
5410 		/* RX buffer is good and fits into an XSK pool buffer */
5411 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5412 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5413 
5414 		prog = READ_ONCE(priv->xdp_prog);
5415 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5416 
5417 		switch (res) {
5418 		case STMMAC_XDP_PASS:
5419 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5420 			xsk_buff_free(buf->xdp);
5421 			break;
5422 		case STMMAC_XDP_CONSUMED:
5423 			xsk_buff_free(buf->xdp);
5424 			rx_dropped++;
5425 			break;
5426 		case STMMAC_XDP_TX:
5427 		case STMMAC_XDP_REDIRECT:
5428 			xdp_status |= res;
5429 			break;
5430 		}
5431 
5432 		buf->xdp = NULL;
5433 		dirty++;
5434 		count++;
5435 	}
5436 
5437 	if (status & rx_not_ls) {
5438 		rx_q->state_saved = true;
5439 		rx_q->state.error = error;
5440 		rx_q->state.len = len;
5441 	}
5442 
5443 	stmmac_finalize_xdp_rx(priv, xdp_status);
5444 
5445 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5446 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5447 	u64_stats_update_end(&rxq_stats->napi_syncp);
5448 
5449 	priv->xstats.rx_dropped += rx_dropped;
5450 	priv->xstats.rx_errors += rx_errors;
5451 
5452 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5453 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5454 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5455 		else
5456 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5457 
5458 		return (int)count;
5459 	}
5460 
5461 	return failure ? limit : (int)count;
5462 }
5463 
5464 /**
5465  * stmmac_rx - manage the receive process
5466  * @priv: driver private structure
5467  * @limit: NAPI budget
5468  * @queue: RX queue index.
5469  * Description: this is the function called by the NAPI poll method.
5470  * It gets all the frames inside the ring.
5471  */
5472 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5473 {
5474 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5475 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5476 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5477 	struct stmmac_channel *ch = &priv->channel[queue];
5478 	unsigned int count = 0, error = 0, len = 0;
5479 	int status = 0, coe = priv->hw->rx_csum;
5480 	unsigned int next_entry = rx_q->cur_rx;
5481 	enum dma_data_direction dma_dir;
5482 	unsigned int desc_size;
5483 	struct sk_buff *skb = NULL;
5484 	struct stmmac_xdp_buff ctx;
5485 	int xdp_status = 0;
5486 	int bufsz;
5487 
5488 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5489 	bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5490 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5491 
5492 	if (netif_msg_rx_status(priv)) {
5493 		void *rx_head;
5494 
5495 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5496 		if (priv->extend_desc) {
5497 			rx_head = (void *)rx_q->dma_erx;
5498 			desc_size = sizeof(struct dma_extended_desc);
5499 		} else {
5500 			rx_head = (void *)rx_q->dma_rx;
5501 			desc_size = sizeof(struct dma_desc);
5502 		}
5503 
5504 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5505 				    rx_q->dma_rx_phy, desc_size);
5506 	}
5507 	while (count < limit) {
5508 		unsigned int buf1_len = 0, buf2_len = 0;
5509 		enum pkt_hash_types hash_type;
5510 		struct stmmac_rx_buffer *buf;
5511 		struct dma_desc *np, *p;
5512 		int entry;
5513 		u32 hash;
5514 
5515 		if (!count && rx_q->state_saved) {
5516 			skb = rx_q->state.skb;
5517 			error = rx_q->state.error;
5518 			len = rx_q->state.len;
5519 		} else {
5520 			rx_q->state_saved = false;
5521 			skb = NULL;
5522 			error = 0;
5523 			len = 0;
5524 		}
5525 
5526 read_again:
5527 		if (count >= limit)
5528 			break;
5529 
5530 		buf1_len = 0;
5531 		buf2_len = 0;
5532 		entry = next_entry;
5533 		buf = &rx_q->buf_pool[entry];
5534 
5535 		if (priv->extend_desc)
5536 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5537 		else
5538 			p = rx_q->dma_rx + entry;
5539 
5540 		/* read the status of the incoming frame */
5541 		status = stmmac_rx_status(priv, &priv->xstats, p);
5542 		/* check if still managed by the DMA, otherwise go ahead */
5543 		if (unlikely(status & dma_own))
5544 			break;
5545 
5546 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5547 						priv->dma_conf.dma_rx_size);
5548 		next_entry = rx_q->cur_rx;
5549 
5550 		if (priv->extend_desc)
5551 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5552 		else
5553 			np = rx_q->dma_rx + next_entry;
5554 
5555 		prefetch(np);
5556 
5557 		if (priv->extend_desc)
5558 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5559 		if (unlikely(status == discard_frame)) {
5560 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5561 			buf->page = NULL;
5562 			error = 1;
5563 			if (!priv->hwts_rx_en)
5564 				rx_errors++;
5565 		}
5566 
5567 		if (unlikely(error && (status & rx_not_ls)))
5568 			goto read_again;
5569 		if (unlikely(error)) {
5570 			dev_kfree_skb(skb);
5571 			skb = NULL;
5572 			count++;
5573 			continue;
5574 		}
5575 
5576 		/* Buffer is good. Go on. */
5577 
5578 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5579 		len += buf1_len;
5580 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5581 		len += buf2_len;
5582 
5583 		/* ACS is disabled; strip manually. */
5584 		if (likely(!(status & rx_not_ls))) {
5585 			if (buf2_len) {
5586 				buf2_len -= ETH_FCS_LEN;
5587 				len -= ETH_FCS_LEN;
5588 			} else if (buf1_len) {
5589 				buf1_len -= ETH_FCS_LEN;
5590 				len -= ETH_FCS_LEN;
5591 			}
5592 		}
5593 
5594 		if (!skb) {
5595 			unsigned int pre_len, sync_len;
5596 
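			/* First buffer of the frame: run the XDP program on it before an
			 * skb is built. pre_len/sync_len record how much of the buffer
			 * the CPU (or the XDP program) may have touched, so the DMA sync
			 * on recycle covers at least that many bytes.
			 */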
5597 			dma_sync_single_for_cpu(priv->device, buf->addr,
5598 						buf1_len, dma_dir);
5599 			net_prefetch(page_address(buf->page) +
5600 				     buf->page_offset);
5601 
5602 			xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5603 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5604 					 buf->page_offset, buf1_len, true);
5605 
5606 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5607 				  buf->page_offset;
5608 
5609 			ctx.priv = priv;
5610 			ctx.desc = p;
5611 			ctx.ndesc = np;
5612 
5613 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5614 			/* Due to xdp_adjust_tail: the DMA sync for_device
5615 			 * must cover the max length the CPU touched
5616 			 */
5617 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5618 				   buf->page_offset;
5619 			sync_len = max(sync_len, pre_len);
5620 
5621 			/* For non-XDP_PASS verdicts */
5622 			if (IS_ERR(skb)) {
5623 				unsigned int xdp_res = -PTR_ERR(skb);
5624 
5625 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5626 					page_pool_put_page(rx_q->page_pool,
5627 							   virt_to_head_page(ctx.xdp.data),
5628 							   sync_len, true);
5629 					buf->page = NULL;
5630 					rx_dropped++;
5631 
5632 					/* Clear skb, as it was only used to
5633 					 * carry the XDP verdict status.
5634 					 */
5635 					skb = NULL;
5636 
5637 					if (unlikely((status & rx_not_ls)))
5638 						goto read_again;
5639 
5640 					count++;
5641 					continue;
5642 				} else if (xdp_res & (STMMAC_XDP_TX |
5643 						      STMMAC_XDP_REDIRECT)) {
5644 					xdp_status |= xdp_res;
5645 					buf->page = NULL;
5646 					skb = NULL;
5647 					count++;
5648 					continue;
5649 				}
5650 			}
5651 		}
5652 
5653 		if (!skb) {
5654 			unsigned int head_pad_len;
5655 
5656 			/* XDP program may expand or reduce tail */
5657 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5658 
5659 			skb = napi_build_skb(page_address(buf->page),
5660 					     rx_q->napi_skb_frag_size);
5661 			if (!skb) {
5662 				page_pool_recycle_direct(rx_q->page_pool,
5663 							 buf->page);
5664 				rx_dropped++;
5665 				count++;
5666 				goto drain_data;
5667 			}
5668 
5669 			/* XDP program may adjust header */
5670 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5671 			skb_reserve(skb, head_pad_len);
5672 			skb_put(skb, buf1_len);
5673 			skb_mark_for_recycle(skb);
5674 			buf->page = NULL;
5675 		} else if (buf1_len) {
5676 			dma_sync_single_for_cpu(priv->device, buf->addr,
5677 						buf1_len, dma_dir);
5678 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5679 					buf->page, buf->page_offset, buf1_len,
5680 					priv->dma_conf.dma_buf_sz);
5681 			buf->page = NULL;
5682 		}
5683 
5684 		if (buf2_len) {
5685 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5686 						buf2_len, dma_dir);
5687 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5688 					buf->sec_page, 0, buf2_len,
5689 					priv->dma_conf.dma_buf_sz);
5690 			buf->sec_page = NULL;
5691 		}
5692 
5693 drain_data:
5694 		if (likely(status & rx_not_ls))
5695 			goto read_again;
5696 		if (!skb)
5697 			continue;
5698 
5699 		/* Got entire packet into SKB. Finish it. */
5700 
5701 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5702 
5703 		if (priv->hw->hw_vlan_en)
5704 			/* MAC level stripping. */
5705 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5706 		else
5707 			/* Driver level stripping. */
5708 			stmmac_rx_vlan(priv->dev, skb);
5709 
5710 		skb->protocol = eth_type_trans(skb, priv->dev);
5711 
5712 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5713 			skb_checksum_none_assert(skb);
5714 		else
5715 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5716 
5717 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5718 			skb_set_hash(skb, hash, hash_type);
5719 
5720 		skb_record_rx_queue(skb, queue);
5721 		napi_gro_receive(&ch->rx_napi, skb);
5722 		skb = NULL;
5723 
5724 		rx_packets++;
5725 		rx_bytes += len;
5726 		count++;
5727 	}
5728 
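	/* A frame may span several descriptors; if the last one seen was not
	 * the last segment, or an skb is still being assembled, save the
	 * partial state so the next NAPI poll can resume where we stopped.
	 */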
5729 	if (status & rx_not_ls || skb) {
5730 		rx_q->state_saved = true;
5731 		rx_q->state.skb = skb;
5732 		rx_q->state.error = error;
5733 		rx_q->state.len = len;
5734 	}
5735 
5736 	stmmac_finalize_xdp_rx(priv, xdp_status);
5737 
5738 	stmmac_rx_refill(priv, queue);
5739 
5740 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5741 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5742 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5743 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5744 	u64_stats_update_end(&rxq_stats->napi_syncp);
5745 
5746 	priv->xstats.rx_dropped += rx_dropped;
5747 	priv->xstats.rx_errors += rx_errors;
5748 
5749 	return count;
5750 }
5751 
5752 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5753 {
5754 	struct stmmac_channel *ch =
5755 		container_of(napi, struct stmmac_channel, rx_napi);
5756 	struct stmmac_priv *priv = ch->priv_data;
5757 	struct stmmac_rxq_stats *rxq_stats;
5758 	u32 chan = ch->index;
5759 	int work_done;
5760 
5761 	rxq_stats = &priv->xstats.rxq_stats[chan];
5762 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5763 	u64_stats_inc(&rxq_stats->napi.poll);
5764 	u64_stats_update_end(&rxq_stats->napi_syncp);
5765 
5766 	work_done = stmmac_rx(priv, budget, chan);
5767 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5768 		unsigned long flags;
5769 
5770 		spin_lock_irqsave(&ch->lock, flags);
5771 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5772 		spin_unlock_irqrestore(&ch->lock, flags);
5773 	}
5774 
5775 	return work_done;
5776 }
5777 
5778 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5779 {
5780 	struct stmmac_channel *ch =
5781 		container_of(napi, struct stmmac_channel, tx_napi);
5782 	struct stmmac_priv *priv = ch->priv_data;
5783 	struct stmmac_txq_stats *txq_stats;
5784 	bool pending_packets = false;
5785 	u32 chan = ch->index;
5786 	int work_done;
5787 
5788 	txq_stats = &priv->xstats.txq_stats[chan];
5789 	u64_stats_update_begin(&txq_stats->napi_syncp);
5790 	u64_stats_inc(&txq_stats->napi.poll);
5791 	u64_stats_update_end(&txq_stats->napi_syncp);
5792 
5793 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5794 	work_done = min(work_done, budget);
5795 
5796 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5797 		unsigned long flags;
5798 
5799 		spin_lock_irqsave(&ch->lock, flags);
5800 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5801 		spin_unlock_irqrestore(&ch->lock, flags);
5802 	}
5803 
5804 	/* TX still has packets to handle; check if we need to arm the tx timer */
5805 	if (pending_packets)
5806 		stmmac_tx_timer_arm(priv, chan);
5807 
5808 	return work_done;
5809 }
5810 
5811 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5812 {
5813 	struct stmmac_channel *ch =
5814 		container_of(napi, struct stmmac_channel, rxtx_napi);
5815 	struct stmmac_priv *priv = ch->priv_data;
5816 	bool tx_pending_packets = false;
5817 	int rx_done, tx_done, rxtx_done;
5818 	struct stmmac_rxq_stats *rxq_stats;
5819 	struct stmmac_txq_stats *txq_stats;
5820 	u32 chan = ch->index;
5821 
5822 	rxq_stats = &priv->xstats.rxq_stats[chan];
5823 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5824 	u64_stats_inc(&rxq_stats->napi.poll);
5825 	u64_stats_update_end(&rxq_stats->napi_syncp);
5826 
5827 	txq_stats = &priv->xstats.txq_stats[chan];
5828 	u64_stats_update_begin(&txq_stats->napi_syncp);
5829 	u64_stats_inc(&txq_stats->napi.poll);
5830 	u64_stats_update_end(&txq_stats->napi_syncp);
5831 
5832 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5833 	tx_done = min(tx_done, budget);
5834 
5835 	rx_done = stmmac_rx_zc(priv, budget, chan);
5836 
5837 	rxtx_done = max(tx_done, rx_done);
5838 
5839 	/* If either TX or RX work is not complete, return budget
5840 	 * and keep polling
5841 	 */
5842 	if (rxtx_done >= budget)
5843 		return budget;
5844 
5845 	/* all work done, exit the polling mode */
5846 	if (napi_complete_done(napi, rxtx_done)) {
5847 		unsigned long flags;
5848 
5849 		spin_lock_irqsave(&ch->lock, flags);
5850 		/* Both RX and TX work are complete,
5851 		 * so enable both RX & TX IRQs.
5852 		 */
5853 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5854 		spin_unlock_irqrestore(&ch->lock, flags);
5855 	}
5856 
5857 	/* TX still has packets to handle; check if we need to arm the tx timer */
5858 	if (tx_pending_packets)
5859 		stmmac_tx_timer_arm(priv, chan);
5860 
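	/* rxtx_done is already below budget at this point; clamping to
	 * budget - 1 merely guarantees we never report a fully consumed
	 * budget after napi_complete_done() has been called.
	 */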
5861 	return min(rxtx_done, budget - 1);
5862 }
5863 
5864 /**
5865  *  stmmac_tx_timeout
5866  *  @dev : Pointer to net device structure
5867  *  @txqueue: the index of the hanging transmit queue
5868  *  Description: this function is called when a packet transmission fails to
5869  *   complete within a reasonable time. The driver will mark the error in the
5870  *   netdev structure and arrange for the device to be reset to a sane state
5871  *   in order to transmit a new packet.
5872  */
5873 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5874 {
5875 	struct stmmac_priv *priv = netdev_priv(dev);
5876 
5877 	stmmac_global_err(priv);
5878 }
5879 
5880 /**
5881  *  stmmac_set_rx_mode - entry point for multicast addressing
5882  *  @dev : pointer to the device structure
5883  *  Description:
5884  *  This function is a driver entry point which gets called by the kernel
5885  *  whenever multicast addresses must be enabled/disabled.
5886  *  Return value:
5887  *  void.
5888  */
5889 static void stmmac_set_rx_mode(struct net_device *dev)
5890 {
5891 	struct stmmac_priv *priv = netdev_priv(dev);
5892 
5893 	stmmac_set_filter(priv, priv->hw, dev);
5894 }
5895 
5896 /**
5897  *  stmmac_change_mtu - entry point to change MTU size for the device.
5898  *  @dev : device pointer.
5899  *  @new_mtu : the new MTU size for the device.
5900  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5901  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5902  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5903  *  Return value:
5904  *  0 on success and an appropriate negative error code, as defined in
5905  *  errno.h, on failure.
5906  */
5907 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5908 {
5909 	struct stmmac_priv *priv = netdev_priv(dev);
5910 	int txfifosz = priv->plat->tx_fifo_size;
5911 	struct stmmac_dma_conf *dma_conf;
5912 	const int mtu = new_mtu;
5913 	int ret;
5914 
5915 	if (txfifosz == 0)
5916 		txfifosz = priv->dma_cap.tx_fifo_size;
5917 
5918 	txfifosz /= priv->plat->tx_queues_to_use;
5919 
5920 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5921 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5922 		return -EINVAL;
5923 	}
5924 
5925 	new_mtu = STMMAC_ALIGN(new_mtu);
5926 
5927 	/* If this condition is true, the FIFO is too small or the MTU is too large */
5928 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5929 		return -EINVAL;
5930 
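	/* On a running interface the MTU change needs new DMA buffers:
	 * allocate a complete new DMA configuration first, and only then
	 * tear the interface down and reopen it with the new ring setup.
	 * If the allocation fails, the current configuration is untouched.
	 */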
5931 	if (netif_running(dev)) {
5932 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5933 		/* Try to allocate the new DMA conf with the new mtu */
5934 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5935 		if (IS_ERR(dma_conf)) {
5936 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5937 				   mtu);
5938 			return PTR_ERR(dma_conf);
5939 		}
5940 
5941 		stmmac_release(dev);
5942 
5943 		ret = __stmmac_open(dev, dma_conf);
5944 		if (ret) {
5945 			free_dma_desc_resources(priv, dma_conf);
5946 			kfree(dma_conf);
5947 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5948 			return ret;
5949 		}
5950 
5951 		kfree(dma_conf);
5952 
5953 		stmmac_set_rx_mode(dev);
5954 	}
5955 
5956 	WRITE_ONCE(dev->mtu, mtu);
5957 	netdev_update_features(dev);
5958 
5959 	return 0;
5960 }
5961 
5962 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5963 					     netdev_features_t features)
5964 {
5965 	struct stmmac_priv *priv = netdev_priv(dev);
5966 
5967 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5968 		features &= ~NETIF_F_RXCSUM;
5969 
5970 	if (!priv->plat->tx_coe)
5971 		features &= ~NETIF_F_CSUM_MASK;
5972 
5973 	/* Some GMAC devices have a bugged Jumbo frame support that
5974 	 * needs to have the Tx COE disabled for oversized frames
5975 	 * (due to limited buffer sizes). In this case we disable
5976 	 * the TX csum insertion in the TDES and do not use SF (Store-and-Forward).
5977 	 */
5978 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5979 		features &= ~NETIF_F_CSUM_MASK;
5980 
5981 	/* Disable tso if asked by ethtool */
5982 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5983 		if (features & NETIF_F_TSO)
5984 			priv->tso = true;
5985 		else
5986 			priv->tso = false;
5987 	}
5988 
5989 	return features;
5990 }
5991 
5992 static int stmmac_set_features(struct net_device *netdev,
5993 			       netdev_features_t features)
5994 {
5995 	struct stmmac_priv *priv = netdev_priv(netdev);
5996 
5997 	/* Keep the COE type in case checksum offload is supported */
5998 	if (features & NETIF_F_RXCSUM)
5999 		priv->hw->rx_csum = priv->plat->rx_coe;
6000 	else
6001 		priv->hw->rx_csum = 0;
6002 	/* No check needed because rx_coe has already been set and will be
6003 	 * corrected if there is an issue.
6004 	 */
6005 	stmmac_rx_ipc(priv, priv->hw);
6006 
6007 	if (priv->sph_cap) {
6008 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6009 		u32 chan;
6010 
6011 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
6012 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6013 	}
6014 
6015 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
6016 		priv->hw->hw_vlan_en = true;
6017 	else
6018 		priv->hw->hw_vlan_en = false;
6019 
6020 	stmmac_set_hw_vlan_mode(priv, priv->hw);
6021 
6022 	return 0;
6023 }
6024 
6025 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6026 {
6027 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6028 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6029 	u32 queues_count;
6030 	u32 queue;
6031 	bool xmac;
6032 
6033 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6034 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6035 
6036 	if (priv->irq_wake)
6037 		pm_wakeup_event(priv->device, 0);
6038 
6039 	if (priv->dma_cap.estsel)
6040 		stmmac_est_irq_status(priv, priv, priv->dev,
6041 				      &priv->xstats, tx_cnt);
6042 
6043 	if (stmmac_fpe_supported(priv))
6044 		stmmac_fpe_irq_status(priv);
6045 
6046 	/* To handle the GMAC's own interrupts */
6047 	if ((priv->plat->has_gmac) || xmac) {
6048 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6049 
6050 		if (unlikely(status)) {
6051 			/* For LPI we need to save the tx status */
6052 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6053 				priv->tx_path_in_lpi_mode = true;
6054 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6055 				priv->tx_path_in_lpi_mode = false;
6056 		}
6057 
6058 		for (queue = 0; queue < queues_count; queue++)
6059 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6060 
6061 		/* PCS link status */
6062 		if (priv->hw->pcs &&
6063 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6064 			if (priv->xstats.pcs_link)
6065 				netif_carrier_on(priv->dev);
6066 			else
6067 				netif_carrier_off(priv->dev);
6068 		}
6069 
6070 		stmmac_timestamp_interrupt(priv, priv);
6071 	}
6072 }
6073 
6074 /**
6075  *  stmmac_interrupt - main ISR
6076  *  @irq: interrupt number.
6077  *  @dev_id: to pass the net device pointer.
6078  *  Description: this is the main driver interrupt service routine.
6079  *  It can call:
6080  *  o DMA service routine (to manage incoming frame reception and transmission
6081  *    status)
6082  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6083  *    interrupts.
6084  */
6085 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6086 {
6087 	struct net_device *dev = (struct net_device *)dev_id;
6088 	struct stmmac_priv *priv = netdev_priv(dev);
6089 
6090 	/* Check if adapter is up */
6091 	if (test_bit(STMMAC_DOWN, &priv->state))
6092 		return IRQ_HANDLED;
6093 
6094 	/* Check ASP error if it isn't delivered via an individual IRQ */
6095 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6096 		return IRQ_HANDLED;
6097 
6098 	/* To handle Common interrupts */
6099 	stmmac_common_interrupt(priv);
6100 
6101 	/* To handle DMA interrupts */
6102 	stmmac_dma_interrupt(priv);
6103 
6104 	return IRQ_HANDLED;
6105 }
6106 
6107 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6108 {
6109 	struct net_device *dev = (struct net_device *)dev_id;
6110 	struct stmmac_priv *priv = netdev_priv(dev);
6111 
6112 	/* Check if adapter is up */
6113 	if (test_bit(STMMAC_DOWN, &priv->state))
6114 		return IRQ_HANDLED;
6115 
6116 	/* To handle Common interrupts */
6117 	stmmac_common_interrupt(priv);
6118 
6119 	return IRQ_HANDLED;
6120 }
6121 
6122 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6123 {
6124 	struct net_device *dev = (struct net_device *)dev_id;
6125 	struct stmmac_priv *priv = netdev_priv(dev);
6126 
6127 	/* Check if adapter is up */
6128 	if (test_bit(STMMAC_DOWN, &priv->state))
6129 		return IRQ_HANDLED;
6130 
6131 	/* Check if a fatal error happened */
6132 	stmmac_safety_feat_interrupt(priv);
6133 
6134 	return IRQ_HANDLED;
6135 }
6136 
6137 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6138 {
6139 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6140 	struct stmmac_dma_conf *dma_conf;
6141 	int chan = tx_q->queue_index;
6142 	struct stmmac_priv *priv;
6143 	int status;
6144 
6145 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6146 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6147 
6148 	/* Check if adapter is up */
6149 	if (test_bit(STMMAC_DOWN, &priv->state))
6150 		return IRQ_HANDLED;
6151 
6152 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6153 
6154 	if (unlikely(status & tx_hard_error_bump_tc)) {
6155 		/* Try to bump up the dma threshold on this failure */
6156 		stmmac_bump_dma_threshold(priv, chan);
6157 	} else if (unlikely(status == tx_hard_error)) {
6158 		stmmac_tx_err(priv, chan);
6159 	}
6160 
6161 	return IRQ_HANDLED;
6162 }
6163 
6164 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6165 {
6166 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6167 	struct stmmac_dma_conf *dma_conf;
6168 	int chan = rx_q->queue_index;
6169 	struct stmmac_priv *priv;
6170 
6171 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6172 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6173 
6174 	/* Check if adapter is up */
6175 	if (test_bit(STMMAC_DOWN, &priv->state))
6176 		return IRQ_HANDLED;
6177 
6178 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6179 
6180 	return IRQ_HANDLED;
6181 }
6182 
6183 /**
6184  *  stmmac_ioctl - Entry point for the Ioctl
6185  *  @dev: Device pointer.
6186  *  @rq: An IOCTL-specific structure that can contain a pointer to
6187  *  a proprietary structure used to pass information to the driver.
6188  *  @cmd: IOCTL command
6189  *  Description:
6190  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6191  */
6192 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6193 {
6194 	struct stmmac_priv *priv = netdev_priv(dev);
6195 	int ret = -EOPNOTSUPP;
6196 
6197 	if (!netif_running(dev))
6198 		return -EINVAL;
6199 
6200 	switch (cmd) {
6201 	case SIOCGMIIPHY:
6202 	case SIOCGMIIREG:
6203 	case SIOCSMIIREG:
6204 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6205 		break;
6206 	case SIOCSHWTSTAMP:
6207 		ret = stmmac_hwtstamp_set(dev, rq);
6208 		break;
6209 	case SIOCGHWTSTAMP:
6210 		ret = stmmac_hwtstamp_get(dev, rq);
6211 		break;
6212 	default:
6213 		break;
6214 	}
6215 
6216 	return ret;
6217 }
6218 
6219 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6220 				    void *cb_priv)
6221 {
6222 	struct stmmac_priv *priv = cb_priv;
6223 	int ret = -EOPNOTSUPP;
6224 
6225 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6226 		return ret;
6227 
6228 	__stmmac_disable_all_queues(priv);
6229 
6230 	switch (type) {
6231 	case TC_SETUP_CLSU32:
6232 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6233 		break;
6234 	case TC_SETUP_CLSFLOWER:
6235 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6236 		break;
6237 	default:
6238 		break;
6239 	}
6240 
6241 	stmmac_enable_all_queues(priv);
6242 	return ret;
6243 }
6244 
6245 static LIST_HEAD(stmmac_block_cb_list);
6246 
6247 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6248 			   void *type_data)
6249 {
6250 	struct stmmac_priv *priv = netdev_priv(ndev);
6251 
6252 	switch (type) {
6253 	case TC_QUERY_CAPS:
6254 		return stmmac_tc_query_caps(priv, priv, type_data);
6255 	case TC_SETUP_QDISC_MQPRIO:
6256 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6257 	case TC_SETUP_BLOCK:
6258 		return flow_block_cb_setup_simple(type_data,
6259 						  &stmmac_block_cb_list,
6260 						  stmmac_setup_tc_block_cb,
6261 						  priv, priv, true);
6262 	case TC_SETUP_QDISC_CBS:
6263 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6264 	case TC_SETUP_QDISC_TAPRIO:
6265 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6266 	case TC_SETUP_QDISC_ETF:
6267 		return stmmac_tc_setup_etf(priv, priv, type_data);
6268 	default:
6269 		return -EOPNOTSUPP;
6270 	}
6271 }
6272 
6273 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6274 			       struct net_device *sb_dev)
6275 {
6276 	int gso = skb_shinfo(skb)->gso_type;
6277 
6278 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6279 		/*
6280 		 * There is no way to determine the number of TSO/USO
6281 		 * capable Queues. Let's always use Queue 0
6282 		 * because if TSO/USO is supported then at least this
6283 		 * one will be capable.
6284 		 */
6285 		return 0;
6286 	}
6287 
6288 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6289 }
6290 
6291 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6292 {
6293 	struct stmmac_priv *priv = netdev_priv(ndev);
6294 	int ret = 0;
6295 
6296 	ret = pm_runtime_resume_and_get(priv->device);
6297 	if (ret < 0)
6298 		return ret;
6299 
6300 	ret = eth_mac_addr(ndev, addr);
6301 	if (ret)
6302 		goto set_mac_error;
6303 
6304 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6305 
6306 set_mac_error:
6307 	pm_runtime_put(priv->device);
6308 
6309 	return ret;
6310 }
6311 
6312 #ifdef CONFIG_DEBUG_FS
6313 static struct dentry *stmmac_fs_dir;
6314 
6315 static void sysfs_display_ring(void *head, int size, int extend_desc,
6316 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6317 {
6318 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6319 	struct dma_desc *p = (struct dma_desc *)head;
6320 	unsigned int desc_size;
6321 	dma_addr_t dma_addr;
6322 	int i;
6323 
6324 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6325 	for (i = 0; i < size; i++) {
6326 		dma_addr = dma_phy_addr + i * desc_size;
6327 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6328 				i, &dma_addr,
6329 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6330 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6331 		if (extend_desc)
6332 			p = &(++ep)->basic;
6333 		else
6334 			p++;
6335 	}
6336 }
6337 
6338 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6339 {
6340 	struct net_device *dev = seq->private;
6341 	struct stmmac_priv *priv = netdev_priv(dev);
6342 	u32 rx_count = priv->plat->rx_queues_to_use;
6343 	u32 tx_count = priv->plat->tx_queues_to_use;
6344 	u32 queue;
6345 
6346 	if ((dev->flags & IFF_UP) == 0)
6347 		return 0;
6348 
6349 	for (queue = 0; queue < rx_count; queue++) {
6350 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6351 
6352 		seq_printf(seq, "RX Queue %d:\n", queue);
6353 
6354 		if (priv->extend_desc) {
6355 			seq_printf(seq, "Extended descriptor ring:\n");
6356 			sysfs_display_ring((void *)rx_q->dma_erx,
6357 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6358 		} else {
6359 			seq_printf(seq, "Descriptor ring:\n");
6360 			sysfs_display_ring((void *)rx_q->dma_rx,
6361 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6362 		}
6363 	}
6364 
6365 	for (queue = 0; queue < tx_count; queue++) {
6366 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6367 
6368 		seq_printf(seq, "TX Queue %d:\n", queue);
6369 
6370 		if (priv->extend_desc) {
6371 			seq_printf(seq, "Extended descriptor ring:\n");
6372 			sysfs_display_ring((void *)tx_q->dma_etx,
6373 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6374 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6375 			seq_printf(seq, "Descriptor ring:\n");
6376 			sysfs_display_ring((void *)tx_q->dma_tx,
6377 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6378 		}
6379 	}
6380 
6381 	return 0;
6382 }
6383 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6384 
6385 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6386 {
6387 	static const char * const dwxgmac_timestamp_source[] = {
6388 		"None",
6389 		"Internal",
6390 		"External",
6391 		"Both",
6392 	};
6393 	static const char * const dwxgmac_safety_feature_desc[] = {
6394 		"No",
6395 		"All Safety Features with ECC and Parity",
6396 		"All Safety Features without ECC or Parity",
6397 		"All Safety Features with Parity Only",
6398 		"ECC Only",
6399 		"UNDEFINED",
6400 		"UNDEFINED",
6401 		"UNDEFINED",
6402 	};
6403 	struct net_device *dev = seq->private;
6404 	struct stmmac_priv *priv = netdev_priv(dev);
6405 
6406 	if (!priv->hw_cap_support) {
6407 		seq_printf(seq, "DMA HW features not supported\n");
6408 		return 0;
6409 	}
6410 
6411 	seq_printf(seq, "==============================\n");
6412 	seq_printf(seq, "\tDMA HW features\n");
6413 	seq_printf(seq, "==============================\n");
6414 
6415 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6416 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6417 	seq_printf(seq, "\t1000 Mbps: %s\n",
6418 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6419 	seq_printf(seq, "\tHalf duplex: %s\n",
6420 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6421 	if (priv->plat->has_xgmac) {
6422 		seq_printf(seq,
6423 			   "\tNumber of Additional MAC address registers: %d\n",
6424 			   priv->dma_cap.multi_addr);
6425 	} else {
6426 		seq_printf(seq, "\tHash Filter: %s\n",
6427 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6428 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6429 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6430 	}
6431 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6432 		   (priv->dma_cap.pcs) ? "Y" : "N");
6433 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6434 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6435 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6436 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6437 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6438 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6439 	seq_printf(seq, "\tRMON module: %s\n",
6440 		   (priv->dma_cap.rmon) ? "Y" : "N");
6441 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6442 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6443 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6444 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6445 	if (priv->plat->has_xgmac)
6446 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6447 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6448 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6449 		   (priv->dma_cap.eee) ? "Y" : "N");
6450 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6451 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6452 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6453 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6454 	    priv->plat->has_xgmac) {
6455 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6456 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6457 	} else {
6458 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6459 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6460 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6461 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6462 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6463 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6464 	}
6465 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6466 		   priv->dma_cap.number_rx_channel);
6467 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6468 		   priv->dma_cap.number_tx_channel);
6469 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6470 		   priv->dma_cap.number_rx_queues);
6471 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6472 		   priv->dma_cap.number_tx_queues);
6473 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6474 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6475 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6476 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6477 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6478 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6479 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6480 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6481 		   priv->dma_cap.pps_out_num);
6482 	seq_printf(seq, "\tSafety Features: %s\n",
6483 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6484 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6485 		   priv->dma_cap.frpsel ? "Y" : "N");
6486 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6487 		   priv->dma_cap.host_dma_width);
6488 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6489 		   priv->dma_cap.rssen ? "Y" : "N");
6490 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6491 		   priv->dma_cap.vlhash ? "Y" : "N");
6492 	seq_printf(seq, "\tSplit Header: %s\n",
6493 		   priv->dma_cap.sphen ? "Y" : "N");
6494 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6495 		   priv->dma_cap.vlins ? "Y" : "N");
6496 	seq_printf(seq, "\tDouble VLAN: %s\n",
6497 		   priv->dma_cap.dvlan ? "Y" : "N");
6498 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6499 		   priv->dma_cap.l3l4fnum);
6500 	seq_printf(seq, "\tARP Offloading: %s\n",
6501 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6502 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6503 		   priv->dma_cap.estsel ? "Y" : "N");
6504 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6505 		   priv->dma_cap.fpesel ? "Y" : "N");
6506 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6507 		   priv->dma_cap.tbssel ? "Y" : "N");
6508 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6509 		   priv->dma_cap.tbs_ch_num);
6510 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6511 		   priv->dma_cap.sgfsel ? "Y" : "N");
6512 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6513 		   BIT(priv->dma_cap.ttsfd) >> 1);
6514 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6515 		   priv->dma_cap.numtc);
6516 	seq_printf(seq, "\tDCB Feature: %s\n",
6517 		   priv->dma_cap.dcben ? "Y" : "N");
6518 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6519 		   priv->dma_cap.advthword ? "Y" : "N");
6520 	seq_printf(seq, "\tPTP Offload: %s\n",
6521 		   priv->dma_cap.ptoen ? "Y" : "N");
6522 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6523 		   priv->dma_cap.osten ? "Y" : "N");
6524 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6525 		   priv->dma_cap.pfcen ? "Y" : "N");
6526 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6527 		   BIT(priv->dma_cap.frpes) << 6);
6528 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6529 		   BIT(priv->dma_cap.frpbs) << 6);
6530 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6531 		   priv->dma_cap.frppipe_num);
6532 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6533 		   priv->dma_cap.nrvf_num ?
6534 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6535 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6536 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6537 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6538 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6539 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6540 		   priv->dma_cap.cbtisel ? "Y" : "N");
6541 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6542 		   priv->dma_cap.aux_snapshot_n);
6543 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6544 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6545 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6546 		   priv->dma_cap.edma ? "Y" : "N");
6547 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6548 		   priv->dma_cap.ediffc ? "Y" : "N");
6549 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6550 		   priv->dma_cap.vxn ? "Y" : "N");
6551 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6552 		   priv->dma_cap.dbgmem ? "Y" : "N");
6553 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6554 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6555 	return 0;
6556 }
6557 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6558 
6559 /* Use network device events to rename debugfs file entries.
6560  */
6561 static int stmmac_device_event(struct notifier_block *unused,
6562 			       unsigned long event, void *ptr)
6563 {
6564 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6565 	struct stmmac_priv *priv = netdev_priv(dev);
6566 
6567 	if (dev->netdev_ops != &stmmac_netdev_ops)
6568 		goto done;
6569 
6570 	switch (event) {
6571 	case NETDEV_CHANGENAME:
6572 		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6573 		break;
6574 	}
6575 done:
6576 	return NOTIFY_DONE;
6577 }
6578 
6579 static struct notifier_block stmmac_notifier = {
6580 	.notifier_call = stmmac_device_event,
6581 };
6582 
6583 static void stmmac_init_fs(struct net_device *dev)
6584 {
6585 	struct stmmac_priv *priv = netdev_priv(dev);
6586 
6587 	rtnl_lock();
6588 
6589 	/* Create per netdev entries */
6590 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6591 
6592 	/* Entry to report DMA RX/TX rings */
6593 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6594 			    &stmmac_rings_status_fops);
6595 
6596 	/* Entry to report the DMA HW features */
6597 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6598 			    &stmmac_dma_cap_fops);
6599 
6600 	rtnl_unlock();
6601 }
6602 
6603 static void stmmac_exit_fs(struct net_device *dev)
6604 {
6605 	struct stmmac_priv *priv = netdev_priv(dev);
6606 
6607 	debugfs_remove_recursive(priv->dbgfs_dir);
6608 }
6609 #endif /* CONFIG_DEBUG_FS */
6610 
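/* Bit-serial CRC-32 (reflected polynomial 0xedb88320) over the low 12 VID
 * bits (VLAN_VID_MASK) of the little-endian VLAN ID, used to index the
 * hardware VLAN hash filter.
 */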
6611 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6612 {
6613 	unsigned char *data = (unsigned char *)&vid_le;
6614 	unsigned char data_byte = 0;
6615 	u32 crc = ~0x0;
6616 	u32 temp = 0;
6617 	int i, bits;
6618 
6619 	bits = get_bitmask_order(VLAN_VID_MASK);
6620 	for (i = 0; i < bits; i++) {
6621 		if ((i % 8) == 0)
6622 			data_byte = data[i / 8];
6623 
6624 		temp = ((crc & 1) ^ data_byte) & 1;
6625 		crc >>= 1;
6626 		data_byte >>= 1;
6627 
6628 		if (temp)
6629 			crc ^= 0xedb88320;
6630 	}
6631 
6632 	return crc;
6633 }
6634 
6635 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6636 {
6637 	u32 crc, hash = 0;
6638 	u16 pmatch = 0;
6639 	int count = 0;
6640 	u16 vid = 0;
6641 
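	/* Build a 16-bit hash filter value: the top four bits of the
	 * bit-reversed, inverted CRC-32 of each active VID select one bit of
	 * the hash. Without VLAN hash support in hardware, fall back to
	 * perfect filtering of a single VID (VID 0 always passes).
	 */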
6642 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6643 		__le16 vid_le = cpu_to_le16(vid);
6644 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6645 		hash |= (1 << crc);
6646 		count++;
6647 	}
6648 
6649 	if (!priv->dma_cap.vlhash) {
6650 		if (count > 2) /* VID = 0 always passes filter */
6651 			return -EOPNOTSUPP;
6652 
6653 		pmatch = vid;
6654 		hash = 0;
6655 	}
6656 
6657 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6658 }
6659 
6660 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6661 {
6662 	struct stmmac_priv *priv = netdev_priv(ndev);
6663 	bool is_double = false;
6664 	int ret;
6665 
6666 	ret = pm_runtime_resume_and_get(priv->device);
6667 	if (ret < 0)
6668 		return ret;
6669 
6670 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6671 		is_double = true;
6672 
6673 	set_bit(vid, priv->active_vlans);
6674 	ret = stmmac_vlan_update(priv, is_double);
6675 	if (ret) {
6676 		clear_bit(vid, priv->active_vlans);
6677 		goto err_pm_put;
6678 	}
6679 
6680 	if (priv->hw->num_vlan) {
6681 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6682 		if (ret)
6683 			goto err_pm_put;
6684 	}
6685 err_pm_put:
6686 	pm_runtime_put(priv->device);
6687 
6688 	return ret;
6689 }
6690 
6691 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6692 {
6693 	struct stmmac_priv *priv = netdev_priv(ndev);
6694 	bool is_double = false;
6695 	int ret;
6696 
6697 	ret = pm_runtime_resume_and_get(priv->device);
6698 	if (ret < 0)
6699 		return ret;
6700 
6701 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6702 		is_double = true;
6703 
6704 	clear_bit(vid, priv->active_vlans);
6705 
6706 	if (priv->hw->num_vlan) {
6707 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6708 		if (ret)
6709 			goto del_vlan_error;
6710 	}
6711 
6712 	ret = stmmac_vlan_update(priv, is_double);
6713 
6714 del_vlan_error:
6715 	pm_runtime_put(priv->device);
6716 
6717 	return ret;
6718 }
6719 
6720 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6721 {
6722 	struct stmmac_priv *priv = netdev_priv(dev);
6723 
6724 	switch (bpf->command) {
6725 	case XDP_SETUP_PROG:
6726 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6727 	case XDP_SETUP_XSK_POOL:
6728 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6729 					     bpf->xsk.queue_id);
6730 	default:
6731 		return -EOPNOTSUPP;
6732 	}
6733 }
6734 
6735 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6736 			   struct xdp_frame **frames, u32 flags)
6737 {
6738 	struct stmmac_priv *priv = netdev_priv(dev);
6739 	int cpu = smp_processor_id();
6740 	struct netdev_queue *nq;
6741 	int i, nxmit = 0;
6742 	int queue;
6743 
6744 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6745 		return -ENETDOWN;
6746 
6747 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6748 		return -EINVAL;
6749 
6750 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6751 	nq = netdev_get_tx_queue(priv->dev, queue);
6752 
6753 	__netif_tx_lock(nq, cpu);
6754 	/* Avoids TX time-out as we are sharing with slow path */
6755 	txq_trans_cond_update(nq);
6756 
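	/* Queue each frame on the selected TX queue; stop at the first frame
	 * the driver cannot accept (STMMAC_XDP_CONSUMED) and report how many
	 * frames were actually queued, as ndo_xdp_xmit() expects.
	 */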
6757 	for (i = 0; i < num_frames; i++) {
6758 		int res;
6759 
6760 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6761 		if (res == STMMAC_XDP_CONSUMED)
6762 			break;
6763 
6764 		nxmit++;
6765 	}
6766 
6767 	if (flags & XDP_XMIT_FLUSH) {
6768 		stmmac_flush_tx_descriptors(priv, queue);
6769 		stmmac_tx_timer_arm(priv, queue);
6770 	}
6771 
6772 	__netif_tx_unlock(nq);
6773 
6774 	return nxmit;
6775 }
6776 
6777 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6778 {
6779 	struct stmmac_channel *ch = &priv->channel[queue];
6780 	unsigned long flags;
6781 
6782 	spin_lock_irqsave(&ch->lock, flags);
6783 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6784 	spin_unlock_irqrestore(&ch->lock, flags);
6785 
6786 	stmmac_stop_rx_dma(priv, queue);
6787 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6788 }
6789 
6790 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6791 {
6792 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6793 	struct stmmac_channel *ch = &priv->channel[queue];
6794 	unsigned long flags;
6795 	u32 buf_size;
6796 	int ret;
6797 
6798 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6799 	if (ret) {
6800 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6801 		return;
6802 	}
6803 
6804 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6805 	if (ret) {
6806 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6807 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6808 		return;
6809 	}
6810 
6811 	stmmac_reset_rx_queue(priv, queue);
6812 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6813 
6814 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6815 			    rx_q->dma_rx_phy, rx_q->queue_index);
6816 
6817 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6818 			     sizeof(struct dma_desc));
6819 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6820 			       rx_q->rx_tail_addr, rx_q->queue_index);
6821 
6822 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6823 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6824 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6825 				      buf_size,
6826 				      rx_q->queue_index);
6827 	} else {
6828 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6829 				      priv->dma_conf.dma_buf_sz,
6830 				      rx_q->queue_index);
6831 	}
6832 
6833 	stmmac_start_rx_dma(priv, queue);
6834 
6835 	spin_lock_irqsave(&ch->lock, flags);
6836 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6837 	spin_unlock_irqrestore(&ch->lock, flags);
6838 }
6839 
6840 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6841 {
6842 	struct stmmac_channel *ch = &priv->channel[queue];
6843 	unsigned long flags;
6844 
6845 	spin_lock_irqsave(&ch->lock, flags);
6846 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6847 	spin_unlock_irqrestore(&ch->lock, flags);
6848 
6849 	stmmac_stop_tx_dma(priv, queue);
6850 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6851 }
6852 
6853 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6854 {
6855 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6856 	struct stmmac_channel *ch = &priv->channel[queue];
6857 	unsigned long flags;
6858 	int ret;
6859 
6860 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6861 	if (ret) {
6862 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6863 		return;
6864 	}
6865 
6866 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6867 	if (ret) {
6868 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6869 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6870 		return;
6871 	}
6872 
6873 	stmmac_reset_tx_queue(priv, queue);
6874 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6875 
6876 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6877 			    tx_q->dma_tx_phy, tx_q->queue_index);
6878 
6879 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6880 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6881 
6882 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6883 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6884 			       tx_q->tx_tail_addr, tx_q->queue_index);
6885 
6886 	stmmac_start_tx_dma(priv, queue);
6887 
6888 	spin_lock_irqsave(&ch->lock, flags);
6889 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6890 	spin_unlock_irqrestore(&ch->lock, flags);
6891 }
6892 
6893 void stmmac_xdp_release(struct net_device *dev)
6894 {
6895 	struct stmmac_priv *priv = netdev_priv(dev);
6896 	u32 chan;
6897 
6898 	/* Ensure tx function is not running */
6899 	netif_tx_disable(dev);
6900 
6901 	/* Disable NAPI process */
6902 	stmmac_disable_all_queues(priv);
6903 
6904 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6905 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6906 
6907 	/* Free the IRQ lines */
6908 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6909 
6910 	/* Stop TX/RX DMA channels */
6911 	stmmac_stop_all_dma(priv);
6912 
6913 	/* Release and free the Rx/Tx resources */
6914 	free_dma_desc_resources(priv, &priv->dma_conf);
6915 
6916 	/* Disable the MAC Rx/Tx */
6917 	stmmac_mac_set(priv, priv->ioaddr, false);
6918 
6919 	/* set trans_start so we don't get spurious
6920 	 * watchdogs during reset
6921 	 */
6922 	netif_trans_update(dev);
6923 	netif_carrier_off(dev);
6924 }
6925 
6926 int stmmac_xdp_open(struct net_device *dev)
6927 {
6928 	struct stmmac_priv *priv = netdev_priv(dev);
6929 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6930 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6931 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6932 	struct stmmac_rx_queue *rx_q;
6933 	struct stmmac_tx_queue *tx_q;
6934 	u32 buf_size;
6935 	bool sph_en;
6936 	u32 chan;
6937 	int ret;
6938 
6939 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6940 	if (ret < 0) {
6941 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6942 			   __func__);
6943 		goto dma_desc_error;
6944 	}
6945 
6946 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6947 	if (ret < 0) {
6948 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6949 			   __func__);
6950 		goto init_error;
6951 	}
6952 
6953 	stmmac_reset_queues_param(priv);
6954 
6955 	/* DMA CSR Channel configuration */
6956 	for (chan = 0; chan < dma_csr_ch; chan++) {
6957 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6958 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6959 	}
6960 
6961 	/* Split Header (SPH) can only be used with RX checksum offload enabled */
6962 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6963 
6964 	/* DMA RX Channel Configuration */
6965 	for (chan = 0; chan < rx_cnt; chan++) {
6966 		rx_q = &priv->dma_conf.rx_queue[chan];
6967 
6968 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6969 				    rx_q->dma_rx_phy, chan);
6970 
6971 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6972 				     (rx_q->buf_alloc_num *
6973 				      sizeof(struct dma_desc));
6974 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6975 				       rx_q->rx_tail_addr, chan);
6976 
6977 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6978 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6979 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6980 					      buf_size,
6981 					      rx_q->queue_index);
6982 		} else {
6983 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6984 					      priv->dma_conf.dma_buf_sz,
6985 					      rx_q->queue_index);
6986 		}
6987 
6988 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6989 	}
6990 
6991 	/* DMA TX Channel Configuration */
6992 	for (chan = 0; chan < tx_cnt; chan++) {
6993 		tx_q = &priv->dma_conf.tx_queue[chan];
6994 
6995 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6996 				    tx_q->dma_tx_phy, chan);
6997 
6998 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6999 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7000 				       tx_q->tx_tail_addr, chan);
7001 
7002 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7003 		tx_q->txtimer.function = stmmac_tx_timer;
7004 	}
7005 
7006 	/* Enable the MAC Rx/Tx */
7007 	stmmac_mac_set(priv, priv->ioaddr, true);
7008 
7009 	/* Start Rx & Tx DMA Channels */
7010 	stmmac_start_all_dma(priv);
7011 
7012 	ret = stmmac_request_irq(dev);
7013 	if (ret)
7014 		goto irq_error;
7015 
7016 	/* Enable NAPI process */
7017 	stmmac_enable_all_queues(priv);
7018 	netif_carrier_on(dev);
7019 	netif_tx_start_all_queues(dev);
7020 	stmmac_enable_all_dma_irq(priv);
7021 
7022 	return 0;
7023 
7024 irq_error:
7025 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7026 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7027 
7028 	stmmac_hw_teardown(dev);
7029 init_error:
7030 	free_dma_desc_resources(priv, &priv->dma_conf);
7031 dma_desc_error:
7032 	return ret;
7033 }
7034 
7035 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7036 {
7037 	struct stmmac_priv *priv = netdev_priv(dev);
7038 	struct stmmac_rx_queue *rx_q;
7039 	struct stmmac_tx_queue *tx_q;
7040 	struct stmmac_channel *ch;
7041 
7042 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7043 	    !netif_carrier_ok(priv->dev))
7044 		return -ENETDOWN;
7045 
7046 	if (!stmmac_xdp_is_enabled(priv))
7047 		return -EINVAL;
7048 
7049 	if (queue >= priv->plat->rx_queues_to_use ||
7050 	    queue >= priv->plat->tx_queues_to_use)
7051 		return -EINVAL;
7052 
7053 	rx_q = &priv->dma_conf.rx_queue[queue];
7054 	tx_q = &priv->dma_conf.tx_queue[queue];
7055 	ch = &priv->channel[queue];
7056 
7057 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7058 		return -EINVAL;
7059 
7060 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7061 		/* EQoS does not have a per-DMA channel SW interrupt,
7062 		 * so we schedule NAPI straight away.
7063 		 */
7064 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7065 			__napi_schedule(&ch->rxtx_napi);
7066 	}
7067 
7068 	return 0;
7069 }
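
/* Illustrative only (not part of the driver): a minimal AF_XDP userspace
 * sketch, assuming an xsk socket already set up with libxdp, that ends up
 * invoking .ndo_xsk_wakeup() above when the kernel asks for a kick:
 *
 *	#include <xdp/xsk.h>
 *	#include <sys/socket.h>
 *
 *	static void kick_tx(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
 *	{
 *		if (xsk_ring_prod__needs_wakeup(tx))
 *			sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT,
 *			       NULL, 0);
 *	}
 */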
7070 
7071 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7072 {
7073 	struct stmmac_priv *priv = netdev_priv(dev);
7074 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7075 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7076 	unsigned int start;
7077 	int q;
7078 
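	/* Per-queue counters are updated from both the xmit path and NAPI;
	 * read each group under its own u64_stats syncp so that 64-bit
	 * values stay consistent on 32-bit hosts.
	 */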
7079 	for (q = 0; q < tx_cnt; q++) {
7080 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7081 		u64 tx_packets;
7082 		u64 tx_bytes;
7083 
7084 		do {
7085 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7086 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7087 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7088 		do {
7089 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7090 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7091 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7092 
7093 		stats->tx_packets += tx_packets;
7094 		stats->tx_bytes += tx_bytes;
7095 	}
7096 
7097 	for (q = 0; q < rx_cnt; q++) {
7098 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7099 		u64 rx_packets;
7100 		u64 rx_bytes;
7101 
7102 		do {
7103 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7104 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7105 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7106 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7107 
7108 		stats->rx_packets += rx_packets;
7109 		stats->rx_bytes += rx_bytes;
7110 	}
7111 
7112 	stats->rx_dropped = priv->xstats.rx_dropped;
7113 	stats->rx_errors = priv->xstats.rx_errors;
7114 	stats->tx_dropped = priv->xstats.tx_dropped;
7115 	stats->tx_errors = priv->xstats.tx_errors;
7116 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7117 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7118 	stats->rx_length_errors = priv->xstats.rx_length;
7119 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7120 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7121 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7122 }
7123 
7124 static const struct net_device_ops stmmac_netdev_ops = {
7125 	.ndo_open = stmmac_open,
7126 	.ndo_start_xmit = stmmac_xmit,
7127 	.ndo_stop = stmmac_release,
7128 	.ndo_change_mtu = stmmac_change_mtu,
7129 	.ndo_fix_features = stmmac_fix_features,
7130 	.ndo_set_features = stmmac_set_features,
7131 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7132 	.ndo_tx_timeout = stmmac_tx_timeout,
7133 	.ndo_eth_ioctl = stmmac_ioctl,
7134 	.ndo_get_stats64 = stmmac_get_stats64,
7135 	.ndo_setup_tc = stmmac_setup_tc,
7136 	.ndo_select_queue = stmmac_select_queue,
7137 	.ndo_set_mac_address = stmmac_set_mac_address,
7138 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7139 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7140 	.ndo_bpf = stmmac_bpf,
7141 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7142 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7143 };
7144 
7145 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7146 {
7147 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7148 		return;
7149 	if (test_bit(STMMAC_DOWN, &priv->state))
7150 		return;
7151 
7152 	netdev_err(priv->dev, "Reset adapter.\n");
7153 
7154 	rtnl_lock();
7155 	netif_trans_update(priv->dev);
7156 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7157 		usleep_range(1000, 2000);
7158 
7159 	set_bit(STMMAC_DOWN, &priv->state);
7160 	dev_close(priv->dev);
7161 	dev_open(priv->dev, NULL);
7162 	clear_bit(STMMAC_DOWN, &priv->state);
7163 	clear_bit(STMMAC_RESETING, &priv->state);
7164 	rtnl_unlock();
7165 }
7166 
7167 static void stmmac_service_task(struct work_struct *work)
7168 {
7169 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7170 			service_task);
7171 
7172 	stmmac_reset_subtask(priv);
7173 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7174 }
7175 
7176 /**
7177  *  stmmac_hw_init - Init the MAC device
7178  *  @priv: driver private structure
7179  *  Description: this function configures the MAC device according to the
7180  *  platform parameters and the HW capability register. It prepares the
7181  *  driver to use either ring or chain mode and either enhanced or normal
7182  *  descriptors.
7183  */
7184 static int stmmac_hw_init(struct stmmac_priv *priv)
7185 {
7186 	int ret;
7187 
7188 	/* dwmac-sun8i only works in chain mode */
7189 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7190 		chain_mode = 1;
7191 	priv->chain_mode = chain_mode;
7192 
7193 	/* Initialize HW Interface */
7194 	ret = stmmac_hwif_init(priv);
7195 	if (ret)
7196 		return ret;
7197 
7198 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7199 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7200 	if (priv->hw_cap_support) {
7201 		dev_info(priv->device, "DMA HW capability register supported\n");
7202 
7203 		/* Some GMAC/DMA configuration fields passed through the
7204 		 * platform data (e.g. enh_desc, tx_coe) can be overridden
7205 		 * with the values from the HW capability register, if
7206 		 * supported.
7207 		 */
7208 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7209 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7210 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7211 		priv->hw->pmt = priv->plat->pmt;
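		/* hash_tb_sz encodes 32 << hash_tb_sz hash bins, e.g. a
		 * value of 2 gives BIT(2) << 5 = 128 bins and
		 * mcast_bits_log2 = ilog2(128) = 7.
		 */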
7212 		if (priv->dma_cap.hash_tb_sz) {
7213 			priv->hw->multicast_filter_bins =
7214 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7215 			priv->hw->mcast_bits_log2 =
7216 					ilog2(priv->hw->multicast_filter_bins);
7217 		}
7218 
7219 		/* TX COE doesn't work in threshold DMA mode */
7220 		if (priv->plat->force_thresh_dma_mode)
7221 			priv->plat->tx_coe = 0;
7222 		else
7223 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7224 
7225 		/* In the case of GMAC4, rx_coe comes from the HW capability register. */
7226 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7227 
7228 		if (priv->dma_cap.rx_coe_type2)
7229 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7230 		else if (priv->dma_cap.rx_coe_type1)
7231 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7232 
7233 	} else {
7234 		dev_info(priv->device, "No HW DMA feature register supported\n");
7235 	}
7236 
7237 	if (priv->plat->rx_coe) {
7238 		priv->hw->rx_csum = priv->plat->rx_coe;
7239 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7240 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7241 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7242 	}
7243 	if (priv->plat->tx_coe)
7244 		dev_info(priv->device, "TX Checksum insertion supported\n");
7245 
7246 	if (priv->plat->pmt) {
7247 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7248 		device_set_wakeup_capable(priv->device, 1);
7249 	}
7250 
7251 	if (priv->dma_cap.tsoen)
7252 		dev_info(priv->device, "TSO supported\n");
7253 
7254 	if (priv->dma_cap.number_rx_queues &&
7255 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7256 		dev_warn(priv->device,
7257 			 "Number of Rx queues (%u) exceeds dma capability\n",
7258 			 priv->plat->rx_queues_to_use);
7259 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7260 	}
7261 	if (priv->dma_cap.number_tx_queues &&
7262 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7263 		dev_warn(priv->device,
7264 			 "Number of Tx queues (%u) exceeds dma capability\n",
7265 			 priv->plat->tx_queues_to_use);
7266 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7267 	}
7268 
7269 	if (priv->dma_cap.rx_fifo_size &&
7270 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7271 		dev_warn(priv->device,
7272 			 "Rx FIFO size (%u) exceeds dma capability\n",
7273 			 priv->plat->rx_fifo_size);
7274 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7275 	}
7276 	if (priv->dma_cap.tx_fifo_size &&
7277 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7278 		dev_warn(priv->device,
7279 			 "Tx FIFO size (%u) exceeds dma capability\n",
7280 			 priv->plat->tx_fifo_size);
7281 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7282 	}
7283 
7284 	priv->hw->vlan_fail_q_en =
7285 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7286 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7287 
7288 	/* Run HW quirks, if any */
7289 	if (priv->hwif_quirks) {
7290 		ret = priv->hwif_quirks(priv);
7291 		if (ret)
7292 			return ret;
7293 	}
7294 
7295 	/* The Rx Watchdog is available in cores newer than 3.40.
7296 	 * In some cases, for example on buggy HW, this feature has to
7297 	 * be disabled; this can be done by passing the riwt_off field
7298 	 * from the platform.
7299 	 */
7300 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7301 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7302 		priv->use_riwt = 1;
7303 		dev_info(priv->device,
7304 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7305 	}
7306 
7307 	return 0;
7308 }
7309 
7310 static void stmmac_napi_add(struct net_device *dev)
7311 {
7312 	struct stmmac_priv *priv = netdev_priv(dev);
7313 	u32 queue, maxq;
7314 
7315 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7316 
7317 	for (queue = 0; queue < maxq; queue++) {
7318 		struct stmmac_channel *ch = &priv->channel[queue];
7319 
7320 		ch->priv_data = priv;
7321 		ch->index = queue;
7322 		spin_lock_init(&ch->lock);
7323 
7324 		if (queue < priv->plat->rx_queues_to_use) {
7325 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7326 		}
7327 		if (queue < priv->plat->tx_queues_to_use) {
7328 			netif_napi_add_tx(dev, &ch->tx_napi,
7329 					  stmmac_napi_poll_tx);
7330 		}
7331 		if (queue < priv->plat->rx_queues_to_use &&
7332 		    queue < priv->plat->tx_queues_to_use) {
7333 			netif_napi_add(dev, &ch->rxtx_napi,
7334 				       stmmac_napi_poll_rxtx);
7335 		}
7336 	}
7337 }
7338 
7339 static void stmmac_napi_del(struct net_device *dev)
7340 {
7341 	struct stmmac_priv *priv = netdev_priv(dev);
7342 	u32 queue, maxq;
7343 
7344 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7345 
7346 	for (queue = 0; queue < maxq; queue++) {
7347 		struct stmmac_channel *ch = &priv->channel[queue];
7348 
7349 		if (queue < priv->plat->rx_queues_to_use)
7350 			netif_napi_del(&ch->rx_napi);
7351 		if (queue < priv->plat->tx_queues_to_use)
7352 			netif_napi_del(&ch->tx_napi);
7353 		if (queue < priv->plat->rx_queues_to_use &&
7354 		    queue < priv->plat->tx_queues_to_use) {
7355 			netif_napi_del(&ch->rxtx_napi);
7356 		}
7357 	}
7358 }
7359 
7360 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7361 {
7362 	struct stmmac_priv *priv = netdev_priv(dev);
7363 	int ret = 0, i;
7364 
7365 	if (netif_running(dev))
7366 		stmmac_release(dev);
7367 
7368 	stmmac_napi_del(dev);
7369 
7370 	priv->plat->rx_queues_to_use = rx_cnt;
7371 	priv->plat->tx_queues_to_use = tx_cnt;
7372 	if (!netif_is_rxfh_configured(dev))
7373 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7374 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7375 									rx_cnt);
7376 
7377 	stmmac_napi_add(dev);
7378 
7379 	if (netif_running(dev))
7380 		ret = stmmac_open(dev);
7381 
7382 	return ret;
7383 }
7384 
7385 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7386 {
7387 	struct stmmac_priv *priv = netdev_priv(dev);
7388 	int ret = 0;
7389 
7390 	if (netif_running(dev))
7391 		stmmac_release(dev);
7392 
7393 	priv->dma_conf.dma_rx_size = rx_size;
7394 	priv->dma_conf.dma_tx_size = tx_size;
7395 
7396 	if (netif_running(dev))
7397 		ret = stmmac_open(dev);
7398 
7399 	return ret;
7400 }
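
/* These two reinit helpers back the ethtool channel and ring-size paths
 * (typically wired up via the driver's ethtool set_channels/set_ringparam
 * ops), e.g.:
 *
 *	ethtool -L eth0 rx 4 tx 4
 *	ethtool -G eth0 rx 1024 tx 1024
 *
 * Both tear the interface down and reopen it if it was running.
 */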
7401 
7402 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7403 {
7404 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7405 	struct dma_desc *desc_contains_ts = ctx->desc;
7406 	struct stmmac_priv *priv = ctx->priv;
7407 	struct dma_desc *ndesc = ctx->ndesc;
7408 	struct dma_desc *desc = ctx->desc;
7409 	u64 ns = 0;
7410 
7411 	if (!priv->hwts_rx_en)
7412 		return -ENODATA;
7413 
7414 	/* For GMAC4/XGMAC, the valid timestamp is held in the context (next) descriptor. */
7415 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7416 		desc_contains_ts = ndesc;
7417 
7418 	/* Check if timestamp is available */
7419 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7420 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7421 		ns -= priv->plat->cdc_error_adj;
7422 		*timestamp = ns_to_ktime(ns);
7423 		return 0;
7424 	}
7425 
7426 	return -ENODATA;
7427 }
7428 
7429 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7430 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7431 };
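
/* Illustrative only: an XDP program can read this timestamp through the
 * kfunc exposed for RX metadata, e.g. (BPF C, names per the kernel's XDP
 * RX metadata documentation):
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
 *						 __u64 *timestamp) __ksym;
 *
 *	SEC("xdp")
 *	int rx_ts(struct xdp_md *ctx)
 *	{
 *		__u64 ts;
 *
 *		if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
 *			bpf_printk("rx hw ts: %llu", ts);
 *		return XDP_PASS;
 *	}
 */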
7432 
7433 /**
7434  * stmmac_dvr_probe
7435  * @device: device pointer
7436  * @plat_dat: platform data pointer
7437  * @res: stmmac resource pointer
7438  * Description: this is the main probe function, used to allocate the
7439  * net_device and set up the driver private structure.
7440  * Return:
7441  * 0 on success, otherwise a negative errno.
7442  */
7443 int stmmac_dvr_probe(struct device *device,
7444 		     struct plat_stmmacenet_data *plat_dat,
7445 		     struct stmmac_resources *res)
7446 {
7447 	struct net_device *ndev = NULL;
7448 	struct stmmac_priv *priv;
7449 	u32 rxq;
7450 	int i, ret = 0;
7451 
7452 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7453 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7454 	if (!ndev)
7455 		return -ENOMEM;
7456 
7457 	SET_NETDEV_DEV(ndev, device);
7458 
7459 	priv = netdev_priv(ndev);
7460 	priv->device = device;
7461 	priv->dev = ndev;
7462 
7463 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7464 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7465 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7466 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7467 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7468 	}
7469 
7470 	priv->xstats.pcpu_stats =
7471 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7472 	if (!priv->xstats.pcpu_stats)
7473 		return -ENOMEM;
7474 
7475 	stmmac_set_ethtool_ops(ndev);
7476 	priv->pause_time = pause;
7477 	priv->plat = plat_dat;
7478 	priv->ioaddr = res->addr;
7479 	priv->dev->base_addr = (unsigned long)res->addr;
7480 	priv->plat->dma_cfg->multi_msi_en =
7481 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7482 
7483 	priv->dev->irq = res->irq;
7484 	priv->wol_irq = res->wol_irq;
7485 	priv->lpi_irq = res->lpi_irq;
7486 	priv->sfty_irq = res->sfty_irq;
7487 	priv->sfty_ce_irq = res->sfty_ce_irq;
7488 	priv->sfty_ue_irq = res->sfty_ue_irq;
7489 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7490 		priv->rx_irq[i] = res->rx_irq[i];
7491 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7492 		priv->tx_irq[i] = res->tx_irq[i];
7493 
7494 	if (!is_zero_ether_addr(res->mac))
7495 		eth_hw_addr_set(priv->dev, res->mac);
7496 
7497 	dev_set_drvdata(device, priv->dev);
7498 
7499 	/* Verify driver arguments */
7500 	stmmac_verify_args();
7501 
7502 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7503 	if (!priv->af_xdp_zc_qps)
7504 		return -ENOMEM;
7505 
7506 	/* Allocate workqueue */
7507 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7508 	if (!priv->wq) {
7509 		dev_err(priv->device, "failed to create workqueue\n");
7510 		ret = -ENOMEM;
7511 		goto error_wq_init;
7512 	}
7513 
7514 	INIT_WORK(&priv->service_task, stmmac_service_task);
7515 
7516 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7517 
7518 	/* Override with kernel parameters if supplied XXX CRS XXX
7519 	 * this needs to have multiple instances
7520 	 */
7521 	if ((phyaddr >= 0) && (phyaddr <= 31))
7522 		priv->plat->phy_addr = phyaddr;
7523 
7524 	if (priv->plat->stmmac_rst) {
7525 		ret = reset_control_assert(priv->plat->stmmac_rst);
7526 		reset_control_deassert(priv->plat->stmmac_rst);
7527 		/* Some reset controllers provide only a reset callback
7528 		 * instead of an assert + deassert callback pair.
7529 		 */
7530 		if (ret == -ENOTSUPP)
7531 			reset_control_reset(priv->plat->stmmac_rst);
7532 	}
7533 
7534 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7535 	if (ret == -ENOTSUPP)
7536 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7537 			ERR_PTR(ret));
7538 
7539 	/* Wait a bit for the reset to take effect */
7540 	udelay(10);
7541 
7542 	/* Init MAC and get the capabilities */
7543 	ret = stmmac_hw_init(priv);
7544 	if (ret)
7545 		goto error_hw_init;
7546 
7547 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7548 	 */
7549 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7550 		priv->plat->dma_cfg->dche = false;
7551 
7552 	stmmac_check_ether_addr(priv);
7553 
7554 	ndev->netdev_ops = &stmmac_netdev_ops;
7555 
7556 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7557 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7558 
7559 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7560 			    NETIF_F_RXCSUM;
7561 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7562 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7563 
7564 	ret = stmmac_tc_init(priv, priv);
7565 	if (!ret)
7566 		ndev->hw_features |= NETIF_F_HW_TC;
7568 
7569 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7570 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7571 		if (priv->plat->has_gmac4)
7572 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7573 		priv->tso = true;
7574 		dev_info(priv->device, "TSO feature enabled\n");
7575 	}
7576 
7577 	if (priv->dma_cap.sphen &&
7578 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7579 		ndev->hw_features |= NETIF_F_GRO;
7580 		priv->sph_cap = true;
7581 		priv->sph = priv->sph_cap;
7582 		dev_info(priv->device, "SPH feature enabled\n");
7583 	}
7584 
7585 	/* Ideally our host DMA address width is the same as for the
7586 	 * device. However, it may differ and then we have to use our
7587 	 * host DMA width for allocation and the device DMA width for
7588 	 * register handling.
7589 	 */
7590 	if (priv->plat->host_dma_width)
7591 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7592 	else
7593 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7594 
7595 	if (priv->dma_cap.host_dma_width) {
7596 		ret = dma_set_mask_and_coherent(device,
7597 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7598 		if (!ret) {
7599 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7600 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7601 
7602 			/*
7603 			 * If more than 32 bits can be addressed, make sure to
7604 			 * enable enhanced addressing mode.
7605 			 */
7606 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7607 				priv->plat->dma_cfg->eame = true;
7608 		} else {
7609 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7610 			if (ret) {
7611 				dev_err(priv->device, "Failed to set DMA Mask\n");
7612 				goto error_hw_init;
7613 			}
7614 
7615 			priv->dma_cap.host_dma_width = 32;
7616 		}
7617 	}
7618 
7619 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7620 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7621 #ifdef STMMAC_VLAN_TAG_USED
7622 	/* Both mac100 and gmac support receive VLAN tag detection */
7623 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7624 	if (priv->plat->has_gmac4) {
7625 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7626 		priv->hw->hw_vlan_en = true;
7627 	}
7628 	if (priv->dma_cap.vlhash) {
7629 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7630 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7631 	}
7632 	if (priv->dma_cap.vlins) {
7633 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7634 		if (priv->dma_cap.dvlan)
7635 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7636 	}
7637 #endif
7638 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7639 
7640 	priv->xstats.threshold = tc;
7641 
7642 	/* Initialize RSS */
7643 	rxq = priv->plat->rx_queues_to_use;
7644 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7645 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7646 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7647 
7648 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7649 		ndev->features |= NETIF_F_RXHASH;
7650 
7651 	ndev->vlan_features |= ndev->features;
7652 
7653 	/* MTU range: 46 - hw-specific max */
7654 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7655 	if (priv->plat->has_xgmac)
7656 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7657 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7658 		ndev->max_mtu = JUMBO_LEN;
7659 	else
7660 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7661 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7662 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7663 	 */
7664 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7665 	    (priv->plat->maxmtu >= ndev->min_mtu))
7666 		ndev->max_mtu = priv->plat->maxmtu;
7667 	else if (priv->plat->maxmtu < ndev->min_mtu)
7668 		dev_warn(priv->device,
7669 			 "%s: warning: maxmtu having invalid value (%d)\n",
7670 			 __func__, priv->plat->maxmtu);
7671 
7672 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7673 
7674 	/* Setup channels NAPI */
7675 	stmmac_napi_add(ndev);
7676 
7677 	mutex_init(&priv->lock);
7678 
7679 	stmmac_fpe_init(priv);
7680 
7681 	/* If a specific clk_csr value is passed from the platform, the
7682 	 * CSR Clock Range selection is fixed and cannot be changed at
7683 	 * run-time. Otherwise the driver will try to set the MDC clock
7684 	 * dynamically according to the actual CSR clock input.
7685 	 */
7687 	if (priv->plat->clk_csr >= 0)
7688 		priv->clk_csr = priv->plat->clk_csr;
7689 	else
7690 		stmmac_clk_csr_set(priv);
7691 
7692 	stmmac_check_pcs_mode(priv);
7693 
7694 	pm_runtime_get_noresume(device);
7695 	pm_runtime_set_active(device);
7696 	if (!pm_runtime_enabled(device))
7697 		pm_runtime_enable(device);
7698 
7699 	ret = stmmac_mdio_register(ndev);
7700 	if (ret < 0) {
7701 		dev_err_probe(priv->device, ret,
7702 			      "MDIO bus (id: %d) registration failed\n",
7703 			      priv->plat->bus_id);
7704 		goto error_mdio_register;
7705 	}
7706 
7707 	if (priv->plat->speed_mode_2500)
7708 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7709 
7710 	ret = stmmac_pcs_setup(ndev);
7711 	if (ret)
7712 		goto error_pcs_setup;
7713 
7714 	ret = stmmac_phy_setup(priv);
7715 	if (ret) {
7716 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7717 		goto error_phy_setup;
7718 	}
7719 
7720 	ret = register_netdev(ndev);
7721 	if (ret) {
7722 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7723 			__func__, ret);
7724 		goto error_netdev_register;
7725 	}
7726 
7727 #ifdef CONFIG_DEBUG_FS
7728 	stmmac_init_fs(ndev);
7729 #endif
7730 
7731 	if (priv->plat->dump_debug_regs)
7732 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7733 
7734 	/* Let pm_runtime_put() disable the clocks.
7735 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7736 	 */
7737 	pm_runtime_put(device);
7738 
7739 	return ret;
7740 
7741 error_netdev_register:
7742 	phylink_destroy(priv->phylink);
7743 error_phy_setup:
7744 	stmmac_pcs_clean(ndev);
7745 error_pcs_setup:
7746 	stmmac_mdio_unregister(ndev);
7747 error_mdio_register:
7748 	stmmac_napi_del(ndev);
7749 error_hw_init:
7750 	destroy_workqueue(priv->wq);
7751 error_wq_init:
7752 	bitmap_free(priv->af_xdp_zc_qps);
7753 
7754 	return ret;
7755 }
7756 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7757 
7758 /**
7759  * stmmac_dvr_remove
7760  * @dev: device pointer
7761  * Description: this function resets the TX/RX processes, disables the MAC
7762  * RX/TX, changes the link status and releases the DMA descriptor rings.
7763  */
7764 void stmmac_dvr_remove(struct device *dev)
7765 {
7766 	struct net_device *ndev = dev_get_drvdata(dev);
7767 	struct stmmac_priv *priv = netdev_priv(ndev);
7768 
7769 	netdev_info(priv->dev, "%s: removing driver", __func__);
7770 
7771 	pm_runtime_get_sync(dev);
7772 
7773 	stmmac_stop_all_dma(priv);
7774 	stmmac_mac_set(priv, priv->ioaddr, false);
7775 	unregister_netdev(ndev);
7776 
7777 #ifdef CONFIG_DEBUG_FS
7778 	stmmac_exit_fs(ndev);
7779 #endif
7780 	phylink_destroy(priv->phylink);
7781 	if (priv->plat->stmmac_rst)
7782 		reset_control_assert(priv->plat->stmmac_rst);
7783 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7784 
7785 	stmmac_pcs_clean(ndev);
7786 	stmmac_mdio_unregister(ndev);
7787 
7788 	destroy_workqueue(priv->wq);
7789 	mutex_destroy(&priv->lock);
7790 	bitmap_free(priv->af_xdp_zc_qps);
7791 
7792 	pm_runtime_disable(dev);
7793 	pm_runtime_put_noidle(dev);
7794 }
7795 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7796 
7797 /**
7798  * stmmac_suspend - suspend callback
7799  * @dev: device pointer
7800  * Description: this is the function to suspend the device; it is called by
7801  * the platform driver to stop the network queues, program the PMT register
7802  * (for WoL) and release the driver resources.
7803  */
7804 int stmmac_suspend(struct device *dev)
7805 {
7806 	struct net_device *ndev = dev_get_drvdata(dev);
7807 	struct stmmac_priv *priv = netdev_priv(ndev);
7808 	u32 chan;
7809 
7810 	if (!ndev || !netif_running(ndev))
7811 		return 0;
7812 
7813 	mutex_lock(&priv->lock);
7814 
7815 	netif_device_detach(ndev);
7816 
7817 	stmmac_disable_all_queues(priv);
7818 
7819 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7820 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7821 
7822 	if (priv->eee_sw_timer_en) {
7823 		priv->tx_path_in_lpi_mode = false;
7824 		del_timer_sync(&priv->eee_ctrl_timer);
7825 	}
7826 
7827 	/* Stop TX/RX DMA */
7828 	stmmac_stop_all_dma(priv);
7829 
7830 	if (priv->plat->serdes_powerdown)
7831 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7832 
7833 	/* Enable Power down mode by programming the PMT regs */
7834 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7835 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7836 		priv->irq_wake = 1;
7837 	} else {
7838 		stmmac_mac_set(priv, priv->ioaddr, false);
7839 		pinctrl_pm_select_sleep_state(priv->device);
7840 	}
7841 
7842 	mutex_unlock(&priv->lock);
7843 
7844 	rtnl_lock();
7845 	if (device_may_wakeup(priv->device) && !priv->plat->pmt)
7846 		phylink_speed_down(priv->phylink, false);
7847 
7848 	phylink_suspend(priv->phylink,
7849 			device_may_wakeup(priv->device) && priv->plat->pmt);
7850 	rtnl_unlock();
7851 
7852 	if (stmmac_fpe_supported(priv))
7853 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7854 
7855 	priv->speed = SPEED_UNKNOWN;
7856 	return 0;
7857 }
7858 EXPORT_SYMBOL_GPL(stmmac_suspend);
7859 
7860 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7861 {
7862 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7863 
7864 	rx_q->cur_rx = 0;
7865 	rx_q->dirty_rx = 0;
7866 }
7867 
7868 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7869 {
7870 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7871 
7872 	tx_q->cur_tx = 0;
7873 	tx_q->dirty_tx = 0;
7874 	tx_q->mss = 0;
7875 
7876 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7877 }
7878 
7879 /**
7880  * stmmac_reset_queues_param - reset queue parameters
7881  * @priv: driver private structure
7882  */
7883 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7884 {
7885 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7886 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7887 	u32 queue;
7888 
7889 	for (queue = 0; queue < rx_cnt; queue++)
7890 		stmmac_reset_rx_queue(priv, queue);
7891 
7892 	for (queue = 0; queue < tx_cnt; queue++)
7893 		stmmac_reset_tx_queue(priv, queue);
7894 }
7895 
7896 /**
7897  * stmmac_resume - resume callback
7898  * @dev: device pointer
7899  * Description: on resume, this function is invoked to set up the DMA and
7900  * core in a usable state.
7901  */
7902 int stmmac_resume(struct device *dev)
7903 {
7904 	struct net_device *ndev = dev_get_drvdata(dev);
7905 	struct stmmac_priv *priv = netdev_priv(ndev);
7906 	int ret;
7907 
7908 	if (!netif_running(ndev))
7909 		return 0;
7910 
7911 	/* The Power Down bit in the PM register is cleared automatically
7912 	 * as soon as a magic packet or a Wake-up frame is received.
7913 	 * Anyway, it's better to manually clear this bit because it can
7914 	 * generate problems while resuming from other devices (e.g. a
7915 	 * serial console).
7916 	 */
7917 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7918 		mutex_lock(&priv->lock);
7919 		stmmac_pmt(priv, priv->hw, 0);
7920 		mutex_unlock(&priv->lock);
7921 		priv->irq_wake = 0;
7922 	} else {
7923 		pinctrl_pm_select_default_state(priv->device);
7924 		/* reset the phy so that it's ready */
7925 		if (priv->mii)
7926 			stmmac_mdio_reset(priv->mii);
7927 	}
7928 
7929 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7930 	    priv->plat->serdes_powerup) {
7931 		ret = priv->plat->serdes_powerup(ndev,
7932 						 priv->plat->bsp_priv);
7933 
7934 		if (ret < 0)
7935 			return ret;
7936 	}
7937 
7938 	rtnl_lock();
7939 	phylink_resume(priv->phylink);
7940 	if (device_may_wakeup(priv->device) && !priv->plat->pmt)
7941 		phylink_speed_up(priv->phylink);
7942 	rtnl_unlock();
7943 
7944 	rtnl_lock();
7945 	mutex_lock(&priv->lock);
7946 
7947 	stmmac_reset_queues_param(priv);
7948 
7949 	stmmac_free_tx_skbufs(priv);
7950 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7951 
7952 	stmmac_hw_setup(ndev, false);
7953 	stmmac_init_coalesce(priv);
7954 	stmmac_set_rx_mode(ndev);
7955 
7956 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7957 
7958 	stmmac_enable_all_queues(priv);
7959 	stmmac_enable_all_dma_irq(priv);
7960 
7961 	mutex_unlock(&priv->lock);
7962 	rtnl_unlock();
7963 
7964 	netif_device_attach(ndev);
7965 
7966 	return 0;
7967 }
7968 EXPORT_SYMBOL_GPL(stmmac_resume);
7969 
7970 #ifndef MODULE
7971 static int __init stmmac_cmdline_opt(char *str)
7972 {
7973 	char *opt;
7974 
7975 	if (!str || !*str)
7976 		return 1;
7977 	while ((opt = strsep(&str, ",")) != NULL) {
7978 		if (!strncmp(opt, "debug:", 6)) {
7979 			if (kstrtoint(opt + 6, 0, &debug))
7980 				goto err;
7981 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7982 			if (kstrtoint(opt + 8, 0, &phyaddr))
7983 				goto err;
7984 		} else if (!strncmp(opt, "tc:", 3)) {
7985 			if (kstrtoint(opt + 3, 0, &tc))
7986 				goto err;
7987 		} else if (!strncmp(opt, "watchdog:", 9)) {
7988 			if (kstrtoint(opt + 9, 0, &watchdog))
7989 				goto err;
7990 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7991 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7992 				goto err;
7993 		} else if (!strncmp(opt, "pause:", 6)) {
7994 			if (kstrtoint(opt + 6, 0, &pause))
7995 				goto err;
7996 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7997 			if (kstrtoint(opt + 10, 0, &eee_timer))
7998 				goto err;
7999 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8000 			if (kstrtoint(opt + 11, 0, &chain_mode))
8001 				goto err;
8002 		}
8003 	}
8004 	return 1;
8005 
8006 err:
8007 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
8008 	return 1;
8009 }
8010 
8011 __setup("stmmaceth=", stmmac_cmdline_opt);
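
/* Example (built-in driver only): booting with
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000
 *
 * sets the message level, PHY address and TX timeout at boot time.
 */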
8012 #endif /* MODULE */
8013 
8014 static int __init stmmac_init(void)
8015 {
8016 #ifdef CONFIG_DEBUG_FS
8017 	/* Create debugfs main directory if it doesn't exist yet */
8018 	if (!stmmac_fs_dir)
8019 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8020 	register_netdevice_notifier(&stmmac_notifier);
8021 #endif
8022 
8023 	return 0;
8024 }
8025 
8026 static void __exit stmmac_exit(void)
8027 {
8028 #ifdef CONFIG_DEBUG_FS
8029 	unregister_netdevice_notifier(&stmmac_notifier);
8030 	debugfs_remove_recursive(stmmac_fs_dir);
8031 #endif
8032 }
8033 
8034 module_init(stmmac_init)
8035 module_exit(stmmac_exit)
8036 
8037 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8038 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8039 MODULE_LICENSE("GPL");
8040