xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision de5ca699bc3f7fe9f90ba927d8a6e7783cd7311d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54 
55 /* As long as the interface is active, we keep the timestamping counter enabled
56  * with fine resolution and binary rollover. This avoid non-monotonic behavior
57  * (clock jumps) when changing timestamping settings at runtime.
58  */
59 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 				 PTP_TCR_TSCTRLSSR)
61 
62 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
64 
65 /* Module parameters */
66 #define TX_TIMEO	5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70 
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74 
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78 
79 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = 0xdead;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 /* This is unused */
105 #define	DEFAULT_BUFSIZE	1536
106 static int buf_sz = DEFAULT_BUFSIZE;
107 module_param(buf_sz, int, 0644);
108 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
109 
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
112 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113 
114 #define STMMAC_DEFAULT_LPI_TIMER	1000
115 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, uint, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119 
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
121  * but allow user to force to use the chain instead of the ring
122  */
123 static unsigned int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
126 
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
139 					  u32 rxmode, u32 chan);
140 
141 #ifdef CONFIG_DEBUG_FS
142 static const struct net_device_ops stmmac_netdev_ops;
143 static void stmmac_init_fs(struct net_device *dev);
144 static void stmmac_exit_fs(struct net_device *dev);
145 #endif
146 
147 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
148 
149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
150 {
151 	int ret = 0;
152 
153 	if (enabled) {
154 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
155 		if (ret)
156 			return ret;
157 		ret = clk_prepare_enable(priv->plat->pclk);
158 		if (ret) {
159 			clk_disable_unprepare(priv->plat->stmmac_clk);
160 			return ret;
161 		}
162 		if (priv->plat->clks_config) {
163 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
164 			if (ret) {
165 				clk_disable_unprepare(priv->plat->stmmac_clk);
166 				clk_disable_unprepare(priv->plat->pclk);
167 				return ret;
168 			}
169 		}
170 	} else {
171 		clk_disable_unprepare(priv->plat->stmmac_clk);
172 		clk_disable_unprepare(priv->plat->pclk);
173 		if (priv->plat->clks_config)
174 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
175 	}
176 
177 	return ret;
178 }
179 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
180 
181 /**
182  * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
183  * @bsp_priv: BSP private data structure (unused)
184  * @clk_tx_i: the transmit clock
185  * @interface: the selected interface mode
186  * @speed: the speed that the MAC will be operating at
187  *
188  * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
189  * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
190  * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
191  * the plat_data->set_clk_tx_rate method directly, call it via their own
192  * implementation, or implement their own method should they have more
193  * complex requirements. It is intended to only be used in this method.
194  *
195  * plat_data->clk_tx_i must be filled in.
196  */
197 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
198 			   phy_interface_t interface, int speed)
199 {
200 	long rate = rgmii_clock(speed);
201 
202 	/* Silently ignore unsupported speeds as rgmii_clock() only
203 	 * supports 10, 100 and 1000Mbps. We do not want to spit
204 	 * errors for 2500 and higher speeds here.
205 	 */
206 	if (rate < 0)
207 		return 0;
208 
209 	return clk_set_rate(clk_tx_i, rate);
210 }
211 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
212 
213 /**
214  * stmmac_verify_args - verify the driver parameters.
215  * Description: it checks the driver parameters and set a default in case of
216  * errors.
217  */
218 static void stmmac_verify_args(void)
219 {
220 	if (unlikely(watchdog < 0))
221 		watchdog = TX_TIMEO;
222 	if (unlikely((pause < 0) || (pause > 0xffff)))
223 		pause = PAUSE_TIME;
224 
225 	if (flow_ctrl != 0xdead)
226 		pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
227 }
228 
229 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
230 {
231 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
232 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
233 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
234 	u32 queue;
235 
236 	for (queue = 0; queue < maxq; queue++) {
237 		struct stmmac_channel *ch = &priv->channel[queue];
238 
239 		if (stmmac_xdp_is_enabled(priv) &&
240 		    test_bit(queue, priv->af_xdp_zc_qps)) {
241 			napi_disable(&ch->rxtx_napi);
242 			continue;
243 		}
244 
245 		if (queue < rx_queues_cnt)
246 			napi_disable(&ch->rx_napi);
247 		if (queue < tx_queues_cnt)
248 			napi_disable(&ch->tx_napi);
249 	}
250 }
251 
252 /**
253  * stmmac_disable_all_queues - Disable all queues
254  * @priv: driver private structure
255  */
256 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
257 {
258 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
259 	struct stmmac_rx_queue *rx_q;
260 	u32 queue;
261 
262 	/* synchronize_rcu() needed for pending XDP buffers to drain */
263 	for (queue = 0; queue < rx_queues_cnt; queue++) {
264 		rx_q = &priv->dma_conf.rx_queue[queue];
265 		if (rx_q->xsk_pool) {
266 			synchronize_rcu();
267 			break;
268 		}
269 	}
270 
271 	__stmmac_disable_all_queues(priv);
272 }
273 
274 /**
275  * stmmac_enable_all_queues - Enable all queues
276  * @priv: driver private structure
277  */
278 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
279 {
280 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
281 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
282 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
283 	u32 queue;
284 
285 	for (queue = 0; queue < maxq; queue++) {
286 		struct stmmac_channel *ch = &priv->channel[queue];
287 
288 		if (stmmac_xdp_is_enabled(priv) &&
289 		    test_bit(queue, priv->af_xdp_zc_qps)) {
290 			napi_enable(&ch->rxtx_napi);
291 			continue;
292 		}
293 
294 		if (queue < rx_queues_cnt)
295 			napi_enable(&ch->rx_napi);
296 		if (queue < tx_queues_cnt)
297 			napi_enable(&ch->tx_napi);
298 	}
299 }
300 
301 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
302 {
303 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
304 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
305 		queue_work(priv->wq, &priv->service_task);
306 }
307 
308 static void stmmac_global_err(struct stmmac_priv *priv)
309 {
310 	netif_carrier_off(priv->dev);
311 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
312 	stmmac_service_event_schedule(priv);
313 }
314 
315 /**
316  * stmmac_clk_csr_set - dynamically set the MDC clock
317  * @priv: driver private structure
318  * Description: this is to dynamically set the MDC clock according to the csr
319  * clock input.
320  * Note:
321  *	If a specific clk_csr value is passed from the platform
322  *	this means that the CSR Clock Range selection cannot be
323  *	changed at run-time and it is fixed (as reported in the driver
324  *	documentation). Viceversa the driver will try to set the MDC
325  *	clock dynamically according to the actual clock input.
326  */
327 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
328 {
329 	unsigned long clk_rate;
330 
331 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
332 
333 	/* Platform provided default clk_csr would be assumed valid
334 	 * for all other cases except for the below mentioned ones.
335 	 * For values higher than the IEEE 802.3 specified frequency
336 	 * we can not estimate the proper divider as it is not known
337 	 * the frequency of clk_csr_i. So we do not change the default
338 	 * divider.
339 	 */
340 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
341 		if (clk_rate < CSR_F_35M)
342 			priv->clk_csr = STMMAC_CSR_20_35M;
343 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
344 			priv->clk_csr = STMMAC_CSR_35_60M;
345 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
346 			priv->clk_csr = STMMAC_CSR_60_100M;
347 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
348 			priv->clk_csr = STMMAC_CSR_100_150M;
349 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
350 			priv->clk_csr = STMMAC_CSR_150_250M;
351 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
352 			priv->clk_csr = STMMAC_CSR_250_300M;
353 		else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
354 			priv->clk_csr = STMMAC_CSR_300_500M;
355 		else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
356 			priv->clk_csr = STMMAC_CSR_500_800M;
357 	}
358 
359 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
360 		if (clk_rate > 160000000)
361 			priv->clk_csr = 0x03;
362 		else if (clk_rate > 80000000)
363 			priv->clk_csr = 0x02;
364 		else if (clk_rate > 40000000)
365 			priv->clk_csr = 0x01;
366 		else
367 			priv->clk_csr = 0;
368 	}
369 
370 	if (priv->plat->has_xgmac) {
371 		if (clk_rate > 400000000)
372 			priv->clk_csr = 0x5;
373 		else if (clk_rate > 350000000)
374 			priv->clk_csr = 0x4;
375 		else if (clk_rate > 300000000)
376 			priv->clk_csr = 0x3;
377 		else if (clk_rate > 250000000)
378 			priv->clk_csr = 0x2;
379 		else if (clk_rate > 150000000)
380 			priv->clk_csr = 0x1;
381 		else
382 			priv->clk_csr = 0x0;
383 	}
384 }
385 
386 static void print_pkt(unsigned char *buf, int len)
387 {
388 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
389 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
390 }
391 
392 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
393 {
394 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
395 	u32 avail;
396 
397 	if (tx_q->dirty_tx > tx_q->cur_tx)
398 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
399 	else
400 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
401 
402 	return avail;
403 }
404 
405 /**
406  * stmmac_rx_dirty - Get RX queue dirty
407  * @priv: driver private structure
408  * @queue: RX queue index
409  */
410 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
411 {
412 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
413 	u32 dirty;
414 
415 	if (rx_q->dirty_rx <= rx_q->cur_rx)
416 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
417 	else
418 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
419 
420 	return dirty;
421 }
422 
423 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
424 {
425 	u32 tx_cnt = priv->plat->tx_queues_to_use;
426 	u32 queue;
427 
428 	/* check if all TX queues have the work finished */
429 	for (queue = 0; queue < tx_cnt; queue++) {
430 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
431 
432 		if (tx_q->dirty_tx != tx_q->cur_tx)
433 			return true; /* still unfinished work */
434 	}
435 
436 	return false;
437 }
438 
439 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
440 {
441 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
442 }
443 
444 /**
445  * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
446  * @priv: driver private structure
447  * Description: this function is to verify and enter in LPI mode in case of
448  * EEE.
449  */
450 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
451 {
452 	if (stmmac_eee_tx_busy(priv)) {
453 		stmmac_restart_sw_lpi_timer(priv);
454 		return;
455 	}
456 
457 	/* Check and enter in LPI mode */
458 	if (!priv->tx_path_in_lpi_mode)
459 		stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
460 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING,
461 			0);
462 }
463 
464 /**
465  * stmmac_stop_sw_lpi - stop transmitting LPI
466  * @priv: driver private structure
467  * Description: When using software-controlled LPI, stop transmitting LPI state.
468  */
469 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
470 {
471 	del_timer_sync(&priv->eee_ctrl_timer);
472 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
473 	priv->tx_path_in_lpi_mode = false;
474 }
475 
476 /**
477  * stmmac_eee_ctrl_timer - EEE TX SW timer.
478  * @t:  timer_list struct containing private info
479  * Description:
480  *  if there is no data transfer and if we are not in LPI state,
481  *  then MAC Transmitter can be moved to LPI state.
482  */
483 static void stmmac_eee_ctrl_timer(struct timer_list *t)
484 {
485 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
486 
487 	stmmac_try_to_start_sw_lpi(priv);
488 }
489 
490 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
491  * @priv: driver private structure
492  * @p : descriptor pointer
493  * @skb : the socket buffer
494  * Description :
495  * This function will read timestamp from the descriptor & pass it to stack.
496  * and also perform some sanity checks.
497  */
498 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
499 				   struct dma_desc *p, struct sk_buff *skb)
500 {
501 	struct skb_shared_hwtstamps shhwtstamp;
502 	bool found = false;
503 	u64 ns = 0;
504 
505 	if (!priv->hwts_tx_en)
506 		return;
507 
508 	/* exit if skb doesn't support hw tstamp */
509 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
510 		return;
511 
512 	/* check tx tstamp status */
513 	if (stmmac_get_tx_timestamp_status(priv, p)) {
514 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
515 		found = true;
516 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
517 		found = true;
518 	}
519 
520 	if (found) {
521 		ns -= priv->plat->cdc_error_adj;
522 
523 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
524 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
525 
526 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
527 		/* pass tstamp to stack */
528 		skb_tstamp_tx(skb, &shhwtstamp);
529 	}
530 }
531 
532 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
533  * @priv: driver private structure
534  * @p : descriptor pointer
535  * @np : next descriptor pointer
536  * @skb : the socket buffer
537  * Description :
538  * This function will read received packet's timestamp from the descriptor
539  * and pass it to stack. It also perform some sanity checks.
540  */
541 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
542 				   struct dma_desc *np, struct sk_buff *skb)
543 {
544 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
545 	struct dma_desc *desc = p;
546 	u64 ns = 0;
547 
548 	if (!priv->hwts_rx_en)
549 		return;
550 	/* For GMAC4, the valid timestamp is from CTX next desc. */
551 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
552 		desc = np;
553 
554 	/* Check if timestamp is available */
555 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
556 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
557 
558 		ns -= priv->plat->cdc_error_adj;
559 
560 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
561 		shhwtstamp = skb_hwtstamps(skb);
562 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
563 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
564 	} else  {
565 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
566 	}
567 }
568 
569 /**
570  *  stmmac_hwtstamp_set - control hardware timestamping.
571  *  @dev: device pointer.
572  *  @ifr: An IOCTL specific structure, that can contain a pointer to
573  *  a proprietary structure used to pass information to the driver.
574  *  Description:
575  *  This function configures the MAC to enable/disable both outgoing(TX)
576  *  and incoming(RX) packets time stamping based on user input.
577  *  Return Value:
578  *  0 on success and an appropriate -ve integer on failure.
579  */
580 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
581 {
582 	struct stmmac_priv *priv = netdev_priv(dev);
583 	struct hwtstamp_config config;
584 	u32 ptp_v2 = 0;
585 	u32 tstamp_all = 0;
586 	u32 ptp_over_ipv4_udp = 0;
587 	u32 ptp_over_ipv6_udp = 0;
588 	u32 ptp_over_ethernet = 0;
589 	u32 snap_type_sel = 0;
590 	u32 ts_master_en = 0;
591 	u32 ts_event_en = 0;
592 
593 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
594 		netdev_alert(priv->dev, "No support for HW time stamping\n");
595 		priv->hwts_tx_en = 0;
596 		priv->hwts_rx_en = 0;
597 
598 		return -EOPNOTSUPP;
599 	}
600 
601 	if (copy_from_user(&config, ifr->ifr_data,
602 			   sizeof(config)))
603 		return -EFAULT;
604 
605 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
606 		   __func__, config.flags, config.tx_type, config.rx_filter);
607 
608 	if (config.tx_type != HWTSTAMP_TX_OFF &&
609 	    config.tx_type != HWTSTAMP_TX_ON)
610 		return -ERANGE;
611 
612 	if (priv->adv_ts) {
613 		switch (config.rx_filter) {
614 		case HWTSTAMP_FILTER_NONE:
615 			/* time stamp no incoming packet at all */
616 			config.rx_filter = HWTSTAMP_FILTER_NONE;
617 			break;
618 
619 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
620 			/* PTP v1, UDP, any kind of event packet */
621 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
622 			/* 'xmac' hardware can support Sync, Pdelay_Req and
623 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
624 			 * This leaves Delay_Req timestamps out.
625 			 * Enable all events *and* general purpose message
626 			 * timestamping
627 			 */
628 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
629 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
630 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
631 			break;
632 
633 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
634 			/* PTP v1, UDP, Sync packet */
635 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
636 			/* take time stamp for SYNC messages only */
637 			ts_event_en = PTP_TCR_TSEVNTENA;
638 
639 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
640 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
641 			break;
642 
643 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
644 			/* PTP v1, UDP, Delay_req packet */
645 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
646 			/* take time stamp for Delay_Req messages only */
647 			ts_master_en = PTP_TCR_TSMSTRENA;
648 			ts_event_en = PTP_TCR_TSEVNTENA;
649 
650 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
651 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
652 			break;
653 
654 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
655 			/* PTP v2, UDP, any kind of event packet */
656 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
657 			ptp_v2 = PTP_TCR_TSVER2ENA;
658 			/* take time stamp for all event messages */
659 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
660 
661 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
662 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
663 			break;
664 
665 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
666 			/* PTP v2, UDP, Sync packet */
667 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
668 			ptp_v2 = PTP_TCR_TSVER2ENA;
669 			/* take time stamp for SYNC messages only */
670 			ts_event_en = PTP_TCR_TSEVNTENA;
671 
672 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
673 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
674 			break;
675 
676 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
677 			/* PTP v2, UDP, Delay_req packet */
678 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
679 			ptp_v2 = PTP_TCR_TSVER2ENA;
680 			/* take time stamp for Delay_Req messages only */
681 			ts_master_en = PTP_TCR_TSMSTRENA;
682 			ts_event_en = PTP_TCR_TSEVNTENA;
683 
684 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
685 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
686 			break;
687 
688 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
689 			/* PTP v2/802.AS1 any layer, any kind of event packet */
690 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
691 			ptp_v2 = PTP_TCR_TSVER2ENA;
692 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
693 			if (priv->synopsys_id < DWMAC_CORE_4_10)
694 				ts_event_en = PTP_TCR_TSEVNTENA;
695 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
696 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
697 			ptp_over_ethernet = PTP_TCR_TSIPENA;
698 			break;
699 
700 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
701 			/* PTP v2/802.AS1, any layer, Sync packet */
702 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
703 			ptp_v2 = PTP_TCR_TSVER2ENA;
704 			/* take time stamp for SYNC messages only */
705 			ts_event_en = PTP_TCR_TSEVNTENA;
706 
707 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
708 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
709 			ptp_over_ethernet = PTP_TCR_TSIPENA;
710 			break;
711 
712 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
713 			/* PTP v2/802.AS1, any layer, Delay_req packet */
714 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
715 			ptp_v2 = PTP_TCR_TSVER2ENA;
716 			/* take time stamp for Delay_Req messages only */
717 			ts_master_en = PTP_TCR_TSMSTRENA;
718 			ts_event_en = PTP_TCR_TSEVNTENA;
719 
720 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
721 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
722 			ptp_over_ethernet = PTP_TCR_TSIPENA;
723 			break;
724 
725 		case HWTSTAMP_FILTER_NTP_ALL:
726 		case HWTSTAMP_FILTER_ALL:
727 			/* time stamp any incoming packet */
728 			config.rx_filter = HWTSTAMP_FILTER_ALL;
729 			tstamp_all = PTP_TCR_TSENALL;
730 			break;
731 
732 		default:
733 			return -ERANGE;
734 		}
735 	} else {
736 		switch (config.rx_filter) {
737 		case HWTSTAMP_FILTER_NONE:
738 			config.rx_filter = HWTSTAMP_FILTER_NONE;
739 			break;
740 		default:
741 			/* PTP v1, UDP, any kind of event packet */
742 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
743 			break;
744 		}
745 	}
746 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
747 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
748 
749 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
750 
751 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
752 		priv->systime_flags |= tstamp_all | ptp_v2 |
753 				       ptp_over_ethernet | ptp_over_ipv6_udp |
754 				       ptp_over_ipv4_udp | ts_event_en |
755 				       ts_master_en | snap_type_sel;
756 	}
757 
758 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
759 
760 	memcpy(&priv->tstamp_config, &config, sizeof(config));
761 
762 	return copy_to_user(ifr->ifr_data, &config,
763 			    sizeof(config)) ? -EFAULT : 0;
764 }
765 
766 /**
767  *  stmmac_hwtstamp_get - read hardware timestamping.
768  *  @dev: device pointer.
769  *  @ifr: An IOCTL specific structure, that can contain a pointer to
770  *  a proprietary structure used to pass information to the driver.
771  *  Description:
772  *  This function obtain the current hardware timestamping settings
773  *  as requested.
774  */
775 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
776 {
777 	struct stmmac_priv *priv = netdev_priv(dev);
778 	struct hwtstamp_config *config = &priv->tstamp_config;
779 
780 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
781 		return -EOPNOTSUPP;
782 
783 	return copy_to_user(ifr->ifr_data, config,
784 			    sizeof(*config)) ? -EFAULT : 0;
785 }
786 
787 /**
788  * stmmac_init_tstamp_counter - init hardware timestamping counter
789  * @priv: driver private structure
790  * @systime_flags: timestamping flags
791  * Description:
792  * Initialize hardware counter for packet timestamping.
793  * This is valid as long as the interface is open and not suspended.
794  * Will be rerun after resuming from suspend, case in which the timestamping
795  * flags updated by stmmac_hwtstamp_set() also need to be restored.
796  */
797 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
798 {
799 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
800 	struct timespec64 now;
801 	u32 sec_inc = 0;
802 	u64 temp = 0;
803 
804 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
805 		return -EOPNOTSUPP;
806 
807 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
808 	priv->systime_flags = systime_flags;
809 
810 	/* program Sub Second Increment reg */
811 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
812 					   priv->plat->clk_ptp_rate,
813 					   xmac, &sec_inc);
814 	temp = div_u64(1000000000ULL, sec_inc);
815 
816 	/* Store sub second increment for later use */
817 	priv->sub_second_inc = sec_inc;
818 
819 	/* calculate default added value:
820 	 * formula is :
821 	 * addend = (2^32)/freq_div_ratio;
822 	 * where, freq_div_ratio = 1e9ns/sec_inc
823 	 */
824 	temp = (u64)(temp << 32);
825 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
826 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
827 
828 	/* initialize system time */
829 	ktime_get_real_ts64(&now);
830 
831 	/* lower 32 bits of tv_sec are safe until y2106 */
832 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
833 
834 	return 0;
835 }
836 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
837 
838 /**
839  * stmmac_init_ptp - init PTP
840  * @priv: driver private structure
841  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
842  * This is done by looking at the HW cap. register.
843  * This function also registers the ptp driver.
844  */
845 static int stmmac_init_ptp(struct stmmac_priv *priv)
846 {
847 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
848 	int ret;
849 
850 	if (priv->plat->ptp_clk_freq_config)
851 		priv->plat->ptp_clk_freq_config(priv);
852 
853 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
854 	if (ret)
855 		return ret;
856 
857 	priv->adv_ts = 0;
858 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
859 	if (xmac && priv->dma_cap.atime_stamp)
860 		priv->adv_ts = 1;
861 	/* Dwmac 3.x core with extend_desc can support adv_ts */
862 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
863 		priv->adv_ts = 1;
864 
865 	if (priv->dma_cap.time_stamp)
866 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
867 
868 	if (priv->adv_ts)
869 		netdev_info(priv->dev,
870 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
871 
872 	priv->hwts_tx_en = 0;
873 	priv->hwts_rx_en = 0;
874 
875 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
876 		stmmac_hwtstamp_correct_latency(priv, priv);
877 
878 	return 0;
879 }
880 
881 static void stmmac_release_ptp(struct stmmac_priv *priv)
882 {
883 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
884 	stmmac_ptp_unregister(priv);
885 }
886 
887 /**
888  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
889  *  @priv: driver private structure
890  *  @duplex: duplex passed to the next function
891  *  @flow_ctrl: desired flow control modes
892  *  Description: It is used for configuring the flow control in all queues
893  */
894 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
895 				 unsigned int flow_ctrl)
896 {
897 	u32 tx_cnt = priv->plat->tx_queues_to_use;
898 
899 	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
900 			 tx_cnt);
901 }
902 
903 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
904 					 phy_interface_t interface)
905 {
906 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
907 
908 	/* Refresh the MAC-specific capabilities */
909 	stmmac_mac_update_caps(priv);
910 
911 	config->mac_capabilities = priv->hw->link.caps;
912 
913 	if (priv->plat->max_speed)
914 		phylink_limit_mac_speed(config, priv->plat->max_speed);
915 
916 	return config->mac_capabilities;
917 }
918 
919 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
920 						 phy_interface_t interface)
921 {
922 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
923 	struct phylink_pcs *pcs;
924 
925 	if (priv->plat->select_pcs) {
926 		pcs = priv->plat->select_pcs(priv, interface);
927 		if (!IS_ERR(pcs))
928 			return pcs;
929 	}
930 
931 	return NULL;
932 }
933 
934 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
935 			      const struct phylink_link_state *state)
936 {
937 	/* Nothing to do, xpcs_config() handles everything */
938 }
939 
940 static void stmmac_mac_link_down(struct phylink_config *config,
941 				 unsigned int mode, phy_interface_t interface)
942 {
943 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
944 
945 	stmmac_mac_set(priv, priv->ioaddr, false);
946 	if (priv->dma_cap.eee)
947 		stmmac_set_eee_pls(priv, priv->hw, false);
948 
949 	if (stmmac_fpe_supported(priv))
950 		stmmac_fpe_link_state_handle(priv, false);
951 }
952 
953 static void stmmac_mac_link_up(struct phylink_config *config,
954 			       struct phy_device *phy,
955 			       unsigned int mode, phy_interface_t interface,
956 			       int speed, int duplex,
957 			       bool tx_pause, bool rx_pause)
958 {
959 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
960 	unsigned int flow_ctrl;
961 	u32 old_ctrl, ctrl;
962 	int ret;
963 
964 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
965 	    priv->plat->serdes_powerup)
966 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
967 
968 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
969 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
970 
971 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
972 		switch (speed) {
973 		case SPEED_10000:
974 			ctrl |= priv->hw->link.xgmii.speed10000;
975 			break;
976 		case SPEED_5000:
977 			ctrl |= priv->hw->link.xgmii.speed5000;
978 			break;
979 		case SPEED_2500:
980 			ctrl |= priv->hw->link.xgmii.speed2500;
981 			break;
982 		default:
983 			return;
984 		}
985 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
986 		switch (speed) {
987 		case SPEED_100000:
988 			ctrl |= priv->hw->link.xlgmii.speed100000;
989 			break;
990 		case SPEED_50000:
991 			ctrl |= priv->hw->link.xlgmii.speed50000;
992 			break;
993 		case SPEED_40000:
994 			ctrl |= priv->hw->link.xlgmii.speed40000;
995 			break;
996 		case SPEED_25000:
997 			ctrl |= priv->hw->link.xlgmii.speed25000;
998 			break;
999 		case SPEED_10000:
1000 			ctrl |= priv->hw->link.xgmii.speed10000;
1001 			break;
1002 		case SPEED_2500:
1003 			ctrl |= priv->hw->link.speed2500;
1004 			break;
1005 		case SPEED_1000:
1006 			ctrl |= priv->hw->link.speed1000;
1007 			break;
1008 		default:
1009 			return;
1010 		}
1011 	} else {
1012 		switch (speed) {
1013 		case SPEED_2500:
1014 			ctrl |= priv->hw->link.speed2500;
1015 			break;
1016 		case SPEED_1000:
1017 			ctrl |= priv->hw->link.speed1000;
1018 			break;
1019 		case SPEED_100:
1020 			ctrl |= priv->hw->link.speed100;
1021 			break;
1022 		case SPEED_10:
1023 			ctrl |= priv->hw->link.speed10;
1024 			break;
1025 		default:
1026 			return;
1027 		}
1028 	}
1029 
1030 	if (priv->plat->fix_mac_speed)
1031 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1032 
1033 	if (!duplex)
1034 		ctrl &= ~priv->hw->link.duplex;
1035 	else
1036 		ctrl |= priv->hw->link.duplex;
1037 
1038 	/* Flow Control operation */
1039 	if (rx_pause && tx_pause)
1040 		flow_ctrl = FLOW_AUTO;
1041 	else if (rx_pause && !tx_pause)
1042 		flow_ctrl = FLOW_RX;
1043 	else if (!rx_pause && tx_pause)
1044 		flow_ctrl = FLOW_TX;
1045 	else
1046 		flow_ctrl = FLOW_OFF;
1047 
1048 	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
1049 
1050 	if (ctrl != old_ctrl)
1051 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1052 
1053 	if (priv->plat->set_clk_tx_rate) {
1054 		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
1055 						priv->plat->clk_tx_i,
1056 						interface, speed);
1057 		if (ret < 0)
1058 			netdev_err(priv->dev,
1059 				   "failed to configure transmit clock for %dMbps: %pe\n",
1060 				   speed, ERR_PTR(ret));
1061 	}
1062 
1063 	stmmac_mac_set(priv, priv->ioaddr, true);
1064 	if (priv->dma_cap.eee)
1065 		stmmac_set_eee_pls(priv, priv->hw, true);
1066 
1067 	if (stmmac_fpe_supported(priv))
1068 		stmmac_fpe_link_state_handle(priv, true);
1069 
1070 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1071 		stmmac_hwtstamp_correct_latency(priv, priv);
1072 }
1073 
1074 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1075 {
1076 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1077 
1078 	priv->eee_active = false;
1079 
1080 	mutex_lock(&priv->lock);
1081 
1082 	priv->eee_enabled = false;
1083 
1084 	netdev_dbg(priv->dev, "disable EEE\n");
1085 	priv->eee_sw_timer_en = false;
1086 	del_timer_sync(&priv->eee_ctrl_timer);
1087 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1088 	priv->tx_path_in_lpi_mode = false;
1089 
1090 	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1091 	mutex_unlock(&priv->lock);
1092 }
1093 
1094 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1095 				    bool tx_clk_stop)
1096 {
1097 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1098 	int ret;
1099 
1100 	priv->tx_lpi_timer = timer;
1101 	priv->eee_active = true;
1102 
1103 	mutex_lock(&priv->lock);
1104 
1105 	priv->eee_enabled = true;
1106 
1107 	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1108 			     STMMAC_DEFAULT_TWT_LS);
1109 
1110 	/* Try to cnfigure the hardware timer. */
1111 	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1112 				  priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING,
1113 				  priv->tx_lpi_timer);
1114 
1115 	if (ret) {
1116 		/* Hardware timer mode not supported, or value out of range.
1117 		 * Fall back to using software LPI mode
1118 		 */
1119 		priv->eee_sw_timer_en = true;
1120 		stmmac_restart_sw_lpi_timer(priv);
1121 	}
1122 
1123 	mutex_unlock(&priv->lock);
1124 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1125 
1126 	return 0;
1127 }
1128 
1129 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
1130 			     phy_interface_t interface)
1131 {
1132 	struct net_device *ndev = to_net_dev(config->dev);
1133 	struct stmmac_priv *priv = netdev_priv(ndev);
1134 
1135 	if (priv->plat->mac_finish)
1136 		priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
1137 
1138 	return 0;
1139 }
1140 
1141 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1142 	.mac_get_caps = stmmac_mac_get_caps,
1143 	.mac_select_pcs = stmmac_mac_select_pcs,
1144 	.mac_config = stmmac_mac_config,
1145 	.mac_link_down = stmmac_mac_link_down,
1146 	.mac_link_up = stmmac_mac_link_up,
1147 	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1148 	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1149 	.mac_finish = stmmac_mac_finish,
1150 };
1151 
1152 /**
1153  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1154  * @priv: driver private structure
1155  * Description: this is to verify if the HW supports the PCS.
1156  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1157  * configured for the TBI, RTBI, or SGMII PHY interface.
1158  */
1159 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1160 {
1161 	int interface = priv->plat->mac_interface;
1162 
1163 	if (priv->dma_cap.pcs) {
1164 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1165 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1166 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1167 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1168 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1169 			priv->hw->pcs = STMMAC_PCS_RGMII;
1170 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1171 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1172 			priv->hw->pcs = STMMAC_PCS_SGMII;
1173 		}
1174 	}
1175 }
1176 
1177 /**
1178  * stmmac_init_phy - PHY initialization
1179  * @dev: net device structure
1180  * Description: it initializes the driver's PHY state, and attaches the PHY
1181  * to the mac driver.
1182  *  Return value:
1183  *  0 on success
1184  */
1185 static int stmmac_init_phy(struct net_device *dev)
1186 {
1187 	struct stmmac_priv *priv = netdev_priv(dev);
1188 	struct fwnode_handle *phy_fwnode;
1189 	struct fwnode_handle *fwnode;
1190 	int ret;
1191 
1192 	if (!phylink_expects_phy(priv->phylink))
1193 		return 0;
1194 
1195 	fwnode = priv->plat->port_node;
1196 	if (!fwnode)
1197 		fwnode = dev_fwnode(priv->device);
1198 
1199 	if (fwnode)
1200 		phy_fwnode = fwnode_get_phy_node(fwnode);
1201 	else
1202 		phy_fwnode = NULL;
1203 
1204 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1205 	 * manually parse it
1206 	 */
1207 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1208 		int addr = priv->plat->phy_addr;
1209 		struct phy_device *phydev;
1210 
1211 		if (addr < 0) {
1212 			netdev_err(priv->dev, "no phy found\n");
1213 			return -ENODEV;
1214 		}
1215 
1216 		phydev = mdiobus_get_phy(priv->mii, addr);
1217 		if (!phydev) {
1218 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1219 			return -ENODEV;
1220 		}
1221 
1222 		ret = phylink_connect_phy(priv->phylink, phydev);
1223 	} else {
1224 		fwnode_handle_put(phy_fwnode);
1225 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1226 	}
1227 
1228 	if (ret == 0) {
1229 		struct ethtool_keee eee;
1230 
1231 		/* Configure phylib's copy of the LPI timer. Normally,
1232 		 * phylink_config.lpi_timer_default would do this, but there is
1233 		 * a chance that userspace could change the eee_timer setting
1234 		 * via sysfs before the first open. Thus, preserve existing
1235 		 * behaviour.
1236 		 */
1237 		if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1238 			eee.tx_lpi_timer = priv->tx_lpi_timer;
1239 			phylink_ethtool_set_eee(priv->phylink, &eee);
1240 		}
1241 	}
1242 
1243 	if (!priv->plat->pmt) {
1244 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1245 
1246 		phylink_ethtool_get_wol(priv->phylink, &wol);
1247 		device_set_wakeup_capable(priv->device, !!wol.supported);
1248 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1249 	}
1250 
1251 	return ret;
1252 }
1253 
1254 static int stmmac_phy_setup(struct stmmac_priv *priv)
1255 {
1256 	struct stmmac_mdio_bus_data *mdio_bus_data;
1257 	int mode = priv->plat->phy_interface;
1258 	struct fwnode_handle *fwnode;
1259 	struct phylink_pcs *pcs;
1260 	struct phylink *phylink;
1261 
1262 	priv->phylink_config.dev = &priv->dev->dev;
1263 	priv->phylink_config.type = PHYLINK_NETDEV;
1264 	priv->phylink_config.mac_managed_pm = true;
1265 
1266 	/* Stmmac always requires an RX clock for hardware initialization */
1267 	priv->phylink_config.mac_requires_rxc = true;
1268 
1269 	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1270 		priv->phylink_config.eee_rx_clk_stop_enable = true;
1271 
1272 	mdio_bus_data = priv->plat->mdio_bus_data;
1273 	if (mdio_bus_data)
1274 		priv->phylink_config.default_an_inband =
1275 			mdio_bus_data->default_an_inband;
1276 
1277 	/* Set the platform/firmware specified interface mode. Note, phylink
1278 	 * deals with the PHY interface mode, not the MAC interface mode.
1279 	 */
1280 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1281 
1282 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1283 	if (priv->hw->xpcs)
1284 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1285 	else
1286 		pcs = priv->hw->phylink_pcs;
1287 
1288 	if (pcs)
1289 		phy_interface_or(priv->phylink_config.supported_interfaces,
1290 				 priv->phylink_config.supported_interfaces,
1291 				 pcs->supported_interfaces);
1292 
1293 	if (priv->dma_cap.eee) {
1294 		/* Assume all supported interfaces also support LPI */
1295 		memcpy(priv->phylink_config.lpi_interfaces,
1296 		       priv->phylink_config.supported_interfaces,
1297 		       sizeof(priv->phylink_config.lpi_interfaces));
1298 
1299 		/* All full duplex speeds above 100Mbps are supported */
1300 		priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) |
1301 							MAC_100FD;
1302 		priv->phylink_config.lpi_timer_default = eee_timer * 1000;
1303 		priv->phylink_config.eee_enabled_default = true;
1304 	}
1305 
1306 	fwnode = priv->plat->port_node;
1307 	if (!fwnode)
1308 		fwnode = dev_fwnode(priv->device);
1309 
1310 	phylink = phylink_create(&priv->phylink_config, fwnode,
1311 				 mode, &stmmac_phylink_mac_ops);
1312 	if (IS_ERR(phylink))
1313 		return PTR_ERR(phylink);
1314 
1315 	priv->phylink = phylink;
1316 	return 0;
1317 }
1318 
1319 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1320 				    struct stmmac_dma_conf *dma_conf)
1321 {
1322 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1323 	unsigned int desc_size;
1324 	void *head_rx;
1325 	u32 queue;
1326 
1327 	/* Display RX rings */
1328 	for (queue = 0; queue < rx_cnt; queue++) {
1329 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1330 
1331 		pr_info("\tRX Queue %u rings\n", queue);
1332 
1333 		if (priv->extend_desc) {
1334 			head_rx = (void *)rx_q->dma_erx;
1335 			desc_size = sizeof(struct dma_extended_desc);
1336 		} else {
1337 			head_rx = (void *)rx_q->dma_rx;
1338 			desc_size = sizeof(struct dma_desc);
1339 		}
1340 
1341 		/* Display RX ring */
1342 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1343 				    rx_q->dma_rx_phy, desc_size);
1344 	}
1345 }
1346 
1347 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1348 				    struct stmmac_dma_conf *dma_conf)
1349 {
1350 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1351 	unsigned int desc_size;
1352 	void *head_tx;
1353 	u32 queue;
1354 
1355 	/* Display TX rings */
1356 	for (queue = 0; queue < tx_cnt; queue++) {
1357 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1358 
1359 		pr_info("\tTX Queue %d rings\n", queue);
1360 
1361 		if (priv->extend_desc) {
1362 			head_tx = (void *)tx_q->dma_etx;
1363 			desc_size = sizeof(struct dma_extended_desc);
1364 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1365 			head_tx = (void *)tx_q->dma_entx;
1366 			desc_size = sizeof(struct dma_edesc);
1367 		} else {
1368 			head_tx = (void *)tx_q->dma_tx;
1369 			desc_size = sizeof(struct dma_desc);
1370 		}
1371 
1372 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1373 				    tx_q->dma_tx_phy, desc_size);
1374 	}
1375 }
1376 
1377 static void stmmac_display_rings(struct stmmac_priv *priv,
1378 				 struct stmmac_dma_conf *dma_conf)
1379 {
1380 	/* Display RX ring */
1381 	stmmac_display_rx_rings(priv, dma_conf);
1382 
1383 	/* Display TX ring */
1384 	stmmac_display_tx_rings(priv, dma_conf);
1385 }
1386 
1387 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1388 {
1389 	if (stmmac_xdp_is_enabled(priv))
1390 		return XDP_PACKET_HEADROOM;
1391 
1392 	return NET_SKB_PAD;
1393 }
1394 
1395 static int stmmac_set_bfsize(int mtu, int bufsize)
1396 {
1397 	int ret = bufsize;
1398 
1399 	if (mtu >= BUF_SIZE_8KiB)
1400 		ret = BUF_SIZE_16KiB;
1401 	else if (mtu >= BUF_SIZE_4KiB)
1402 		ret = BUF_SIZE_8KiB;
1403 	else if (mtu >= BUF_SIZE_2KiB)
1404 		ret = BUF_SIZE_4KiB;
1405 	else if (mtu > DEFAULT_BUFSIZE)
1406 		ret = BUF_SIZE_2KiB;
1407 	else
1408 		ret = DEFAULT_BUFSIZE;
1409 
1410 	return ret;
1411 }
1412 
1413 /**
1414  * stmmac_clear_rx_descriptors - clear RX descriptors
1415  * @priv: driver private structure
1416  * @dma_conf: structure to take the dma data
1417  * @queue: RX queue index
1418  * Description: this function is called to clear the RX descriptors
1419  * in case of both basic and extended descriptors are used.
1420  */
1421 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1422 					struct stmmac_dma_conf *dma_conf,
1423 					u32 queue)
1424 {
1425 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1426 	int i;
1427 
1428 	/* Clear the RX descriptors */
1429 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1430 		if (priv->extend_desc)
1431 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1432 					priv->use_riwt, priv->mode,
1433 					(i == dma_conf->dma_rx_size - 1),
1434 					dma_conf->dma_buf_sz);
1435 		else
1436 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1437 					priv->use_riwt, priv->mode,
1438 					(i == dma_conf->dma_rx_size - 1),
1439 					dma_conf->dma_buf_sz);
1440 }
1441 
1442 /**
1443  * stmmac_clear_tx_descriptors - clear tx descriptors
1444  * @priv: driver private structure
1445  * @dma_conf: structure to take the dma data
1446  * @queue: TX queue index.
1447  * Description: this function is called to clear the TX descriptors
1448  * in case of both basic and extended descriptors are used.
1449  */
1450 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1451 					struct stmmac_dma_conf *dma_conf,
1452 					u32 queue)
1453 {
1454 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1455 	int i;
1456 
1457 	/* Clear the TX descriptors */
1458 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1459 		int last = (i == (dma_conf->dma_tx_size - 1));
1460 		struct dma_desc *p;
1461 
1462 		if (priv->extend_desc)
1463 			p = &tx_q->dma_etx[i].basic;
1464 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1465 			p = &tx_q->dma_entx[i].basic;
1466 		else
1467 			p = &tx_q->dma_tx[i];
1468 
1469 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1470 	}
1471 }
1472 
1473 /**
1474  * stmmac_clear_descriptors - clear descriptors
1475  * @priv: driver private structure
1476  * @dma_conf: structure to take the dma data
1477  * Description: this function is called to clear the TX and RX descriptors
1478  * in case of both basic and extended descriptors are used.
1479  */
1480 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1481 				     struct stmmac_dma_conf *dma_conf)
1482 {
1483 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1484 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1485 	u32 queue;
1486 
1487 	/* Clear the RX descriptors */
1488 	for (queue = 0; queue < rx_queue_cnt; queue++)
1489 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1490 
1491 	/* Clear the TX descriptors */
1492 	for (queue = 0; queue < tx_queue_cnt; queue++)
1493 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1494 }
1495 
1496 /**
1497  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1498  * @priv: driver private structure
1499  * @dma_conf: structure to take the dma data
1500  * @p: descriptor pointer
1501  * @i: descriptor index
1502  * @flags: gfp flag
1503  * @queue: RX queue index
1504  * Description: this function is called to allocate a receive buffer, perform
1505  * the DMA mapping and init the descriptor.
1506  */
1507 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1508 				  struct stmmac_dma_conf *dma_conf,
1509 				  struct dma_desc *p,
1510 				  int i, gfp_t flags, u32 queue)
1511 {
1512 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1513 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1514 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1515 
1516 	if (priv->dma_cap.host_dma_width <= 32)
1517 		gfp |= GFP_DMA32;
1518 
1519 	if (!buf->page) {
1520 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1521 		if (!buf->page)
1522 			return -ENOMEM;
1523 		buf->page_offset = stmmac_rx_offset(priv);
1524 	}
1525 
1526 	if (priv->sph && !buf->sec_page) {
1527 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1528 		if (!buf->sec_page)
1529 			return -ENOMEM;
1530 
1531 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1532 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1533 	} else {
1534 		buf->sec_page = NULL;
1535 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1536 	}
1537 
1538 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1539 
1540 	stmmac_set_desc_addr(priv, p, buf->addr);
1541 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1542 		stmmac_init_desc3(priv, p);
1543 
1544 	return 0;
1545 }
1546 
1547 /**
1548  * stmmac_free_rx_buffer - free RX dma buffers
1549  * @priv: private structure
1550  * @rx_q: RX queue
1551  * @i: buffer index.
1552  */
1553 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1554 				  struct stmmac_rx_queue *rx_q,
1555 				  int i)
1556 {
1557 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1558 
1559 	if (buf->page)
1560 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1561 	buf->page = NULL;
1562 
1563 	if (buf->sec_page)
1564 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1565 	buf->sec_page = NULL;
1566 }
1567 
1568 /**
1569  * stmmac_free_tx_buffer - free RX dma buffers
1570  * @priv: private structure
1571  * @dma_conf: structure to take the dma data
1572  * @queue: RX queue index
1573  * @i: buffer index.
1574  */
1575 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1576 				  struct stmmac_dma_conf *dma_conf,
1577 				  u32 queue, int i)
1578 {
1579 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1580 
1581 	if (tx_q->tx_skbuff_dma[i].buf &&
1582 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1583 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1584 			dma_unmap_page(priv->device,
1585 				       tx_q->tx_skbuff_dma[i].buf,
1586 				       tx_q->tx_skbuff_dma[i].len,
1587 				       DMA_TO_DEVICE);
1588 		else
1589 			dma_unmap_single(priv->device,
1590 					 tx_q->tx_skbuff_dma[i].buf,
1591 					 tx_q->tx_skbuff_dma[i].len,
1592 					 DMA_TO_DEVICE);
1593 	}
1594 
1595 	if (tx_q->xdpf[i] &&
1596 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1597 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1598 		xdp_return_frame(tx_q->xdpf[i]);
1599 		tx_q->xdpf[i] = NULL;
1600 	}
1601 
1602 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1603 		tx_q->xsk_frames_done++;
1604 
1605 	if (tx_q->tx_skbuff[i] &&
1606 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1607 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1608 		tx_q->tx_skbuff[i] = NULL;
1609 	}
1610 
1611 	tx_q->tx_skbuff_dma[i].buf = 0;
1612 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1613 }
1614 
1615 /**
1616  * dma_free_rx_skbufs - free RX dma buffers
1617  * @priv: private structure
1618  * @dma_conf: structure to take the dma data
1619  * @queue: RX queue index
1620  */
1621 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1622 			       struct stmmac_dma_conf *dma_conf,
1623 			       u32 queue)
1624 {
1625 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1626 	int i;
1627 
1628 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1629 		stmmac_free_rx_buffer(priv, rx_q, i);
1630 }
1631 
1632 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1633 				   struct stmmac_dma_conf *dma_conf,
1634 				   u32 queue, gfp_t flags)
1635 {
1636 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1637 	int i;
1638 
1639 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1640 		struct dma_desc *p;
1641 		int ret;
1642 
1643 		if (priv->extend_desc)
1644 			p = &((rx_q->dma_erx + i)->basic);
1645 		else
1646 			p = rx_q->dma_rx + i;
1647 
1648 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1649 					     queue);
1650 		if (ret)
1651 			return ret;
1652 
1653 		rx_q->buf_alloc_num++;
1654 	}
1655 
1656 	return 0;
1657 }
1658 
1659 /**
1660  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1661  * @priv: private structure
1662  * @dma_conf: structure to take the dma data
1663  * @queue: RX queue index
1664  */
1665 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1666 				struct stmmac_dma_conf *dma_conf,
1667 				u32 queue)
1668 {
1669 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1670 	int i;
1671 
1672 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1673 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1674 
1675 		if (!buf->xdp)
1676 			continue;
1677 
1678 		xsk_buff_free(buf->xdp);
1679 		buf->xdp = NULL;
1680 	}
1681 }
1682 
1683 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1684 				      struct stmmac_dma_conf *dma_conf,
1685 				      u32 queue)
1686 {
1687 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1688 	int i;
1689 
1690 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1691 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1692 	 * use this macro to make sure no size violations.
1693 	 */
1694 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1695 
1696 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1697 		struct stmmac_rx_buffer *buf;
1698 		dma_addr_t dma_addr;
1699 		struct dma_desc *p;
1700 
1701 		if (priv->extend_desc)
1702 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1703 		else
1704 			p = rx_q->dma_rx + i;
1705 
1706 		buf = &rx_q->buf_pool[i];
1707 
1708 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1709 		if (!buf->xdp)
1710 			return -ENOMEM;
1711 
1712 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1713 		stmmac_set_desc_addr(priv, p, dma_addr);
1714 		rx_q->buf_alloc_num++;
1715 	}
1716 
1717 	return 0;
1718 }
1719 
1720 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1721 {
1722 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1723 		return NULL;
1724 
1725 	return xsk_get_pool_from_qid(priv->dev, queue);
1726 }
1727 
1728 /**
1729  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1730  * @priv: driver private structure
1731  * @dma_conf: structure to take the dma data
1732  * @queue: RX queue index
1733  * @flags: gfp flag.
1734  * Description: this function initializes the DMA RX descriptors
1735  * and allocates the socket buffers. It supports the chained and ring
1736  * modes.
1737  */
1738 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1739 				    struct stmmac_dma_conf *dma_conf,
1740 				    u32 queue, gfp_t flags)
1741 {
1742 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1743 	int ret;
1744 
1745 	netif_dbg(priv, probe, priv->dev,
1746 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1747 		  (u32)rx_q->dma_rx_phy);
1748 
1749 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1750 
1751 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1752 
1753 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1754 
1755 	if (rx_q->xsk_pool) {
1756 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1757 						   MEM_TYPE_XSK_BUFF_POOL,
1758 						   NULL));
1759 		netdev_info(priv->dev,
1760 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1761 			    rx_q->queue_index);
1762 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1763 	} else {
1764 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1765 						   MEM_TYPE_PAGE_POOL,
1766 						   rx_q->page_pool));
1767 		netdev_info(priv->dev,
1768 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1769 			    rx_q->queue_index);
1770 	}
1771 
1772 	if (rx_q->xsk_pool) {
1773 		/* RX XDP ZC buffer pool may not be populated, e.g.
1774 		 * xdpsock TX-only.
1775 		 */
1776 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1777 	} else {
1778 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1779 		if (ret < 0)
1780 			return -ENOMEM;
1781 	}
1782 
1783 	/* Setup the chained descriptor addresses */
1784 	if (priv->mode == STMMAC_CHAIN_MODE) {
1785 		if (priv->extend_desc)
1786 			stmmac_mode_init(priv, rx_q->dma_erx,
1787 					 rx_q->dma_rx_phy,
1788 					 dma_conf->dma_rx_size, 1);
1789 		else
1790 			stmmac_mode_init(priv, rx_q->dma_rx,
1791 					 rx_q->dma_rx_phy,
1792 					 dma_conf->dma_rx_size, 0);
1793 	}
1794 
1795 	return 0;
1796 }
1797 
1798 static int init_dma_rx_desc_rings(struct net_device *dev,
1799 				  struct stmmac_dma_conf *dma_conf,
1800 				  gfp_t flags)
1801 {
1802 	struct stmmac_priv *priv = netdev_priv(dev);
1803 	u32 rx_count = priv->plat->rx_queues_to_use;
1804 	int queue;
1805 	int ret;
1806 
1807 	/* RX INITIALIZATION */
1808 	netif_dbg(priv, probe, priv->dev,
1809 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1810 
1811 	for (queue = 0; queue < rx_count; queue++) {
1812 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1813 		if (ret)
1814 			goto err_init_rx_buffers;
1815 	}
1816 
1817 	return 0;
1818 
1819 err_init_rx_buffers:
1820 	while (queue >= 0) {
1821 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1822 
1823 		if (rx_q->xsk_pool)
1824 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1825 		else
1826 			dma_free_rx_skbufs(priv, dma_conf, queue);
1827 
1828 		rx_q->buf_alloc_num = 0;
1829 		rx_q->xsk_pool = NULL;
1830 
1831 		queue--;
1832 	}
1833 
1834 	return ret;
1835 }
1836 
1837 /**
1838  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1839  * @priv: driver private structure
1840  * @dma_conf: structure to take the dma data
1841  * @queue: TX queue index
1842  * Description: this function initializes the DMA TX descriptors
1843  * and allocates the socket buffers. It supports the chained and ring
1844  * modes.
1845  */
1846 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1847 				    struct stmmac_dma_conf *dma_conf,
1848 				    u32 queue)
1849 {
1850 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1851 	int i;
1852 
1853 	netif_dbg(priv, probe, priv->dev,
1854 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1855 		  (u32)tx_q->dma_tx_phy);
1856 
1857 	/* Setup the chained descriptor addresses */
1858 	if (priv->mode == STMMAC_CHAIN_MODE) {
1859 		if (priv->extend_desc)
1860 			stmmac_mode_init(priv, tx_q->dma_etx,
1861 					 tx_q->dma_tx_phy,
1862 					 dma_conf->dma_tx_size, 1);
1863 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1864 			stmmac_mode_init(priv, tx_q->dma_tx,
1865 					 tx_q->dma_tx_phy,
1866 					 dma_conf->dma_tx_size, 0);
1867 	}
1868 
1869 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1870 
1871 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1872 		struct dma_desc *p;
1873 
1874 		if (priv->extend_desc)
1875 			p = &((tx_q->dma_etx + i)->basic);
1876 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1877 			p = &((tx_q->dma_entx + i)->basic);
1878 		else
1879 			p = tx_q->dma_tx + i;
1880 
1881 		stmmac_clear_desc(priv, p);
1882 
1883 		tx_q->tx_skbuff_dma[i].buf = 0;
1884 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1885 		tx_q->tx_skbuff_dma[i].len = 0;
1886 		tx_q->tx_skbuff_dma[i].last_segment = false;
1887 		tx_q->tx_skbuff[i] = NULL;
1888 	}
1889 
1890 	return 0;
1891 }
1892 
1893 static int init_dma_tx_desc_rings(struct net_device *dev,
1894 				  struct stmmac_dma_conf *dma_conf)
1895 {
1896 	struct stmmac_priv *priv = netdev_priv(dev);
1897 	u32 tx_queue_cnt;
1898 	u32 queue;
1899 
1900 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1901 
1902 	for (queue = 0; queue < tx_queue_cnt; queue++)
1903 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1904 
1905 	return 0;
1906 }
1907 
1908 /**
1909  * init_dma_desc_rings - init the RX/TX descriptor rings
1910  * @dev: net device structure
1911  * @dma_conf: structure to take the dma data
1912  * @flags: gfp flag.
1913  * Description: this function initializes the DMA RX/TX descriptors
1914  * and allocates the socket buffers. It supports the chained and ring
1915  * modes.
1916  */
1917 static int init_dma_desc_rings(struct net_device *dev,
1918 			       struct stmmac_dma_conf *dma_conf,
1919 			       gfp_t flags)
1920 {
1921 	struct stmmac_priv *priv = netdev_priv(dev);
1922 	int ret;
1923 
1924 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1925 	if (ret)
1926 		return ret;
1927 
1928 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1929 
1930 	stmmac_clear_descriptors(priv, dma_conf);
1931 
1932 	if (netif_msg_hw(priv))
1933 		stmmac_display_rings(priv, dma_conf);
1934 
1935 	return ret;
1936 }
1937 
1938 /**
1939  * dma_free_tx_skbufs - free TX dma buffers
1940  * @priv: private structure
1941  * @dma_conf: structure to take the dma data
1942  * @queue: TX queue index
1943  */
1944 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1945 			       struct stmmac_dma_conf *dma_conf,
1946 			       u32 queue)
1947 {
1948 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1949 	int i;
1950 
1951 	tx_q->xsk_frames_done = 0;
1952 
1953 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1954 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1955 
1956 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1957 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1958 		tx_q->xsk_frames_done = 0;
1959 		tx_q->xsk_pool = NULL;
1960 	}
1961 }
1962 
1963 /**
1964  * stmmac_free_tx_skbufs - free TX skb buffers
1965  * @priv: private structure
1966  */
1967 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1968 {
1969 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1970 	u32 queue;
1971 
1972 	for (queue = 0; queue < tx_queue_cnt; queue++)
1973 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1974 }
1975 
1976 /**
1977  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1978  * @priv: private structure
1979  * @dma_conf: structure to take the dma data
1980  * @queue: RX queue index
1981  */
1982 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1983 					 struct stmmac_dma_conf *dma_conf,
1984 					 u32 queue)
1985 {
1986 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1987 
1988 	/* Release the DMA RX socket buffers */
1989 	if (rx_q->xsk_pool)
1990 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1991 	else
1992 		dma_free_rx_skbufs(priv, dma_conf, queue);
1993 
1994 	rx_q->buf_alloc_num = 0;
1995 	rx_q->xsk_pool = NULL;
1996 
1997 	/* Free DMA regions of consistent memory previously allocated */
1998 	if (!priv->extend_desc)
1999 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2000 				  sizeof(struct dma_desc),
2001 				  rx_q->dma_rx, rx_q->dma_rx_phy);
2002 	else
2003 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2004 				  sizeof(struct dma_extended_desc),
2005 				  rx_q->dma_erx, rx_q->dma_rx_phy);
2006 
2007 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
2008 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
2009 
2010 	kfree(rx_q->buf_pool);
2011 	if (rx_q->page_pool)
2012 		page_pool_destroy(rx_q->page_pool);
2013 }
2014 
2015 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2016 				       struct stmmac_dma_conf *dma_conf)
2017 {
2018 	u32 rx_count = priv->plat->rx_queues_to_use;
2019 	u32 queue;
2020 
2021 	/* Free RX queue resources */
2022 	for (queue = 0; queue < rx_count; queue++)
2023 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
2024 }
2025 
2026 /**
2027  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2028  * @priv: private structure
2029  * @dma_conf: structure to take the dma data
2030  * @queue: TX queue index
2031  */
2032 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2033 					 struct stmmac_dma_conf *dma_conf,
2034 					 u32 queue)
2035 {
2036 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2037 	size_t size;
2038 	void *addr;
2039 
2040 	/* Release the DMA TX socket buffers */
2041 	dma_free_tx_skbufs(priv, dma_conf, queue);
2042 
2043 	if (priv->extend_desc) {
2044 		size = sizeof(struct dma_extended_desc);
2045 		addr = tx_q->dma_etx;
2046 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2047 		size = sizeof(struct dma_edesc);
2048 		addr = tx_q->dma_entx;
2049 	} else {
2050 		size = sizeof(struct dma_desc);
2051 		addr = tx_q->dma_tx;
2052 	}
2053 
2054 	size *= dma_conf->dma_tx_size;
2055 
2056 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2057 
2058 	kfree(tx_q->tx_skbuff_dma);
2059 	kfree(tx_q->tx_skbuff);
2060 }
2061 
2062 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2063 				       struct stmmac_dma_conf *dma_conf)
2064 {
2065 	u32 tx_count = priv->plat->tx_queues_to_use;
2066 	u32 queue;
2067 
2068 	/* Free TX queue resources */
2069 	for (queue = 0; queue < tx_count; queue++)
2070 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2071 }
2072 
2073 /**
2074  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2075  * @priv: private structure
2076  * @dma_conf: structure to take the dma data
2077  * @queue: RX queue index
2078  * Description: according to which descriptor can be used (extend or basic)
2079  * this function allocates the resources for TX and RX paths. In case of
2080  * reception, for example, it pre-allocated the RX socket buffer in order to
2081  * allow zero-copy mechanism.
2082  */
2083 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2084 					 struct stmmac_dma_conf *dma_conf,
2085 					 u32 queue)
2086 {
2087 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2088 	struct stmmac_channel *ch = &priv->channel[queue];
2089 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2090 	struct page_pool_params pp_params = { 0 };
2091 	unsigned int dma_buf_sz_pad, num_pages;
2092 	unsigned int napi_id;
2093 	int ret;
2094 
2095 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2096 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2097 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2098 
2099 	rx_q->queue_index = queue;
2100 	rx_q->priv_data = priv;
2101 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2102 
2103 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2104 	pp_params.pool_size = dma_conf->dma_rx_size;
2105 	pp_params.order = order_base_2(num_pages);
2106 	pp_params.nid = dev_to_node(priv->device);
2107 	pp_params.dev = priv->device;
2108 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2109 	pp_params.offset = stmmac_rx_offset(priv);
2110 	pp_params.max_len = dma_conf->dma_buf_sz;
2111 
2112 	if (priv->sph) {
2113 		pp_params.offset = 0;
2114 		pp_params.max_len += stmmac_rx_offset(priv);
2115 	}
2116 
2117 	rx_q->page_pool = page_pool_create(&pp_params);
2118 	if (IS_ERR(rx_q->page_pool)) {
2119 		ret = PTR_ERR(rx_q->page_pool);
2120 		rx_q->page_pool = NULL;
2121 		return ret;
2122 	}
2123 
2124 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2125 				 sizeof(*rx_q->buf_pool),
2126 				 GFP_KERNEL);
2127 	if (!rx_q->buf_pool)
2128 		return -ENOMEM;
2129 
2130 	if (priv->extend_desc) {
2131 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2132 						   dma_conf->dma_rx_size *
2133 						   sizeof(struct dma_extended_desc),
2134 						   &rx_q->dma_rx_phy,
2135 						   GFP_KERNEL);
2136 		if (!rx_q->dma_erx)
2137 			return -ENOMEM;
2138 
2139 	} else {
2140 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2141 						  dma_conf->dma_rx_size *
2142 						  sizeof(struct dma_desc),
2143 						  &rx_q->dma_rx_phy,
2144 						  GFP_KERNEL);
2145 		if (!rx_q->dma_rx)
2146 			return -ENOMEM;
2147 	}
2148 
2149 	if (stmmac_xdp_is_enabled(priv) &&
2150 	    test_bit(queue, priv->af_xdp_zc_qps))
2151 		napi_id = ch->rxtx_napi.napi_id;
2152 	else
2153 		napi_id = ch->rx_napi.napi_id;
2154 
2155 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2156 			       rx_q->queue_index,
2157 			       napi_id);
2158 	if (ret) {
2159 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2160 		return -EINVAL;
2161 	}
2162 
2163 	return 0;
2164 }
2165 
2166 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2167 				       struct stmmac_dma_conf *dma_conf)
2168 {
2169 	u32 rx_count = priv->plat->rx_queues_to_use;
2170 	u32 queue;
2171 	int ret;
2172 
2173 	/* RX queues buffers and DMA */
2174 	for (queue = 0; queue < rx_count; queue++) {
2175 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2176 		if (ret)
2177 			goto err_dma;
2178 	}
2179 
2180 	return 0;
2181 
2182 err_dma:
2183 	free_dma_rx_desc_resources(priv, dma_conf);
2184 
2185 	return ret;
2186 }
2187 
2188 /**
2189  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2190  * @priv: private structure
2191  * @dma_conf: structure to take the dma data
2192  * @queue: TX queue index
2193  * Description: according to which descriptor can be used (extend or basic)
2194  * this function allocates the resources for TX and RX paths. In case of
2195  * reception, for example, it pre-allocated the RX socket buffer in order to
2196  * allow zero-copy mechanism.
2197  */
2198 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2199 					 struct stmmac_dma_conf *dma_conf,
2200 					 u32 queue)
2201 {
2202 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2203 	size_t size;
2204 	void *addr;
2205 
2206 	tx_q->queue_index = queue;
2207 	tx_q->priv_data = priv;
2208 
2209 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2210 				      sizeof(*tx_q->tx_skbuff_dma),
2211 				      GFP_KERNEL);
2212 	if (!tx_q->tx_skbuff_dma)
2213 		return -ENOMEM;
2214 
2215 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2216 				  sizeof(struct sk_buff *),
2217 				  GFP_KERNEL);
2218 	if (!tx_q->tx_skbuff)
2219 		return -ENOMEM;
2220 
2221 	if (priv->extend_desc)
2222 		size = sizeof(struct dma_extended_desc);
2223 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2224 		size = sizeof(struct dma_edesc);
2225 	else
2226 		size = sizeof(struct dma_desc);
2227 
2228 	size *= dma_conf->dma_tx_size;
2229 
2230 	addr = dma_alloc_coherent(priv->device, size,
2231 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2232 	if (!addr)
2233 		return -ENOMEM;
2234 
2235 	if (priv->extend_desc)
2236 		tx_q->dma_etx = addr;
2237 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2238 		tx_q->dma_entx = addr;
2239 	else
2240 		tx_q->dma_tx = addr;
2241 
2242 	return 0;
2243 }
2244 
2245 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2246 				       struct stmmac_dma_conf *dma_conf)
2247 {
2248 	u32 tx_count = priv->plat->tx_queues_to_use;
2249 	u32 queue;
2250 	int ret;
2251 
2252 	/* TX queues buffers and DMA */
2253 	for (queue = 0; queue < tx_count; queue++) {
2254 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2255 		if (ret)
2256 			goto err_dma;
2257 	}
2258 
2259 	return 0;
2260 
2261 err_dma:
2262 	free_dma_tx_desc_resources(priv, dma_conf);
2263 	return ret;
2264 }
2265 
2266 /**
2267  * alloc_dma_desc_resources - alloc TX/RX resources.
2268  * @priv: private structure
2269  * @dma_conf: structure to take the dma data
2270  * Description: according to which descriptor can be used (extend or basic)
2271  * this function allocates the resources for TX and RX paths. In case of
2272  * reception, for example, it pre-allocated the RX socket buffer in order to
2273  * allow zero-copy mechanism.
2274  */
2275 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2276 				    struct stmmac_dma_conf *dma_conf)
2277 {
2278 	/* RX Allocation */
2279 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2280 
2281 	if (ret)
2282 		return ret;
2283 
2284 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2285 
2286 	return ret;
2287 }
2288 
2289 /**
2290  * free_dma_desc_resources - free dma desc resources
2291  * @priv: private structure
2292  * @dma_conf: structure to take the dma data
2293  */
2294 static void free_dma_desc_resources(struct stmmac_priv *priv,
2295 				    struct stmmac_dma_conf *dma_conf)
2296 {
2297 	/* Release the DMA TX socket buffers */
2298 	free_dma_tx_desc_resources(priv, dma_conf);
2299 
2300 	/* Release the DMA RX socket buffers later
2301 	 * to ensure all pending XDP_TX buffers are returned.
2302 	 */
2303 	free_dma_rx_desc_resources(priv, dma_conf);
2304 }
2305 
2306 /**
2307  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2308  *  @priv: driver private structure
2309  *  Description: It is used for enabling the rx queues in the MAC
2310  */
2311 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2312 {
2313 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2314 	int queue;
2315 	u8 mode;
2316 
2317 	for (queue = 0; queue < rx_queues_count; queue++) {
2318 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2319 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2320 	}
2321 }
2322 
2323 /**
2324  * stmmac_start_rx_dma - start RX DMA channel
2325  * @priv: driver private structure
2326  * @chan: RX channel index
2327  * Description:
2328  * This starts a RX DMA channel
2329  */
2330 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2331 {
2332 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2333 	stmmac_start_rx(priv, priv->ioaddr, chan);
2334 }
2335 
2336 /**
2337  * stmmac_start_tx_dma - start TX DMA channel
2338  * @priv: driver private structure
2339  * @chan: TX channel index
2340  * Description:
2341  * This starts a TX DMA channel
2342  */
2343 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2344 {
2345 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2346 	stmmac_start_tx(priv, priv->ioaddr, chan);
2347 }
2348 
2349 /**
2350  * stmmac_stop_rx_dma - stop RX DMA channel
2351  * @priv: driver private structure
2352  * @chan: RX channel index
2353  * Description:
2354  * This stops a RX DMA channel
2355  */
2356 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2357 {
2358 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2359 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2360 }
2361 
2362 /**
2363  * stmmac_stop_tx_dma - stop TX DMA channel
2364  * @priv: driver private structure
2365  * @chan: TX channel index
2366  * Description:
2367  * This stops a TX DMA channel
2368  */
2369 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2370 {
2371 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2372 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2373 }
2374 
2375 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2376 {
2377 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2378 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2379 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2380 	u32 chan;
2381 
2382 	for (chan = 0; chan < dma_csr_ch; chan++) {
2383 		struct stmmac_channel *ch = &priv->channel[chan];
2384 		unsigned long flags;
2385 
2386 		spin_lock_irqsave(&ch->lock, flags);
2387 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2388 		spin_unlock_irqrestore(&ch->lock, flags);
2389 	}
2390 }
2391 
2392 /**
2393  * stmmac_start_all_dma - start all RX and TX DMA channels
2394  * @priv: driver private structure
2395  * Description:
2396  * This starts all the RX and TX DMA channels
2397  */
2398 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2399 {
2400 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2401 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2402 	u32 chan = 0;
2403 
2404 	for (chan = 0; chan < rx_channels_count; chan++)
2405 		stmmac_start_rx_dma(priv, chan);
2406 
2407 	for (chan = 0; chan < tx_channels_count; chan++)
2408 		stmmac_start_tx_dma(priv, chan);
2409 }
2410 
2411 /**
2412  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2413  * @priv: driver private structure
2414  * Description:
2415  * This stops the RX and TX DMA channels
2416  */
2417 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2418 {
2419 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2420 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2421 	u32 chan = 0;
2422 
2423 	for (chan = 0; chan < rx_channels_count; chan++)
2424 		stmmac_stop_rx_dma(priv, chan);
2425 
2426 	for (chan = 0; chan < tx_channels_count; chan++)
2427 		stmmac_stop_tx_dma(priv, chan);
2428 }
2429 
2430 /**
2431  *  stmmac_dma_operation_mode - HW DMA operation mode
2432  *  @priv: driver private structure
2433  *  Description: it is used for configuring the DMA operation mode register in
2434  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2435  */
2436 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2437 {
2438 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2439 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2440 	int rxfifosz = priv->plat->rx_fifo_size;
2441 	int txfifosz = priv->plat->tx_fifo_size;
2442 	u32 txmode = 0;
2443 	u32 rxmode = 0;
2444 	u32 chan = 0;
2445 	u8 qmode = 0;
2446 
2447 	if (rxfifosz == 0)
2448 		rxfifosz = priv->dma_cap.rx_fifo_size;
2449 	if (txfifosz == 0)
2450 		txfifosz = priv->dma_cap.tx_fifo_size;
2451 
2452 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2453 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2454 		rxfifosz /= rx_channels_count;
2455 		txfifosz /= tx_channels_count;
2456 	}
2457 
2458 	if (priv->plat->force_thresh_dma_mode) {
2459 		txmode = tc;
2460 		rxmode = tc;
2461 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2462 		/*
2463 		 * In case of GMAC, SF mode can be enabled
2464 		 * to perform the TX COE in HW. This depends on:
2465 		 * 1) TX COE if actually supported
2466 		 * 2) There is no bugged Jumbo frame support
2467 		 *    that needs to not insert csum in the TDES.
2468 		 */
2469 		txmode = SF_DMA_MODE;
2470 		rxmode = SF_DMA_MODE;
2471 		priv->xstats.threshold = SF_DMA_MODE;
2472 	} else {
2473 		txmode = tc;
2474 		rxmode = SF_DMA_MODE;
2475 	}
2476 
2477 	/* configure all channels */
2478 	for (chan = 0; chan < rx_channels_count; chan++) {
2479 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2480 		u32 buf_size;
2481 
2482 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2483 
2484 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2485 				rxfifosz, qmode);
2486 
2487 		if (rx_q->xsk_pool) {
2488 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2489 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2490 					      buf_size,
2491 					      chan);
2492 		} else {
2493 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2494 					      priv->dma_conf.dma_buf_sz,
2495 					      chan);
2496 		}
2497 	}
2498 
2499 	for (chan = 0; chan < tx_channels_count; chan++) {
2500 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2501 
2502 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2503 				txfifosz, qmode);
2504 	}
2505 }
2506 
2507 static void stmmac_xsk_request_timestamp(void *_priv)
2508 {
2509 	struct stmmac_metadata_request *meta_req = _priv;
2510 
2511 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2512 	*meta_req->set_ic = true;
2513 }
2514 
2515 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2516 {
2517 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2518 	struct stmmac_priv *priv = tx_compl->priv;
2519 	struct dma_desc *desc = tx_compl->desc;
2520 	bool found = false;
2521 	u64 ns = 0;
2522 
2523 	if (!priv->hwts_tx_en)
2524 		return 0;
2525 
2526 	/* check tx tstamp status */
2527 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2528 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2529 		found = true;
2530 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2531 		found = true;
2532 	}
2533 
2534 	if (found) {
2535 		ns -= priv->plat->cdc_error_adj;
2536 		return ns_to_ktime(ns);
2537 	}
2538 
2539 	return 0;
2540 }
2541 
2542 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2543 {
2544 	struct timespec64 ts = ns_to_timespec64(launch_time);
2545 	struct stmmac_metadata_request *meta_req = _priv;
2546 
2547 	if (meta_req->tbs & STMMAC_TBS_EN)
2548 		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2549 				    ts.tv_nsec);
2550 }
2551 
2552 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2553 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2554 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2555 	.tmo_request_launch_time	= stmmac_xsk_request_launch_time,
2556 };
2557 
2558 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2559 {
2560 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2561 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2562 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2563 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2564 	unsigned int entry = tx_q->cur_tx;
2565 	struct dma_desc *tx_desc = NULL;
2566 	struct xdp_desc xdp_desc;
2567 	bool work_done = true;
2568 	u32 tx_set_ic_bit = 0;
2569 
2570 	/* Avoids TX time-out as we are sharing with slow path */
2571 	txq_trans_cond_update(nq);
2572 
2573 	budget = min(budget, stmmac_tx_avail(priv, queue));
2574 
2575 	while (budget-- > 0) {
2576 		struct stmmac_metadata_request meta_req;
2577 		struct xsk_tx_metadata *meta = NULL;
2578 		dma_addr_t dma_addr;
2579 		bool set_ic;
2580 
2581 		/* We are sharing with slow path and stop XSK TX desc submission when
2582 		 * available TX ring is less than threshold.
2583 		 */
2584 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2585 		    !netif_carrier_ok(priv->dev)) {
2586 			work_done = false;
2587 			break;
2588 		}
2589 
2590 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2591 			break;
2592 
2593 		if (priv->est && priv->est->enable &&
2594 		    priv->est->max_sdu[queue] &&
2595 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2596 			priv->xstats.max_sdu_txq_drop[queue]++;
2597 			continue;
2598 		}
2599 
2600 		if (likely(priv->extend_desc))
2601 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2602 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2603 			tx_desc = &tx_q->dma_entx[entry].basic;
2604 		else
2605 			tx_desc = tx_q->dma_tx + entry;
2606 
2607 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2608 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2609 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2610 
2611 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2612 
2613 		/* To return XDP buffer to XSK pool, we simple call
2614 		 * xsk_tx_completed(), so we don't need to fill up
2615 		 * 'buf' and 'xdpf'.
2616 		 */
2617 		tx_q->tx_skbuff_dma[entry].buf = 0;
2618 		tx_q->xdpf[entry] = NULL;
2619 
2620 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2621 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2622 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2623 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2624 
2625 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2626 
2627 		tx_q->tx_count_frames++;
2628 
2629 		if (!priv->tx_coal_frames[queue])
2630 			set_ic = false;
2631 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2632 			set_ic = true;
2633 		else
2634 			set_ic = false;
2635 
2636 		meta_req.priv = priv;
2637 		meta_req.tx_desc = tx_desc;
2638 		meta_req.set_ic = &set_ic;
2639 		meta_req.tbs = tx_q->tbs;
2640 		meta_req.edesc = &tx_q->dma_entx[entry];
2641 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2642 					&meta_req);
2643 		if (set_ic) {
2644 			tx_q->tx_count_frames = 0;
2645 			stmmac_set_tx_ic(priv, tx_desc);
2646 			tx_set_ic_bit++;
2647 		}
2648 
2649 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2650 				       true, priv->mode, true, true,
2651 				       xdp_desc.len);
2652 
2653 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2654 
2655 		xsk_tx_metadata_to_compl(meta,
2656 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2657 
2658 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2659 		entry = tx_q->cur_tx;
2660 	}
2661 	u64_stats_update_begin(&txq_stats->napi_syncp);
2662 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2663 	u64_stats_update_end(&txq_stats->napi_syncp);
2664 
2665 	if (tx_desc) {
2666 		stmmac_flush_tx_descriptors(priv, queue);
2667 		xsk_tx_release(pool);
2668 	}
2669 
2670 	/* Return true if all of the 3 conditions are met
2671 	 *  a) TX Budget is still available
2672 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2673 	 *     pending XSK TX for transmission)
2674 	 */
2675 	return !!budget && work_done;
2676 }
2677 
2678 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2679 {
2680 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2681 		tc += 64;
2682 
2683 		if (priv->plat->force_thresh_dma_mode)
2684 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2685 		else
2686 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2687 						      chan);
2688 
2689 		priv->xstats.threshold = tc;
2690 	}
2691 }
2692 
2693 /**
2694  * stmmac_tx_clean - to manage the transmission completion
2695  * @priv: driver private structure
2696  * @budget: napi budget limiting this functions packet handling
2697  * @queue: TX queue index
2698  * @pending_packets: signal to arm the TX coal timer
2699  * Description: it reclaims the transmit resources after transmission completes.
2700  * If some packets still needs to be handled, due to TX coalesce, set
2701  * pending_packets to true to make NAPI arm the TX coal timer.
2702  */
2703 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2704 			   bool *pending_packets)
2705 {
2706 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2707 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2708 	unsigned int bytes_compl = 0, pkts_compl = 0;
2709 	unsigned int entry, xmits = 0, count = 0;
2710 	u32 tx_packets = 0, tx_errors = 0;
2711 
2712 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2713 
2714 	tx_q->xsk_frames_done = 0;
2715 
2716 	entry = tx_q->dirty_tx;
2717 
2718 	/* Try to clean all TX complete frame in 1 shot */
2719 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2720 		struct xdp_frame *xdpf;
2721 		struct sk_buff *skb;
2722 		struct dma_desc *p;
2723 		int status;
2724 
2725 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2726 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2727 			xdpf = tx_q->xdpf[entry];
2728 			skb = NULL;
2729 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2730 			xdpf = NULL;
2731 			skb = tx_q->tx_skbuff[entry];
2732 		} else {
2733 			xdpf = NULL;
2734 			skb = NULL;
2735 		}
2736 
2737 		if (priv->extend_desc)
2738 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2739 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2740 			p = &tx_q->dma_entx[entry].basic;
2741 		else
2742 			p = tx_q->dma_tx + entry;
2743 
2744 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2745 		/* Check if the descriptor is owned by the DMA */
2746 		if (unlikely(status & tx_dma_own))
2747 			break;
2748 
2749 		count++;
2750 
2751 		/* Make sure descriptor fields are read after reading
2752 		 * the own bit.
2753 		 */
2754 		dma_rmb();
2755 
2756 		/* Just consider the last segment and ...*/
2757 		if (likely(!(status & tx_not_ls))) {
2758 			/* ... verify the status error condition */
2759 			if (unlikely(status & tx_err)) {
2760 				tx_errors++;
2761 				if (unlikely(status & tx_err_bump_tc))
2762 					stmmac_bump_dma_threshold(priv, queue);
2763 			} else {
2764 				tx_packets++;
2765 			}
2766 			if (skb) {
2767 				stmmac_get_tx_hwtstamp(priv, p, skb);
2768 			} else if (tx_q->xsk_pool &&
2769 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2770 				struct stmmac_xsk_tx_complete tx_compl = {
2771 					.priv = priv,
2772 					.desc = p,
2773 				};
2774 
2775 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2776 							 &stmmac_xsk_tx_metadata_ops,
2777 							 &tx_compl);
2778 			}
2779 		}
2780 
2781 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2782 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2783 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2784 				dma_unmap_page(priv->device,
2785 					       tx_q->tx_skbuff_dma[entry].buf,
2786 					       tx_q->tx_skbuff_dma[entry].len,
2787 					       DMA_TO_DEVICE);
2788 			else
2789 				dma_unmap_single(priv->device,
2790 						 tx_q->tx_skbuff_dma[entry].buf,
2791 						 tx_q->tx_skbuff_dma[entry].len,
2792 						 DMA_TO_DEVICE);
2793 			tx_q->tx_skbuff_dma[entry].buf = 0;
2794 			tx_q->tx_skbuff_dma[entry].len = 0;
2795 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2796 		}
2797 
2798 		stmmac_clean_desc3(priv, tx_q, p);
2799 
2800 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2801 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2802 
2803 		if (xdpf &&
2804 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2805 			xdp_return_frame_rx_napi(xdpf);
2806 			tx_q->xdpf[entry] = NULL;
2807 		}
2808 
2809 		if (xdpf &&
2810 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2811 			xdp_return_frame(xdpf);
2812 			tx_q->xdpf[entry] = NULL;
2813 		}
2814 
2815 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2816 			tx_q->xsk_frames_done++;
2817 
2818 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2819 			if (likely(skb)) {
2820 				pkts_compl++;
2821 				bytes_compl += skb->len;
2822 				dev_consume_skb_any(skb);
2823 				tx_q->tx_skbuff[entry] = NULL;
2824 			}
2825 		}
2826 
2827 		stmmac_release_tx_desc(priv, p, priv->mode);
2828 
2829 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2830 	}
2831 	tx_q->dirty_tx = entry;
2832 
2833 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2834 				  pkts_compl, bytes_compl);
2835 
2836 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2837 								queue))) &&
2838 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2839 
2840 		netif_dbg(priv, tx_done, priv->dev,
2841 			  "%s: restart transmit\n", __func__);
2842 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2843 	}
2844 
2845 	if (tx_q->xsk_pool) {
2846 		bool work_done;
2847 
2848 		if (tx_q->xsk_frames_done)
2849 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2850 
2851 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2852 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2853 
2854 		/* For XSK TX, we try to send as many as possible.
2855 		 * If XSK work done (XSK TX desc empty and budget still
2856 		 * available), return "budget - 1" to reenable TX IRQ.
2857 		 * Else, return "budget" to make NAPI continue polling.
2858 		 */
2859 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2860 					       STMMAC_XSK_TX_BUDGET_MAX);
2861 		if (work_done)
2862 			xmits = budget - 1;
2863 		else
2864 			xmits = budget;
2865 	}
2866 
2867 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2868 		stmmac_restart_sw_lpi_timer(priv);
2869 
2870 	/* We still have pending packets, let's call for a new scheduling */
2871 	if (tx_q->dirty_tx != tx_q->cur_tx)
2872 		*pending_packets = true;
2873 
2874 	u64_stats_update_begin(&txq_stats->napi_syncp);
2875 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2876 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2877 	u64_stats_inc(&txq_stats->napi.tx_clean);
2878 	u64_stats_update_end(&txq_stats->napi_syncp);
2879 
2880 	priv->xstats.tx_errors += tx_errors;
2881 
2882 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2883 
2884 	/* Combine decisions from TX clean and XSK TX */
2885 	return max(count, xmits);
2886 }
2887 
2888 /**
2889  * stmmac_tx_err - to manage the tx error
2890  * @priv: driver private structure
2891  * @chan: channel index
2892  * Description: it cleans the descriptors and restarts the transmission
2893  * in case of transmission errors.
2894  */
2895 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2896 {
2897 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2898 
2899 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2900 
2901 	stmmac_stop_tx_dma(priv, chan);
2902 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2903 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2904 	stmmac_reset_tx_queue(priv, chan);
2905 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2906 			    tx_q->dma_tx_phy, chan);
2907 	stmmac_start_tx_dma(priv, chan);
2908 
2909 	priv->xstats.tx_errors++;
2910 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2911 }
2912 
2913 /**
2914  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2915  *  @priv: driver private structure
2916  *  @txmode: TX operating mode
2917  *  @rxmode: RX operating mode
2918  *  @chan: channel index
2919  *  Description: it is used for configuring of the DMA operation mode in
2920  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2921  *  mode.
2922  */
2923 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2924 					  u32 rxmode, u32 chan)
2925 {
2926 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2927 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2928 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2929 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2930 	int rxfifosz = priv->plat->rx_fifo_size;
2931 	int txfifosz = priv->plat->tx_fifo_size;
2932 
2933 	if (rxfifosz == 0)
2934 		rxfifosz = priv->dma_cap.rx_fifo_size;
2935 	if (txfifosz == 0)
2936 		txfifosz = priv->dma_cap.tx_fifo_size;
2937 
2938 	/* Adjust for real per queue fifo size */
2939 	rxfifosz /= rx_channels_count;
2940 	txfifosz /= tx_channels_count;
2941 
2942 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2943 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2944 }
2945 
2946 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2947 {
2948 	int ret;
2949 
2950 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2951 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2952 	if (ret && (ret != -EINVAL)) {
2953 		stmmac_global_err(priv);
2954 		return true;
2955 	}
2956 
2957 	return false;
2958 }
2959 
2960 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2961 {
2962 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2963 						 &priv->xstats, chan, dir);
2964 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2965 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2966 	struct stmmac_channel *ch = &priv->channel[chan];
2967 	struct napi_struct *rx_napi;
2968 	struct napi_struct *tx_napi;
2969 	unsigned long flags;
2970 
2971 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2972 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2973 
2974 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2975 		if (napi_schedule_prep(rx_napi)) {
2976 			spin_lock_irqsave(&ch->lock, flags);
2977 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2978 			spin_unlock_irqrestore(&ch->lock, flags);
2979 			__napi_schedule(rx_napi);
2980 		}
2981 	}
2982 
2983 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2984 		if (napi_schedule_prep(tx_napi)) {
2985 			spin_lock_irqsave(&ch->lock, flags);
2986 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2987 			spin_unlock_irqrestore(&ch->lock, flags);
2988 			__napi_schedule(tx_napi);
2989 		}
2990 	}
2991 
2992 	return status;
2993 }
2994 
2995 /**
2996  * stmmac_dma_interrupt - DMA ISR
2997  * @priv: driver private structure
2998  * Description: this is the DMA ISR. It is called by the main ISR.
2999  * It calls the dwmac dma routine and schedule poll method in case of some
3000  * work can be done.
3001  */
3002 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
3003 {
3004 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3005 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3006 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
3007 				tx_channel_count : rx_channel_count;
3008 	u32 chan;
3009 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
3010 
3011 	/* Make sure we never check beyond our status buffer. */
3012 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
3013 		channels_to_check = ARRAY_SIZE(status);
3014 
3015 	for (chan = 0; chan < channels_to_check; chan++)
3016 		status[chan] = stmmac_napi_check(priv, chan,
3017 						 DMA_DIR_RXTX);
3018 
3019 	for (chan = 0; chan < tx_channel_count; chan++) {
3020 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
3021 			/* Try to bump up the dma threshold on this failure */
3022 			stmmac_bump_dma_threshold(priv, chan);
3023 		} else if (unlikely(status[chan] == tx_hard_error)) {
3024 			stmmac_tx_err(priv, chan);
3025 		}
3026 	}
3027 }
3028 
3029 /**
3030  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
3031  * @priv: driver private structure
3032  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
3033  */
3034 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3035 {
3036 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3037 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3038 
3039 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3040 
3041 	if (priv->dma_cap.rmon) {
3042 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3043 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3044 	} else
3045 		netdev_info(priv->dev, "No MAC Management Counters available\n");
3046 }
3047 
3048 /**
3049  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3050  * @priv: driver private structure
3051  * Description:
3052  *  new GMAC chip generations have a new register to indicate the
3053  *  presence of the optional feature/functions.
3054  *  This can be also used to override the value passed through the
3055  *  platform and necessary for old MAC10/100 and GMAC chips.
3056  */
3057 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3058 {
3059 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3060 }
3061 
3062 /**
3063  * stmmac_check_ether_addr - check if the MAC addr is valid
3064  * @priv: driver private structure
3065  * Description:
3066  * it is to verify if the MAC address is valid, in case of failures it
3067  * generates a random MAC address
3068  */
3069 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3070 {
3071 	u8 addr[ETH_ALEN];
3072 
3073 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3074 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3075 		if (is_valid_ether_addr(addr))
3076 			eth_hw_addr_set(priv->dev, addr);
3077 		else
3078 			eth_hw_addr_random(priv->dev);
3079 		dev_info(priv->device, "device MAC address %pM\n",
3080 			 priv->dev->dev_addr);
3081 	}
3082 }
3083 
3084 /**
3085  * stmmac_init_dma_engine - DMA init.
3086  * @priv: driver private structure
3087  * Description:
3088  * It inits the DMA invoking the specific MAC/GMAC callback.
3089  * Some DMA parameters can be passed from the platform;
3090  * in case of these are not passed a default is kept for the MAC or GMAC.
3091  */
3092 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3093 {
3094 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3095 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3096 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3097 	struct stmmac_rx_queue *rx_q;
3098 	struct stmmac_tx_queue *tx_q;
3099 	u32 chan = 0;
3100 	int ret = 0;
3101 
3102 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3103 		netdev_err(priv->dev, "Invalid DMA configuration\n");
3104 		return -EINVAL;
3105 	}
3106 
3107 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3108 		priv->plat->dma_cfg->atds = 1;
3109 
3110 	ret = stmmac_reset(priv, priv->ioaddr);
3111 	if (ret) {
3112 		netdev_err(priv->dev, "Failed to reset the dma\n");
3113 		return ret;
3114 	}
3115 
3116 	/* DMA Configuration */
3117 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3118 
3119 	if (priv->plat->axi)
3120 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3121 
3122 	/* DMA CSR Channel configuration */
3123 	for (chan = 0; chan < dma_csr_ch; chan++) {
3124 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3125 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3126 	}
3127 
3128 	/* DMA RX Channel Configuration */
3129 	for (chan = 0; chan < rx_channels_count; chan++) {
3130 		rx_q = &priv->dma_conf.rx_queue[chan];
3131 
3132 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3133 				    rx_q->dma_rx_phy, chan);
3134 
3135 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3136 				     (rx_q->buf_alloc_num *
3137 				      sizeof(struct dma_desc));
3138 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3139 				       rx_q->rx_tail_addr, chan);
3140 	}
3141 
3142 	/* DMA TX Channel Configuration */
3143 	for (chan = 0; chan < tx_channels_count; chan++) {
3144 		tx_q = &priv->dma_conf.tx_queue[chan];
3145 
3146 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3147 				    tx_q->dma_tx_phy, chan);
3148 
3149 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3150 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3151 				       tx_q->tx_tail_addr, chan);
3152 	}
3153 
3154 	return ret;
3155 }
3156 
3157 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3158 {
3159 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3160 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3161 	struct stmmac_channel *ch;
3162 	struct napi_struct *napi;
3163 
3164 	if (!tx_coal_timer)
3165 		return;
3166 
3167 	ch = &priv->channel[tx_q->queue_index];
3168 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3169 
3170 	/* Arm timer only if napi is not already scheduled.
3171 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3172 	 * again in the next scheduled napi.
3173 	 */
3174 	if (unlikely(!napi_is_scheduled(napi)))
3175 		hrtimer_start(&tx_q->txtimer,
3176 			      STMMAC_COAL_TIMER(tx_coal_timer),
3177 			      HRTIMER_MODE_REL);
3178 	else
3179 		hrtimer_try_to_cancel(&tx_q->txtimer);
3180 }
3181 
3182 /**
3183  * stmmac_tx_timer - mitigation sw timer for tx.
3184  * @t: data pointer
3185  * Description:
3186  * This is the timer handler to directly invoke the stmmac_tx_clean.
3187  */
3188 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3189 {
3190 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3191 	struct stmmac_priv *priv = tx_q->priv_data;
3192 	struct stmmac_channel *ch;
3193 	struct napi_struct *napi;
3194 
3195 	ch = &priv->channel[tx_q->queue_index];
3196 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3197 
3198 	if (likely(napi_schedule_prep(napi))) {
3199 		unsigned long flags;
3200 
3201 		spin_lock_irqsave(&ch->lock, flags);
3202 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3203 		spin_unlock_irqrestore(&ch->lock, flags);
3204 		__napi_schedule(napi);
3205 	}
3206 
3207 	return HRTIMER_NORESTART;
3208 }
3209 
3210 /**
3211  * stmmac_init_coalesce - init mitigation options.
3212  * @priv: driver private structure
3213  * Description:
3214  * This inits the coalesce parameters: i.e. timer rate,
3215  * timer handler and default threshold used for enabling the
3216  * interrupt on completion bit.
3217  */
3218 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3219 {
3220 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3221 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3222 	u32 chan;
3223 
3224 	for (chan = 0; chan < tx_channel_count; chan++) {
3225 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3226 
3227 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3228 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3229 
3230 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3231 		tx_q->txtimer.function = stmmac_tx_timer;
3232 	}
3233 
3234 	for (chan = 0; chan < rx_channel_count; chan++)
3235 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3236 }
3237 
3238 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3239 {
3240 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3241 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3242 	u32 chan;
3243 
3244 	/* set TX ring length */
3245 	for (chan = 0; chan < tx_channels_count; chan++)
3246 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3247 				       (priv->dma_conf.dma_tx_size - 1), chan);
3248 
3249 	/* set RX ring length */
3250 	for (chan = 0; chan < rx_channels_count; chan++)
3251 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3252 				       (priv->dma_conf.dma_rx_size - 1), chan);
3253 }
3254 
3255 /**
3256  *  stmmac_set_tx_queue_weight - Set TX queue weight
3257  *  @priv: driver private structure
3258  *  Description: It is used for setting TX queues weight
3259  */
3260 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3261 {
3262 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3263 	u32 weight;
3264 	u32 queue;
3265 
3266 	for (queue = 0; queue < tx_queues_count; queue++) {
3267 		weight = priv->plat->tx_queues_cfg[queue].weight;
3268 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3269 	}
3270 }
3271 
3272 /**
3273  *  stmmac_configure_cbs - Configure CBS in TX queue
3274  *  @priv: driver private structure
3275  *  Description: It is used for configuring CBS in AVB TX queues
3276  */
3277 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3278 {
3279 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3280 	u32 mode_to_use;
3281 	u32 queue;
3282 
3283 	/* queue 0 is reserved for legacy traffic */
3284 	for (queue = 1; queue < tx_queues_count; queue++) {
3285 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3286 		if (mode_to_use == MTL_QUEUE_DCB)
3287 			continue;
3288 
3289 		stmmac_config_cbs(priv, priv->hw,
3290 				priv->plat->tx_queues_cfg[queue].send_slope,
3291 				priv->plat->tx_queues_cfg[queue].idle_slope,
3292 				priv->plat->tx_queues_cfg[queue].high_credit,
3293 				priv->plat->tx_queues_cfg[queue].low_credit,
3294 				queue);
3295 	}
3296 }
3297 
3298 /**
3299  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3300  *  @priv: driver private structure
3301  *  Description: It is used for mapping RX queues to RX dma channels
3302  */
3303 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3304 {
3305 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3306 	u32 queue;
3307 	u32 chan;
3308 
3309 	for (queue = 0; queue < rx_queues_count; queue++) {
3310 		chan = priv->plat->rx_queues_cfg[queue].chan;
3311 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3312 	}
3313 }
3314 
3315 /**
3316  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3317  *  @priv: driver private structure
3318  *  Description: It is used for configuring the RX Queue Priority
3319  */
3320 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3321 {
3322 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3323 	u32 queue;
3324 	u32 prio;
3325 
3326 	for (queue = 0; queue < rx_queues_count; queue++) {
3327 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3328 			continue;
3329 
3330 		prio = priv->plat->rx_queues_cfg[queue].prio;
3331 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3332 	}
3333 }
3334 
3335 /**
3336  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3337  *  @priv: driver private structure
3338  *  Description: It is used for configuring the TX Queue Priority
3339  */
3340 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3341 {
3342 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3343 	u32 queue;
3344 	u32 prio;
3345 
3346 	for (queue = 0; queue < tx_queues_count; queue++) {
3347 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3348 			continue;
3349 
3350 		prio = priv->plat->tx_queues_cfg[queue].prio;
3351 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3352 	}
3353 }
3354 
3355 /**
3356  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3357  *  @priv: driver private structure
3358  *  Description: It is used for configuring the RX queue routing
3359  */
3360 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3361 {
3362 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3363 	u32 queue;
3364 	u8 packet;
3365 
3366 	for (queue = 0; queue < rx_queues_count; queue++) {
3367 		/* no specific packet type routing specified for the queue */
3368 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3369 			continue;
3370 
3371 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3372 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3373 	}
3374 }
3375 
3376 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3377 {
3378 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3379 		priv->rss.enable = false;
3380 		return;
3381 	}
3382 
3383 	if (priv->dev->features & NETIF_F_RXHASH)
3384 		priv->rss.enable = true;
3385 	else
3386 		priv->rss.enable = false;
3387 
3388 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3389 			     priv->plat->rx_queues_to_use);
3390 }
3391 
3392 /**
3393  *  stmmac_mtl_configuration - Configure MTL
3394  *  @priv: driver private structure
3395  *  Description: It is used for configurring MTL
3396  */
3397 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3398 {
3399 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3400 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3401 
3402 	if (tx_queues_count > 1)
3403 		stmmac_set_tx_queue_weight(priv);
3404 
3405 	/* Configure MTL RX algorithms */
3406 	if (rx_queues_count > 1)
3407 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3408 				priv->plat->rx_sched_algorithm);
3409 
3410 	/* Configure MTL TX algorithms */
3411 	if (tx_queues_count > 1)
3412 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3413 				priv->plat->tx_sched_algorithm);
3414 
3415 	/* Configure CBS in AVB TX queues */
3416 	if (tx_queues_count > 1)
3417 		stmmac_configure_cbs(priv);
3418 
3419 	/* Map RX MTL to DMA channels */
3420 	stmmac_rx_queue_dma_chan_map(priv);
3421 
3422 	/* Enable MAC RX Queues */
3423 	stmmac_mac_enable_rx_queues(priv);
3424 
3425 	/* Set RX priorities */
3426 	if (rx_queues_count > 1)
3427 		stmmac_mac_config_rx_queues_prio(priv);
3428 
3429 	/* Set TX priorities */
3430 	if (tx_queues_count > 1)
3431 		stmmac_mac_config_tx_queues_prio(priv);
3432 
3433 	/* Set RX routing */
3434 	if (rx_queues_count > 1)
3435 		stmmac_mac_config_rx_queues_routing(priv);
3436 
3437 	/* Receive Side Scaling */
3438 	if (rx_queues_count > 1)
3439 		stmmac_mac_config_rss(priv);
3440 }
3441 
3442 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3443 {
3444 	if (priv->dma_cap.asp) {
3445 		netdev_info(priv->dev, "Enabling Safety Features\n");
3446 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3447 					  priv->plat->safety_feat_cfg);
3448 	} else {
3449 		netdev_info(priv->dev, "No Safety Features support found\n");
3450 	}
3451 }
3452 
3453 /**
3454  * stmmac_hw_setup - setup mac in a usable state.
3455  *  @dev : pointer to the device structure.
3456  *  @ptp_register: register PTP if set
3457  *  Description:
3458  *  this is the main function to setup the HW in a usable state because the
3459  *  dma engine is reset, the core registers are configured (e.g. AXI,
3460  *  Checksum features, timers). The DMA is ready to start receiving and
3461  *  transmitting.
3462  *  Return value:
3463  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3464  *  file on failure.
3465  */
3466 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3467 {
3468 	struct stmmac_priv *priv = netdev_priv(dev);
3469 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3470 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3471 	bool sph_en;
3472 	u32 chan;
3473 	int ret;
3474 
3475 	/* Make sure RX clock is enabled */
3476 	if (priv->hw->phylink_pcs)
3477 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3478 
3479 	/* DMA initialization and SW reset */
3480 	ret = stmmac_init_dma_engine(priv);
3481 	if (ret < 0) {
3482 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3483 			   __func__);
3484 		return ret;
3485 	}
3486 
3487 	/* Copy the MAC addr into the HW  */
3488 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3489 
3490 	/* PS and related bits will be programmed according to the speed */
3491 	if (priv->hw->pcs) {
3492 		int speed = priv->plat->mac_port_sel_speed;
3493 
3494 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3495 		    (speed == SPEED_1000)) {
3496 			priv->hw->ps = speed;
3497 		} else {
3498 			dev_warn(priv->device, "invalid port speed\n");
3499 			priv->hw->ps = 0;
3500 		}
3501 	}
3502 
3503 	/* Initialize the MAC Core */
3504 	stmmac_core_init(priv, priv->hw, dev);
3505 
3506 	/* Initialize MTL*/
3507 	stmmac_mtl_configuration(priv);
3508 
3509 	/* Initialize Safety Features */
3510 	stmmac_safety_feat_configuration(priv);
3511 
3512 	ret = stmmac_rx_ipc(priv, priv->hw);
3513 	if (!ret) {
3514 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3515 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3516 		priv->hw->rx_csum = 0;
3517 	}
3518 
3519 	/* Enable the MAC Rx/Tx */
3520 	stmmac_mac_set(priv, priv->ioaddr, true);
3521 
3522 	/* Set the HW DMA mode and the COE */
3523 	stmmac_dma_operation_mode(priv);
3524 
3525 	stmmac_mmc_setup(priv);
3526 
3527 	if (ptp_register) {
3528 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3529 		if (ret < 0)
3530 			netdev_warn(priv->dev,
3531 				    "failed to enable PTP reference clock: %pe\n",
3532 				    ERR_PTR(ret));
3533 	}
3534 
3535 	ret = stmmac_init_ptp(priv);
3536 	if (ret == -EOPNOTSUPP)
3537 		netdev_info(priv->dev, "PTP not supported by HW\n");
3538 	else if (ret)
3539 		netdev_warn(priv->dev, "PTP init failed\n");
3540 	else if (ptp_register)
3541 		stmmac_ptp_register(priv);
3542 
3543 	if (priv->use_riwt) {
3544 		u32 queue;
3545 
3546 		for (queue = 0; queue < rx_cnt; queue++) {
3547 			if (!priv->rx_riwt[queue])
3548 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3549 
3550 			stmmac_rx_watchdog(priv, priv->ioaddr,
3551 					   priv->rx_riwt[queue], queue);
3552 		}
3553 	}
3554 
3555 	if (priv->hw->pcs)
3556 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3557 
3558 	/* set TX and RX rings length */
3559 	stmmac_set_rings_length(priv);
3560 
3561 	/* Enable TSO */
3562 	if (priv->tso) {
3563 		for (chan = 0; chan < tx_cnt; chan++) {
3564 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3565 
3566 			/* TSO and TBS cannot co-exist */
3567 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3568 				continue;
3569 
3570 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3571 		}
3572 	}
3573 
3574 	/* Enable Split Header */
3575 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3576 	for (chan = 0; chan < rx_cnt; chan++)
3577 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3578 
3579 
3580 	/* VLAN Tag Insertion */
3581 	if (priv->dma_cap.vlins)
3582 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3583 
3584 	/* TBS */
3585 	for (chan = 0; chan < tx_cnt; chan++) {
3586 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3587 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3588 
3589 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3590 	}
3591 
3592 	/* Configure real RX and TX queues */
3593 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3594 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3595 
3596 	/* Start the ball rolling... */
3597 	stmmac_start_all_dma(priv);
3598 
3599 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3600 
3601 	return 0;
3602 }
3603 
3604 static void stmmac_hw_teardown(struct net_device *dev)
3605 {
3606 	struct stmmac_priv *priv = netdev_priv(dev);
3607 
3608 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3609 }
3610 
3611 static void stmmac_free_irq(struct net_device *dev,
3612 			    enum request_irq_err irq_err, int irq_idx)
3613 {
3614 	struct stmmac_priv *priv = netdev_priv(dev);
3615 	int j;
3616 
3617 	switch (irq_err) {
3618 	case REQ_IRQ_ERR_ALL:
3619 		irq_idx = priv->plat->tx_queues_to_use;
3620 		fallthrough;
3621 	case REQ_IRQ_ERR_TX:
3622 		for (j = irq_idx - 1; j >= 0; j--) {
3623 			if (priv->tx_irq[j] > 0) {
3624 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3625 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3626 			}
3627 		}
3628 		irq_idx = priv->plat->rx_queues_to_use;
3629 		fallthrough;
3630 	case REQ_IRQ_ERR_RX:
3631 		for (j = irq_idx - 1; j >= 0; j--) {
3632 			if (priv->rx_irq[j] > 0) {
3633 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3634 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3635 			}
3636 		}
3637 
3638 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3639 			free_irq(priv->sfty_ue_irq, dev);
3640 		fallthrough;
3641 	case REQ_IRQ_ERR_SFTY_UE:
3642 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3643 			free_irq(priv->sfty_ce_irq, dev);
3644 		fallthrough;
3645 	case REQ_IRQ_ERR_SFTY_CE:
3646 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3647 			free_irq(priv->lpi_irq, dev);
3648 		fallthrough;
3649 	case REQ_IRQ_ERR_LPI:
3650 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3651 			free_irq(priv->wol_irq, dev);
3652 		fallthrough;
3653 	case REQ_IRQ_ERR_SFTY:
3654 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3655 			free_irq(priv->sfty_irq, dev);
3656 		fallthrough;
3657 	case REQ_IRQ_ERR_WOL:
3658 		free_irq(dev->irq, dev);
3659 		fallthrough;
3660 	case REQ_IRQ_ERR_MAC:
3661 	case REQ_IRQ_ERR_NO:
3662 		/* If MAC IRQ request error, no more IRQ to free */
3663 		break;
3664 	}
3665 }
3666 
3667 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3668 {
3669 	struct stmmac_priv *priv = netdev_priv(dev);
3670 	enum request_irq_err irq_err;
3671 	cpumask_t cpu_mask;
3672 	int irq_idx = 0;
3673 	char *int_name;
3674 	int ret;
3675 	int i;
3676 
3677 	/* For common interrupt */
3678 	int_name = priv->int_name_mac;
3679 	sprintf(int_name, "%s:%s", dev->name, "mac");
3680 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3681 			  0, int_name, dev);
3682 	if (unlikely(ret < 0)) {
3683 		netdev_err(priv->dev,
3684 			   "%s: alloc mac MSI %d (error: %d)\n",
3685 			   __func__, dev->irq, ret);
3686 		irq_err = REQ_IRQ_ERR_MAC;
3687 		goto irq_error;
3688 	}
3689 
3690 	/* Request the Wake IRQ in case of another line
3691 	 * is used for WoL
3692 	 */
3693 	priv->wol_irq_disabled = true;
3694 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3695 		int_name = priv->int_name_wol;
3696 		sprintf(int_name, "%s:%s", dev->name, "wol");
3697 		ret = request_irq(priv->wol_irq,
3698 				  stmmac_mac_interrupt,
3699 				  0, int_name, dev);
3700 		if (unlikely(ret < 0)) {
3701 			netdev_err(priv->dev,
3702 				   "%s: alloc wol MSI %d (error: %d)\n",
3703 				   __func__, priv->wol_irq, ret);
3704 			irq_err = REQ_IRQ_ERR_WOL;
3705 			goto irq_error;
3706 		}
3707 	}
3708 
3709 	/* Request the LPI IRQ in case of another line
3710 	 * is used for LPI
3711 	 */
3712 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3713 		int_name = priv->int_name_lpi;
3714 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3715 		ret = request_irq(priv->lpi_irq,
3716 				  stmmac_mac_interrupt,
3717 				  0, int_name, dev);
3718 		if (unlikely(ret < 0)) {
3719 			netdev_err(priv->dev,
3720 				   "%s: alloc lpi MSI %d (error: %d)\n",
3721 				   __func__, priv->lpi_irq, ret);
3722 			irq_err = REQ_IRQ_ERR_LPI;
3723 			goto irq_error;
3724 		}
3725 	}
3726 
3727 	/* Request the common Safety Feature Correctible/Uncorrectible
3728 	 * Error line in case of another line is used
3729 	 */
3730 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3731 		int_name = priv->int_name_sfty;
3732 		sprintf(int_name, "%s:%s", dev->name, "safety");
3733 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3734 				  0, int_name, dev);
3735 		if (unlikely(ret < 0)) {
3736 			netdev_err(priv->dev,
3737 				   "%s: alloc sfty MSI %d (error: %d)\n",
3738 				   __func__, priv->sfty_irq, ret);
3739 			irq_err = REQ_IRQ_ERR_SFTY;
3740 			goto irq_error;
3741 		}
3742 	}
3743 
3744 	/* Request the Safety Feature Correctible Error line in
3745 	 * case of another line is used
3746 	 */
3747 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3748 		int_name = priv->int_name_sfty_ce;
3749 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3750 		ret = request_irq(priv->sfty_ce_irq,
3751 				  stmmac_safety_interrupt,
3752 				  0, int_name, dev);
3753 		if (unlikely(ret < 0)) {
3754 			netdev_err(priv->dev,
3755 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3756 				   __func__, priv->sfty_ce_irq, ret);
3757 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3758 			goto irq_error;
3759 		}
3760 	}
3761 
3762 	/* Request the Safety Feature Uncorrectible Error line in
3763 	 * case of another line is used
3764 	 */
3765 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3766 		int_name = priv->int_name_sfty_ue;
3767 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3768 		ret = request_irq(priv->sfty_ue_irq,
3769 				  stmmac_safety_interrupt,
3770 				  0, int_name, dev);
3771 		if (unlikely(ret < 0)) {
3772 			netdev_err(priv->dev,
3773 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3774 				   __func__, priv->sfty_ue_irq, ret);
3775 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3776 			goto irq_error;
3777 		}
3778 	}
3779 
3780 	/* Request Rx MSI irq */
3781 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3782 		if (i >= MTL_MAX_RX_QUEUES)
3783 			break;
3784 		if (priv->rx_irq[i] == 0)
3785 			continue;
3786 
3787 		int_name = priv->int_name_rx_irq[i];
3788 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3789 		ret = request_irq(priv->rx_irq[i],
3790 				  stmmac_msi_intr_rx,
3791 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3792 		if (unlikely(ret < 0)) {
3793 			netdev_err(priv->dev,
3794 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3795 				   __func__, i, priv->rx_irq[i], ret);
3796 			irq_err = REQ_IRQ_ERR_RX;
3797 			irq_idx = i;
3798 			goto irq_error;
3799 		}
3800 		cpumask_clear(&cpu_mask);
3801 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3802 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3803 	}
3804 
3805 	/* Request Tx MSI irq */
3806 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3807 		if (i >= MTL_MAX_TX_QUEUES)
3808 			break;
3809 		if (priv->tx_irq[i] == 0)
3810 			continue;
3811 
3812 		int_name = priv->int_name_tx_irq[i];
3813 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3814 		ret = request_irq(priv->tx_irq[i],
3815 				  stmmac_msi_intr_tx,
3816 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3817 		if (unlikely(ret < 0)) {
3818 			netdev_err(priv->dev,
3819 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3820 				   __func__, i, priv->tx_irq[i], ret);
3821 			irq_err = REQ_IRQ_ERR_TX;
3822 			irq_idx = i;
3823 			goto irq_error;
3824 		}
3825 		cpumask_clear(&cpu_mask);
3826 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3827 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3828 	}
3829 
3830 	return 0;
3831 
3832 irq_error:
3833 	stmmac_free_irq(dev, irq_err, irq_idx);
3834 	return ret;
3835 }
3836 
3837 static int stmmac_request_irq_single(struct net_device *dev)
3838 {
3839 	struct stmmac_priv *priv = netdev_priv(dev);
3840 	enum request_irq_err irq_err;
3841 	int ret;
3842 
3843 	ret = request_irq(dev->irq, stmmac_interrupt,
3844 			  IRQF_SHARED, dev->name, dev);
3845 	if (unlikely(ret < 0)) {
3846 		netdev_err(priv->dev,
3847 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3848 			   __func__, dev->irq, ret);
3849 		irq_err = REQ_IRQ_ERR_MAC;
3850 		goto irq_error;
3851 	}
3852 
3853 	/* Request the Wake IRQ in case of another line
3854 	 * is used for WoL
3855 	 */
3856 	priv->wol_irq_disabled = true;
3857 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3858 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3859 				  IRQF_SHARED, dev->name, dev);
3860 		if (unlikely(ret < 0)) {
3861 			netdev_err(priv->dev,
3862 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3863 				   __func__, priv->wol_irq, ret);
3864 			irq_err = REQ_IRQ_ERR_WOL;
3865 			goto irq_error;
3866 		}
3867 	}
3868 
3869 	/* Request the IRQ lines */
3870 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3871 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3872 				  IRQF_SHARED, dev->name, dev);
3873 		if (unlikely(ret < 0)) {
3874 			netdev_err(priv->dev,
3875 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3876 				   __func__, priv->lpi_irq, ret);
3877 			irq_err = REQ_IRQ_ERR_LPI;
3878 			goto irq_error;
3879 		}
3880 	}
3881 
3882 	/* Request the common Safety Feature Correctible/Uncorrectible
3883 	 * Error line in case of another line is used
3884 	 */
3885 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3886 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3887 				  IRQF_SHARED, dev->name, dev);
3888 		if (unlikely(ret < 0)) {
3889 			netdev_err(priv->dev,
3890 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3891 				   __func__, priv->sfty_irq, ret);
3892 			irq_err = REQ_IRQ_ERR_SFTY;
3893 			goto irq_error;
3894 		}
3895 	}
3896 
3897 	return 0;
3898 
3899 irq_error:
3900 	stmmac_free_irq(dev, irq_err, 0);
3901 	return ret;
3902 }
3903 
3904 static int stmmac_request_irq(struct net_device *dev)
3905 {
3906 	struct stmmac_priv *priv = netdev_priv(dev);
3907 	int ret;
3908 
3909 	/* Request the IRQ lines */
3910 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3911 		ret = stmmac_request_irq_multi_msi(dev);
3912 	else
3913 		ret = stmmac_request_irq_single(dev);
3914 
3915 	return ret;
3916 }
3917 
3918 /**
3919  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3920  *  @priv: driver private structure
3921  *  @mtu: MTU to setup the dma queue and buf with
3922  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3923  *  Allocate the Tx/Rx DMA queue and init them.
3924  *  Return value:
3925  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3926  */
3927 static struct stmmac_dma_conf *
3928 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3929 {
3930 	struct stmmac_dma_conf *dma_conf;
3931 	int chan, bfsize, ret;
3932 
3933 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3934 	if (!dma_conf) {
3935 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3936 			   __func__);
3937 		return ERR_PTR(-ENOMEM);
3938 	}
3939 
3940 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3941 	if (bfsize < 0)
3942 		bfsize = 0;
3943 
3944 	if (bfsize < BUF_SIZE_16KiB)
3945 		bfsize = stmmac_set_bfsize(mtu, 0);
3946 
3947 	dma_conf->dma_buf_sz = bfsize;
3948 	/* Chose the tx/rx size from the already defined one in the
3949 	 * priv struct. (if defined)
3950 	 */
3951 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3952 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3953 
3954 	if (!dma_conf->dma_tx_size)
3955 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3956 	if (!dma_conf->dma_rx_size)
3957 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3958 
3959 	/* Earlier check for TBS */
3960 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3961 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3962 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3963 
3964 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3965 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3966 	}
3967 
3968 	ret = alloc_dma_desc_resources(priv, dma_conf);
3969 	if (ret < 0) {
3970 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3971 			   __func__);
3972 		goto alloc_error;
3973 	}
3974 
3975 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3976 	if (ret < 0) {
3977 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3978 			   __func__);
3979 		goto init_error;
3980 	}
3981 
3982 	return dma_conf;
3983 
3984 init_error:
3985 	free_dma_desc_resources(priv, dma_conf);
3986 alloc_error:
3987 	kfree(dma_conf);
3988 	return ERR_PTR(ret);
3989 }
3990 
3991 /**
3992  *  __stmmac_open - open entry point of the driver
3993  *  @dev : pointer to the device structure.
3994  *  @dma_conf :  structure to take the dma data
3995  *  Description:
3996  *  This function is the open entry point of the driver.
3997  *  Return value:
3998  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3999  *  file on failure.
4000  */
4001 static int __stmmac_open(struct net_device *dev,
4002 			 struct stmmac_dma_conf *dma_conf)
4003 {
4004 	struct stmmac_priv *priv = netdev_priv(dev);
4005 	int mode = priv->plat->phy_interface;
4006 	u32 chan;
4007 	int ret;
4008 
4009 	/* Initialise the tx lpi timer, converting from msec to usec */
4010 	if (!priv->tx_lpi_timer)
4011 		priv->tx_lpi_timer = eee_timer * 1000;
4012 
4013 	ret = pm_runtime_resume_and_get(priv->device);
4014 	if (ret < 0)
4015 		return ret;
4016 
4017 	if ((!priv->hw->xpcs ||
4018 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
4019 		ret = stmmac_init_phy(dev);
4020 		if (ret) {
4021 			netdev_err(priv->dev,
4022 				   "%s: Cannot attach to PHY (error: %d)\n",
4023 				   __func__, ret);
4024 			goto init_phy_error;
4025 		}
4026 	}
4027 
4028 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
4029 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
4030 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
4031 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4032 
4033 	stmmac_reset_queues_param(priv);
4034 
4035 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
4036 	    priv->plat->serdes_powerup) {
4037 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4038 		if (ret < 0) {
4039 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4040 				   __func__);
4041 			goto init_error;
4042 		}
4043 	}
4044 
4045 	ret = stmmac_hw_setup(dev, true);
4046 	if (ret < 0) {
4047 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4048 		goto init_error;
4049 	}
4050 
4051 	stmmac_init_coalesce(priv);
4052 
4053 	phylink_start(priv->phylink);
4054 	/* We may have called phylink_speed_down before */
4055 	phylink_speed_up(priv->phylink);
4056 
4057 	ret = stmmac_request_irq(dev);
4058 	if (ret)
4059 		goto irq_error;
4060 
4061 	stmmac_enable_all_queues(priv);
4062 	netif_tx_start_all_queues(priv->dev);
4063 	stmmac_enable_all_dma_irq(priv);
4064 
4065 	return 0;
4066 
4067 irq_error:
4068 	phylink_stop(priv->phylink);
4069 
4070 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4071 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4072 
4073 	stmmac_hw_teardown(dev);
4074 init_error:
4075 	phylink_disconnect_phy(priv->phylink);
4076 init_phy_error:
4077 	pm_runtime_put(priv->device);
4078 	return ret;
4079 }
4080 
4081 static int stmmac_open(struct net_device *dev)
4082 {
4083 	struct stmmac_priv *priv = netdev_priv(dev);
4084 	struct stmmac_dma_conf *dma_conf;
4085 	int ret;
4086 
4087 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4088 	if (IS_ERR(dma_conf))
4089 		return PTR_ERR(dma_conf);
4090 
4091 	ret = __stmmac_open(dev, dma_conf);
4092 	if (ret)
4093 		free_dma_desc_resources(priv, dma_conf);
4094 
4095 	kfree(dma_conf);
4096 	return ret;
4097 }
4098 
4099 /**
4100  *  stmmac_release - close entry point of the driver
4101  *  @dev : device pointer.
4102  *  Description:
4103  *  This is the stop entry point of the driver.
4104  */
4105 static int stmmac_release(struct net_device *dev)
4106 {
4107 	struct stmmac_priv *priv = netdev_priv(dev);
4108 	u32 chan;
4109 
4110 	if (device_may_wakeup(priv->device))
4111 		phylink_speed_down(priv->phylink, false);
4112 	/* Stop and disconnect the PHY */
4113 	phylink_stop(priv->phylink);
4114 	phylink_disconnect_phy(priv->phylink);
4115 
4116 	stmmac_disable_all_queues(priv);
4117 
4118 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4119 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4120 
4121 	netif_tx_disable(dev);
4122 
4123 	/* Free the IRQ lines */
4124 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4125 
4126 	/* Stop TX/RX DMA and clear the descriptors */
4127 	stmmac_stop_all_dma(priv);
4128 
4129 	/* Release and free the Rx/Tx resources */
4130 	free_dma_desc_resources(priv, &priv->dma_conf);
4131 
4132 	/* Powerdown Serdes if there is */
4133 	if (priv->plat->serdes_powerdown)
4134 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4135 
4136 	stmmac_release_ptp(priv);
4137 
4138 	if (stmmac_fpe_supported(priv))
4139 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4140 
4141 	pm_runtime_put(priv->device);
4142 
4143 	return 0;
4144 }
4145 
4146 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4147 			       struct stmmac_tx_queue *tx_q)
4148 {
4149 	u16 tag = 0x0, inner_tag = 0x0;
4150 	u32 inner_type = 0x0;
4151 	struct dma_desc *p;
4152 
4153 	if (!priv->dma_cap.vlins)
4154 		return false;
4155 	if (!skb_vlan_tag_present(skb))
4156 		return false;
4157 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4158 		inner_tag = skb_vlan_tag_get(skb);
4159 		inner_type = STMMAC_VLAN_INSERT;
4160 	}
4161 
4162 	tag = skb_vlan_tag_get(skb);
4163 
4164 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4165 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4166 	else
4167 		p = &tx_q->dma_tx[tx_q->cur_tx];
4168 
4169 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4170 		return false;
4171 
4172 	stmmac_set_tx_owner(priv, p);
4173 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4174 	return true;
4175 }
4176 
4177 /**
4178  *  stmmac_tso_allocator - close entry point of the driver
4179  *  @priv: driver private structure
4180  *  @des: buffer start address
4181  *  @total_len: total length to fill in descriptors
4182  *  @last_segment: condition for the last descriptor
4183  *  @queue: TX queue index
4184  *  Description:
4185  *  This function fills descriptor and request new descriptors according to
4186  *  buffer length to fill
4187  */
4188 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4189 				 int total_len, bool last_segment, u32 queue)
4190 {
4191 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4192 	struct dma_desc *desc;
4193 	u32 buff_size;
4194 	int tmp_len;
4195 
4196 	tmp_len = total_len;
4197 
4198 	while (tmp_len > 0) {
4199 		dma_addr_t curr_addr;
4200 
4201 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4202 						priv->dma_conf.dma_tx_size);
4203 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4204 
4205 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4206 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4207 		else
4208 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4209 
4210 		curr_addr = des + (total_len - tmp_len);
4211 		stmmac_set_desc_addr(priv, desc, curr_addr);
4212 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4213 			    TSO_MAX_BUFF_SIZE : tmp_len;
4214 
4215 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4216 				0, 1,
4217 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4218 				0, 0);
4219 
4220 		tmp_len -= TSO_MAX_BUFF_SIZE;
4221 	}
4222 }
4223 
4224 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4225 {
4226 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4227 	int desc_size;
4228 
4229 	if (likely(priv->extend_desc))
4230 		desc_size = sizeof(struct dma_extended_desc);
4231 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4232 		desc_size = sizeof(struct dma_edesc);
4233 	else
4234 		desc_size = sizeof(struct dma_desc);
4235 
4236 	/* The own bit must be the latest setting done when prepare the
4237 	 * descriptor and then barrier is needed to make sure that
4238 	 * all is coherent before granting the DMA engine.
4239 	 */
4240 	wmb();
4241 
4242 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4243 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4244 }
4245 
4246 /**
4247  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4248  *  @skb : the socket buffer
4249  *  @dev : device pointer
4250  *  Description: this is the transmit function that is called on TSO frames
4251  *  (support available on GMAC4 and newer chips).
4252  *  Diagram below show the ring programming in case of TSO frames:
4253  *
4254  *  First Descriptor
4255  *   --------
4256  *   | DES0 |---> buffer1 = L2/L3/L4 header
4257  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4258  *   |      |     width is 32-bit, but we never use it.
4259  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4260  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4261  *   |      |     or 48-bit, and we always use it.
4262  *   | DES2 |---> buffer1 len
4263  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4264  *   --------
4265  *   --------
4266  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4267  *   | DES1 |---> same as the First Descriptor
4268  *   | DES2 |---> buffer1 len
4269  *   | DES3 |
4270  *   --------
4271  *	|
4272  *     ...
4273  *	|
4274  *   --------
4275  *   | DES0 |---> buffer1 = Split TCP Payload
4276  *   | DES1 |---> same as the First Descriptor
4277  *   | DES2 |---> buffer1 len
4278  *   | DES3 |
4279  *   --------
4280  *
4281  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4282  */
4283 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4284 {
4285 	struct dma_desc *desc, *first, *mss_desc = NULL;
4286 	struct stmmac_priv *priv = netdev_priv(dev);
4287 	unsigned int first_entry, tx_packets;
4288 	struct stmmac_txq_stats *txq_stats;
4289 	struct stmmac_tx_queue *tx_q;
4290 	u32 pay_len, mss, queue;
4291 	int i, first_tx, nfrags;
4292 	u8 proto_hdr_len, hdr;
4293 	dma_addr_t des;
4294 	bool set_ic;
4295 
4296 	/* Always insert VLAN tag to SKB payload for TSO frames.
4297 	 *
4298 	 * Never insert VLAN tag by HW, since segments splited by
4299 	 * TSO engine will be un-tagged by mistake.
4300 	 */
4301 	if (skb_vlan_tag_present(skb)) {
4302 		skb = __vlan_hwaccel_push_inside(skb);
4303 		if (unlikely(!skb)) {
4304 			priv->xstats.tx_dropped++;
4305 			return NETDEV_TX_OK;
4306 		}
4307 	}
4308 
4309 	nfrags = skb_shinfo(skb)->nr_frags;
4310 	queue = skb_get_queue_mapping(skb);
4311 
4312 	tx_q = &priv->dma_conf.tx_queue[queue];
4313 	txq_stats = &priv->xstats.txq_stats[queue];
4314 	first_tx = tx_q->cur_tx;
4315 
4316 	/* Compute header lengths */
4317 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4318 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4319 		hdr = sizeof(struct udphdr);
4320 	} else {
4321 		proto_hdr_len = skb_tcp_all_headers(skb);
4322 		hdr = tcp_hdrlen(skb);
4323 	}
4324 
4325 	/* Desc availability based on threshold should be enough safe */
4326 	if (unlikely(stmmac_tx_avail(priv, queue) <
4327 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4328 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4329 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4330 								queue));
4331 			/* This is a hard error, log it. */
4332 			netdev_err(priv->dev,
4333 				   "%s: Tx Ring full when queue awake\n",
4334 				   __func__);
4335 		}
4336 		return NETDEV_TX_BUSY;
4337 	}
4338 
4339 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4340 
4341 	mss = skb_shinfo(skb)->gso_size;
4342 
4343 	/* set new MSS value if needed */
4344 	if (mss != tx_q->mss) {
4345 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4346 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4347 		else
4348 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4349 
4350 		stmmac_set_mss(priv, mss_desc, mss);
4351 		tx_q->mss = mss;
4352 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4353 						priv->dma_conf.dma_tx_size);
4354 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4355 	}
4356 
4357 	if (netif_msg_tx_queued(priv)) {
4358 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4359 			__func__, hdr, proto_hdr_len, pay_len, mss);
4360 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4361 			skb->data_len);
4362 	}
4363 
4364 	first_entry = tx_q->cur_tx;
4365 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4366 
4367 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4368 		desc = &tx_q->dma_entx[first_entry].basic;
4369 	else
4370 		desc = &tx_q->dma_tx[first_entry];
4371 	first = desc;
4372 
4373 	/* first descriptor: fill Headers on Buf1 */
4374 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4375 			     DMA_TO_DEVICE);
4376 	if (dma_mapping_error(priv->device, des))
4377 		goto dma_map_err;
4378 
4379 	stmmac_set_desc_addr(priv, first, des);
4380 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4381 			     (nfrags == 0), queue);
4382 
4383 	/* In case two or more DMA transmit descriptors are allocated for this
4384 	 * non-paged SKB data, the DMA buffer address should be saved to
4385 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4386 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4387 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4388 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4389 	 * sooner or later.
4390 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4391 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4392 	 * this DMA buffer right after the DMA engine completely finishes the
4393 	 * full buffer transmission.
4394 	 */
4395 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4396 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4397 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4398 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4399 
4400 	/* Prepare fragments */
4401 	for (i = 0; i < nfrags; i++) {
4402 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4403 
4404 		des = skb_frag_dma_map(priv->device, frag, 0,
4405 				       skb_frag_size(frag),
4406 				       DMA_TO_DEVICE);
4407 		if (dma_mapping_error(priv->device, des))
4408 			goto dma_map_err;
4409 
4410 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4411 				     (i == nfrags - 1), queue);
4412 
4413 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4414 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4415 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4416 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4417 	}
4418 
4419 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4420 
4421 	/* Only the last descriptor gets to point to the skb. */
4422 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4423 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4424 
4425 	/* Manage tx mitigation */
4426 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4427 	tx_q->tx_count_frames += tx_packets;
4428 
4429 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4430 		set_ic = true;
4431 	else if (!priv->tx_coal_frames[queue])
4432 		set_ic = false;
4433 	else if (tx_packets > priv->tx_coal_frames[queue])
4434 		set_ic = true;
4435 	else if ((tx_q->tx_count_frames %
4436 		  priv->tx_coal_frames[queue]) < tx_packets)
4437 		set_ic = true;
4438 	else
4439 		set_ic = false;
4440 
4441 	if (set_ic) {
4442 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4443 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4444 		else
4445 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4446 
4447 		tx_q->tx_count_frames = 0;
4448 		stmmac_set_tx_ic(priv, desc);
4449 	}
4450 
4451 	/* We've used all descriptors we need for this skb, however,
4452 	 * advance cur_tx so that it references a fresh descriptor.
4453 	 * ndo_start_xmit will fill this descriptor the next time it's
4454 	 * called and stmmac_tx_clean may clean up to this descriptor.
4455 	 */
4456 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4457 
4458 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4459 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4460 			  __func__);
4461 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4462 	}
4463 
4464 	u64_stats_update_begin(&txq_stats->q_syncp);
4465 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4466 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4467 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4468 	if (set_ic)
4469 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4470 	u64_stats_update_end(&txq_stats->q_syncp);
4471 
4472 	if (priv->sarc_type)
4473 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4474 
4475 	skb_tx_timestamp(skb);
4476 
4477 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4478 		     priv->hwts_tx_en)) {
4479 		/* declare that device is doing timestamping */
4480 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4481 		stmmac_enable_tx_timestamp(priv, first);
4482 	}
4483 
4484 	/* Complete the first descriptor before granting the DMA */
4485 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4486 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4487 				   hdr / 4, (skb->len - proto_hdr_len));
4488 
4489 	/* If context desc is used to change MSS */
4490 	if (mss_desc) {
4491 		/* Make sure that first descriptor has been completely
4492 		 * written, including its own bit. This is because MSS is
4493 		 * actually before first descriptor, so we need to make
4494 		 * sure that MSS's own bit is the last thing written.
4495 		 */
4496 		dma_wmb();
4497 		stmmac_set_tx_owner(priv, mss_desc);
4498 	}
4499 
4500 	if (netif_msg_pktdata(priv)) {
4501 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4502 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4503 			tx_q->cur_tx, first, nfrags);
4504 		pr_info(">>> frame to be transmitted: ");
4505 		print_pkt(skb->data, skb_headlen(skb));
4506 	}
4507 
4508 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4509 
4510 	stmmac_flush_tx_descriptors(priv, queue);
4511 	stmmac_tx_timer_arm(priv, queue);
4512 
4513 	return NETDEV_TX_OK;
4514 
4515 dma_map_err:
4516 	dev_err(priv->device, "Tx dma map failed\n");
4517 	dev_kfree_skb(skb);
4518 	priv->xstats.tx_dropped++;
4519 	return NETDEV_TX_OK;
4520 }
4521 
4522 /**
4523  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4524  * @skb: socket buffer to check
4525  *
4526  * Check if a packet has an ethertype that will trigger the IP header checks
4527  * and IP/TCP checksum engine of the stmmac core.
4528  *
4529  * Return: true if the ethertype can trigger the checksum engine, false
4530  * otherwise
4531  */
4532 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4533 {
4534 	int depth = 0;
4535 	__be16 proto;
4536 
4537 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4538 				    &depth);
4539 
4540 	return (depth <= ETH_HLEN) &&
4541 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4542 }
4543 
4544 /**
4545  *  stmmac_xmit - Tx entry point of the driver
4546  *  @skb : the socket buffer
4547  *  @dev : device pointer
4548  *  Description : this is the tx entry point of the driver.
4549  *  It programs the chain or the ring and supports oversized frames
4550  *  and SG feature.
4551  */
4552 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4553 {
4554 	unsigned int first_entry, tx_packets, enh_desc;
4555 	struct stmmac_priv *priv = netdev_priv(dev);
4556 	unsigned int nopaged_len = skb_headlen(skb);
4557 	int i, csum_insertion = 0, is_jumbo = 0;
4558 	u32 queue = skb_get_queue_mapping(skb);
4559 	int nfrags = skb_shinfo(skb)->nr_frags;
4560 	int gso = skb_shinfo(skb)->gso_type;
4561 	struct stmmac_txq_stats *txq_stats;
4562 	struct dma_edesc *tbs_desc = NULL;
4563 	struct dma_desc *desc, *first;
4564 	struct stmmac_tx_queue *tx_q;
4565 	bool has_vlan, set_ic;
4566 	int entry, first_tx;
4567 	dma_addr_t des;
4568 
4569 	tx_q = &priv->dma_conf.tx_queue[queue];
4570 	txq_stats = &priv->xstats.txq_stats[queue];
4571 	first_tx = tx_q->cur_tx;
4572 
4573 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4574 		stmmac_stop_sw_lpi(priv);
4575 
4576 	/* Manage oversized TCP frames for GMAC4 device */
4577 	if (skb_is_gso(skb) && priv->tso) {
4578 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4579 			return stmmac_tso_xmit(skb, dev);
4580 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4581 			return stmmac_tso_xmit(skb, dev);
4582 	}
4583 
4584 	if (priv->est && priv->est->enable &&
4585 	    priv->est->max_sdu[queue] &&
4586 	    skb->len > priv->est->max_sdu[queue]){
4587 		priv->xstats.max_sdu_txq_drop[queue]++;
4588 		goto max_sdu_err;
4589 	}
4590 
4591 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4592 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4593 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4594 								queue));
4595 			/* This is a hard error, log it. */
4596 			netdev_err(priv->dev,
4597 				   "%s: Tx Ring full when queue awake\n",
4598 				   __func__);
4599 		}
4600 		return NETDEV_TX_BUSY;
4601 	}
4602 
4603 	/* Check if VLAN can be inserted by HW */
4604 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4605 
4606 	entry = tx_q->cur_tx;
4607 	first_entry = entry;
4608 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4609 
4610 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4611 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4612 	 * queues. In that case, checksum offloading for those queues that don't
4613 	 * support tx coe needs to fallback to software checksum calculation.
4614 	 *
4615 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4616 	 * also have to be checksummed in software.
4617 	 */
4618 	if (csum_insertion &&
4619 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4620 	     !stmmac_has_ip_ethertype(skb))) {
4621 		if (unlikely(skb_checksum_help(skb)))
4622 			goto dma_map_err;
4623 		csum_insertion = !csum_insertion;
4624 	}
4625 
4626 	if (likely(priv->extend_desc))
4627 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4628 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4629 		desc = &tx_q->dma_entx[entry].basic;
4630 	else
4631 		desc = tx_q->dma_tx + entry;
4632 
4633 	first = desc;
4634 
4635 	if (has_vlan)
4636 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4637 
4638 	enh_desc = priv->plat->enh_desc;
4639 	/* To program the descriptors according to the size of the frame */
4640 	if (enh_desc)
4641 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4642 
4643 	if (unlikely(is_jumbo)) {
4644 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4645 		if (unlikely(entry < 0) && (entry != -EINVAL))
4646 			goto dma_map_err;
4647 	}
4648 
4649 	for (i = 0; i < nfrags; i++) {
4650 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4651 		int len = skb_frag_size(frag);
4652 		bool last_segment = (i == (nfrags - 1));
4653 
4654 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4655 		WARN_ON(tx_q->tx_skbuff[entry]);
4656 
4657 		if (likely(priv->extend_desc))
4658 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4659 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4660 			desc = &tx_q->dma_entx[entry].basic;
4661 		else
4662 			desc = tx_q->dma_tx + entry;
4663 
4664 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4665 				       DMA_TO_DEVICE);
4666 		if (dma_mapping_error(priv->device, des))
4667 			goto dma_map_err; /* should reuse desc w/o issues */
4668 
4669 		tx_q->tx_skbuff_dma[entry].buf = des;
4670 
4671 		stmmac_set_desc_addr(priv, desc, des);
4672 
4673 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4674 		tx_q->tx_skbuff_dma[entry].len = len;
4675 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4676 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4677 
4678 		/* Prepare the descriptor and set the own bit too */
4679 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4680 				priv->mode, 1, last_segment, skb->len);
4681 	}
4682 
4683 	/* Only the last descriptor gets to point to the skb. */
4684 	tx_q->tx_skbuff[entry] = skb;
4685 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4686 
4687 	/* According to the coalesce parameter the IC bit for the latest
4688 	 * segment is reset and the timer re-started to clean the tx status.
4689 	 * This approach takes care about the fragments: desc is the first
4690 	 * element in case of no SG.
4691 	 */
4692 	tx_packets = (entry + 1) - first_tx;
4693 	tx_q->tx_count_frames += tx_packets;
4694 
4695 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4696 		set_ic = true;
4697 	else if (!priv->tx_coal_frames[queue])
4698 		set_ic = false;
4699 	else if (tx_packets > priv->tx_coal_frames[queue])
4700 		set_ic = true;
4701 	else if ((tx_q->tx_count_frames %
4702 		  priv->tx_coal_frames[queue]) < tx_packets)
4703 		set_ic = true;
4704 	else
4705 		set_ic = false;
4706 
4707 	if (set_ic) {
4708 		if (likely(priv->extend_desc))
4709 			desc = &tx_q->dma_etx[entry].basic;
4710 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4711 			desc = &tx_q->dma_entx[entry].basic;
4712 		else
4713 			desc = &tx_q->dma_tx[entry];
4714 
4715 		tx_q->tx_count_frames = 0;
4716 		stmmac_set_tx_ic(priv, desc);
4717 	}
4718 
4719 	/* We've used all descriptors we need for this skb, however,
4720 	 * advance cur_tx so that it references a fresh descriptor.
4721 	 * ndo_start_xmit will fill this descriptor the next time it's
4722 	 * called and stmmac_tx_clean may clean up to this descriptor.
4723 	 */
4724 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4725 	tx_q->cur_tx = entry;
4726 
4727 	if (netif_msg_pktdata(priv)) {
4728 		netdev_dbg(priv->dev,
4729 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4730 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4731 			   entry, first, nfrags);
4732 
4733 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4734 		print_pkt(skb->data, skb->len);
4735 	}
4736 
4737 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4738 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4739 			  __func__);
4740 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4741 	}
4742 
4743 	u64_stats_update_begin(&txq_stats->q_syncp);
4744 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4745 	if (set_ic)
4746 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4747 	u64_stats_update_end(&txq_stats->q_syncp);
4748 
4749 	if (priv->sarc_type)
4750 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4751 
4752 	skb_tx_timestamp(skb);
4753 
4754 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4755 	 * problems because all the descriptors are actually ready to be
4756 	 * passed to the DMA engine.
4757 	 */
4758 	if (likely(!is_jumbo)) {
4759 		bool last_segment = (nfrags == 0);
4760 
4761 		des = dma_map_single(priv->device, skb->data,
4762 				     nopaged_len, DMA_TO_DEVICE);
4763 		if (dma_mapping_error(priv->device, des))
4764 			goto dma_map_err;
4765 
4766 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4767 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4768 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4769 
4770 		stmmac_set_desc_addr(priv, first, des);
4771 
4772 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4773 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4774 
4775 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4776 			     priv->hwts_tx_en)) {
4777 			/* declare that device is doing timestamping */
4778 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4779 			stmmac_enable_tx_timestamp(priv, first);
4780 		}
4781 
4782 		/* Prepare the first descriptor setting the OWN bit too */
4783 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4784 				csum_insertion, priv->mode, 0, last_segment,
4785 				skb->len);
4786 	}
4787 
4788 	if (tx_q->tbs & STMMAC_TBS_EN) {
4789 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4790 
4791 		tbs_desc = &tx_q->dma_entx[first_entry];
4792 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4793 	}
4794 
4795 	stmmac_set_tx_owner(priv, first);
4796 
4797 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4798 
4799 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4800 
4801 	stmmac_flush_tx_descriptors(priv, queue);
4802 	stmmac_tx_timer_arm(priv, queue);
4803 
4804 	return NETDEV_TX_OK;
4805 
4806 dma_map_err:
4807 	netdev_err(priv->dev, "Tx DMA map failed\n");
4808 max_sdu_err:
4809 	dev_kfree_skb(skb);
4810 	priv->xstats.tx_dropped++;
4811 	return NETDEV_TX_OK;
4812 }
4813 
4814 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4815 {
4816 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4817 	__be16 vlan_proto = veth->h_vlan_proto;
4818 	u16 vlanid;
4819 
4820 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4821 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4822 	    (vlan_proto == htons(ETH_P_8021AD) &&
4823 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4824 		/* pop the vlan tag */
4825 		vlanid = ntohs(veth->h_vlan_TCI);
4826 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4827 		skb_pull(skb, VLAN_HLEN);
4828 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4829 	}
4830 }
4831 
4832 /**
4833  * stmmac_rx_refill - refill used skb preallocated buffers
4834  * @priv: driver private structure
4835  * @queue: RX queue index
4836  * Description : this is to reallocate the skb for the reception process
4837  * that is based on zero-copy.
4838  */
4839 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4840 {
4841 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4842 	int dirty = stmmac_rx_dirty(priv, queue);
4843 	unsigned int entry = rx_q->dirty_rx;
4844 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4845 
4846 	if (priv->dma_cap.host_dma_width <= 32)
4847 		gfp |= GFP_DMA32;
4848 
4849 	while (dirty-- > 0) {
4850 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4851 		struct dma_desc *p;
4852 		bool use_rx_wd;
4853 
4854 		if (priv->extend_desc)
4855 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4856 		else
4857 			p = rx_q->dma_rx + entry;
4858 
4859 		if (!buf->page) {
4860 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4861 			if (!buf->page)
4862 				break;
4863 		}
4864 
4865 		if (priv->sph && !buf->sec_page) {
4866 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4867 			if (!buf->sec_page)
4868 				break;
4869 
4870 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4871 		}
4872 
4873 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4874 
4875 		stmmac_set_desc_addr(priv, p, buf->addr);
4876 		if (priv->sph)
4877 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4878 		else
4879 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4880 		stmmac_refill_desc3(priv, rx_q, p);
4881 
4882 		rx_q->rx_count_frames++;
4883 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4884 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4885 			rx_q->rx_count_frames = 0;
4886 
4887 		use_rx_wd = !priv->rx_coal_frames[queue];
4888 		use_rx_wd |= rx_q->rx_count_frames > 0;
4889 		if (!priv->use_riwt)
4890 			use_rx_wd = false;
4891 
4892 		dma_wmb();
4893 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4894 
4895 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4896 	}
4897 	rx_q->dirty_rx = entry;
4898 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4899 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4900 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4901 }
4902 
4903 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4904 				       struct dma_desc *p,
4905 				       int status, unsigned int len)
4906 {
4907 	unsigned int plen = 0, hlen = 0;
4908 	int coe = priv->hw->rx_csum;
4909 
4910 	/* Not first descriptor, buffer is always zero */
4911 	if (priv->sph && len)
4912 		return 0;
4913 
4914 	/* First descriptor, get split header length */
4915 	stmmac_get_rx_header_len(priv, p, &hlen);
4916 	if (priv->sph && hlen) {
4917 		priv->xstats.rx_split_hdr_pkt_n++;
4918 		return hlen;
4919 	}
4920 
4921 	/* First descriptor, not last descriptor and not split header */
4922 	if (status & rx_not_ls)
4923 		return priv->dma_conf.dma_buf_sz;
4924 
4925 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4926 
4927 	/* First descriptor and last descriptor and not split header */
4928 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4929 }
4930 
4931 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4932 				       struct dma_desc *p,
4933 				       int status, unsigned int len)
4934 {
4935 	int coe = priv->hw->rx_csum;
4936 	unsigned int plen = 0;
4937 
4938 	/* Not split header, buffer is not available */
4939 	if (!priv->sph)
4940 		return 0;
4941 
4942 	/* Not last descriptor */
4943 	if (status & rx_not_ls)
4944 		return priv->dma_conf.dma_buf_sz;
4945 
4946 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4947 
4948 	/* Last descriptor */
4949 	return plen - len;
4950 }
4951 
4952 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4953 				struct xdp_frame *xdpf, bool dma_map)
4954 {
4955 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4956 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4957 	unsigned int entry = tx_q->cur_tx;
4958 	struct dma_desc *tx_desc;
4959 	dma_addr_t dma_addr;
4960 	bool set_ic;
4961 
4962 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4963 		return STMMAC_XDP_CONSUMED;
4964 
4965 	if (priv->est && priv->est->enable &&
4966 	    priv->est->max_sdu[queue] &&
4967 	    xdpf->len > priv->est->max_sdu[queue]) {
4968 		priv->xstats.max_sdu_txq_drop[queue]++;
4969 		return STMMAC_XDP_CONSUMED;
4970 	}
4971 
4972 	if (likely(priv->extend_desc))
4973 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4974 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4975 		tx_desc = &tx_q->dma_entx[entry].basic;
4976 	else
4977 		tx_desc = tx_q->dma_tx + entry;
4978 
4979 	if (dma_map) {
4980 		dma_addr = dma_map_single(priv->device, xdpf->data,
4981 					  xdpf->len, DMA_TO_DEVICE);
4982 		if (dma_mapping_error(priv->device, dma_addr))
4983 			return STMMAC_XDP_CONSUMED;
4984 
4985 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4986 	} else {
4987 		struct page *page = virt_to_page(xdpf->data);
4988 
4989 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4990 			   xdpf->headroom;
4991 		dma_sync_single_for_device(priv->device, dma_addr,
4992 					   xdpf->len, DMA_BIDIRECTIONAL);
4993 
4994 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4995 	}
4996 
4997 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4998 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4999 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
5000 	tx_q->tx_skbuff_dma[entry].last_segment = true;
5001 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
5002 
5003 	tx_q->xdpf[entry] = xdpf;
5004 
5005 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
5006 
5007 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
5008 			       true, priv->mode, true, true,
5009 			       xdpf->len);
5010 
5011 	tx_q->tx_count_frames++;
5012 
5013 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
5014 		set_ic = true;
5015 	else
5016 		set_ic = false;
5017 
5018 	if (set_ic) {
5019 		tx_q->tx_count_frames = 0;
5020 		stmmac_set_tx_ic(priv, tx_desc);
5021 		u64_stats_update_begin(&txq_stats->q_syncp);
5022 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
5023 		u64_stats_update_end(&txq_stats->q_syncp);
5024 	}
5025 
5026 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
5027 
5028 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5029 	tx_q->cur_tx = entry;
5030 
5031 	return STMMAC_XDP_TX;
5032 }
5033 
5034 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5035 				   int cpu)
5036 {
5037 	int index = cpu;
5038 
5039 	if (unlikely(index < 0))
5040 		index = 0;
5041 
5042 	while (index >= priv->plat->tx_queues_to_use)
5043 		index -= priv->plat->tx_queues_to_use;
5044 
5045 	return index;
5046 }
5047 
5048 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5049 				struct xdp_buff *xdp)
5050 {
5051 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5052 	int cpu = smp_processor_id();
5053 	struct netdev_queue *nq;
5054 	int queue;
5055 	int res;
5056 
5057 	if (unlikely(!xdpf))
5058 		return STMMAC_XDP_CONSUMED;
5059 
5060 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5061 	nq = netdev_get_tx_queue(priv->dev, queue);
5062 
5063 	__netif_tx_lock(nq, cpu);
5064 	/* Avoids TX time-out as we are sharing with slow path */
5065 	txq_trans_cond_update(nq);
5066 
5067 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5068 	if (res == STMMAC_XDP_TX)
5069 		stmmac_flush_tx_descriptors(priv, queue);
5070 
5071 	__netif_tx_unlock(nq);
5072 
5073 	return res;
5074 }
5075 
5076 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5077 				 struct bpf_prog *prog,
5078 				 struct xdp_buff *xdp)
5079 {
5080 	u32 act;
5081 	int res;
5082 
5083 	act = bpf_prog_run_xdp(prog, xdp);
5084 	switch (act) {
5085 	case XDP_PASS:
5086 		res = STMMAC_XDP_PASS;
5087 		break;
5088 	case XDP_TX:
5089 		res = stmmac_xdp_xmit_back(priv, xdp);
5090 		break;
5091 	case XDP_REDIRECT:
5092 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5093 			res = STMMAC_XDP_CONSUMED;
5094 		else
5095 			res = STMMAC_XDP_REDIRECT;
5096 		break;
5097 	default:
5098 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5099 		fallthrough;
5100 	case XDP_ABORTED:
5101 		trace_xdp_exception(priv->dev, prog, act);
5102 		fallthrough;
5103 	case XDP_DROP:
5104 		res = STMMAC_XDP_CONSUMED;
5105 		break;
5106 	}
5107 
5108 	return res;
5109 }
5110 
5111 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5112 					   struct xdp_buff *xdp)
5113 {
5114 	struct bpf_prog *prog;
5115 	int res;
5116 
5117 	prog = READ_ONCE(priv->xdp_prog);
5118 	if (!prog) {
5119 		res = STMMAC_XDP_PASS;
5120 		goto out;
5121 	}
5122 
5123 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5124 out:
5125 	return ERR_PTR(-res);
5126 }
5127 
5128 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5129 				   int xdp_status)
5130 {
5131 	int cpu = smp_processor_id();
5132 	int queue;
5133 
5134 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5135 
5136 	if (xdp_status & STMMAC_XDP_TX)
5137 		stmmac_tx_timer_arm(priv, queue);
5138 
5139 	if (xdp_status & STMMAC_XDP_REDIRECT)
5140 		xdp_do_flush();
5141 }
5142 
5143 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5144 					       struct xdp_buff *xdp)
5145 {
5146 	unsigned int metasize = xdp->data - xdp->data_meta;
5147 	unsigned int datasize = xdp->data_end - xdp->data;
5148 	struct sk_buff *skb;
5149 
5150 	skb = napi_alloc_skb(&ch->rxtx_napi,
5151 			     xdp->data_end - xdp->data_hard_start);
5152 	if (unlikely(!skb))
5153 		return NULL;
5154 
5155 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5156 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5157 	if (metasize)
5158 		skb_metadata_set(skb, metasize);
5159 
5160 	return skb;
5161 }
5162 
5163 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5164 				   struct dma_desc *p, struct dma_desc *np,
5165 				   struct xdp_buff *xdp)
5166 {
5167 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5168 	struct stmmac_channel *ch = &priv->channel[queue];
5169 	unsigned int len = xdp->data_end - xdp->data;
5170 	enum pkt_hash_types hash_type;
5171 	int coe = priv->hw->rx_csum;
5172 	struct sk_buff *skb;
5173 	u32 hash;
5174 
5175 	skb = stmmac_construct_skb_zc(ch, xdp);
5176 	if (!skb) {
5177 		priv->xstats.rx_dropped++;
5178 		return;
5179 	}
5180 
5181 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5182 	if (priv->hw->hw_vlan_en)
5183 		/* MAC level stripping. */
5184 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5185 	else
5186 		/* Driver level stripping. */
5187 		stmmac_rx_vlan(priv->dev, skb);
5188 	skb->protocol = eth_type_trans(skb, priv->dev);
5189 
5190 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5191 		skb_checksum_none_assert(skb);
5192 	else
5193 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5194 
5195 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5196 		skb_set_hash(skb, hash, hash_type);
5197 
5198 	skb_record_rx_queue(skb, queue);
5199 	napi_gro_receive(&ch->rxtx_napi, skb);
5200 
5201 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5202 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5203 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5204 	u64_stats_update_end(&rxq_stats->napi_syncp);
5205 }
5206 
5207 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5208 {
5209 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5210 	unsigned int entry = rx_q->dirty_rx;
5211 	struct dma_desc *rx_desc = NULL;
5212 	bool ret = true;
5213 
5214 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5215 
5216 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5217 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5218 		dma_addr_t dma_addr;
5219 		bool use_rx_wd;
5220 
5221 		if (!buf->xdp) {
5222 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5223 			if (!buf->xdp) {
5224 				ret = false;
5225 				break;
5226 			}
5227 		}
5228 
5229 		if (priv->extend_desc)
5230 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5231 		else
5232 			rx_desc = rx_q->dma_rx + entry;
5233 
5234 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5235 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5236 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5237 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5238 
5239 		rx_q->rx_count_frames++;
5240 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5241 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5242 			rx_q->rx_count_frames = 0;
5243 
5244 		use_rx_wd = !priv->rx_coal_frames[queue];
5245 		use_rx_wd |= rx_q->rx_count_frames > 0;
5246 		if (!priv->use_riwt)
5247 			use_rx_wd = false;
5248 
5249 		dma_wmb();
5250 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5251 
5252 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5253 	}
5254 
5255 	if (rx_desc) {
5256 		rx_q->dirty_rx = entry;
5257 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5258 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5259 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5260 	}
5261 
5262 	return ret;
5263 }
5264 
5265 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5266 {
5267 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5268 	 * to represent incoming packet, whereas cb field in the same structure
5269 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5270 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5271 	 */
5272 	return (struct stmmac_xdp_buff *)xdp;
5273 }
5274 
5275 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5276 {
5277 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5278 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5279 	unsigned int count = 0, error = 0, len = 0;
5280 	int dirty = stmmac_rx_dirty(priv, queue);
5281 	unsigned int next_entry = rx_q->cur_rx;
5282 	u32 rx_errors = 0, rx_dropped = 0;
5283 	unsigned int desc_size;
5284 	struct bpf_prog *prog;
5285 	bool failure = false;
5286 	int xdp_status = 0;
5287 	int status = 0;
5288 
5289 	if (netif_msg_rx_status(priv)) {
5290 		void *rx_head;
5291 
5292 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5293 		if (priv->extend_desc) {
5294 			rx_head = (void *)rx_q->dma_erx;
5295 			desc_size = sizeof(struct dma_extended_desc);
5296 		} else {
5297 			rx_head = (void *)rx_q->dma_rx;
5298 			desc_size = sizeof(struct dma_desc);
5299 		}
5300 
5301 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5302 				    rx_q->dma_rx_phy, desc_size);
5303 	}
5304 	while (count < limit) {
5305 		struct stmmac_rx_buffer *buf;
5306 		struct stmmac_xdp_buff *ctx;
5307 		unsigned int buf1_len = 0;
5308 		struct dma_desc *np, *p;
5309 		int entry;
5310 		int res;
5311 
5312 		if (!count && rx_q->state_saved) {
5313 			error = rx_q->state.error;
5314 			len = rx_q->state.len;
5315 		} else {
5316 			rx_q->state_saved = false;
5317 			error = 0;
5318 			len = 0;
5319 		}
5320 
5321 		if (count >= limit)
5322 			break;
5323 
5324 read_again:
5325 		buf1_len = 0;
5326 		entry = next_entry;
5327 		buf = &rx_q->buf_pool[entry];
5328 
5329 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5330 			failure = failure ||
5331 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5332 			dirty = 0;
5333 		}
5334 
5335 		if (priv->extend_desc)
5336 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5337 		else
5338 			p = rx_q->dma_rx + entry;
5339 
5340 		/* read the status of the incoming frame */
5341 		status = stmmac_rx_status(priv, &priv->xstats, p);
5342 		/* check if managed by the DMA otherwise go ahead */
5343 		if (unlikely(status & dma_own))
5344 			break;
5345 
5346 		/* Prefetch the next RX descriptor */
5347 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5348 						priv->dma_conf.dma_rx_size);
5349 		next_entry = rx_q->cur_rx;
5350 
5351 		if (priv->extend_desc)
5352 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5353 		else
5354 			np = rx_q->dma_rx + next_entry;
5355 
5356 		prefetch(np);
5357 
5358 		/* Ensure a valid XSK buffer before proceed */
5359 		if (!buf->xdp)
5360 			break;
5361 
5362 		if (priv->extend_desc)
5363 			stmmac_rx_extended_status(priv, &priv->xstats,
5364 						  rx_q->dma_erx + entry);
5365 		if (unlikely(status == discard_frame)) {
5366 			xsk_buff_free(buf->xdp);
5367 			buf->xdp = NULL;
5368 			dirty++;
5369 			error = 1;
5370 			if (!priv->hwts_rx_en)
5371 				rx_errors++;
5372 		}
5373 
5374 		if (unlikely(error && (status & rx_not_ls)))
5375 			goto read_again;
5376 		if (unlikely(error)) {
5377 			count++;
5378 			continue;
5379 		}
5380 
5381 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5382 		if (likely(status & rx_not_ls)) {
5383 			xsk_buff_free(buf->xdp);
5384 			buf->xdp = NULL;
5385 			dirty++;
5386 			count++;
5387 			goto read_again;
5388 		}
5389 
5390 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5391 		ctx->priv = priv;
5392 		ctx->desc = p;
5393 		ctx->ndesc = np;
5394 
5395 		/* XDP ZC Frame only support primary buffers for now */
5396 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5397 		len += buf1_len;
5398 
5399 		/* ACS is disabled; strip manually. */
5400 		if (likely(!(status & rx_not_ls))) {
5401 			buf1_len -= ETH_FCS_LEN;
5402 			len -= ETH_FCS_LEN;
5403 		}
5404 
5405 		/* RX buffer is good and fit into a XSK pool buffer */
5406 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5407 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5408 
5409 		prog = READ_ONCE(priv->xdp_prog);
5410 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5411 
5412 		switch (res) {
5413 		case STMMAC_XDP_PASS:
5414 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5415 			xsk_buff_free(buf->xdp);
5416 			break;
5417 		case STMMAC_XDP_CONSUMED:
5418 			xsk_buff_free(buf->xdp);
5419 			rx_dropped++;
5420 			break;
5421 		case STMMAC_XDP_TX:
5422 		case STMMAC_XDP_REDIRECT:
5423 			xdp_status |= res;
5424 			break;
5425 		}
5426 
5427 		buf->xdp = NULL;
5428 		dirty++;
5429 		count++;
5430 	}
5431 
5432 	if (status & rx_not_ls) {
5433 		rx_q->state_saved = true;
5434 		rx_q->state.error = error;
5435 		rx_q->state.len = len;
5436 	}
5437 
5438 	stmmac_finalize_xdp_rx(priv, xdp_status);
5439 
5440 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5441 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5442 	u64_stats_update_end(&rxq_stats->napi_syncp);
5443 
5444 	priv->xstats.rx_dropped += rx_dropped;
5445 	priv->xstats.rx_errors += rx_errors;
5446 
5447 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5448 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5449 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5450 		else
5451 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5452 
5453 		return (int)count;
5454 	}
5455 
5456 	return failure ? limit : (int)count;
5457 }
5458 
5459 /**
5460  * stmmac_rx - manage the receive process
5461  * @priv: driver private structure
5462  * @limit: napi bugget
5463  * @queue: RX queue index.
5464  * Description :  this the function called by the napi poll method.
5465  * It gets all the frames inside the ring.
5466  */
5467 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5468 {
5469 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5470 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5471 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5472 	struct stmmac_channel *ch = &priv->channel[queue];
5473 	unsigned int count = 0, error = 0, len = 0;
5474 	int status = 0, coe = priv->hw->rx_csum;
5475 	unsigned int next_entry = rx_q->cur_rx;
5476 	enum dma_data_direction dma_dir;
5477 	unsigned int desc_size;
5478 	struct sk_buff *skb = NULL;
5479 	struct stmmac_xdp_buff ctx;
5480 	int xdp_status = 0;
5481 	int bufsz;
5482 
5483 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5484 	bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5485 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5486 
5487 	if (netif_msg_rx_status(priv)) {
5488 		void *rx_head;
5489 
5490 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5491 		if (priv->extend_desc) {
5492 			rx_head = (void *)rx_q->dma_erx;
5493 			desc_size = sizeof(struct dma_extended_desc);
5494 		} else {
5495 			rx_head = (void *)rx_q->dma_rx;
5496 			desc_size = sizeof(struct dma_desc);
5497 		}
5498 
5499 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5500 				    rx_q->dma_rx_phy, desc_size);
5501 	}
5502 	while (count < limit) {
5503 		unsigned int buf1_len = 0, buf2_len = 0;
5504 		enum pkt_hash_types hash_type;
5505 		struct stmmac_rx_buffer *buf;
5506 		struct dma_desc *np, *p;
5507 		int entry;
5508 		u32 hash;
5509 
5510 		if (!count && rx_q->state_saved) {
5511 			skb = rx_q->state.skb;
5512 			error = rx_q->state.error;
5513 			len = rx_q->state.len;
5514 		} else {
5515 			rx_q->state_saved = false;
5516 			skb = NULL;
5517 			error = 0;
5518 			len = 0;
5519 		}
5520 
5521 read_again:
5522 		if (count >= limit)
5523 			break;
5524 
5525 		buf1_len = 0;
5526 		buf2_len = 0;
5527 		entry = next_entry;
5528 		buf = &rx_q->buf_pool[entry];
5529 
5530 		if (priv->extend_desc)
5531 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5532 		else
5533 			p = rx_q->dma_rx + entry;
5534 
5535 		/* read the status of the incoming frame */
5536 		status = stmmac_rx_status(priv, &priv->xstats, p);
5537 		/* check if managed by the DMA otherwise go ahead */
5538 		if (unlikely(status & dma_own))
5539 			break;
5540 
5541 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5542 						priv->dma_conf.dma_rx_size);
5543 		next_entry = rx_q->cur_rx;
5544 
5545 		if (priv->extend_desc)
5546 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5547 		else
5548 			np = rx_q->dma_rx + next_entry;
5549 
5550 		prefetch(np);
5551 
5552 		if (priv->extend_desc)
5553 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5554 		if (unlikely(status == discard_frame)) {
5555 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5556 			buf->page = NULL;
5557 			error = 1;
5558 			if (!priv->hwts_rx_en)
5559 				rx_errors++;
5560 		}
5561 
5562 		if (unlikely(error && (status & rx_not_ls)))
5563 			goto read_again;
5564 		if (unlikely(error)) {
5565 			dev_kfree_skb(skb);
5566 			skb = NULL;
5567 			count++;
5568 			continue;
5569 		}
5570 
5571 		/* Buffer is good. Go on. */
5572 
5573 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5574 		len += buf1_len;
5575 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5576 		len += buf2_len;
5577 
5578 		/* ACS is disabled; strip manually. */
5579 		if (likely(!(status & rx_not_ls))) {
5580 			if (buf2_len) {
5581 				buf2_len -= ETH_FCS_LEN;
5582 				len -= ETH_FCS_LEN;
5583 			} else if (buf1_len) {
5584 				buf1_len -= ETH_FCS_LEN;
5585 				len -= ETH_FCS_LEN;
5586 			}
5587 		}
5588 
5589 		if (!skb) {
5590 			unsigned int pre_len, sync_len;
5591 
5592 			dma_sync_single_for_cpu(priv->device, buf->addr,
5593 						buf1_len, dma_dir);
5594 			net_prefetch(page_address(buf->page) +
5595 				     buf->page_offset);
5596 
5597 			xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5598 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5599 					 buf->page_offset, buf1_len, true);
5600 
5601 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5602 				  buf->page_offset;
5603 
5604 			ctx.priv = priv;
5605 			ctx.desc = p;
5606 			ctx.ndesc = np;
5607 
5608 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5609 			/* Due xdp_adjust_tail: DMA sync for_device
5610 			 * cover max len CPU touch
5611 			 */
5612 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5613 				   buf->page_offset;
5614 			sync_len = max(sync_len, pre_len);
5615 
5616 			/* For Not XDP_PASS verdict */
5617 			if (IS_ERR(skb)) {
5618 				unsigned int xdp_res = -PTR_ERR(skb);
5619 
5620 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5621 					page_pool_put_page(rx_q->page_pool,
5622 							   virt_to_head_page(ctx.xdp.data),
5623 							   sync_len, true);
5624 					buf->page = NULL;
5625 					rx_dropped++;
5626 
5627 					/* Clear skb as it was set as
5628 					 * status by XDP program.
5629 					 */
5630 					skb = NULL;
5631 
5632 					if (unlikely((status & rx_not_ls)))
5633 						goto read_again;
5634 
5635 					count++;
5636 					continue;
5637 				} else if (xdp_res & (STMMAC_XDP_TX |
5638 						      STMMAC_XDP_REDIRECT)) {
5639 					xdp_status |= xdp_res;
5640 					buf->page = NULL;
5641 					skb = NULL;
5642 					count++;
5643 					continue;
5644 				}
5645 			}
5646 		}
5647 
5648 		if (!skb) {
5649 			unsigned int head_pad_len;
5650 
5651 			/* XDP program may expand or reduce tail */
5652 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5653 
5654 			skb = napi_build_skb(page_address(buf->page),
5655 					     rx_q->napi_skb_frag_size);
5656 			if (!skb) {
5657 				page_pool_recycle_direct(rx_q->page_pool,
5658 							 buf->page);
5659 				rx_dropped++;
5660 				count++;
5661 				goto drain_data;
5662 			}
5663 
5664 			/* XDP program may adjust header */
5665 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5666 			skb_reserve(skb, head_pad_len);
5667 			skb_put(skb, buf1_len);
5668 			skb_mark_for_recycle(skb);
5669 			buf->page = NULL;
5670 		} else if (buf1_len) {
5671 			dma_sync_single_for_cpu(priv->device, buf->addr,
5672 						buf1_len, dma_dir);
5673 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5674 					buf->page, buf->page_offset, buf1_len,
5675 					priv->dma_conf.dma_buf_sz);
5676 			buf->page = NULL;
5677 		}
5678 
5679 		if (buf2_len) {
5680 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5681 						buf2_len, dma_dir);
5682 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5683 					buf->sec_page, 0, buf2_len,
5684 					priv->dma_conf.dma_buf_sz);
5685 			buf->sec_page = NULL;
5686 		}
5687 
5688 drain_data:
5689 		if (likely(status & rx_not_ls))
5690 			goto read_again;
5691 		if (!skb)
5692 			continue;
5693 
5694 		/* Got entire packet into SKB. Finish it. */
5695 
5696 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5697 
5698 		if (priv->hw->hw_vlan_en)
5699 			/* MAC level stripping. */
5700 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5701 		else
5702 			/* Driver level stripping. */
5703 			stmmac_rx_vlan(priv->dev, skb);
5704 
5705 		skb->protocol = eth_type_trans(skb, priv->dev);
5706 
5707 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5708 			skb_checksum_none_assert(skb);
5709 		else
5710 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5711 
5712 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5713 			skb_set_hash(skb, hash, hash_type);
5714 
5715 		skb_record_rx_queue(skb, queue);
5716 		napi_gro_receive(&ch->rx_napi, skb);
5717 		skb = NULL;
5718 
5719 		rx_packets++;
5720 		rx_bytes += len;
5721 		count++;
5722 	}
5723 
5724 	if (status & rx_not_ls || skb) {
5725 		rx_q->state_saved = true;
5726 		rx_q->state.skb = skb;
5727 		rx_q->state.error = error;
5728 		rx_q->state.len = len;
5729 	}
5730 
5731 	stmmac_finalize_xdp_rx(priv, xdp_status);
5732 
5733 	stmmac_rx_refill(priv, queue);
5734 
5735 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5736 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5737 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5738 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5739 	u64_stats_update_end(&rxq_stats->napi_syncp);
5740 
5741 	priv->xstats.rx_dropped += rx_dropped;
5742 	priv->xstats.rx_errors += rx_errors;
5743 
5744 	return count;
5745 }
5746 
5747 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5748 {
5749 	struct stmmac_channel *ch =
5750 		container_of(napi, struct stmmac_channel, rx_napi);
5751 	struct stmmac_priv *priv = ch->priv_data;
5752 	struct stmmac_rxq_stats *rxq_stats;
5753 	u32 chan = ch->index;
5754 	int work_done;
5755 
5756 	rxq_stats = &priv->xstats.rxq_stats[chan];
5757 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5758 	u64_stats_inc(&rxq_stats->napi.poll);
5759 	u64_stats_update_end(&rxq_stats->napi_syncp);
5760 
5761 	work_done = stmmac_rx(priv, budget, chan);
5762 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5763 		unsigned long flags;
5764 
5765 		spin_lock_irqsave(&ch->lock, flags);
5766 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5767 		spin_unlock_irqrestore(&ch->lock, flags);
5768 	}
5769 
5770 	return work_done;
5771 }
5772 
5773 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5774 {
5775 	struct stmmac_channel *ch =
5776 		container_of(napi, struct stmmac_channel, tx_napi);
5777 	struct stmmac_priv *priv = ch->priv_data;
5778 	struct stmmac_txq_stats *txq_stats;
5779 	bool pending_packets = false;
5780 	u32 chan = ch->index;
5781 	int work_done;
5782 
5783 	txq_stats = &priv->xstats.txq_stats[chan];
5784 	u64_stats_update_begin(&txq_stats->napi_syncp);
5785 	u64_stats_inc(&txq_stats->napi.poll);
5786 	u64_stats_update_end(&txq_stats->napi_syncp);
5787 
5788 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5789 	work_done = min(work_done, budget);
5790 
5791 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5792 		unsigned long flags;
5793 
5794 		spin_lock_irqsave(&ch->lock, flags);
5795 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5796 		spin_unlock_irqrestore(&ch->lock, flags);
5797 	}
5798 
5799 	/* TX still have packet to handle, check if we need to arm tx timer */
5800 	if (pending_packets)
5801 		stmmac_tx_timer_arm(priv, chan);
5802 
5803 	return work_done;
5804 }
5805 
5806 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5807 {
5808 	struct stmmac_channel *ch =
5809 		container_of(napi, struct stmmac_channel, rxtx_napi);
5810 	struct stmmac_priv *priv = ch->priv_data;
5811 	bool tx_pending_packets = false;
5812 	int rx_done, tx_done, rxtx_done;
5813 	struct stmmac_rxq_stats *rxq_stats;
5814 	struct stmmac_txq_stats *txq_stats;
5815 	u32 chan = ch->index;
5816 
5817 	rxq_stats = &priv->xstats.rxq_stats[chan];
5818 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5819 	u64_stats_inc(&rxq_stats->napi.poll);
5820 	u64_stats_update_end(&rxq_stats->napi_syncp);
5821 
5822 	txq_stats = &priv->xstats.txq_stats[chan];
5823 	u64_stats_update_begin(&txq_stats->napi_syncp);
5824 	u64_stats_inc(&txq_stats->napi.poll);
5825 	u64_stats_update_end(&txq_stats->napi_syncp);
5826 
5827 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5828 	tx_done = min(tx_done, budget);
5829 
5830 	rx_done = stmmac_rx_zc(priv, budget, chan);
5831 
5832 	rxtx_done = max(tx_done, rx_done);
5833 
5834 	/* If either TX or RX work is not complete, return budget
5835 	 * and keep pooling
5836 	 */
5837 	if (rxtx_done >= budget)
5838 		return budget;
5839 
5840 	/* all work done, exit the polling mode */
5841 	if (napi_complete_done(napi, rxtx_done)) {
5842 		unsigned long flags;
5843 
5844 		spin_lock_irqsave(&ch->lock, flags);
5845 		/* Both RX and TX work done are compelte,
5846 		 * so enable both RX & TX IRQs.
5847 		 */
5848 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5849 		spin_unlock_irqrestore(&ch->lock, flags);
5850 	}
5851 
5852 	/* TX still have packet to handle, check if we need to arm tx timer */
5853 	if (tx_pending_packets)
5854 		stmmac_tx_timer_arm(priv, chan);
5855 
5856 	return min(rxtx_done, budget - 1);
5857 }
5858 
5859 /**
5860  *  stmmac_tx_timeout
5861  *  @dev : Pointer to net device structure
5862  *  @txqueue: the index of the hanging transmit queue
5863  *  Description: this function is called when a packet transmission fails to
5864  *   complete within a reasonable time. The driver will mark the error in the
5865  *   netdev structure and arrange for the device to be reset to a sane state
5866  *   in order to transmit a new packet.
5867  */
5868 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5869 {
5870 	struct stmmac_priv *priv = netdev_priv(dev);
5871 
5872 	stmmac_global_err(priv);
5873 }
5874 
5875 /**
5876  *  stmmac_set_rx_mode - entry point for multicast addressing
5877  *  @dev : pointer to the device structure
5878  *  Description:
5879  *  This function is a driver entry point which gets called by the kernel
5880  *  whenever multicast addresses must be enabled/disabled.
5881  *  Return value:
5882  *  void.
5883  */
5884 static void stmmac_set_rx_mode(struct net_device *dev)
5885 {
5886 	struct stmmac_priv *priv = netdev_priv(dev);
5887 
5888 	stmmac_set_filter(priv, priv->hw, dev);
5889 }
5890 
5891 /**
5892  *  stmmac_change_mtu - entry point to change MTU size for the device.
5893  *  @dev : device pointer.
5894  *  @new_mtu : the new MTU size for the device.
5895  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5896  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5897  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5898  *  Return value:
5899  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5900  *  file on failure.
5901  */
5902 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5903 {
5904 	struct stmmac_priv *priv = netdev_priv(dev);
5905 	int txfifosz = priv->plat->tx_fifo_size;
5906 	struct stmmac_dma_conf *dma_conf;
5907 	const int mtu = new_mtu;
5908 	int ret;
5909 
5910 	if (txfifosz == 0)
5911 		txfifosz = priv->dma_cap.tx_fifo_size;
5912 
5913 	txfifosz /= priv->plat->tx_queues_to_use;
5914 
5915 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5916 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5917 		return -EINVAL;
5918 	}
5919 
5920 	new_mtu = STMMAC_ALIGN(new_mtu);
5921 
5922 	/* If condition true, FIFO is too small or MTU too large */
5923 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5924 		return -EINVAL;
5925 
5926 	if (netif_running(dev)) {
5927 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5928 		/* Try to allocate the new DMA conf with the new mtu */
5929 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5930 		if (IS_ERR(dma_conf)) {
5931 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5932 				   mtu);
5933 			return PTR_ERR(dma_conf);
5934 		}
5935 
5936 		stmmac_release(dev);
5937 
5938 		ret = __stmmac_open(dev, dma_conf);
5939 		if (ret) {
5940 			free_dma_desc_resources(priv, dma_conf);
5941 			kfree(dma_conf);
5942 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5943 			return ret;
5944 		}
5945 
5946 		kfree(dma_conf);
5947 
5948 		stmmac_set_rx_mode(dev);
5949 	}
5950 
5951 	WRITE_ONCE(dev->mtu, mtu);
5952 	netdev_update_features(dev);
5953 
5954 	return 0;
5955 }
5956 
5957 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5958 					     netdev_features_t features)
5959 {
5960 	struct stmmac_priv *priv = netdev_priv(dev);
5961 
5962 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5963 		features &= ~NETIF_F_RXCSUM;
5964 
5965 	if (!priv->plat->tx_coe)
5966 		features &= ~NETIF_F_CSUM_MASK;
5967 
5968 	/* Some GMAC devices have a bugged Jumbo frame support that
5969 	 * needs to have the Tx COE disabled for oversized frames
5970 	 * (due to limited buffer sizes). In this case we disable
5971 	 * the TX csum insertion in the TDES and not use SF.
5972 	 */
5973 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5974 		features &= ~NETIF_F_CSUM_MASK;
5975 
5976 	/* Disable tso if asked by ethtool */
5977 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5978 		if (features & NETIF_F_TSO)
5979 			priv->tso = true;
5980 		else
5981 			priv->tso = false;
5982 	}
5983 
5984 	return features;
5985 }
5986 
5987 static int stmmac_set_features(struct net_device *netdev,
5988 			       netdev_features_t features)
5989 {
5990 	struct stmmac_priv *priv = netdev_priv(netdev);
5991 
5992 	/* Keep the COE Type in case of csum is supporting */
5993 	if (features & NETIF_F_RXCSUM)
5994 		priv->hw->rx_csum = priv->plat->rx_coe;
5995 	else
5996 		priv->hw->rx_csum = 0;
5997 	/* No check needed because rx_coe has been set before and it will be
5998 	 * fixed in case of issue.
5999 	 */
6000 	stmmac_rx_ipc(priv, priv->hw);
6001 
6002 	if (priv->sph_cap) {
6003 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6004 		u32 chan;
6005 
6006 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
6007 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6008 	}
6009 
6010 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
6011 		priv->hw->hw_vlan_en = true;
6012 	else
6013 		priv->hw->hw_vlan_en = false;
6014 
6015 	stmmac_set_hw_vlan_mode(priv, priv->hw);
6016 
6017 	return 0;
6018 }
6019 
6020 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6021 {
6022 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6023 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6024 	u32 queues_count;
6025 	u32 queue;
6026 	bool xmac;
6027 
6028 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6029 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6030 
6031 	if (priv->irq_wake)
6032 		pm_wakeup_event(priv->device, 0);
6033 
6034 	if (priv->dma_cap.estsel)
6035 		stmmac_est_irq_status(priv, priv, priv->dev,
6036 				      &priv->xstats, tx_cnt);
6037 
6038 	if (stmmac_fpe_supported(priv))
6039 		stmmac_fpe_irq_status(priv);
6040 
6041 	/* To handle GMAC own interrupts */
6042 	if ((priv->plat->has_gmac) || xmac) {
6043 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6044 
6045 		if (unlikely(status)) {
6046 			/* For LPI we need to save the tx status */
6047 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6048 				priv->tx_path_in_lpi_mode = true;
6049 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6050 				priv->tx_path_in_lpi_mode = false;
6051 		}
6052 
6053 		for (queue = 0; queue < queues_count; queue++)
6054 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6055 
6056 		/* PCS link status */
6057 		if (priv->hw->pcs &&
6058 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6059 			if (priv->xstats.pcs_link)
6060 				netif_carrier_on(priv->dev);
6061 			else
6062 				netif_carrier_off(priv->dev);
6063 		}
6064 
6065 		stmmac_timestamp_interrupt(priv, priv);
6066 	}
6067 }
6068 
6069 /**
6070  *  stmmac_interrupt - main ISR
6071  *  @irq: interrupt number.
6072  *  @dev_id: to pass the net device pointer.
6073  *  Description: this is the main driver interrupt service routine.
6074  *  It can call:
6075  *  o DMA service routine (to manage incoming frame reception and transmission
6076  *    status)
6077  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6078  *    interrupts.
6079  */
6080 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6081 {
6082 	struct net_device *dev = (struct net_device *)dev_id;
6083 	struct stmmac_priv *priv = netdev_priv(dev);
6084 
6085 	/* Check if adapter is up */
6086 	if (test_bit(STMMAC_DOWN, &priv->state))
6087 		return IRQ_HANDLED;
6088 
6089 	/* Check ASP error if it isn't delivered via an individual IRQ */
6090 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6091 		return IRQ_HANDLED;
6092 
6093 	/* To handle Common interrupts */
6094 	stmmac_common_interrupt(priv);
6095 
6096 	/* To handle DMA interrupts */
6097 	stmmac_dma_interrupt(priv);
6098 
6099 	return IRQ_HANDLED;
6100 }
6101 
6102 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6103 {
6104 	struct net_device *dev = (struct net_device *)dev_id;
6105 	struct stmmac_priv *priv = netdev_priv(dev);
6106 
6107 	/* Check if adapter is up */
6108 	if (test_bit(STMMAC_DOWN, &priv->state))
6109 		return IRQ_HANDLED;
6110 
6111 	/* To handle Common interrupts */
6112 	stmmac_common_interrupt(priv);
6113 
6114 	return IRQ_HANDLED;
6115 }
6116 
6117 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6118 {
6119 	struct net_device *dev = (struct net_device *)dev_id;
6120 	struct stmmac_priv *priv = netdev_priv(dev);
6121 
6122 	/* Check if adapter is up */
6123 	if (test_bit(STMMAC_DOWN, &priv->state))
6124 		return IRQ_HANDLED;
6125 
6126 	/* Check if a fatal error happened */
6127 	stmmac_safety_feat_interrupt(priv);
6128 
6129 	return IRQ_HANDLED;
6130 }
6131 
6132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6133 {
6134 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6135 	struct stmmac_dma_conf *dma_conf;
6136 	int chan = tx_q->queue_index;
6137 	struct stmmac_priv *priv;
6138 	int status;
6139 
6140 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6141 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6142 
6143 	/* Check if adapter is up */
6144 	if (test_bit(STMMAC_DOWN, &priv->state))
6145 		return IRQ_HANDLED;
6146 
6147 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6148 
6149 	if (unlikely(status & tx_hard_error_bump_tc)) {
6150 		/* Try to bump up the dma threshold on this failure */
6151 		stmmac_bump_dma_threshold(priv, chan);
6152 	} else if (unlikely(status == tx_hard_error)) {
6153 		stmmac_tx_err(priv, chan);
6154 	}
6155 
6156 	return IRQ_HANDLED;
6157 }
6158 
6159 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6160 {
6161 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6162 	struct stmmac_dma_conf *dma_conf;
6163 	int chan = rx_q->queue_index;
6164 	struct stmmac_priv *priv;
6165 
6166 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6167 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6168 
6169 	/* Check if adapter is up */
6170 	if (test_bit(STMMAC_DOWN, &priv->state))
6171 		return IRQ_HANDLED;
6172 
6173 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6174 
6175 	return IRQ_HANDLED;
6176 }
6177 
6178 /**
6179  *  stmmac_ioctl - Entry point for the Ioctl
6180  *  @dev: Device pointer.
6181  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6182  *  a proprietary structure used to pass information to the driver.
6183  *  @cmd: IOCTL command
6184  *  Description:
6185  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6186  */
6187 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6188 {
6189 	struct stmmac_priv *priv = netdev_priv (dev);
6190 	int ret = -EOPNOTSUPP;
6191 
6192 	if (!netif_running(dev))
6193 		return -EINVAL;
6194 
6195 	switch (cmd) {
6196 	case SIOCGMIIPHY:
6197 	case SIOCGMIIREG:
6198 	case SIOCSMIIREG:
6199 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6200 		break;
6201 	case SIOCSHWTSTAMP:
6202 		ret = stmmac_hwtstamp_set(dev, rq);
6203 		break;
6204 	case SIOCGHWTSTAMP:
6205 		ret = stmmac_hwtstamp_get(dev, rq);
6206 		break;
6207 	default:
6208 		break;
6209 	}
6210 
6211 	return ret;
6212 }
6213 
6214 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6215 				    void *cb_priv)
6216 {
6217 	struct stmmac_priv *priv = cb_priv;
6218 	int ret = -EOPNOTSUPP;
6219 
6220 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6221 		return ret;
6222 
6223 	__stmmac_disable_all_queues(priv);
6224 
6225 	switch (type) {
6226 	case TC_SETUP_CLSU32:
6227 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6228 		break;
6229 	case TC_SETUP_CLSFLOWER:
6230 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6231 		break;
6232 	default:
6233 		break;
6234 	}
6235 
6236 	stmmac_enable_all_queues(priv);
6237 	return ret;
6238 }
6239 
6240 static LIST_HEAD(stmmac_block_cb_list);
6241 
6242 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6243 			   void *type_data)
6244 {
6245 	struct stmmac_priv *priv = netdev_priv(ndev);
6246 
6247 	switch (type) {
6248 	case TC_QUERY_CAPS:
6249 		return stmmac_tc_query_caps(priv, priv, type_data);
6250 	case TC_SETUP_QDISC_MQPRIO:
6251 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6252 	case TC_SETUP_BLOCK:
6253 		return flow_block_cb_setup_simple(type_data,
6254 						  &stmmac_block_cb_list,
6255 						  stmmac_setup_tc_block_cb,
6256 						  priv, priv, true);
6257 	case TC_SETUP_QDISC_CBS:
6258 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6259 	case TC_SETUP_QDISC_TAPRIO:
6260 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6261 	case TC_SETUP_QDISC_ETF:
6262 		return stmmac_tc_setup_etf(priv, priv, type_data);
6263 	default:
6264 		return -EOPNOTSUPP;
6265 	}
6266 }
6267 
6268 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6269 			       struct net_device *sb_dev)
6270 {
6271 	int gso = skb_shinfo(skb)->gso_type;
6272 
6273 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6274 		/*
6275 		 * There is no way to determine the number of TSO/USO
6276 		 * capable Queues. Let's use always the Queue 0
6277 		 * because if TSO/USO is supported then at least this
6278 		 * one will be capable.
6279 		 */
6280 		return 0;
6281 	}
6282 
6283 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6284 }
6285 
6286 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6287 {
6288 	struct stmmac_priv *priv = netdev_priv(ndev);
6289 	int ret = 0;
6290 
6291 	ret = pm_runtime_resume_and_get(priv->device);
6292 	if (ret < 0)
6293 		return ret;
6294 
6295 	ret = eth_mac_addr(ndev, addr);
6296 	if (ret)
6297 		goto set_mac_error;
6298 
6299 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6300 
6301 set_mac_error:
6302 	pm_runtime_put(priv->device);
6303 
6304 	return ret;
6305 }
6306 
6307 #ifdef CONFIG_DEBUG_FS
6308 static struct dentry *stmmac_fs_dir;
6309 
6310 static void sysfs_display_ring(void *head, int size, int extend_desc,
6311 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6312 {
6313 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6314 	struct dma_desc *p = (struct dma_desc *)head;
6315 	unsigned int desc_size;
6316 	dma_addr_t dma_addr;
6317 	int i;
6318 
6319 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6320 	for (i = 0; i < size; i++) {
6321 		dma_addr = dma_phy_addr + i * desc_size;
6322 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6323 				i, &dma_addr,
6324 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6325 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6326 		if (extend_desc)
6327 			p = &(++ep)->basic;
6328 		else
6329 			p++;
6330 	}
6331 }
6332 
6333 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6334 {
6335 	struct net_device *dev = seq->private;
6336 	struct stmmac_priv *priv = netdev_priv(dev);
6337 	u32 rx_count = priv->plat->rx_queues_to_use;
6338 	u32 tx_count = priv->plat->tx_queues_to_use;
6339 	u32 queue;
6340 
6341 	if ((dev->flags & IFF_UP) == 0)
6342 		return 0;
6343 
6344 	for (queue = 0; queue < rx_count; queue++) {
6345 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6346 
6347 		seq_printf(seq, "RX Queue %d:\n", queue);
6348 
6349 		if (priv->extend_desc) {
6350 			seq_printf(seq, "Extended descriptor ring:\n");
6351 			sysfs_display_ring((void *)rx_q->dma_erx,
6352 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6353 		} else {
6354 			seq_printf(seq, "Descriptor ring:\n");
6355 			sysfs_display_ring((void *)rx_q->dma_rx,
6356 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6357 		}
6358 	}
6359 
6360 	for (queue = 0; queue < tx_count; queue++) {
6361 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6362 
6363 		seq_printf(seq, "TX Queue %d:\n", queue);
6364 
6365 		if (priv->extend_desc) {
6366 			seq_printf(seq, "Extended descriptor ring:\n");
6367 			sysfs_display_ring((void *)tx_q->dma_etx,
6368 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6369 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6370 			seq_printf(seq, "Descriptor ring:\n");
6371 			sysfs_display_ring((void *)tx_q->dma_tx,
6372 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6373 		}
6374 	}
6375 
6376 	return 0;
6377 }
6378 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6379 
6380 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6381 {
6382 	static const char * const dwxgmac_timestamp_source[] = {
6383 		"None",
6384 		"Internal",
6385 		"External",
6386 		"Both",
6387 	};
6388 	static const char * const dwxgmac_safety_feature_desc[] = {
6389 		"No",
6390 		"All Safety Features with ECC and Parity",
6391 		"All Safety Features without ECC or Parity",
6392 		"All Safety Features with Parity Only",
6393 		"ECC Only",
6394 		"UNDEFINED",
6395 		"UNDEFINED",
6396 		"UNDEFINED",
6397 	};
6398 	struct net_device *dev = seq->private;
6399 	struct stmmac_priv *priv = netdev_priv(dev);
6400 
6401 	if (!priv->hw_cap_support) {
6402 		seq_printf(seq, "DMA HW features not supported\n");
6403 		return 0;
6404 	}
6405 
6406 	seq_printf(seq, "==============================\n");
6407 	seq_printf(seq, "\tDMA HW features\n");
6408 	seq_printf(seq, "==============================\n");
6409 
6410 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6411 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6412 	seq_printf(seq, "\t1000 Mbps: %s\n",
6413 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6414 	seq_printf(seq, "\tHalf duplex: %s\n",
6415 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6416 	if (priv->plat->has_xgmac) {
6417 		seq_printf(seq,
6418 			   "\tNumber of Additional MAC address registers: %d\n",
6419 			   priv->dma_cap.multi_addr);
6420 	} else {
6421 		seq_printf(seq, "\tHash Filter: %s\n",
6422 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6423 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6424 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6425 	}
6426 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6427 		   (priv->dma_cap.pcs) ? "Y" : "N");
6428 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6429 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6430 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6431 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6432 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6433 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6434 	seq_printf(seq, "\tRMON module: %s\n",
6435 		   (priv->dma_cap.rmon) ? "Y" : "N");
6436 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6437 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6438 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6439 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6440 	if (priv->plat->has_xgmac)
6441 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6442 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6443 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6444 		   (priv->dma_cap.eee) ? "Y" : "N");
6445 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6446 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6447 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6448 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6449 	    priv->plat->has_xgmac) {
6450 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6451 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6452 	} else {
6453 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6454 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6455 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6456 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6457 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6458 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6459 	}
6460 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6461 		   priv->dma_cap.number_rx_channel);
6462 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6463 		   priv->dma_cap.number_tx_channel);
6464 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6465 		   priv->dma_cap.number_rx_queues);
6466 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6467 		   priv->dma_cap.number_tx_queues);
6468 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6469 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6470 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6471 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6472 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6473 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6474 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6475 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6476 		   priv->dma_cap.pps_out_num);
6477 	seq_printf(seq, "\tSafety Features: %s\n",
6478 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6479 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6480 		   priv->dma_cap.frpsel ? "Y" : "N");
6481 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6482 		   priv->dma_cap.host_dma_width);
6483 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6484 		   priv->dma_cap.rssen ? "Y" : "N");
6485 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6486 		   priv->dma_cap.vlhash ? "Y" : "N");
6487 	seq_printf(seq, "\tSplit Header: %s\n",
6488 		   priv->dma_cap.sphen ? "Y" : "N");
6489 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6490 		   priv->dma_cap.vlins ? "Y" : "N");
6491 	seq_printf(seq, "\tDouble VLAN: %s\n",
6492 		   priv->dma_cap.dvlan ? "Y" : "N");
6493 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6494 		   priv->dma_cap.l3l4fnum);
6495 	seq_printf(seq, "\tARP Offloading: %s\n",
6496 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6497 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6498 		   priv->dma_cap.estsel ? "Y" : "N");
6499 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6500 		   priv->dma_cap.fpesel ? "Y" : "N");
6501 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6502 		   priv->dma_cap.tbssel ? "Y" : "N");
6503 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6504 		   priv->dma_cap.tbs_ch_num);
6505 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6506 		   priv->dma_cap.sgfsel ? "Y" : "N");
6507 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6508 		   BIT(priv->dma_cap.ttsfd) >> 1);
6509 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6510 		   priv->dma_cap.numtc);
6511 	seq_printf(seq, "\tDCB Feature: %s\n",
6512 		   priv->dma_cap.dcben ? "Y" : "N");
6513 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6514 		   priv->dma_cap.advthword ? "Y" : "N");
6515 	seq_printf(seq, "\tPTP Offload: %s\n",
6516 		   priv->dma_cap.ptoen ? "Y" : "N");
6517 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6518 		   priv->dma_cap.osten ? "Y" : "N");
6519 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6520 		   priv->dma_cap.pfcen ? "Y" : "N");
6521 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6522 		   BIT(priv->dma_cap.frpes) << 6);
6523 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6524 		   BIT(priv->dma_cap.frpbs) << 6);
6525 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6526 		   priv->dma_cap.frppipe_num);
6527 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6528 		   priv->dma_cap.nrvf_num ?
6529 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6530 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6531 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6532 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6533 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6534 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6535 		   priv->dma_cap.cbtisel ? "Y" : "N");
6536 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6537 		   priv->dma_cap.aux_snapshot_n);
6538 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6539 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6540 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6541 		   priv->dma_cap.edma ? "Y" : "N");
6542 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6543 		   priv->dma_cap.ediffc ? "Y" : "N");
6544 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6545 		   priv->dma_cap.vxn ? "Y" : "N");
6546 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6547 		   priv->dma_cap.dbgmem ? "Y" : "N");
6548 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6549 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6550 	return 0;
6551 }
6552 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6553 
6554 /* Use network device events to rename debugfs file entries.
6555  */
6556 static int stmmac_device_event(struct notifier_block *unused,
6557 			       unsigned long event, void *ptr)
6558 {
6559 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6560 	struct stmmac_priv *priv = netdev_priv(dev);
6561 
6562 	if (dev->netdev_ops != &stmmac_netdev_ops)
6563 		goto done;
6564 
6565 	switch (event) {
6566 	case NETDEV_CHANGENAME:
6567 		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6568 		break;
6569 	}
6570 done:
6571 	return NOTIFY_DONE;
6572 }
6573 
6574 static struct notifier_block stmmac_notifier = {
6575 	.notifier_call = stmmac_device_event,
6576 };
6577 
6578 static void stmmac_init_fs(struct net_device *dev)
6579 {
6580 	struct stmmac_priv *priv = netdev_priv(dev);
6581 
6582 	rtnl_lock();
6583 
6584 	/* Create per netdev entries */
6585 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6586 
6587 	/* Entry to report DMA RX/TX rings */
6588 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6589 			    &stmmac_rings_status_fops);
6590 
6591 	/* Entry to report the DMA HW features */
6592 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6593 			    &stmmac_dma_cap_fops);
6594 
6595 	rtnl_unlock();
6596 }
6597 
6598 static void stmmac_exit_fs(struct net_device *dev)
6599 {
6600 	struct stmmac_priv *priv = netdev_priv(dev);
6601 
6602 	debugfs_remove_recursive(priv->dbgfs_dir);
6603 }
6604 #endif /* CONFIG_DEBUG_FS */
6605 
6606 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6607 {
6608 	unsigned char *data = (unsigned char *)&vid_le;
6609 	unsigned char data_byte = 0;
6610 	u32 crc = ~0x0;
6611 	u32 temp = 0;
6612 	int i, bits;
6613 
6614 	bits = get_bitmask_order(VLAN_VID_MASK);
6615 	for (i = 0; i < bits; i++) {
6616 		if ((i % 8) == 0)
6617 			data_byte = data[i / 8];
6618 
6619 		temp = ((crc & 1) ^ data_byte) & 1;
6620 		crc >>= 1;
6621 		data_byte >>= 1;
6622 
6623 		if (temp)
6624 			crc ^= 0xedb88320;
6625 	}
6626 
6627 	return crc;
6628 }
6629 
6630 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6631 {
6632 	u32 crc, hash = 0;
6633 	u16 pmatch = 0;
6634 	int count = 0;
6635 	u16 vid = 0;
6636 
6637 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6638 		__le16 vid_le = cpu_to_le16(vid);
6639 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6640 		hash |= (1 << crc);
6641 		count++;
6642 	}
6643 
6644 	if (!priv->dma_cap.vlhash) {
6645 		if (count > 2) /* VID = 0 always passes filter */
6646 			return -EOPNOTSUPP;
6647 
6648 		pmatch = vid;
6649 		hash = 0;
6650 	}
6651 
6652 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6653 }
6654 
6655 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6656 {
6657 	struct stmmac_priv *priv = netdev_priv(ndev);
6658 	bool is_double = false;
6659 	int ret;
6660 
6661 	ret = pm_runtime_resume_and_get(priv->device);
6662 	if (ret < 0)
6663 		return ret;
6664 
6665 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6666 		is_double = true;
6667 
6668 	set_bit(vid, priv->active_vlans);
6669 	ret = stmmac_vlan_update(priv, is_double);
6670 	if (ret) {
6671 		clear_bit(vid, priv->active_vlans);
6672 		goto err_pm_put;
6673 	}
6674 
6675 	if (priv->hw->num_vlan) {
6676 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6677 		if (ret)
6678 			goto err_pm_put;
6679 	}
6680 err_pm_put:
6681 	pm_runtime_put(priv->device);
6682 
6683 	return ret;
6684 }
6685 
6686 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6687 {
6688 	struct stmmac_priv *priv = netdev_priv(ndev);
6689 	bool is_double = false;
6690 	int ret;
6691 
6692 	ret = pm_runtime_resume_and_get(priv->device);
6693 	if (ret < 0)
6694 		return ret;
6695 
6696 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6697 		is_double = true;
6698 
6699 	clear_bit(vid, priv->active_vlans);
6700 
6701 	if (priv->hw->num_vlan) {
6702 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6703 		if (ret)
6704 			goto del_vlan_error;
6705 	}
6706 
6707 	ret = stmmac_vlan_update(priv, is_double);
6708 
6709 del_vlan_error:
6710 	pm_runtime_put(priv->device);
6711 
6712 	return ret;
6713 }
6714 
6715 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6716 {
6717 	struct stmmac_priv *priv = netdev_priv(dev);
6718 
6719 	switch (bpf->command) {
6720 	case XDP_SETUP_PROG:
6721 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6722 	case XDP_SETUP_XSK_POOL:
6723 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6724 					     bpf->xsk.queue_id);
6725 	default:
6726 		return -EOPNOTSUPP;
6727 	}
6728 }
6729 
6730 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6731 			   struct xdp_frame **frames, u32 flags)
6732 {
6733 	struct stmmac_priv *priv = netdev_priv(dev);
6734 	int cpu = smp_processor_id();
6735 	struct netdev_queue *nq;
6736 	int i, nxmit = 0;
6737 	int queue;
6738 
6739 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6740 		return -ENETDOWN;
6741 
6742 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6743 		return -EINVAL;
6744 
6745 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6746 	nq = netdev_get_tx_queue(priv->dev, queue);
6747 
6748 	__netif_tx_lock(nq, cpu);
6749 	/* Avoids TX time-out as we are sharing with slow path */
6750 	txq_trans_cond_update(nq);
6751 
6752 	for (i = 0; i < num_frames; i++) {
6753 		int res;
6754 
6755 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6756 		if (res == STMMAC_XDP_CONSUMED)
6757 			break;
6758 
6759 		nxmit++;
6760 	}
6761 
6762 	if (flags & XDP_XMIT_FLUSH) {
6763 		stmmac_flush_tx_descriptors(priv, queue);
6764 		stmmac_tx_timer_arm(priv, queue);
6765 	}
6766 
6767 	__netif_tx_unlock(nq);
6768 
6769 	return nxmit;
6770 }
6771 
6772 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6773 {
6774 	struct stmmac_channel *ch = &priv->channel[queue];
6775 	unsigned long flags;
6776 
6777 	spin_lock_irqsave(&ch->lock, flags);
6778 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6779 	spin_unlock_irqrestore(&ch->lock, flags);
6780 
6781 	stmmac_stop_rx_dma(priv, queue);
6782 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6783 }
6784 
6785 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6786 {
6787 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6788 	struct stmmac_channel *ch = &priv->channel[queue];
6789 	unsigned long flags;
6790 	u32 buf_size;
6791 	int ret;
6792 
6793 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6794 	if (ret) {
6795 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6796 		return;
6797 	}
6798 
6799 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6800 	if (ret) {
6801 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6802 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6803 		return;
6804 	}
6805 
6806 	stmmac_reset_rx_queue(priv, queue);
6807 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6808 
6809 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6810 			    rx_q->dma_rx_phy, rx_q->queue_index);
6811 
6812 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6813 			     sizeof(struct dma_desc));
6814 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6815 			       rx_q->rx_tail_addr, rx_q->queue_index);
6816 
6817 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6818 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6819 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6820 				      buf_size,
6821 				      rx_q->queue_index);
6822 	} else {
6823 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6824 				      priv->dma_conf.dma_buf_sz,
6825 				      rx_q->queue_index);
6826 	}
6827 
6828 	stmmac_start_rx_dma(priv, queue);
6829 
6830 	spin_lock_irqsave(&ch->lock, flags);
6831 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6832 	spin_unlock_irqrestore(&ch->lock, flags);
6833 }
6834 
6835 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6836 {
6837 	struct stmmac_channel *ch = &priv->channel[queue];
6838 	unsigned long flags;
6839 
6840 	spin_lock_irqsave(&ch->lock, flags);
6841 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6842 	spin_unlock_irqrestore(&ch->lock, flags);
6843 
6844 	stmmac_stop_tx_dma(priv, queue);
6845 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6846 }
6847 
6848 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6849 {
6850 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6851 	struct stmmac_channel *ch = &priv->channel[queue];
6852 	unsigned long flags;
6853 	int ret;
6854 
6855 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6856 	if (ret) {
6857 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6858 		return;
6859 	}
6860 
6861 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6862 	if (ret) {
6863 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6864 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6865 		return;
6866 	}
6867 
6868 	stmmac_reset_tx_queue(priv, queue);
6869 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6870 
6871 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6872 			    tx_q->dma_tx_phy, tx_q->queue_index);
6873 
6874 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6875 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6876 
6877 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6878 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6879 			       tx_q->tx_tail_addr, tx_q->queue_index);
6880 
6881 	stmmac_start_tx_dma(priv, queue);
6882 
6883 	spin_lock_irqsave(&ch->lock, flags);
6884 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6885 	spin_unlock_irqrestore(&ch->lock, flags);
6886 }
6887 
6888 void stmmac_xdp_release(struct net_device *dev)
6889 {
6890 	struct stmmac_priv *priv = netdev_priv(dev);
6891 	u32 chan;
6892 
6893 	/* Ensure tx function is not running */
6894 	netif_tx_disable(dev);
6895 
6896 	/* Disable NAPI process */
6897 	stmmac_disable_all_queues(priv);
6898 
6899 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6900 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6901 
6902 	/* Free the IRQ lines */
6903 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6904 
6905 	/* Stop TX/RX DMA channels */
6906 	stmmac_stop_all_dma(priv);
6907 
6908 	/* Release and free the Rx/Tx resources */
6909 	free_dma_desc_resources(priv, &priv->dma_conf);
6910 
6911 	/* Disable the MAC Rx/Tx */
6912 	stmmac_mac_set(priv, priv->ioaddr, false);
6913 
6914 	/* set trans_start so we don't get spurious
6915 	 * watchdogs during reset
6916 	 */
6917 	netif_trans_update(dev);
6918 	netif_carrier_off(dev);
6919 }
6920 
6921 int stmmac_xdp_open(struct net_device *dev)
6922 {
6923 	struct stmmac_priv *priv = netdev_priv(dev);
6924 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6925 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6926 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6927 	struct stmmac_rx_queue *rx_q;
6928 	struct stmmac_tx_queue *tx_q;
6929 	u32 buf_size;
6930 	bool sph_en;
6931 	u32 chan;
6932 	int ret;
6933 
6934 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6935 	if (ret < 0) {
6936 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6937 			   __func__);
6938 		goto dma_desc_error;
6939 	}
6940 
6941 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6942 	if (ret < 0) {
6943 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6944 			   __func__);
6945 		goto init_error;
6946 	}
6947 
6948 	stmmac_reset_queues_param(priv);
6949 
6950 	/* DMA CSR Channel configuration */
6951 	for (chan = 0; chan < dma_csr_ch; chan++) {
6952 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6953 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6954 	}
6955 
6956 	/* Adjust Split header */
6957 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6958 
6959 	/* DMA RX Channel Configuration */
6960 	for (chan = 0; chan < rx_cnt; chan++) {
6961 		rx_q = &priv->dma_conf.rx_queue[chan];
6962 
6963 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6964 				    rx_q->dma_rx_phy, chan);
6965 
6966 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6967 				     (rx_q->buf_alloc_num *
6968 				      sizeof(struct dma_desc));
6969 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6970 				       rx_q->rx_tail_addr, chan);
6971 
6972 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6973 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6974 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6975 					      buf_size,
6976 					      rx_q->queue_index);
6977 		} else {
6978 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6979 					      priv->dma_conf.dma_buf_sz,
6980 					      rx_q->queue_index);
6981 		}
6982 
6983 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6984 	}
6985 
6986 	/* DMA TX Channel Configuration */
6987 	for (chan = 0; chan < tx_cnt; chan++) {
6988 		tx_q = &priv->dma_conf.tx_queue[chan];
6989 
6990 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6991 				    tx_q->dma_tx_phy, chan);
6992 
6993 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6994 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6995 				       tx_q->tx_tail_addr, chan);
6996 
6997 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6998 		tx_q->txtimer.function = stmmac_tx_timer;
6999 	}
7000 
7001 	/* Enable the MAC Rx/Tx */
7002 	stmmac_mac_set(priv, priv->ioaddr, true);
7003 
7004 	/* Start Rx & Tx DMA Channels */
7005 	stmmac_start_all_dma(priv);
7006 
7007 	ret = stmmac_request_irq(dev);
7008 	if (ret)
7009 		goto irq_error;
7010 
7011 	/* Enable NAPI process*/
7012 	stmmac_enable_all_queues(priv);
7013 	netif_carrier_on(dev);
7014 	netif_tx_start_all_queues(dev);
7015 	stmmac_enable_all_dma_irq(priv);
7016 
7017 	return 0;
7018 
7019 irq_error:
7020 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7021 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7022 
7023 	stmmac_hw_teardown(dev);
7024 init_error:
7025 	free_dma_desc_resources(priv, &priv->dma_conf);
7026 dma_desc_error:
7027 	return ret;
7028 }
7029 
7030 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7031 {
7032 	struct stmmac_priv *priv = netdev_priv(dev);
7033 	struct stmmac_rx_queue *rx_q;
7034 	struct stmmac_tx_queue *tx_q;
7035 	struct stmmac_channel *ch;
7036 
7037 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7038 	    !netif_carrier_ok(priv->dev))
7039 		return -ENETDOWN;
7040 
7041 	if (!stmmac_xdp_is_enabled(priv))
7042 		return -EINVAL;
7043 
7044 	if (queue >= priv->plat->rx_queues_to_use ||
7045 	    queue >= priv->plat->tx_queues_to_use)
7046 		return -EINVAL;
7047 
7048 	rx_q = &priv->dma_conf.rx_queue[queue];
7049 	tx_q = &priv->dma_conf.tx_queue[queue];
7050 	ch = &priv->channel[queue];
7051 
7052 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7053 		return -EINVAL;
7054 
7055 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7056 		/* EQoS does not have per-DMA channel SW interrupt,
7057 		 * so we schedule RX Napi straight-away.
7058 		 */
7059 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7060 			__napi_schedule(&ch->rxtx_napi);
7061 	}
7062 
7063 	return 0;
7064 }
7065 
7066 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7067 {
7068 	struct stmmac_priv *priv = netdev_priv(dev);
7069 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7070 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7071 	unsigned int start;
7072 	int q;
7073 
7074 	for (q = 0; q < tx_cnt; q++) {
7075 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7076 		u64 tx_packets;
7077 		u64 tx_bytes;
7078 
7079 		do {
7080 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7081 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7082 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7083 		do {
7084 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7085 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7086 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7087 
7088 		stats->tx_packets += tx_packets;
7089 		stats->tx_bytes += tx_bytes;
7090 	}
7091 
7092 	for (q = 0; q < rx_cnt; q++) {
7093 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7094 		u64 rx_packets;
7095 		u64 rx_bytes;
7096 
7097 		do {
7098 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7099 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7100 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7101 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7102 
7103 		stats->rx_packets += rx_packets;
7104 		stats->rx_bytes += rx_bytes;
7105 	}
7106 
7107 	stats->rx_dropped = priv->xstats.rx_dropped;
7108 	stats->rx_errors = priv->xstats.rx_errors;
7109 	stats->tx_dropped = priv->xstats.tx_dropped;
7110 	stats->tx_errors = priv->xstats.tx_errors;
7111 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7112 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7113 	stats->rx_length_errors = priv->xstats.rx_length;
7114 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7115 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7116 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7117 }
7118 
7119 static const struct net_device_ops stmmac_netdev_ops = {
7120 	.ndo_open = stmmac_open,
7121 	.ndo_start_xmit = stmmac_xmit,
7122 	.ndo_stop = stmmac_release,
7123 	.ndo_change_mtu = stmmac_change_mtu,
7124 	.ndo_fix_features = stmmac_fix_features,
7125 	.ndo_set_features = stmmac_set_features,
7126 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7127 	.ndo_tx_timeout = stmmac_tx_timeout,
7128 	.ndo_eth_ioctl = stmmac_ioctl,
7129 	.ndo_get_stats64 = stmmac_get_stats64,
7130 	.ndo_setup_tc = stmmac_setup_tc,
7131 	.ndo_select_queue = stmmac_select_queue,
7132 	.ndo_set_mac_address = stmmac_set_mac_address,
7133 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7134 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7135 	.ndo_bpf = stmmac_bpf,
7136 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7137 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7138 };
7139 
7140 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7141 {
7142 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7143 		return;
7144 	if (test_bit(STMMAC_DOWN, &priv->state))
7145 		return;
7146 
7147 	netdev_err(priv->dev, "Reset adapter.\n");
7148 
7149 	rtnl_lock();
7150 	netif_trans_update(priv->dev);
7151 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7152 		usleep_range(1000, 2000);
7153 
7154 	set_bit(STMMAC_DOWN, &priv->state);
7155 	dev_close(priv->dev);
7156 	dev_open(priv->dev, NULL);
7157 	clear_bit(STMMAC_DOWN, &priv->state);
7158 	clear_bit(STMMAC_RESETING, &priv->state);
7159 	rtnl_unlock();
7160 }
7161 
7162 static void stmmac_service_task(struct work_struct *work)
7163 {
7164 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7165 			service_task);
7166 
7167 	stmmac_reset_subtask(priv);
7168 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7169 }
7170 
7171 /**
7172  *  stmmac_hw_init - Init the MAC device
7173  *  @priv: driver private structure
7174  *  Description: this function is to configure the MAC device according to
7175  *  some platform parameters or the HW capability register. It prepares the
7176  *  driver to use either ring or chain modes and to setup either enhanced or
7177  *  normal descriptors.
7178  */
7179 static int stmmac_hw_init(struct stmmac_priv *priv)
7180 {
7181 	int ret;
7182 
7183 	/* dwmac-sun8i only work in chain mode */
7184 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7185 		chain_mode = 1;
7186 	priv->chain_mode = chain_mode;
7187 
7188 	/* Initialize HW Interface */
7189 	ret = stmmac_hwif_init(priv);
7190 	if (ret)
7191 		return ret;
7192 
7193 	/* Get the HW capability (new GMAC newer than 3.50a) */
7194 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7195 	if (priv->hw_cap_support) {
7196 		dev_info(priv->device, "DMA HW capability register supported\n");
7197 
7198 		/* We can override some gmac/dma configuration fields: e.g.
7199 		 * enh_desc, tx_coe (e.g. that are passed through the
7200 		 * platform) with the values from the HW capability
7201 		 * register (if supported).
7202 		 */
7203 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7204 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7205 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7206 		priv->hw->pmt = priv->plat->pmt;
7207 		if (priv->dma_cap.hash_tb_sz) {
7208 			priv->hw->multicast_filter_bins =
7209 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7210 			priv->hw->mcast_bits_log2 =
7211 					ilog2(priv->hw->multicast_filter_bins);
7212 		}
7213 
7214 		/* TXCOE doesn't work in thresh DMA mode */
7215 		if (priv->plat->force_thresh_dma_mode)
7216 			priv->plat->tx_coe = 0;
7217 		else
7218 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7219 
7220 		/* In case of GMAC4 rx_coe is from HW cap register. */
7221 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7222 
7223 		if (priv->dma_cap.rx_coe_type2)
7224 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7225 		else if (priv->dma_cap.rx_coe_type1)
7226 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7227 
7228 	} else {
7229 		dev_info(priv->device, "No HW DMA feature register supported\n");
7230 	}
7231 
7232 	if (priv->plat->rx_coe) {
7233 		priv->hw->rx_csum = priv->plat->rx_coe;
7234 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7235 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7236 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7237 	}
7238 	if (priv->plat->tx_coe)
7239 		dev_info(priv->device, "TX Checksum insertion supported\n");
7240 
7241 	if (priv->plat->pmt) {
7242 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7243 		device_set_wakeup_capable(priv->device, 1);
7244 	}
7245 
7246 	if (priv->dma_cap.tsoen)
7247 		dev_info(priv->device, "TSO supported\n");
7248 
7249 	if (priv->dma_cap.number_rx_queues &&
7250 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7251 		dev_warn(priv->device,
7252 			 "Number of Rx queues (%u) exceeds dma capability\n",
7253 			 priv->plat->rx_queues_to_use);
7254 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7255 	}
7256 	if (priv->dma_cap.number_tx_queues &&
7257 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7258 		dev_warn(priv->device,
7259 			 "Number of Tx queues (%u) exceeds dma capability\n",
7260 			 priv->plat->tx_queues_to_use);
7261 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7262 	}
7263 
7264 	if (priv->dma_cap.rx_fifo_size &&
7265 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7266 		dev_warn(priv->device,
7267 			 "Rx FIFO size (%u) exceeds dma capability\n",
7268 			 priv->plat->rx_fifo_size);
7269 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7270 	}
7271 	if (priv->dma_cap.tx_fifo_size &&
7272 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7273 		dev_warn(priv->device,
7274 			 "Tx FIFO size (%u) exceeds dma capability\n",
7275 			 priv->plat->tx_fifo_size);
7276 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7277 	}
7278 
7279 	priv->hw->vlan_fail_q_en =
7280 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7281 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7282 
7283 	/* Run HW quirks, if any */
7284 	if (priv->hwif_quirks) {
7285 		ret = priv->hwif_quirks(priv);
7286 		if (ret)
7287 			return ret;
7288 	}
7289 
7290 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7291 	 * In some case, for example on bugged HW this feature
7292 	 * has to be disable and this can be done by passing the
7293 	 * riwt_off field from the platform.
7294 	 */
7295 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7296 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7297 		priv->use_riwt = 1;
7298 		dev_info(priv->device,
7299 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7300 	}
7301 
7302 	return 0;
7303 }
7304 
7305 static void stmmac_napi_add(struct net_device *dev)
7306 {
7307 	struct stmmac_priv *priv = netdev_priv(dev);
7308 	u32 queue, maxq;
7309 
7310 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7311 
7312 	for (queue = 0; queue < maxq; queue++) {
7313 		struct stmmac_channel *ch = &priv->channel[queue];
7314 
7315 		ch->priv_data = priv;
7316 		ch->index = queue;
7317 		spin_lock_init(&ch->lock);
7318 
7319 		if (queue < priv->plat->rx_queues_to_use) {
7320 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7321 		}
7322 		if (queue < priv->plat->tx_queues_to_use) {
7323 			netif_napi_add_tx(dev, &ch->tx_napi,
7324 					  stmmac_napi_poll_tx);
7325 		}
7326 		if (queue < priv->plat->rx_queues_to_use &&
7327 		    queue < priv->plat->tx_queues_to_use) {
7328 			netif_napi_add(dev, &ch->rxtx_napi,
7329 				       stmmac_napi_poll_rxtx);
7330 		}
7331 	}
7332 }
7333 
7334 static void stmmac_napi_del(struct net_device *dev)
7335 {
7336 	struct stmmac_priv *priv = netdev_priv(dev);
7337 	u32 queue, maxq;
7338 
7339 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7340 
7341 	for (queue = 0; queue < maxq; queue++) {
7342 		struct stmmac_channel *ch = &priv->channel[queue];
7343 
7344 		if (queue < priv->plat->rx_queues_to_use)
7345 			netif_napi_del(&ch->rx_napi);
7346 		if (queue < priv->plat->tx_queues_to_use)
7347 			netif_napi_del(&ch->tx_napi);
7348 		if (queue < priv->plat->rx_queues_to_use &&
7349 		    queue < priv->plat->tx_queues_to_use) {
7350 			netif_napi_del(&ch->rxtx_napi);
7351 		}
7352 	}
7353 }
7354 
7355 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7356 {
7357 	struct stmmac_priv *priv = netdev_priv(dev);
7358 	int ret = 0, i;
7359 
7360 	if (netif_running(dev))
7361 		stmmac_release(dev);
7362 
7363 	stmmac_napi_del(dev);
7364 
7365 	priv->plat->rx_queues_to_use = rx_cnt;
7366 	priv->plat->tx_queues_to_use = tx_cnt;
7367 	if (!netif_is_rxfh_configured(dev))
7368 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7369 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7370 									rx_cnt);
7371 
7372 	stmmac_napi_add(dev);
7373 
7374 	if (netif_running(dev))
7375 		ret = stmmac_open(dev);
7376 
7377 	return ret;
7378 }
7379 
7380 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7381 {
7382 	struct stmmac_priv *priv = netdev_priv(dev);
7383 	int ret = 0;
7384 
7385 	if (netif_running(dev))
7386 		stmmac_release(dev);
7387 
7388 	priv->dma_conf.dma_rx_size = rx_size;
7389 	priv->dma_conf.dma_tx_size = tx_size;
7390 
7391 	if (netif_running(dev))
7392 		ret = stmmac_open(dev);
7393 
7394 	return ret;
7395 }
7396 
7397 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7398 {
7399 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7400 	struct dma_desc *desc_contains_ts = ctx->desc;
7401 	struct stmmac_priv *priv = ctx->priv;
7402 	struct dma_desc *ndesc = ctx->ndesc;
7403 	struct dma_desc *desc = ctx->desc;
7404 	u64 ns = 0;
7405 
7406 	if (!priv->hwts_rx_en)
7407 		return -ENODATA;
7408 
7409 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7410 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7411 		desc_contains_ts = ndesc;
7412 
7413 	/* Check if timestamp is available */
7414 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7415 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7416 		ns -= priv->plat->cdc_error_adj;
7417 		*timestamp = ns_to_ktime(ns);
7418 		return 0;
7419 	}
7420 
7421 	return -ENODATA;
7422 }
7423 
7424 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7425 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7426 };
7427 
7428 /**
7429  * stmmac_dvr_probe
7430  * @device: device pointer
7431  * @plat_dat: platform data pointer
7432  * @res: stmmac resource pointer
7433  * Description: this is the main probe function used to
7434  * call the alloc_etherdev, allocate the priv structure.
7435  * Return:
7436  * returns 0 on success, otherwise errno.
7437  */
7438 int stmmac_dvr_probe(struct device *device,
7439 		     struct plat_stmmacenet_data *plat_dat,
7440 		     struct stmmac_resources *res)
7441 {
7442 	struct net_device *ndev = NULL;
7443 	struct stmmac_priv *priv;
7444 	u32 rxq;
7445 	int i, ret = 0;
7446 
7447 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7448 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7449 	if (!ndev)
7450 		return -ENOMEM;
7451 
7452 	SET_NETDEV_DEV(ndev, device);
7453 
7454 	priv = netdev_priv(ndev);
7455 	priv->device = device;
7456 	priv->dev = ndev;
7457 
7458 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7459 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7460 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7461 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7462 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7463 	}
7464 
7465 	priv->xstats.pcpu_stats =
7466 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7467 	if (!priv->xstats.pcpu_stats)
7468 		return -ENOMEM;
7469 
7470 	stmmac_set_ethtool_ops(ndev);
7471 	priv->pause_time = pause;
7472 	priv->plat = plat_dat;
7473 	priv->ioaddr = res->addr;
7474 	priv->dev->base_addr = (unsigned long)res->addr;
7475 	priv->plat->dma_cfg->multi_msi_en =
7476 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7477 
7478 	priv->dev->irq = res->irq;
7479 	priv->wol_irq = res->wol_irq;
7480 	priv->lpi_irq = res->lpi_irq;
7481 	priv->sfty_irq = res->sfty_irq;
7482 	priv->sfty_ce_irq = res->sfty_ce_irq;
7483 	priv->sfty_ue_irq = res->sfty_ue_irq;
7484 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7485 		priv->rx_irq[i] = res->rx_irq[i];
7486 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7487 		priv->tx_irq[i] = res->tx_irq[i];
7488 
7489 	if (!is_zero_ether_addr(res->mac))
7490 		eth_hw_addr_set(priv->dev, res->mac);
7491 
7492 	dev_set_drvdata(device, priv->dev);
7493 
7494 	/* Verify driver arguments */
7495 	stmmac_verify_args();
7496 
7497 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7498 	if (!priv->af_xdp_zc_qps)
7499 		return -ENOMEM;
7500 
7501 	/* Allocate workqueue */
7502 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7503 	if (!priv->wq) {
7504 		dev_err(priv->device, "failed to create workqueue\n");
7505 		ret = -ENOMEM;
7506 		goto error_wq_init;
7507 	}
7508 
7509 	INIT_WORK(&priv->service_task, stmmac_service_task);
7510 
7511 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7512 
7513 	/* Override with kernel parameters if supplied XXX CRS XXX
7514 	 * this needs to have multiple instances
7515 	 */
7516 	if ((phyaddr >= 0) && (phyaddr <= 31))
7517 		priv->plat->phy_addr = phyaddr;
7518 
7519 	if (priv->plat->stmmac_rst) {
7520 		ret = reset_control_assert(priv->plat->stmmac_rst);
7521 		reset_control_deassert(priv->plat->stmmac_rst);
7522 		/* Some reset controllers have only reset callback instead of
7523 		 * assert + deassert callbacks pair.
7524 		 */
7525 		if (ret == -ENOTSUPP)
7526 			reset_control_reset(priv->plat->stmmac_rst);
7527 	}
7528 
7529 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7530 	if (ret == -ENOTSUPP)
7531 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7532 			ERR_PTR(ret));
7533 
7534 	/* Wait a bit for the reset to take effect */
7535 	udelay(10);
7536 
7537 	/* Init MAC and get the capabilities */
7538 	ret = stmmac_hw_init(priv);
7539 	if (ret)
7540 		goto error_hw_init;
7541 
7542 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7543 	 */
7544 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7545 		priv->plat->dma_cfg->dche = false;
7546 
7547 	stmmac_check_ether_addr(priv);
7548 
7549 	ndev->netdev_ops = &stmmac_netdev_ops;
7550 
7551 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7552 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7553 
7554 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7555 			    NETIF_F_RXCSUM;
7556 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7557 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7558 
7559 	ret = stmmac_tc_init(priv, priv);
7560 	if (!ret) {
7561 		ndev->hw_features |= NETIF_F_HW_TC;
7562 	}
7563 
7564 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7565 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7566 		if (priv->plat->has_gmac4)
7567 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7568 		priv->tso = true;
7569 		dev_info(priv->device, "TSO feature enabled\n");
7570 	}
7571 
7572 	if (priv->dma_cap.sphen &&
7573 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7574 		ndev->hw_features |= NETIF_F_GRO;
7575 		priv->sph_cap = true;
7576 		priv->sph = priv->sph_cap;
7577 		dev_info(priv->device, "SPH feature enabled\n");
7578 	}
7579 
7580 	/* Ideally our host DMA address width is the same as for the
7581 	 * device. However, it may differ and then we have to use our
7582 	 * host DMA width for allocation and the device DMA width for
7583 	 * register handling.
7584 	 */
7585 	if (priv->plat->host_dma_width)
7586 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7587 	else
7588 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7589 
7590 	if (priv->dma_cap.host_dma_width) {
7591 		ret = dma_set_mask_and_coherent(device,
7592 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7593 		if (!ret) {
7594 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7595 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7596 
7597 			/*
7598 			 * If more than 32 bits can be addressed, make sure to
7599 			 * enable enhanced addressing mode.
7600 			 */
7601 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7602 				priv->plat->dma_cfg->eame = true;
7603 		} else {
7604 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7605 			if (ret) {
7606 				dev_err(priv->device, "Failed to set DMA Mask\n");
7607 				goto error_hw_init;
7608 			}
7609 
7610 			priv->dma_cap.host_dma_width = 32;
7611 		}
7612 	}
7613 
7614 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7615 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7616 #ifdef STMMAC_VLAN_TAG_USED
7617 	/* Both mac100 and gmac support receive VLAN tag detection */
7618 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7619 	if (priv->plat->has_gmac4) {
7620 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7621 		priv->hw->hw_vlan_en = true;
7622 	}
7623 	if (priv->dma_cap.vlhash) {
7624 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7625 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7626 	}
7627 	if (priv->dma_cap.vlins) {
7628 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7629 		if (priv->dma_cap.dvlan)
7630 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7631 	}
7632 #endif
7633 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7634 
7635 	priv->xstats.threshold = tc;
7636 
7637 	/* Initialize RSS */
7638 	rxq = priv->plat->rx_queues_to_use;
7639 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7640 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7641 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7642 
7643 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7644 		ndev->features |= NETIF_F_RXHASH;
7645 
7646 	ndev->vlan_features |= ndev->features;
7647 
7648 	/* MTU range: 46 - hw-specific max */
7649 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7650 	if (priv->plat->has_xgmac)
7651 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7652 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7653 		ndev->max_mtu = JUMBO_LEN;
7654 	else
7655 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7656 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7657 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7658 	 */
7659 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7660 	    (priv->plat->maxmtu >= ndev->min_mtu))
7661 		ndev->max_mtu = priv->plat->maxmtu;
7662 	else if (priv->plat->maxmtu < ndev->min_mtu)
7663 		dev_warn(priv->device,
7664 			 "%s: warning: maxmtu having invalid value (%d)\n",
7665 			 __func__, priv->plat->maxmtu);
7666 
7667 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7668 
7669 	/* Setup channels NAPI */
7670 	stmmac_napi_add(ndev);
7671 
7672 	mutex_init(&priv->lock);
7673 
7674 	stmmac_fpe_init(priv);
7675 
7676 	/* If a specific clk_csr value is passed from the platform
7677 	 * this means that the CSR Clock Range selection cannot be
7678 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7679 	 * set the MDC clock dynamically according to the csr actual
7680 	 * clock input.
7681 	 */
7682 	if (priv->plat->clk_csr >= 0)
7683 		priv->clk_csr = priv->plat->clk_csr;
7684 	else
7685 		stmmac_clk_csr_set(priv);
7686 
7687 	stmmac_check_pcs_mode(priv);
7688 
7689 	pm_runtime_get_noresume(device);
7690 	pm_runtime_set_active(device);
7691 	if (!pm_runtime_enabled(device))
7692 		pm_runtime_enable(device);
7693 
7694 	ret = stmmac_mdio_register(ndev);
7695 	if (ret < 0) {
7696 		dev_err_probe(priv->device, ret,
7697 			      "MDIO bus (id: %d) registration failed\n",
7698 			      priv->plat->bus_id);
7699 		goto error_mdio_register;
7700 	}
7701 
7702 	if (priv->plat->speed_mode_2500)
7703 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7704 
7705 	ret = stmmac_pcs_setup(ndev);
7706 	if (ret)
7707 		goto error_pcs_setup;
7708 
7709 	ret = stmmac_phy_setup(priv);
7710 	if (ret) {
7711 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7712 		goto error_phy_setup;
7713 	}
7714 
7715 	ret = register_netdev(ndev);
7716 	if (ret) {
7717 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7718 			__func__, ret);
7719 		goto error_netdev_register;
7720 	}
7721 
7722 #ifdef CONFIG_DEBUG_FS
7723 	stmmac_init_fs(ndev);
7724 #endif
7725 
7726 	if (priv->plat->dump_debug_regs)
7727 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7728 
7729 	/* Let pm_runtime_put() disable the clocks.
7730 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7731 	 */
7732 	pm_runtime_put(device);
7733 
7734 	return ret;
7735 
7736 error_netdev_register:
7737 	phylink_destroy(priv->phylink);
7738 error_phy_setup:
7739 	stmmac_pcs_clean(ndev);
7740 error_pcs_setup:
7741 	stmmac_mdio_unregister(ndev);
7742 error_mdio_register:
7743 	stmmac_napi_del(ndev);
7744 error_hw_init:
7745 	destroy_workqueue(priv->wq);
7746 error_wq_init:
7747 	bitmap_free(priv->af_xdp_zc_qps);
7748 
7749 	return ret;
7750 }
7751 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7752 
7753 /**
7754  * stmmac_dvr_remove
7755  * @dev: device pointer
7756  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7757  * changes the link status, releases the DMA descriptor rings.
7758  */
7759 void stmmac_dvr_remove(struct device *dev)
7760 {
7761 	struct net_device *ndev = dev_get_drvdata(dev);
7762 	struct stmmac_priv *priv = netdev_priv(ndev);
7763 
7764 	netdev_info(priv->dev, "%s: removing driver", __func__);
7765 
7766 	pm_runtime_get_sync(dev);
7767 
7768 	unregister_netdev(ndev);
7769 
7770 #ifdef CONFIG_DEBUG_FS
7771 	stmmac_exit_fs(ndev);
7772 #endif
7773 	phylink_destroy(priv->phylink);
7774 	if (priv->plat->stmmac_rst)
7775 		reset_control_assert(priv->plat->stmmac_rst);
7776 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7777 
7778 	stmmac_pcs_clean(ndev);
7779 	stmmac_mdio_unregister(ndev);
7780 
7781 	destroy_workqueue(priv->wq);
7782 	mutex_destroy(&priv->lock);
7783 	bitmap_free(priv->af_xdp_zc_qps);
7784 
7785 	pm_runtime_disable(dev);
7786 	pm_runtime_put_noidle(dev);
7787 }
7788 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7789 
7790 /**
7791  * stmmac_suspend - suspend callback
7792  * @dev: device pointer
7793  * Description: this is the function to suspend the device and it is called
7794  * by the platform driver to stop the network queue, release the resources,
7795  * program the PMT register (for WoL), clean and release driver resources.
7796  */
7797 int stmmac_suspend(struct device *dev)
7798 {
7799 	struct net_device *ndev = dev_get_drvdata(dev);
7800 	struct stmmac_priv *priv = netdev_priv(ndev);
7801 	u32 chan;
7802 
7803 	if (!ndev || !netif_running(ndev))
7804 		return 0;
7805 
7806 	mutex_lock(&priv->lock);
7807 
7808 	netif_device_detach(ndev);
7809 
7810 	stmmac_disable_all_queues(priv);
7811 
7812 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7813 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7814 
7815 	if (priv->eee_sw_timer_en) {
7816 		priv->tx_path_in_lpi_mode = false;
7817 		del_timer_sync(&priv->eee_ctrl_timer);
7818 	}
7819 
7820 	/* Stop TX/RX DMA */
7821 	stmmac_stop_all_dma(priv);
7822 
7823 	if (priv->plat->serdes_powerdown)
7824 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7825 
7826 	/* Enable Power down mode by programming the PMT regs */
7827 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7828 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7829 		priv->irq_wake = 1;
7830 	} else {
7831 		stmmac_mac_set(priv, priv->ioaddr, false);
7832 		pinctrl_pm_select_sleep_state(priv->device);
7833 	}
7834 
7835 	mutex_unlock(&priv->lock);
7836 
7837 	rtnl_lock();
7838 	if (device_may_wakeup(priv->device) && !priv->plat->pmt)
7839 		phylink_speed_down(priv->phylink, false);
7840 
7841 	phylink_suspend(priv->phylink,
7842 			device_may_wakeup(priv->device) && priv->plat->pmt);
7843 	rtnl_unlock();
7844 
7845 	if (stmmac_fpe_supported(priv))
7846 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7847 
7848 	return 0;
7849 }
7850 EXPORT_SYMBOL_GPL(stmmac_suspend);
7851 
7852 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7853 {
7854 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7855 
7856 	rx_q->cur_rx = 0;
7857 	rx_q->dirty_rx = 0;
7858 }
7859 
7860 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7861 {
7862 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7863 
7864 	tx_q->cur_tx = 0;
7865 	tx_q->dirty_tx = 0;
7866 	tx_q->mss = 0;
7867 
7868 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7869 }
7870 
7871 /**
7872  * stmmac_reset_queues_param - reset queue parameters
7873  * @priv: device pointer
7874  */
7875 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7876 {
7877 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7878 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7879 	u32 queue;
7880 
7881 	for (queue = 0; queue < rx_cnt; queue++)
7882 		stmmac_reset_rx_queue(priv, queue);
7883 
7884 	for (queue = 0; queue < tx_cnt; queue++)
7885 		stmmac_reset_tx_queue(priv, queue);
7886 }
7887 
7888 /**
7889  * stmmac_resume - resume callback
7890  * @dev: device pointer
7891  * Description: when resume this function is invoked to setup the DMA and CORE
7892  * in a usable state.
7893  */
7894 int stmmac_resume(struct device *dev)
7895 {
7896 	struct net_device *ndev = dev_get_drvdata(dev);
7897 	struct stmmac_priv *priv = netdev_priv(ndev);
7898 	int ret;
7899 
7900 	if (!netif_running(ndev))
7901 		return 0;
7902 
7903 	/* Power Down bit, into the PM register, is cleared
7904 	 * automatically as soon as a magic packet or a Wake-up frame
7905 	 * is received. Anyway, it's better to manually clear
7906 	 * this bit because it can generate problems while resuming
7907 	 * from another devices (e.g. serial console).
7908 	 */
7909 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7910 		mutex_lock(&priv->lock);
7911 		stmmac_pmt(priv, priv->hw, 0);
7912 		mutex_unlock(&priv->lock);
7913 		priv->irq_wake = 0;
7914 	} else {
7915 		pinctrl_pm_select_default_state(priv->device);
7916 		/* reset the phy so that it's ready */
7917 		if (priv->mii)
7918 			stmmac_mdio_reset(priv->mii);
7919 	}
7920 
7921 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7922 	    priv->plat->serdes_powerup) {
7923 		ret = priv->plat->serdes_powerup(ndev,
7924 						 priv->plat->bsp_priv);
7925 
7926 		if (ret < 0)
7927 			return ret;
7928 	}
7929 
7930 	rtnl_lock();
7931 	phylink_resume(priv->phylink);
7932 	if (device_may_wakeup(priv->device) && !priv->plat->pmt)
7933 		phylink_speed_up(priv->phylink);
7934 	rtnl_unlock();
7935 
7936 	rtnl_lock();
7937 	mutex_lock(&priv->lock);
7938 
7939 	stmmac_reset_queues_param(priv);
7940 
7941 	stmmac_free_tx_skbufs(priv);
7942 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7943 
7944 	stmmac_hw_setup(ndev, false);
7945 	stmmac_init_coalesce(priv);
7946 	stmmac_set_rx_mode(ndev);
7947 
7948 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7949 
7950 	stmmac_enable_all_queues(priv);
7951 	stmmac_enable_all_dma_irq(priv);
7952 
7953 	mutex_unlock(&priv->lock);
7954 	rtnl_unlock();
7955 
7956 	netif_device_attach(ndev);
7957 
7958 	return 0;
7959 }
7960 EXPORT_SYMBOL_GPL(stmmac_resume);
7961 
7962 #ifndef MODULE
7963 static int __init stmmac_cmdline_opt(char *str)
7964 {
7965 	char *opt;
7966 
7967 	if (!str || !*str)
7968 		return 1;
7969 	while ((opt = strsep(&str, ",")) != NULL) {
7970 		if (!strncmp(opt, "debug:", 6)) {
7971 			if (kstrtoint(opt + 6, 0, &debug))
7972 				goto err;
7973 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7974 			if (kstrtoint(opt + 8, 0, &phyaddr))
7975 				goto err;
7976 		} else if (!strncmp(opt, "tc:", 3)) {
7977 			if (kstrtoint(opt + 3, 0, &tc))
7978 				goto err;
7979 		} else if (!strncmp(opt, "watchdog:", 9)) {
7980 			if (kstrtoint(opt + 9, 0, &watchdog))
7981 				goto err;
7982 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7983 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7984 				goto err;
7985 		} else if (!strncmp(opt, "pause:", 6)) {
7986 			if (kstrtoint(opt + 6, 0, &pause))
7987 				goto err;
7988 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7989 			if (kstrtoint(opt + 10, 0, &eee_timer))
7990 				goto err;
7991 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7992 			if (kstrtoint(opt + 11, 0, &chain_mode))
7993 				goto err;
7994 		}
7995 	}
7996 	return 1;
7997 
7998 err:
7999 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8000 	return 1;
8001 }
8002 
8003 __setup("stmmaceth=", stmmac_cmdline_opt);
8004 #endif /* MODULE */
8005 
8006 static int __init stmmac_init(void)
8007 {
8008 #ifdef CONFIG_DEBUG_FS
8009 	/* Create debugfs main directory if it doesn't exist yet */
8010 	if (!stmmac_fs_dir)
8011 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8012 	register_netdevice_notifier(&stmmac_notifier);
8013 #endif
8014 
8015 	return 0;
8016 }
8017 
8018 static void __exit stmmac_exit(void)
8019 {
8020 #ifdef CONFIG_DEBUG_FS
8021 	unregister_netdevice_notifier(&stmmac_notifier);
8022 	debugfs_remove_recursive(stmmac_fs_dir);
8023 #endif
8024 }
8025 
8026 module_init(stmmac_init)
8027 module_exit(stmmac_exit)
8028 
8029 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8030 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8031 MODULE_LICENSE("GPL");
8032