xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 0d2ab5f922e75d10162e7199826e14df9cfae5cc)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/pm_wakeirq.h>
33 #include <linux/prefetch.h>
34 #include <linux/pinctrl/consumer.h>
35 #ifdef CONFIG_DEBUG_FS
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38 #endif /* CONFIG_DEBUG_FS */
39 #include <linux/net_tstamp.h>
40 #include <linux/phylink.h>
41 #include <linux/udp.h>
42 #include <linux/bpf_trace.h>
43 #include <net/page_pool/helpers.h>
44 #include <net/pkt_cls.h>
45 #include <net/xdp_sock_drv.h>
46 #include "stmmac_ptp.h"
47 #include "stmmac_fpe.h"
48 #include "stmmac.h"
49 #include "stmmac_xdp.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 #include "dwxgmac2.h"
54 #include "hwif.h"
55 
56 /* As long as the interface is active, we keep the timestamping counter enabled
57  * with fine resolution and binary rollover. This avoids non-monotonic behavior
58  * (clock jumps) when changing timestamping settings at runtime.
59  */
60 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
61 				 PTP_TCR_TSCTRLSSR)
62 
63 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
64 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
65 
66 /* Module parameters */
67 #define TX_TIMEO	5000
68 static int watchdog = TX_TIMEO;
69 module_param(watchdog, int, 0644);
70 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
71 
72 static int debug = -1;
73 module_param(debug, int, 0644);
74 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
75 
76 static int phyaddr = -1;
77 module_param(phyaddr, int, 0444);
78 MODULE_PARM_DESC(phyaddr, "Physical device address");
79 
80 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
81 
82 /* Limit to make sure XDP TX and slow path can coexist */
83 #define STMMAC_XSK_TX_BUDGET_MAX	256
84 #define STMMAC_TX_XSK_AVAIL		16
85 #define STMMAC_RX_FILL_BATCH		16
86 
87 #define STMMAC_XDP_PASS		0
88 #define STMMAC_XDP_CONSUMED	BIT(0)
89 #define STMMAC_XDP_TX		BIT(1)
90 #define STMMAC_XDP_REDIRECT	BIT(2)
91 
92 static int flow_ctrl = 0xdead;
93 module_param(flow_ctrl, int, 0644);
94 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
95 
96 static int pause = PAUSE_TIME;
97 module_param(pause, int, 0644);
98 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
99 
100 #define TC_DEFAULT 64
101 static int tc = TC_DEFAULT;
102 module_param(tc, int, 0644);
103 MODULE_PARM_DESC(tc, "DMA threshold control value");
104 
105 /* This is unused */
106 #define	DEFAULT_BUFSIZE	1536
107 static int buf_sz = DEFAULT_BUFSIZE;
108 module_param(buf_sz, int, 0644);
109 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, uint, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but the user can force the use of the chain mode instead of the ring.
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	struct plat_stmmacenet_data *plat_dat = priv->plat;
153 	int ret;
154 
155 	if (enabled) {
156 		ret = clk_prepare_enable(plat_dat->stmmac_clk);
157 		if (ret)
158 			return ret;
159 		ret = clk_prepare_enable(plat_dat->pclk);
160 		if (ret) {
161 			clk_disable_unprepare(plat_dat->stmmac_clk);
162 			return ret;
163 		}
164 		if (plat_dat->clks_config) {
165 			ret = plat_dat->clks_config(plat_dat->bsp_priv, enabled);
166 			if (ret) {
167 				clk_disable_unprepare(plat_dat->stmmac_clk);
168 				clk_disable_unprepare(plat_dat->pclk);
169 				return ret;
170 			}
171 		}
172 	} else {
173 		clk_disable_unprepare(plat_dat->stmmac_clk);
174 		clk_disable_unprepare(plat_dat->pclk);
175 		if (plat_dat->clks_config)
176 			plat_dat->clks_config(plat_dat->bsp_priv, enabled);
177 	}
178 
179 	return 0;
180 }
181 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
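
/* Typical call pattern (illustrative sketch only, not tied to a specific
 * caller in this file): bring the bus clocks up before touching MAC/DMA
 * registers and drop them again once the hardware is idle, e.g. from a
 * PM or runtime-PM path:
 *
 *	ret = stmmac_bus_clks_config(priv, true);
 *	if (ret)
 *		return ret;
 *	... register accesses ...
 *	stmmac_bus_clks_config(priv, false);
 */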
182 
183 /**
184  * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
185  * @bsp_priv: BSP private data structure (unused)
186  * @clk_tx_i: the transmit clock
187  * @interface: the selected interface mode
188  * @speed: the speed that the MAC will be operating at
189  *
190  * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
191  * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
192  * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
193  * the plat_data->set_clk_tx_rate method directly, call it via their own
194  * implementation, or implement their own method should they have more
195  * complex requirements. It is intended to be used only via this method.
196  *
197  * plat_data->clk_tx_i must be filled in.
198  */
199 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
200 			   phy_interface_t interface, int speed)
201 {
202 	long rate = rgmii_clock(speed);
203 
204 	/* Silently ignore unsupported speeds as rgmii_clock() only
205 	 * supports 10, 100 and 1000Mbps. We do not want to spit
206 	 * errors for 2500 and higher speeds here.
207 	 */
208 	if (rate < 0)
209 		return 0;
210 
211 	return clk_set_rate(clk_tx_i, rate);
212 }
213 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
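
/* A minimal hookup sketch (hypothetical platform glue, not taken from any
 * in-tree driver) of how a platform probe might wire this helper up,
 * assuming its transmit clock is named "tx" in that platform's binding:
 *
 *	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
 *	if (IS_ERR(plat_dat->clk_tx_i))
 *		return PTR_ERR(plat_dat->clk_tx_i);
 *	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
 */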
214 
215 /**
216  * stmmac_verify_args - verify the driver parameters.
217  * Description: it checks the driver parameters and sets a default in case of
218  * errors.
219  */
220 static void stmmac_verify_args(void)
221 {
222 	if (unlikely(watchdog < 0))
223 		watchdog = TX_TIMEO;
224 	if (unlikely((pause < 0) || (pause > 0xffff)))
225 		pause = PAUSE_TIME;
226 
227 	if (flow_ctrl != 0xdead)
228 		pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
229 }
230 
231 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
232 {
233 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
234 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
235 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
236 	u32 queue;
237 
238 	for (queue = 0; queue < maxq; queue++) {
239 		struct stmmac_channel *ch = &priv->channel[queue];
240 
241 		if (stmmac_xdp_is_enabled(priv) &&
242 		    test_bit(queue, priv->af_xdp_zc_qps)) {
243 			napi_disable(&ch->rxtx_napi);
244 			continue;
245 		}
246 
247 		if (queue < rx_queues_cnt)
248 			napi_disable(&ch->rx_napi);
249 		if (queue < tx_queues_cnt)
250 			napi_disable(&ch->tx_napi);
251 	}
252 }
253 
254 /**
255  * stmmac_disable_all_queues - Disable all queues
256  * @priv: driver private structure
257  */
258 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
259 {
260 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
261 	struct stmmac_rx_queue *rx_q;
262 	u32 queue;
263 
264 	/* synchronize_rcu() needed for pending XDP buffers to drain */
265 	for (queue = 0; queue < rx_queues_cnt; queue++) {
266 		rx_q = &priv->dma_conf.rx_queue[queue];
267 		if (rx_q->xsk_pool) {
268 			synchronize_rcu();
269 			break;
270 		}
271 	}
272 
273 	__stmmac_disable_all_queues(priv);
274 }
275 
276 /**
277  * stmmac_enable_all_queues - Enable all queues
278  * @priv: driver private structure
279  */
280 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
281 {
282 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
283 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
284 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
285 	u32 queue;
286 
287 	for (queue = 0; queue < maxq; queue++) {
288 		struct stmmac_channel *ch = &priv->channel[queue];
289 
290 		if (stmmac_xdp_is_enabled(priv) &&
291 		    test_bit(queue, priv->af_xdp_zc_qps)) {
292 			napi_enable(&ch->rxtx_napi);
293 			continue;
294 		}
295 
296 		if (queue < rx_queues_cnt)
297 			napi_enable(&ch->rx_napi);
298 		if (queue < tx_queues_cnt)
299 			napi_enable(&ch->tx_napi);
300 	}
301 }
302 
303 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
304 {
305 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
306 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
307 		queue_work(priv->wq, &priv->service_task);
308 }
309 
310 static void stmmac_global_err(struct stmmac_priv *priv)
311 {
312 	netif_carrier_off(priv->dev);
313 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
314 	stmmac_service_event_schedule(priv);
315 }
316 
317 /**
318  * stmmac_clk_csr_set - dynamically set the MDC clock
319  * @priv: driver private structure
320  * Description: this is to dynamically set the MDC clock according to the csr
321  * clock input.
322  * Note:
323  *	If a specific clk_csr value is passed from the platform
324  *	this means that the CSR Clock Range selection cannot be
325  *	changed at run-time and it is fixed (as reported in the driver
326  *	documentation). Otherwise the driver will try to set the MDC
327  *	clock dynamically according to the actual clock input.
328  */
329 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
330 {
331 	unsigned long clk_rate;
332 
333 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
334 
335 	/* The platform-provided default clk_csr is assumed valid for all
336 	 * cases except the ones handled below.
337 	 * For values higher than the IEEE 802.3 specified frequency we
338 	 * cannot estimate the proper divider because the frequency of
339 	 * clk_csr_i is not known, so we do not change the default
340 	 * divider.
341 	 */
342 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
343 		if (clk_rate < CSR_F_35M)
344 			priv->clk_csr = STMMAC_CSR_20_35M;
345 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
346 			priv->clk_csr = STMMAC_CSR_35_60M;
347 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
348 			priv->clk_csr = STMMAC_CSR_60_100M;
349 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
350 			priv->clk_csr = STMMAC_CSR_100_150M;
351 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
352 			priv->clk_csr = STMMAC_CSR_150_250M;
353 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
354 			priv->clk_csr = STMMAC_CSR_250_300M;
355 		else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
356 			priv->clk_csr = STMMAC_CSR_300_500M;
357 		else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
358 			priv->clk_csr = STMMAC_CSR_500_800M;
359 	}
360 
361 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
362 		if (clk_rate > 160000000)
363 			priv->clk_csr = 0x03;
364 		else if (clk_rate > 80000000)
365 			priv->clk_csr = 0x02;
366 		else if (clk_rate > 40000000)
367 			priv->clk_csr = 0x01;
368 		else
369 			priv->clk_csr = 0;
370 	}
371 
372 	if (priv->plat->has_xgmac) {
373 		if (clk_rate > 400000000)
374 			priv->clk_csr = 0x5;
375 		else if (clk_rate > 350000000)
376 			priv->clk_csr = 0x4;
377 		else if (clk_rate > 300000000)
378 			priv->clk_csr = 0x3;
379 		else if (clk_rate > 250000000)
380 			priv->clk_csr = 0x2;
381 		else if (clk_rate > 150000000)
382 			priv->clk_csr = 0x1;
383 		else
384 			priv->clk_csr = 0x0;
385 	}
386 }
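
/* Illustrative example only: with no platform-fixed clk_csr, and on a core
 * without the sun8i or XGMAC special cases, a 75MHz clk_csr_i falls in the
 * 60-100MHz band, so the range checks above select STMMAC_CSR_60_100M.
 */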
387 
388 static void print_pkt(unsigned char *buf, int len)
389 {
390 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
391 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
392 }
393 
394 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
395 {
396 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
397 	u32 avail;
398 
399 	if (tx_q->dirty_tx > tx_q->cur_tx)
400 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
401 	else
402 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
403 
404 	return avail;
405 }
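
/* Worked example of the wraparound arithmetic above (illustrative numbers
 * only): with dma_tx_size = 512, cur_tx = 510 and dirty_tx = 5 the ring has
 * wrapped, so avail = 512 - 510 + 5 - 1 = 6. One slot is always kept unused
 * so that cur_tx == dirty_tx unambiguously means "ring empty".
 */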
406 
407 /**
408  * stmmac_rx_dirty - Get RX queue dirty
409  * @priv: driver private structure
410  * @queue: RX queue index
411  */
412 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
413 {
414 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
415 	u32 dirty;
416 
417 	if (rx_q->dirty_rx <= rx_q->cur_rx)
418 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
419 	else
420 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
421 
422 	return dirty;
423 }
424 
425 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
426 {
427 	u32 tx_cnt = priv->plat->tx_queues_to_use;
428 	u32 queue;
429 
430 	/* check if all TX queues have the work finished */
431 	for (queue = 0; queue < tx_cnt; queue++) {
432 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
433 
434 		if (tx_q->dirty_tx != tx_q->cur_tx)
435 			return true; /* still unfinished work */
436 	}
437 
438 	return false;
439 }
440 
441 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
442 {
443 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
444 }
445 
446 /**
447  * stmmac_try_to_start_sw_lpi - check and enter LPI mode
448  * @priv: driver private structure
449  * Description: this function checks whether the TX path is idle and, if so,
450  * enters LPI mode when EEE is enabled.
451  */
452 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
453 {
454 	if (stmmac_eee_tx_busy(priv)) {
455 		stmmac_restart_sw_lpi_timer(priv);
456 		return;
457 	}
458 
459 	/* Check and enter LPI mode */
460 	if (!priv->tx_path_in_lpi_mode)
461 		stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
462 				    priv->tx_lpi_clk_stop, 0);
463 }
464 
465 /**
466  * stmmac_stop_sw_lpi - stop transmitting LPI
467  * @priv: driver private structure
468  * Description: When using software-controlled LPI, stop transmitting LPI state.
469  */
470 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
471 {
472 	timer_delete_sync(&priv->eee_ctrl_timer);
473 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
474 	priv->tx_path_in_lpi_mode = false;
475 }
476 
477 /**
478  * stmmac_eee_ctrl_timer - EEE TX SW timer.
479  * @t:  timer_list struct containing private info
480  * Description:
481  *  if there is no data transfer and if we are not in LPI state,
482  *  then the MAC transmitter can be moved to the LPI state.
483  */
484 static void stmmac_eee_ctrl_timer(struct timer_list *t)
485 {
486 	struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
487 
488 	stmmac_try_to_start_sw_lpi(priv);
489 }
490 
491 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
492  * @priv: driver private structure
493  * @p : descriptor pointer
494  * @skb : the socket buffer
495  * Description :
496  * This function reads the timestamp from the descriptor, performs some
497  * sanity checks and then passes it to the stack.
498  */
499 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
500 				   struct dma_desc *p, struct sk_buff *skb)
501 {
502 	struct skb_shared_hwtstamps shhwtstamp;
503 	bool found = false;
504 	u64 ns = 0;
505 
506 	if (!priv->hwts_tx_en)
507 		return;
508 
509 	/* exit if skb doesn't support hw tstamp */
510 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
511 		return;
512 
513 	/* check tx tstamp status */
514 	if (stmmac_get_tx_timestamp_status(priv, p)) {
515 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
516 		found = true;
517 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
518 		found = true;
519 	}
520 
521 	if (found) {
522 		ns -= priv->plat->cdc_error_adj;
523 
524 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
525 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
526 
527 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
528 		/* pass tstamp to stack */
529 		skb_tstamp_tx(skb, &shhwtstamp);
530 	}
531 }
532 
533 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
534  * @priv: driver private structure
535  * @p : descriptor pointer
536  * @np : next descriptor pointer
537  * @skb : the socket buffer
538  * Description :
539  * This function reads the received packet's timestamp from the descriptor
540  * and passes it to the stack. It also performs some sanity checks.
541  */
542 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
543 				   struct dma_desc *np, struct sk_buff *skb)
544 {
545 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
546 	struct dma_desc *desc = p;
547 	u64 ns = 0;
548 
549 	if (!priv->hwts_rx_en)
550 		return;
551 	/* For GMAC4, the valid timestamp is from CTX next desc. */
552 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
553 		desc = np;
554 
555 	/* Check if timestamp is available */
556 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
557 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
558 
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
562 		shhwtstamp = skb_hwtstamps(skb);
563 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
564 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
565 	} else  {
566 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
567 	}
568 }
569 
570 /**
571  *  stmmac_hwtstamp_set - control hardware timestamping.
572  *  @dev: device pointer.
573  *  @config: the timestamping configuration.
574  *  @extack: netlink extended ack structure for error reporting.
575  *  Description:
576  *  This function configures the MAC to enable/disable both outgoing (TX)
577  *  and incoming (RX) packet timestamping based on user input.
578  *  Return Value:
579  *  0 on success and an appropriate -ve integer on failure.
580  */
581 static int stmmac_hwtstamp_set(struct net_device *dev,
582 			       struct kernel_hwtstamp_config *config,
583 			       struct netlink_ext_ack *extack)
584 {
585 	struct stmmac_priv *priv = netdev_priv(dev);
586 	u32 ptp_v2 = 0;
587 	u32 tstamp_all = 0;
588 	u32 ptp_over_ipv4_udp = 0;
589 	u32 ptp_over_ipv6_udp = 0;
590 	u32 ptp_over_ethernet = 0;
591 	u32 snap_type_sel = 0;
592 	u32 ts_master_en = 0;
593 	u32 ts_event_en = 0;
594 
595 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
596 		NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
597 		priv->hwts_tx_en = 0;
598 		priv->hwts_rx_en = 0;
599 
600 		return -EOPNOTSUPP;
601 	}
602 
603 	if (!netif_running(dev)) {
604 		NL_SET_ERR_MSG_MOD(extack,
605 				   "Cannot change timestamping configuration while down");
606 		return -ENODEV;
607 	}
608 
609 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
610 		   __func__, config->flags, config->tx_type, config->rx_filter);
611 
612 	if (config->tx_type != HWTSTAMP_TX_OFF &&
613 	    config->tx_type != HWTSTAMP_TX_ON)
614 		return -ERANGE;
615 
616 	if (priv->adv_ts) {
617 		switch (config->rx_filter) {
618 		case HWTSTAMP_FILTER_NONE:
619 			/* do not timestamp any incoming packet */
620 			config->rx_filter = HWTSTAMP_FILTER_NONE;
621 			break;
622 
623 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
624 			/* PTP v1, UDP, any kind of event packet */
625 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
626 			/* 'xmac' hardware can support Sync, Pdelay_Req and
627 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
628 			 * This leaves Delay_Req timestamps out.
629 			 * Enable all events *and* general purpose message
630 			 * timestamping
631 			 */
632 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
633 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
634 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
635 			break;
636 
637 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
638 			/* PTP v1, UDP, Sync packet */
639 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
640 			/* take time stamp for SYNC messages only */
641 			ts_event_en = PTP_TCR_TSEVNTENA;
642 
643 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
644 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
645 			break;
646 
647 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
648 			/* PTP v1, UDP, Delay_req packet */
649 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
650 			/* take time stamp for Delay_Req messages only */
651 			ts_master_en = PTP_TCR_TSMSTRENA;
652 			ts_event_en = PTP_TCR_TSEVNTENA;
653 
654 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
655 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
656 			break;
657 
658 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
659 			/* PTP v2, UDP, any kind of event packet */
660 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
661 			ptp_v2 = PTP_TCR_TSVER2ENA;
662 			/* take time stamp for all event messages */
663 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
664 
665 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
666 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
667 			break;
668 
669 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
670 			/* PTP v2, UDP, Sync packet */
671 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
672 			ptp_v2 = PTP_TCR_TSVER2ENA;
673 			/* take time stamp for SYNC messages only */
674 			ts_event_en = PTP_TCR_TSEVNTENA;
675 
676 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
677 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
678 			break;
679 
680 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
681 			/* PTP v2, UDP, Delay_req packet */
682 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
683 			ptp_v2 = PTP_TCR_TSVER2ENA;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
693 			/* PTP v2/802.1AS, any layer, any kind of event packet */
694 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
697 			if (priv->synopsys_id < DWMAC_CORE_4_10)
698 				ts_event_en = PTP_TCR_TSEVNTENA;
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			ptp_over_ethernet = PTP_TCR_TSIPENA;
702 			break;
703 
704 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
705 			/* PTP v2/802.1AS, any layer, Sync packet */
706 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
707 			ptp_v2 = PTP_TCR_TSVER2ENA;
708 			/* take time stamp for SYNC messages only */
709 			ts_event_en = PTP_TCR_TSEVNTENA;
710 
711 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
712 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
713 			ptp_over_ethernet = PTP_TCR_TSIPENA;
714 			break;
715 
716 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
717 			/* PTP v2/802.1AS, any layer, Delay_req packet */
718 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
719 			ptp_v2 = PTP_TCR_TSVER2ENA;
720 			/* take time stamp for Delay_Req messages only */
721 			ts_master_en = PTP_TCR_TSMSTRENA;
722 			ts_event_en = PTP_TCR_TSEVNTENA;
723 
724 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
725 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
726 			ptp_over_ethernet = PTP_TCR_TSIPENA;
727 			break;
728 
729 		case HWTSTAMP_FILTER_NTP_ALL:
730 		case HWTSTAMP_FILTER_ALL:
731 			/* time stamp any incoming packet */
732 			config->rx_filter = HWTSTAMP_FILTER_ALL;
733 			tstamp_all = PTP_TCR_TSENALL;
734 			break;
735 
736 		default:
737 			return -ERANGE;
738 		}
739 	} else {
740 		switch (config->rx_filter) {
741 		case HWTSTAMP_FILTER_NONE:
742 			config->rx_filter = HWTSTAMP_FILTER_NONE;
743 			break;
744 		default:
745 			/* PTP v1, UDP, any kind of event packet */
746 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
747 			break;
748 		}
749 	}
750 	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
751 	priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
752 
753 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
754 
755 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
756 		priv->systime_flags |= tstamp_all | ptp_v2 |
757 				       ptp_over_ethernet | ptp_over_ipv6_udp |
758 				       ptp_over_ipv4_udp | ts_event_en |
759 				       ts_master_en | snap_type_sel;
760 	}
761 
762 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
763 
764 	priv->tstamp_config = *config;
765 
766 	return 0;
767 }
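
/* For reference, a minimal userspace sketch (not part of this driver) of the
 * request that reaches this handler via the SIOCSHWTSTAMP ioctl, using
 * <linux/net_tstamp.h>, <linux/sockios.h> and <net/if.h>; "eth0" is only a
 * placeholder interface name:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */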
768 
769 /**
770  *  stmmac_hwtstamp_get - read hardware timestamping.
771  *  @dev: device pointer.
772  *  @config: the timestamping configuration.
773  *  Description:
774  *  This function obtains the current hardware timestamping settings
775  *  as requested.
776  */
777 static int stmmac_hwtstamp_get(struct net_device *dev,
778 			       struct kernel_hwtstamp_config *config)
779 {
780 	struct stmmac_priv *priv = netdev_priv(dev);
781 
782 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
783 		return -EOPNOTSUPP;
784 
785 	*config = priv->tstamp_config;
786 
787 	return 0;
788 }
789 
790 /**
791  * stmmac_init_tstamp_counter - init hardware timestamping counter
792  * @priv: driver private structure
793  * @systime_flags: timestamping flags
794  * Description:
795  * Initialize hardware counter for packet timestamping.
796  * This is valid as long as the interface is open and not suspended.
797  * Will be rerun after resuming from suspend, in which case the timestamping
798  * flags updated by stmmac_hwtstamp_set() also need to be restored.
799  */
800 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
801 {
802 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
803 	struct timespec64 now;
804 	u32 sec_inc = 0;
805 	u64 temp = 0;
806 
807 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
808 		return -EOPNOTSUPP;
809 
810 	if (!priv->plat->clk_ptp_rate) {
811 		netdev_err(priv->dev, "Invalid PTP clock rate");
812 		return -EINVAL;
813 	}
814 
815 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
816 	priv->systime_flags = systime_flags;
817 
818 	/* program Sub Second Increment reg */
819 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
820 					   priv->plat->clk_ptp_rate,
821 					   xmac, &sec_inc);
822 	temp = div_u64(1000000000ULL, sec_inc);
823 
824 	/* Store sub second increment for later use */
825 	priv->sub_second_inc = sec_inc;
826 
827 	/* Calculate the default addend value:
828 	 * the formula is:
829 	 * addend = (freq_div_ratio * 2^32) / clk_ptp_rate;
830 	 * where freq_div_ratio = 1e9ns / sec_inc
831 	 */
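	/* Worked example with illustrative numbers only: if the programmed
	 * sub-second increment came back as 40ns and clk_ptp_rate is 50MHz,
	 * freq_div_ratio = 1e9 / 40 = 25000000 and
	 * addend = (25000000 << 32) / 50000000 = 2^31 = 0x80000000.
	 */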
832 	temp = (u64)(temp << 32);
833 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
834 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
835 
836 	/* initialize system time */
837 	ktime_get_real_ts64(&now);
838 
839 	/* lower 32 bits of tv_sec are safe until y2106 */
840 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
841 
842 	return 0;
843 }
844 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
845 
846 /**
847  * stmmac_init_ptp - init PTP
848  * @priv: driver private structure
849  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
850  * This is done by looking at the HW cap. register.
851  * This function also registers the ptp driver.
852  */
853 static int stmmac_init_ptp(struct stmmac_priv *priv)
854 {
855 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
856 	int ret;
857 
858 	if (priv->plat->ptp_clk_freq_config)
859 		priv->plat->ptp_clk_freq_config(priv);
860 
861 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
862 	if (ret)
863 		return ret;
864 
865 	priv->adv_ts = 0;
866 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
867 	if (xmac && priv->dma_cap.atime_stamp)
868 		priv->adv_ts = 1;
869 	/* Dwmac 3.x core with extend_desc can support adv_ts */
870 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
871 		priv->adv_ts = 1;
872 
873 	if (priv->dma_cap.time_stamp)
874 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
875 
876 	if (priv->adv_ts)
877 		netdev_info(priv->dev,
878 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
879 
880 	priv->hwts_tx_en = 0;
881 	priv->hwts_rx_en = 0;
882 
883 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
884 		stmmac_hwtstamp_correct_latency(priv, priv);
885 
886 	return 0;
887 }
888 
889 static void stmmac_release_ptp(struct stmmac_priv *priv)
890 {
891 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
892 	stmmac_ptp_unregister(priv);
893 }
894 
895 /**
896  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
897  *  @priv: driver private structure
898  *  @duplex: duplex passed to the next function
899  *  @flow_ctrl: desired flow control modes
900  *  Description: It is used for configuring the flow control in all queues
901  */
902 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
903 				 unsigned int flow_ctrl)
904 {
905 	u32 tx_cnt = priv->plat->tx_queues_to_use;
906 
907 	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
908 			 tx_cnt);
909 }
910 
911 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
912 					 phy_interface_t interface)
913 {
914 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
915 
916 	/* Refresh the MAC-specific capabilities */
917 	stmmac_mac_update_caps(priv);
918 
919 	config->mac_capabilities = priv->hw->link.caps;
920 
921 	if (priv->plat->max_speed)
922 		phylink_limit_mac_speed(config, priv->plat->max_speed);
923 
924 	return config->mac_capabilities;
925 }
926 
927 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
928 						 phy_interface_t interface)
929 {
930 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
931 	struct phylink_pcs *pcs;
932 
933 	if (priv->plat->select_pcs) {
934 		pcs = priv->plat->select_pcs(priv, interface);
935 		if (!IS_ERR(pcs))
936 			return pcs;
937 	}
938 
939 	return NULL;
940 }
941 
942 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
943 			      const struct phylink_link_state *state)
944 {
945 	/* Nothing to do, xpcs_config() handles everything */
946 }
947 
948 static void stmmac_mac_link_down(struct phylink_config *config,
949 				 unsigned int mode, phy_interface_t interface)
950 {
951 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
952 
953 	stmmac_mac_set(priv, priv->ioaddr, false);
954 	if (priv->dma_cap.eee)
955 		stmmac_set_eee_pls(priv, priv->hw, false);
956 
957 	if (stmmac_fpe_supported(priv))
958 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
959 }
960 
961 static void stmmac_mac_link_up(struct phylink_config *config,
962 			       struct phy_device *phy,
963 			       unsigned int mode, phy_interface_t interface,
964 			       int speed, int duplex,
965 			       bool tx_pause, bool rx_pause)
966 {
967 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
968 	unsigned int flow_ctrl;
969 	u32 old_ctrl, ctrl;
970 	int ret;
971 
972 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
973 	    priv->plat->serdes_powerup)
974 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
975 
976 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
977 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
978 
979 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
980 		switch (speed) {
981 		case SPEED_10000:
982 			ctrl |= priv->hw->link.xgmii.speed10000;
983 			break;
984 		case SPEED_5000:
985 			ctrl |= priv->hw->link.xgmii.speed5000;
986 			break;
987 		case SPEED_2500:
988 			ctrl |= priv->hw->link.xgmii.speed2500;
989 			break;
990 		default:
991 			return;
992 		}
993 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
994 		switch (speed) {
995 		case SPEED_100000:
996 			ctrl |= priv->hw->link.xlgmii.speed100000;
997 			break;
998 		case SPEED_50000:
999 			ctrl |= priv->hw->link.xlgmii.speed50000;
1000 			break;
1001 		case SPEED_40000:
1002 			ctrl |= priv->hw->link.xlgmii.speed40000;
1003 			break;
1004 		case SPEED_25000:
1005 			ctrl |= priv->hw->link.xlgmii.speed25000;
1006 			break;
1007 		case SPEED_10000:
1008 			ctrl |= priv->hw->link.xgmii.speed10000;
1009 			break;
1010 		case SPEED_2500:
1011 			ctrl |= priv->hw->link.speed2500;
1012 			break;
1013 		case SPEED_1000:
1014 			ctrl |= priv->hw->link.speed1000;
1015 			break;
1016 		default:
1017 			return;
1018 		}
1019 	} else {
1020 		switch (speed) {
1021 		case SPEED_2500:
1022 			ctrl |= priv->hw->link.speed2500;
1023 			break;
1024 		case SPEED_1000:
1025 			ctrl |= priv->hw->link.speed1000;
1026 			break;
1027 		case SPEED_100:
1028 			ctrl |= priv->hw->link.speed100;
1029 			break;
1030 		case SPEED_10:
1031 			ctrl |= priv->hw->link.speed10;
1032 			break;
1033 		default:
1034 			return;
1035 		}
1036 	}
1037 
1038 	if (priv->plat->fix_mac_speed)
1039 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1040 
1041 	if (!duplex)
1042 		ctrl &= ~priv->hw->link.duplex;
1043 	else
1044 		ctrl |= priv->hw->link.duplex;
1045 
1046 	/* Flow Control operation */
1047 	if (rx_pause && tx_pause)
1048 		flow_ctrl = FLOW_AUTO;
1049 	else if (rx_pause && !tx_pause)
1050 		flow_ctrl = FLOW_RX;
1051 	else if (!rx_pause && tx_pause)
1052 		flow_ctrl = FLOW_TX;
1053 	else
1054 		flow_ctrl = FLOW_OFF;
1055 
1056 	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
1057 
1058 	if (ctrl != old_ctrl)
1059 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1060 
1061 	if (priv->plat->set_clk_tx_rate) {
1062 		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
1063 						priv->plat->clk_tx_i,
1064 						interface, speed);
1065 		if (ret < 0)
1066 			netdev_err(priv->dev,
1067 				   "failed to configure %s transmit clock for %dMbps: %pe\n",
1068 				   phy_modes(interface), speed, ERR_PTR(ret));
1069 	}
1070 
1071 	stmmac_mac_set(priv, priv->ioaddr, true);
1072 	if (priv->dma_cap.eee)
1073 		stmmac_set_eee_pls(priv, priv->hw, true);
1074 
1075 	if (stmmac_fpe_supported(priv))
1076 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
1077 
1078 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1079 		stmmac_hwtstamp_correct_latency(priv, priv);
1080 }
1081 
1082 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1083 {
1084 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1085 
1086 	priv->eee_active = false;
1087 
1088 	mutex_lock(&priv->lock);
1089 
1090 	priv->eee_enabled = false;
1091 
1092 	netdev_dbg(priv->dev, "disable EEE\n");
1093 	priv->eee_sw_timer_en = false;
1094 	timer_delete_sync(&priv->eee_ctrl_timer);
1095 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1096 	priv->tx_path_in_lpi_mode = false;
1097 
1098 	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1099 	mutex_unlock(&priv->lock);
1100 }
1101 
1102 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1103 				    bool tx_clk_stop)
1104 {
1105 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1106 	int ret;
1107 
1108 	priv->tx_lpi_timer = timer;
1109 	priv->eee_active = true;
1110 
1111 	mutex_lock(&priv->lock);
1112 
1113 	priv->eee_enabled = true;
1114 
1115 	/* Update the transmit clock stop according to PHY capability if
1116 	 * the platform allows
1117 	 */
1118 	if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1119 		priv->tx_lpi_clk_stop = tx_clk_stop;
1120 
1121 	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1122 			     STMMAC_DEFAULT_TWT_LS);
1123 
1124 	/* Try to configure the hardware timer. */
1125 	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1126 				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1127 
1128 	if (ret) {
1129 		/* Hardware timer mode not supported, or value out of range.
1130 		 * Fall back to using software LPI mode
1131 		 */
1132 		priv->eee_sw_timer_en = true;
1133 		stmmac_restart_sw_lpi_timer(priv);
1134 	}
1135 
1136 	mutex_unlock(&priv->lock);
1137 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1138 
1139 	return 0;
1140 }
1141 
1142 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
1143 			     phy_interface_t interface)
1144 {
1145 	struct net_device *ndev = to_net_dev(config->dev);
1146 	struct stmmac_priv *priv = netdev_priv(ndev);
1147 
1148 	if (priv->plat->mac_finish)
1149 		priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
1150 
1151 	return 0;
1152 }
1153 
1154 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1155 	.mac_get_caps = stmmac_mac_get_caps,
1156 	.mac_select_pcs = stmmac_mac_select_pcs,
1157 	.mac_config = stmmac_mac_config,
1158 	.mac_link_down = stmmac_mac_link_down,
1159 	.mac_link_up = stmmac_mac_link_up,
1160 	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1161 	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1162 	.mac_finish = stmmac_mac_finish,
1163 };
1164 
1165 /**
1166  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1167  * @priv: driver private structure
1168  * Description: this is to verify if the HW supports the PCS.
1169  * The Physical Coding Sublayer (PCS) is an interface that can be used when
1170  * the MAC is configured for the TBI, RTBI, or SGMII PHY interface.
1171  */
1172 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1173 {
1174 	int interface = priv->plat->mac_interface;
1175 
1176 	if (priv->dma_cap.pcs) {
1177 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1178 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1179 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1180 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1181 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1182 			priv->hw->pcs = STMMAC_PCS_RGMII;
1183 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1184 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1185 			priv->hw->pcs = STMMAC_PCS_SGMII;
1186 		}
1187 	}
1188 }
1189 
1190 /**
1191  * stmmac_init_phy - PHY initialization
1192  * @dev: net device structure
1193  * Description: it initializes the driver's PHY state, and attaches the PHY
1194  * to the mac driver.
1195  *  Return value:
1196  *  0 on success
1197  */
1198 static int stmmac_init_phy(struct net_device *dev)
1199 {
1200 	struct stmmac_priv *priv = netdev_priv(dev);
1201 	struct fwnode_handle *phy_fwnode;
1202 	struct fwnode_handle *fwnode;
1203 	int ret;
1204 
1205 	if (!phylink_expects_phy(priv->phylink))
1206 		return 0;
1207 
1208 	fwnode = priv->plat->port_node;
1209 	if (!fwnode)
1210 		fwnode = dev_fwnode(priv->device);
1211 
1212 	if (fwnode)
1213 		phy_fwnode = fwnode_get_phy_node(fwnode);
1214 	else
1215 		phy_fwnode = NULL;
1216 
1217 	/* Some DT bindings do not set up the PHY handle. Let's try to
1218 	 * parse it manually.
1219 	 */
1220 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1221 		int addr = priv->plat->phy_addr;
1222 		struct phy_device *phydev;
1223 
1224 		if (addr < 0) {
1225 			netdev_err(priv->dev, "no phy found\n");
1226 			return -ENODEV;
1227 		}
1228 
1229 		phydev = mdiobus_get_phy(priv->mii, addr);
1230 		if (!phydev) {
1231 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1232 			return -ENODEV;
1233 		}
1234 
1235 		ret = phylink_connect_phy(priv->phylink, phydev);
1236 	} else {
1237 		fwnode_handle_put(phy_fwnode);
1238 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1239 	}
1240 
1241 	if (ret == 0) {
1242 		struct ethtool_keee eee;
1243 
1244 		/* Configure phylib's copy of the LPI timer. Normally,
1245 		 * phylink_config.lpi_timer_default would do this, but there is
1246 		 * a chance that userspace could change the eee_timer setting
1247 		 * via sysfs before the first open. Thus, preserve existing
1248 		 * behaviour.
1249 		 */
1250 		if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1251 			eee.tx_lpi_timer = priv->tx_lpi_timer;
1252 			phylink_ethtool_set_eee(priv->phylink, &eee);
1253 		}
1254 	}
1255 
1256 	if (!priv->plat->pmt) {
1257 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1258 
1259 		phylink_ethtool_get_wol(priv->phylink, &wol);
1260 		device_set_wakeup_capable(priv->device, !!wol.supported);
1261 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1262 	}
1263 
1264 	return ret;
1265 }
1266 
1267 static int stmmac_phy_setup(struct stmmac_priv *priv)
1268 {
1269 	struct stmmac_mdio_bus_data *mdio_bus_data;
1270 	struct phylink_config *config;
1271 	struct fwnode_handle *fwnode;
1272 	struct phylink_pcs *pcs;
1273 	struct phylink *phylink;
1274 
1275 	config = &priv->phylink_config;
1276 
1277 	config->dev = &priv->dev->dev;
1278 	config->type = PHYLINK_NETDEV;
1279 	config->mac_managed_pm = true;
1280 
1281 	/* Stmmac always requires an RX clock for hardware initialization */
1282 	config->mac_requires_rxc = true;
1283 
1284 	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1285 		config->eee_rx_clk_stop_enable = true;
1286 
1287 	/* Set the default transmit clock stop bit based on the platform glue */
1288 	priv->tx_lpi_clk_stop = priv->plat->flags &
1289 				STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1290 
1291 	mdio_bus_data = priv->plat->mdio_bus_data;
1292 	if (mdio_bus_data)
1293 		config->default_an_inband = mdio_bus_data->default_an_inband;
1294 
1295 	/* Get the PHY interface modes (at the PHY end of the link) that
1296 	 * are supported by the platform.
1297 	 */
1298 	if (priv->plat->get_interfaces)
1299 		priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
1300 					   config->supported_interfaces);
1301 
1302 	/* If the supported interfaces have not already been provided, fall
1303 	 * back to the platform/firmware specified interface mode
1304 	 * (phy_interface) as a last resort.
1305 	 */
1306 	if (phy_interface_empty(config->supported_interfaces))
1307 		__set_bit(priv->plat->phy_interface,
1308 			  config->supported_interfaces);
1309 
1310 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1311 	if (priv->hw->xpcs)
1312 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1313 	else
1314 		pcs = priv->hw->phylink_pcs;
1315 
1316 	if (pcs)
1317 		phy_interface_or(config->supported_interfaces,
1318 				 config->supported_interfaces,
1319 				 pcs->supported_interfaces);
1320 
1321 	if (priv->dma_cap.eee) {
1322 		/* Assume all supported interfaces also support LPI */
1323 		memcpy(config->lpi_interfaces, config->supported_interfaces,
1324 		       sizeof(config->lpi_interfaces));
1325 
1326 		/* 100Mbps FD and all faster full duplex speeds are supported */
1327 		config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
1328 		config->lpi_timer_default = eee_timer * 1000;
1329 		config->eee_enabled_default = true;
1330 	}
1331 
1332 	fwnode = priv->plat->port_node;
1333 	if (!fwnode)
1334 		fwnode = dev_fwnode(priv->device);
1335 
1336 	phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
1337 				 &stmmac_phylink_mac_ops);
1338 	if (IS_ERR(phylink))
1339 		return PTR_ERR(phylink);
1340 
1341 	priv->phylink = phylink;
1342 	return 0;
1343 }
1344 
1345 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1346 				    struct stmmac_dma_conf *dma_conf)
1347 {
1348 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1349 	unsigned int desc_size;
1350 	void *head_rx;
1351 	u32 queue;
1352 
1353 	/* Display RX rings */
1354 	for (queue = 0; queue < rx_cnt; queue++) {
1355 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1356 
1357 		pr_info("\tRX Queue %u rings\n", queue);
1358 
1359 		if (priv->extend_desc) {
1360 			head_rx = (void *)rx_q->dma_erx;
1361 			desc_size = sizeof(struct dma_extended_desc);
1362 		} else {
1363 			head_rx = (void *)rx_q->dma_rx;
1364 			desc_size = sizeof(struct dma_desc);
1365 		}
1366 
1367 		/* Display RX ring */
1368 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1369 				    rx_q->dma_rx_phy, desc_size);
1370 	}
1371 }
1372 
1373 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1374 				    struct stmmac_dma_conf *dma_conf)
1375 {
1376 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1377 	unsigned int desc_size;
1378 	void *head_tx;
1379 	u32 queue;
1380 
1381 	/* Display TX rings */
1382 	for (queue = 0; queue < tx_cnt; queue++) {
1383 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1384 
1385 		pr_info("\tTX Queue %u rings\n", queue);
1386 
1387 		if (priv->extend_desc) {
1388 			head_tx = (void *)tx_q->dma_etx;
1389 			desc_size = sizeof(struct dma_extended_desc);
1390 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1391 			head_tx = (void *)tx_q->dma_entx;
1392 			desc_size = sizeof(struct dma_edesc);
1393 		} else {
1394 			head_tx = (void *)tx_q->dma_tx;
1395 			desc_size = sizeof(struct dma_desc);
1396 		}
1397 
1398 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1399 				    tx_q->dma_tx_phy, desc_size);
1400 	}
1401 }
1402 
1403 static void stmmac_display_rings(struct stmmac_priv *priv,
1404 				 struct stmmac_dma_conf *dma_conf)
1405 {
1406 	/* Display RX ring */
1407 	stmmac_display_rx_rings(priv, dma_conf);
1408 
1409 	/* Display TX ring */
1410 	stmmac_display_tx_rings(priv, dma_conf);
1411 }
1412 
1413 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1414 {
1415 	if (stmmac_xdp_is_enabled(priv))
1416 		return XDP_PACKET_HEADROOM;
1417 
1418 	return NET_SKB_PAD;
1419 }
1420 
1421 static int stmmac_set_bfsize(int mtu, int bufsize)
1422 {
1423 	int ret = bufsize;
1424 
1425 	if (mtu >= BUF_SIZE_8KiB)
1426 		ret = BUF_SIZE_16KiB;
1427 	else if (mtu >= BUF_SIZE_4KiB)
1428 		ret = BUF_SIZE_8KiB;
1429 	else if (mtu >= BUF_SIZE_2KiB)
1430 		ret = BUF_SIZE_4KiB;
1431 	else if (mtu > DEFAULT_BUFSIZE)
1432 		ret = BUF_SIZE_2KiB;
1433 	else
1434 		ret = DEFAULT_BUFSIZE;
1435 
1436 	return ret;
1437 }
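
/* Illustrative mapping only: an MTU of 1500 stays on DEFAULT_BUFSIZE, 3000
 * selects BUF_SIZE_4KiB and 9000 selects BUF_SIZE_16KiB, per the thresholds
 * above.
 */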
1438 
1439 /**
1440  * stmmac_clear_rx_descriptors - clear RX descriptors
1441  * @priv: driver private structure
1442  * @dma_conf: structure to take the dma data
1443  * @queue: RX queue index
1444  * Description: this function is called to clear the RX descriptors
1445  * whether basic or extended descriptors are in use.
1446  */
1447 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1448 					struct stmmac_dma_conf *dma_conf,
1449 					u32 queue)
1450 {
1451 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1452 	int i;
1453 
1454 	/* Clear the RX descriptors */
1455 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1456 		if (priv->extend_desc)
1457 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1458 					priv->use_riwt, priv->mode,
1459 					(i == dma_conf->dma_rx_size - 1),
1460 					dma_conf->dma_buf_sz);
1461 		else
1462 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1463 					priv->use_riwt, priv->mode,
1464 					(i == dma_conf->dma_rx_size - 1),
1465 					dma_conf->dma_buf_sz);
1466 }
1467 
1468 /**
1469  * stmmac_clear_tx_descriptors - clear tx descriptors
1470  * @priv: driver private structure
1471  * @dma_conf: structure to take the dma data
1472  * @queue: TX queue index.
1473  * Description: this function is called to clear the TX descriptors
1474  * whether basic or extended descriptors are in use.
1475  */
1476 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1477 					struct stmmac_dma_conf *dma_conf,
1478 					u32 queue)
1479 {
1480 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1481 	int i;
1482 
1483 	/* Clear the TX descriptors */
1484 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1485 		int last = (i == (dma_conf->dma_tx_size - 1));
1486 		struct dma_desc *p;
1487 
1488 		if (priv->extend_desc)
1489 			p = &tx_q->dma_etx[i].basic;
1490 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1491 			p = &tx_q->dma_entx[i].basic;
1492 		else
1493 			p = &tx_q->dma_tx[i];
1494 
1495 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1496 	}
1497 }
1498 
1499 /**
1500  * stmmac_clear_descriptors - clear descriptors
1501  * @priv: driver private structure
1502  * @dma_conf: structure to take the dma data
1503  * Description: this function is called to clear the TX and RX descriptors
1504  * whether basic or extended descriptors are in use.
1505  */
1506 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1507 				     struct stmmac_dma_conf *dma_conf)
1508 {
1509 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1510 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1511 	u32 queue;
1512 
1513 	/* Clear the RX descriptors */
1514 	for (queue = 0; queue < rx_queue_cnt; queue++)
1515 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1516 
1517 	/* Clear the TX descriptors */
1518 	for (queue = 0; queue < tx_queue_cnt; queue++)
1519 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1520 }
1521 
1522 /**
1523  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1524  * @priv: driver private structure
1525  * @dma_conf: structure to take the dma data
1526  * @p: descriptor pointer
1527  * @i: descriptor index
1528  * @flags: gfp flag
1529  * @queue: RX queue index
1530  * Description: this function is called to allocate a receive buffer, perform
1531  * the DMA mapping and init the descriptor.
1532  */
1533 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1534 				  struct stmmac_dma_conf *dma_conf,
1535 				  struct dma_desc *p,
1536 				  int i, gfp_t flags, u32 queue)
1537 {
1538 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1539 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1540 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1541 
1542 	if (priv->dma_cap.host_dma_width <= 32)
1543 		gfp |= GFP_DMA32;
1544 
1545 	if (!buf->page) {
1546 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1547 		if (!buf->page)
1548 			return -ENOMEM;
1549 		buf->page_offset = stmmac_rx_offset(priv);
1550 	}
1551 
1552 	if (priv->sph && !buf->sec_page) {
1553 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1554 		if (!buf->sec_page)
1555 			return -ENOMEM;
1556 
1557 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1558 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1559 	} else {
1560 		buf->sec_page = NULL;
1561 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1562 	}
1563 
1564 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1565 
1566 	stmmac_set_desc_addr(priv, p, buf->addr);
1567 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1568 		stmmac_init_desc3(priv, p);
1569 
1570 	return 0;
1571 }
1572 
1573 /**
1574  * stmmac_free_rx_buffer - free RX dma buffers
1575  * @priv: private structure
1576  * @rx_q: RX queue
1577  * @i: buffer index.
1578  */
1579 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1580 				  struct stmmac_rx_queue *rx_q,
1581 				  int i)
1582 {
1583 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1584 
1585 	if (buf->page)
1586 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1587 	buf->page = NULL;
1588 
1589 	if (buf->sec_page)
1590 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1591 	buf->sec_page = NULL;
1592 }
1593 
1594 /**
1595  * stmmac_free_tx_buffer - free TX dma buffers
1596  * @priv: private structure
1597  * @dma_conf: structure to take the dma data
1598  * @queue: TX queue index
1599  * @i: buffer index.
1600  */
1601 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1602 				  struct stmmac_dma_conf *dma_conf,
1603 				  u32 queue, int i)
1604 {
1605 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1606 
1607 	if (tx_q->tx_skbuff_dma[i].buf &&
1608 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1609 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1610 			dma_unmap_page(priv->device,
1611 				       tx_q->tx_skbuff_dma[i].buf,
1612 				       tx_q->tx_skbuff_dma[i].len,
1613 				       DMA_TO_DEVICE);
1614 		else
1615 			dma_unmap_single(priv->device,
1616 					 tx_q->tx_skbuff_dma[i].buf,
1617 					 tx_q->tx_skbuff_dma[i].len,
1618 					 DMA_TO_DEVICE);
1619 	}
1620 
1621 	if (tx_q->xdpf[i] &&
1622 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1623 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1624 		xdp_return_frame(tx_q->xdpf[i]);
1625 		tx_q->xdpf[i] = NULL;
1626 	}
1627 
1628 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1629 		tx_q->xsk_frames_done++;
1630 
1631 	if (tx_q->tx_skbuff[i] &&
1632 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1633 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1634 		tx_q->tx_skbuff[i] = NULL;
1635 	}
1636 
1637 	tx_q->tx_skbuff_dma[i].buf = 0;
1638 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1639 }
1640 
1641 /**
1642  * dma_free_rx_skbufs - free RX dma buffers
1643  * @priv: private structure
1644  * @dma_conf: structure to take the dma data
1645  * @queue: RX queue index
1646  */
1647 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1648 			       struct stmmac_dma_conf *dma_conf,
1649 			       u32 queue)
1650 {
1651 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1652 	int i;
1653 
1654 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1655 		stmmac_free_rx_buffer(priv, rx_q, i);
1656 }
1657 
1658 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1659 				   struct stmmac_dma_conf *dma_conf,
1660 				   u32 queue, gfp_t flags)
1661 {
1662 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1663 	int i;
1664 
1665 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1666 		struct dma_desc *p;
1667 		int ret;
1668 
1669 		if (priv->extend_desc)
1670 			p = &((rx_q->dma_erx + i)->basic);
1671 		else
1672 			p = rx_q->dma_rx + i;
1673 
1674 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1675 					     queue);
1676 		if (ret)
1677 			return ret;
1678 
1679 		rx_q->buf_alloc_num++;
1680 	}
1681 
1682 	return 0;
1683 }
1684 
1685 /**
1686  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1687  * @priv: private structure
1688  * @dma_conf: structure to take the dma data
1689  * @queue: RX queue index
1690  */
1691 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1692 				struct stmmac_dma_conf *dma_conf,
1693 				u32 queue)
1694 {
1695 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1696 	int i;
1697 
1698 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1699 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1700 
1701 		if (!buf->xdp)
1702 			continue;
1703 
1704 		xsk_buff_free(buf->xdp);
1705 		buf->xdp = NULL;
1706 	}
1707 }
1708 
1709 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1710 				      struct stmmac_dma_conf *dma_conf,
1711 				      u32 queue)
1712 {
1713 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1714 	int i;
1715 
1716 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1717 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1718 	 * use this macro to make sure there are no size violations.
1719 	 */
1720 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1721 
1722 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1723 		struct stmmac_rx_buffer *buf;
1724 		dma_addr_t dma_addr;
1725 		struct dma_desc *p;
1726 
1727 		if (priv->extend_desc)
1728 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1729 		else
1730 			p = rx_q->dma_rx + i;
1731 
1732 		buf = &rx_q->buf_pool[i];
1733 
1734 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1735 		if (!buf->xdp)
1736 			return -ENOMEM;
1737 
1738 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1739 		stmmac_set_desc_addr(priv, p, dma_addr);
1740 		rx_q->buf_alloc_num++;
1741 	}
1742 
1743 	return 0;
1744 }
1745 
1746 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1747 {
1748 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1749 		return NULL;
1750 
1751 	return xsk_get_pool_from_qid(priv->dev, queue);
1752 }
1753 
1754 /**
1755  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1756  * @priv: driver private structure
1757  * @dma_conf: structure to take the dma data
1758  * @queue: RX queue index
1759  * @flags: gfp flag.
1760  * Description: this function initializes the DMA RX descriptors
1761  * and allocates the socket buffers. It supports the chained and ring
1762  * modes.
1763  */
1764 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1765 				    struct stmmac_dma_conf *dma_conf,
1766 				    u32 queue, gfp_t flags)
1767 {
1768 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1769 	int ret;
1770 
1771 	netif_dbg(priv, probe, priv->dev,
1772 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1773 		  (u32)rx_q->dma_rx_phy);
1774 
1775 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1776 
1777 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1778 
1779 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1780 
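	/* Register the XDP memory model that matches how this queue is fed:
	 * the XSK buffer pool when AF_XDP zero-copy is enabled for this queue,
	 * the page_pool otherwise.
	 */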
1781 	if (rx_q->xsk_pool) {
1782 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1783 						   MEM_TYPE_XSK_BUFF_POOL,
1784 						   NULL));
1785 		netdev_info(priv->dev,
1786 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1787 			    rx_q->queue_index);
1788 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1789 	} else {
1790 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1791 						   MEM_TYPE_PAGE_POOL,
1792 						   rx_q->page_pool));
1793 		netdev_info(priv->dev,
1794 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1795 			    rx_q->queue_index);
1796 	}
1797 
1798 	if (rx_q->xsk_pool) {
1799 		/* RX XDP ZC buffer pool may not be populated, e.g.
1800 		 * xdpsock TX-only.
1801 		 */
1802 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1803 	} else {
1804 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1805 		if (ret < 0)
1806 			return -ENOMEM;
1807 	}
1808 
1809 	/* Setup the chained descriptor addresses */
1810 	if (priv->mode == STMMAC_CHAIN_MODE) {
1811 		if (priv->extend_desc)
1812 			stmmac_mode_init(priv, rx_q->dma_erx,
1813 					 rx_q->dma_rx_phy,
1814 					 dma_conf->dma_rx_size, 1);
1815 		else
1816 			stmmac_mode_init(priv, rx_q->dma_rx,
1817 					 rx_q->dma_rx_phy,
1818 					 dma_conf->dma_rx_size, 0);
1819 	}
1820 
1821 	return 0;
1822 }
1823 
1824 static int init_dma_rx_desc_rings(struct net_device *dev,
1825 				  struct stmmac_dma_conf *dma_conf,
1826 				  gfp_t flags)
1827 {
1828 	struct stmmac_priv *priv = netdev_priv(dev);
1829 	u32 rx_count = priv->plat->rx_queues_to_use;
1830 	int queue;
1831 	int ret;
1832 
1833 	/* RX INITIALIZATION */
1834 	netif_dbg(priv, probe, priv->dev,
1835 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1836 
1837 	for (queue = 0; queue < rx_count; queue++) {
1838 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1839 		if (ret)
1840 			goto err_init_rx_buffers;
1841 	}
1842 
1843 	return 0;
1844 
1845 err_init_rx_buffers:
1846 	while (queue >= 0) {
1847 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1848 
1849 		if (rx_q->xsk_pool)
1850 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1851 		else
1852 			dma_free_rx_skbufs(priv, dma_conf, queue);
1853 
1854 		rx_q->buf_alloc_num = 0;
1855 		rx_q->xsk_pool = NULL;
1856 
1857 		queue--;
1858 	}
1859 
1860 	return ret;
1861 }
1862 
1863 /**
1864  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1865  * @priv: driver private structure
1866  * @dma_conf: structure to take the dma data
1867  * @queue: TX queue index
1868  * Description: this function initializes the DMA TX descriptors
1869  * and the per-descriptor TX bookkeeping. It supports the chained and ring
1870  * modes.
1871  */
1872 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1873 				    struct stmmac_dma_conf *dma_conf,
1874 				    u32 queue)
1875 {
1876 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1877 	int i;
1878 
1879 	netif_dbg(priv, probe, priv->dev,
1880 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1881 		  (u32)tx_q->dma_tx_phy);
1882 
1883 	/* Setup the chained descriptor addresses */
1884 	if (priv->mode == STMMAC_CHAIN_MODE) {
1885 		if (priv->extend_desc)
1886 			stmmac_mode_init(priv, tx_q->dma_etx,
1887 					 tx_q->dma_tx_phy,
1888 					 dma_conf->dma_tx_size, 1);
1889 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1890 			stmmac_mode_init(priv, tx_q->dma_tx,
1891 					 tx_q->dma_tx_phy,
1892 					 dma_conf->dma_tx_size, 0);
1893 	}
1894 
1895 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1896 
1897 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1898 		struct dma_desc *p;
1899 
1900 		if (priv->extend_desc)
1901 			p = &((tx_q->dma_etx + i)->basic);
1902 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1903 			p = &((tx_q->dma_entx + i)->basic);
1904 		else
1905 			p = tx_q->dma_tx + i;
1906 
1907 		stmmac_clear_desc(priv, p);
1908 
1909 		tx_q->tx_skbuff_dma[i].buf = 0;
1910 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1911 		tx_q->tx_skbuff_dma[i].len = 0;
1912 		tx_q->tx_skbuff_dma[i].last_segment = false;
1913 		tx_q->tx_skbuff[i] = NULL;
1914 	}
1915 
1916 	return 0;
1917 }
1918 
1919 static int init_dma_tx_desc_rings(struct net_device *dev,
1920 				  struct stmmac_dma_conf *dma_conf)
1921 {
1922 	struct stmmac_priv *priv = netdev_priv(dev);
1923 	u32 tx_queue_cnt;
1924 	u32 queue;
1925 
1926 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1927 
1928 	for (queue = 0; queue < tx_queue_cnt; queue++)
1929 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1930 
1931 	return 0;
1932 }
1933 
1934 /**
1935  * init_dma_desc_rings - init the RX/TX descriptor rings
1936  * @dev: net device structure
1937  * @dma_conf: structure to take the dma data
1938  * @flags: gfp flag.
1939  * Description: this function initializes the DMA RX/TX descriptors
1940  * and allocates the socket buffers. It supports the chained and ring
1941  * modes.
1942  */
1943 static int init_dma_desc_rings(struct net_device *dev,
1944 			       struct stmmac_dma_conf *dma_conf,
1945 			       gfp_t flags)
1946 {
1947 	struct stmmac_priv *priv = netdev_priv(dev);
1948 	int ret;
1949 
1950 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1951 	if (ret)
1952 		return ret;
1953 
1954 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1955 
1956 	stmmac_clear_descriptors(priv, dma_conf);
1957 
1958 	if (netif_msg_hw(priv))
1959 		stmmac_display_rings(priv, dma_conf);
1960 
1961 	return ret;
1962 }
1963 
1964 /**
1965  * dma_free_tx_skbufs - free TX dma buffers
1966  * @priv: private structure
1967  * @dma_conf: structure to take the dma data
1968  * @queue: TX queue index
1969  */
1970 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1971 			       struct stmmac_dma_conf *dma_conf,
1972 			       u32 queue)
1973 {
1974 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1975 	int i;
1976 
1977 	tx_q->xsk_frames_done = 0;
1978 
1979 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1980 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1981 
1982 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1983 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1984 		tx_q->xsk_frames_done = 0;
1985 		tx_q->xsk_pool = NULL;
1986 	}
1987 }
1988 
1989 /**
1990  * stmmac_free_tx_skbufs - free TX skb buffers
1991  * @priv: private structure
1992  */
1993 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1994 {
1995 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1996 	u32 queue;
1997 
1998 	for (queue = 0; queue < tx_queue_cnt; queue++)
1999 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
2000 }
2001 
2002 /**
2003  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
2004  * @priv: private structure
2005  * @dma_conf: structure to take the dma data
2006  * @queue: RX queue index
2007  */
2008 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
2009 					 struct stmmac_dma_conf *dma_conf,
2010 					 u32 queue)
2011 {
2012 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2013 
2014 	/* Release the DMA RX socket buffers */
2015 	if (rx_q->xsk_pool)
2016 		dma_free_rx_xskbufs(priv, dma_conf, queue);
2017 	else
2018 		dma_free_rx_skbufs(priv, dma_conf, queue);
2019 
2020 	rx_q->buf_alloc_num = 0;
2021 	rx_q->xsk_pool = NULL;
2022 
2023 	/* Free DMA regions of consistent memory previously allocated */
2024 	if (!priv->extend_desc)
2025 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2026 				  sizeof(struct dma_desc),
2027 				  rx_q->dma_rx, rx_q->dma_rx_phy);
2028 	else
2029 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2030 				  sizeof(struct dma_extended_desc),
2031 				  rx_q->dma_erx, rx_q->dma_rx_phy);
2032 
2033 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
2034 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
2035 
2036 	kfree(rx_q->buf_pool);
2037 	if (rx_q->page_pool)
2038 		page_pool_destroy(rx_q->page_pool);
2039 }
2040 
2041 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2042 				       struct stmmac_dma_conf *dma_conf)
2043 {
2044 	u32 rx_count = priv->plat->rx_queues_to_use;
2045 	u32 queue;
2046 
2047 	/* Free RX queue resources */
2048 	for (queue = 0; queue < rx_count; queue++)
2049 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
2050 }
2051 
2052 /**
2053  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2054  * @priv: private structure
2055  * @dma_conf: structure to take the dma data
2056  * @queue: TX queue index
2057  */
2058 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2059 					 struct stmmac_dma_conf *dma_conf,
2060 					 u32 queue)
2061 {
2062 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2063 	size_t size;
2064 	void *addr;
2065 
2066 	/* Release the DMA TX socket buffers */
2067 	dma_free_tx_skbufs(priv, dma_conf, queue);
2068 
2069 	if (priv->extend_desc) {
2070 		size = sizeof(struct dma_extended_desc);
2071 		addr = tx_q->dma_etx;
2072 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2073 		size = sizeof(struct dma_edesc);
2074 		addr = tx_q->dma_entx;
2075 	} else {
2076 		size = sizeof(struct dma_desc);
2077 		addr = tx_q->dma_tx;
2078 	}
2079 
2080 	size *= dma_conf->dma_tx_size;
2081 
2082 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2083 
2084 	kfree(tx_q->tx_skbuff_dma);
2085 	kfree(tx_q->tx_skbuff);
2086 }
2087 
2088 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2089 				       struct stmmac_dma_conf *dma_conf)
2090 {
2091 	u32 tx_count = priv->plat->tx_queues_to_use;
2092 	u32 queue;
2093 
2094 	/* Free TX queue resources */
2095 	for (queue = 0; queue < tx_count; queue++)
2096 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2097 }
2098 
2099 /**
2100  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2101  * @priv: private structure
2102  * @dma_conf: structure to take the dma data
2103  * @queue: RX queue index
2104  * Description: according to which descriptor can be used (extended or basic)
2105  * this function allocates the resources for the RX path of the given queue:
2106  * the page pool that pre-allocates the RX buffers for the zero-copy
2107  * mechanism, the buffer bookkeeping and the descriptor memory.
2108  */
2109 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2110 					 struct stmmac_dma_conf *dma_conf,
2111 					 u32 queue)
2112 {
2113 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2114 	struct stmmac_channel *ch = &priv->channel[queue];
2115 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2116 	struct page_pool_params pp_params = { 0 };
2117 	unsigned int dma_buf_sz_pad, num_pages;
2118 	unsigned int napi_id;
2119 	int ret;
2120 
2121 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2122 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2123 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2124 
2125 	rx_q->queue_index = queue;
2126 	rx_q->priv_data = priv;
2127 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2128 
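	/* Size the page_pool for one full RX ring; each element spans enough
	 * pages to hold the RX offset, the DMA buffer and the skb_shared_info
	 * tailroom in a single allocation.
	 */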
2129 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2130 	pp_params.pool_size = dma_conf->dma_rx_size;
2131 	pp_params.order = order_base_2(num_pages);
2132 	pp_params.nid = dev_to_node(priv->device);
2133 	pp_params.dev = priv->device;
2134 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2135 	pp_params.offset = stmmac_rx_offset(priv);
2136 	pp_params.max_len = dma_conf->dma_buf_sz;
2137 
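	/* With Split Header (SPH) the whole page is handed to the device:
	 * start the DMA-visible region at offset 0 and extend max_len by the
	 * RX offset that would otherwise be reserved as headroom.
	 */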
2138 	if (priv->sph) {
2139 		pp_params.offset = 0;
2140 		pp_params.max_len += stmmac_rx_offset(priv);
2141 	}
2142 
2143 	rx_q->page_pool = page_pool_create(&pp_params);
2144 	if (IS_ERR(rx_q->page_pool)) {
2145 		ret = PTR_ERR(rx_q->page_pool);
2146 		rx_q->page_pool = NULL;
2147 		return ret;
2148 	}
2149 
2150 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2151 				 sizeof(*rx_q->buf_pool),
2152 				 GFP_KERNEL);
2153 	if (!rx_q->buf_pool)
2154 		return -ENOMEM;
2155 
2156 	if (priv->extend_desc) {
2157 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2158 						   dma_conf->dma_rx_size *
2159 						   sizeof(struct dma_extended_desc),
2160 						   &rx_q->dma_rx_phy,
2161 						   GFP_KERNEL);
2162 		if (!rx_q->dma_erx)
2163 			return -ENOMEM;
2164 
2165 	} else {
2166 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2167 						  dma_conf->dma_rx_size *
2168 						  sizeof(struct dma_desc),
2169 						  &rx_q->dma_rx_phy,
2170 						  GFP_KERNEL);
2171 		if (!rx_q->dma_rx)
2172 			return -ENOMEM;
2173 	}
2174 
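	/* AF_XDP zero-copy queues are serviced by the combined rx/tx NAPI,
	 * so register the xdp_rxq against that NAPI id; regular queues use
	 * the RX-only NAPI.
	 */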
2175 	if (stmmac_xdp_is_enabled(priv) &&
2176 	    test_bit(queue, priv->af_xdp_zc_qps))
2177 		napi_id = ch->rxtx_napi.napi_id;
2178 	else
2179 		napi_id = ch->rx_napi.napi_id;
2180 
2181 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2182 			       rx_q->queue_index,
2183 			       napi_id);
2184 	if (ret) {
2185 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2186 		return -EINVAL;
2187 	}
2188 
2189 	return 0;
2190 }
2191 
2192 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2193 				       struct stmmac_dma_conf *dma_conf)
2194 {
2195 	u32 rx_count = priv->plat->rx_queues_to_use;
2196 	u32 queue;
2197 	int ret;
2198 
2199 	/* RX queues buffers and DMA */
2200 	for (queue = 0; queue < rx_count; queue++) {
2201 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2202 		if (ret)
2203 			goto err_dma;
2204 	}
2205 
2206 	return 0;
2207 
2208 err_dma:
2209 	free_dma_rx_desc_resources(priv, dma_conf);
2210 
2211 	return ret;
2212 }
2213 
2214 /**
2215  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2216  * @priv: private structure
2217  * @dma_conf: structure to take the dma data
2218  * @queue: TX queue index
2219  * Description: according to which descriptor can be used (extended or basic)
2220  * this function allocates the resources for the TX path of the given queue:
2221  * the tx_skbuff bookkeeping arrays and the coherent memory that holds the
2222  * TX descriptor ring.
2223  */
2224 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2225 					 struct stmmac_dma_conf *dma_conf,
2226 					 u32 queue)
2227 {
2228 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2229 	size_t size;
2230 	void *addr;
2231 
2232 	tx_q->queue_index = queue;
2233 	tx_q->priv_data = priv;
2234 
2235 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2236 				      sizeof(*tx_q->tx_skbuff_dma),
2237 				      GFP_KERNEL);
2238 	if (!tx_q->tx_skbuff_dma)
2239 		return -ENOMEM;
2240 
2241 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2242 				  sizeof(struct sk_buff *),
2243 				  GFP_KERNEL);
2244 	if (!tx_q->tx_skbuff)
2245 		return -ENOMEM;
2246 
2247 	if (priv->extend_desc)
2248 		size = sizeof(struct dma_extended_desc);
2249 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2250 		size = sizeof(struct dma_edesc);
2251 	else
2252 		size = sizeof(struct dma_desc);
2253 
2254 	size *= dma_conf->dma_tx_size;
2255 
2256 	addr = dma_alloc_coherent(priv->device, size,
2257 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2258 	if (!addr)
2259 		return -ENOMEM;
2260 
2261 	if (priv->extend_desc)
2262 		tx_q->dma_etx = addr;
2263 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2264 		tx_q->dma_entx = addr;
2265 	else
2266 		tx_q->dma_tx = addr;
2267 
2268 	return 0;
2269 }
2270 
2271 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2272 				       struct stmmac_dma_conf *dma_conf)
2273 {
2274 	u32 tx_count = priv->plat->tx_queues_to_use;
2275 	u32 queue;
2276 	int ret;
2277 
2278 	/* TX queues buffers and DMA */
2279 	for (queue = 0; queue < tx_count; queue++) {
2280 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2281 		if (ret)
2282 			goto err_dma;
2283 	}
2284 
2285 	return 0;
2286 
2287 err_dma:
2288 	free_dma_tx_desc_resources(priv, dma_conf);
2289 	return ret;
2290 }
2291 
2292 /**
2293  * alloc_dma_desc_resources - alloc TX/RX resources.
2294  * @priv: private structure
2295  * @dma_conf: structure to take the dma data
2296  * Description: according to which descriptor can be used (extended or basic)
2297  * this function allocates the resources for the TX and RX paths. In case of
2298  * reception, for example, it pre-allocates the RX socket buffers in order to
2299  * allow the zero-copy mechanism.
2300  */
2301 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2302 				    struct stmmac_dma_conf *dma_conf)
2303 {
2304 	/* RX Allocation */
2305 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2306 
2307 	if (ret)
2308 		return ret;
2309 
2310 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2311 
2312 	return ret;
2313 }
2314 
2315 /**
2316  * free_dma_desc_resources - free dma desc resources
2317  * @priv: private structure
2318  * @dma_conf: structure to take the dma data
2319  */
2320 static void free_dma_desc_resources(struct stmmac_priv *priv,
2321 				    struct stmmac_dma_conf *dma_conf)
2322 {
2323 	/* Release the DMA TX socket buffers */
2324 	free_dma_tx_desc_resources(priv, dma_conf);
2325 
2326 	/* Release the DMA RX socket buffers later
2327 	 * to ensure all pending XDP_TX buffers are returned.
2328 	 */
2329 	free_dma_rx_desc_resources(priv, dma_conf);
2330 }
2331 
2332 /**
2333  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2334  *  @priv: driver private structure
2335  *  Description: It is used for enabling the rx queues in the MAC
2336  */
2337 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2338 {
2339 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2340 	int queue;
2341 	u8 mode;
2342 
2343 	for (queue = 0; queue < rx_queues_count; queue++) {
2344 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2345 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2346 	}
2347 }
2348 
2349 /**
2350  * stmmac_start_rx_dma - start RX DMA channel
2351  * @priv: driver private structure
2352  * @chan: RX channel index
2353  * Description:
2354  * This starts an RX DMA channel
2355  */
2356 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2357 {
2358 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2359 	stmmac_start_rx(priv, priv->ioaddr, chan);
2360 }
2361 
2362 /**
2363  * stmmac_start_tx_dma - start TX DMA channel
2364  * @priv: driver private structure
2365  * @chan: TX channel index
2366  * Description:
2367  * This starts a TX DMA channel
2368  */
2369 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2370 {
2371 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2372 	stmmac_start_tx(priv, priv->ioaddr, chan);
2373 }
2374 
2375 /**
2376  * stmmac_stop_rx_dma - stop RX DMA channel
2377  * @priv: driver private structure
2378  * @chan: RX channel index
2379  * Description:
2380  * This stops an RX DMA channel
2381  */
2382 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2383 {
2384 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2385 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2386 }
2387 
2388 /**
2389  * stmmac_stop_tx_dma - stop TX DMA channel
2390  * @priv: driver private structure
2391  * @chan: TX channel index
2392  * Description:
2393  * This stops a TX DMA channel
2394  */
2395 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2396 {
2397 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2398 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2399 }
2400 
2401 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2402 {
2403 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2404 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2405 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2406 	u32 chan;
2407 
2408 	for (chan = 0; chan < dma_csr_ch; chan++) {
2409 		struct stmmac_channel *ch = &priv->channel[chan];
2410 		unsigned long flags;
2411 
2412 		spin_lock_irqsave(&ch->lock, flags);
2413 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2414 		spin_unlock_irqrestore(&ch->lock, flags);
2415 	}
2416 }
2417 
2418 /**
2419  * stmmac_start_all_dma - start all RX and TX DMA channels
2420  * @priv: driver private structure
2421  * Description:
2422  * This starts all the RX and TX DMA channels
2423  */
2424 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2425 {
2426 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2427 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2428 	u32 chan = 0;
2429 
2430 	for (chan = 0; chan < rx_channels_count; chan++)
2431 		stmmac_start_rx_dma(priv, chan);
2432 
2433 	for (chan = 0; chan < tx_channels_count; chan++)
2434 		stmmac_start_tx_dma(priv, chan);
2435 }
2436 
2437 /**
2438  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2439  * @priv: driver private structure
2440  * Description:
2441  * This stops the RX and TX DMA channels
2442  */
2443 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2444 {
2445 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2446 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2447 	u32 chan = 0;
2448 
2449 	for (chan = 0; chan < rx_channels_count; chan++)
2450 		stmmac_stop_rx_dma(priv, chan);
2451 
2452 	for (chan = 0; chan < tx_channels_count; chan++)
2453 		stmmac_stop_tx_dma(priv, chan);
2454 }
2455 
2456 /**
2457  *  stmmac_dma_operation_mode - HW DMA operation mode
2458  *  @priv: driver private structure
2459  *  Description: it is used for configuring the DMA operation mode register in
2460  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2461  */
2462 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2463 {
2464 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2465 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2466 	int rxfifosz = priv->plat->rx_fifo_size;
2467 	int txfifosz = priv->plat->tx_fifo_size;
2468 	u32 txmode = 0;
2469 	u32 rxmode = 0;
2470 	u32 chan = 0;
2471 	u8 qmode = 0;
2472 
2473 	if (rxfifosz == 0)
2474 		rxfifosz = priv->dma_cap.rx_fifo_size;
2475 	if (txfifosz == 0)
2476 		txfifosz = priv->dma_cap.tx_fifo_size;
2477 
2478 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2479 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2480 		rxfifosz /= rx_channels_count;
2481 		txfifosz /= tx_channels_count;
2482 	}
2483 
2484 	if (priv->plat->force_thresh_dma_mode) {
2485 		txmode = tc;
2486 		rxmode = tc;
2487 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2488 		/*
2489 		 * In case of GMAC, SF mode can be enabled
2490 		 * to perform the TX COE in HW. This depends on:
2491 		 * 1) TX COE being actually supported
2492 		 * 2) there being no bugged Jumbo frame support
2493 		 *    that requires not inserting the csum in the TDES.
2494 		 */
2495 		txmode = SF_DMA_MODE;
2496 		rxmode = SF_DMA_MODE;
2497 		priv->xstats.threshold = SF_DMA_MODE;
2498 	} else {
2499 		txmode = tc;
2500 		rxmode = SF_DMA_MODE;
2501 	}
2502 
2503 	/* configure all channels */
2504 	for (chan = 0; chan < rx_channels_count; chan++) {
2505 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2506 		u32 buf_size;
2507 
2508 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2509 
2510 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2511 				rxfifosz, qmode);
2512 
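		/* Program the DMA buffer size from the XSK pool frame size
		 * when zero-copy is active, otherwise use the default
		 * dma_buf_sz.
		 */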
2513 		if (rx_q->xsk_pool) {
2514 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2515 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2516 					      buf_size,
2517 					      chan);
2518 		} else {
2519 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2520 					      priv->dma_conf.dma_buf_sz,
2521 					      chan);
2522 		}
2523 	}
2524 
2525 	for (chan = 0; chan < tx_channels_count; chan++) {
2526 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2527 
2528 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2529 				txfifosz, qmode);
2530 	}
2531 }
2532 
2533 static void stmmac_xsk_request_timestamp(void *_priv)
2534 {
2535 	struct stmmac_metadata_request *meta_req = _priv;
2536 
2537 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2538 	*meta_req->set_ic = true;
2539 }
2540 
2541 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2542 {
2543 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2544 	struct stmmac_priv *priv = tx_compl->priv;
2545 	struct dma_desc *desc = tx_compl->desc;
2546 	bool found = false;
2547 	u64 ns = 0;
2548 
2549 	if (!priv->hwts_tx_en)
2550 		return 0;
2551 
2552 	/* check tx tstamp status */
2553 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2554 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2555 		found = true;
2556 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2557 		found = true;
2558 	}
2559 
2560 	if (found) {
2561 		ns -= priv->plat->cdc_error_adj;
2562 		return ns_to_ktime(ns);
2563 	}
2564 
2565 	return 0;
2566 }
2567 
2568 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2569 {
2570 	struct timespec64 ts = ns_to_timespec64(launch_time);
2571 	struct stmmac_metadata_request *meta_req = _priv;
2572 
2573 	if (meta_req->tbs & STMMAC_TBS_EN)
2574 		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2575 				    ts.tv_nsec);
2576 }
2577 
2578 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2579 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2580 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2581 	.tmo_request_launch_time	= stmmac_xsk_request_launch_time,
2582 };
2583 
2584 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2585 {
2586 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2587 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2588 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2589 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
2590 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2591 	unsigned int entry = tx_q->cur_tx;
2592 	struct dma_desc *tx_desc = NULL;
2593 	struct xdp_desc xdp_desc;
2594 	bool work_done = true;
2595 	u32 tx_set_ic_bit = 0;
2596 
2597 	/* Avoids TX time-out as we are sharing with slow path */
2598 	txq_trans_cond_update(nq);
2599 
2600 	budget = min(budget, stmmac_tx_avail(priv, queue));
2601 
2602 	for (; budget > 0; budget--) {
2603 		struct stmmac_metadata_request meta_req;
2604 		struct xsk_tx_metadata *meta = NULL;
2605 		dma_addr_t dma_addr;
2606 		bool set_ic;
2607 
2608 		/* The TX ring is shared with the slow path, so stop XSK TX desc
2609 		 * submission when the available ring space drops below the threshold.
2610 		 */
2611 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2612 		    !netif_carrier_ok(priv->dev)) {
2613 			work_done = false;
2614 			break;
2615 		}
2616 
2617 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2618 			break;
2619 
2620 		if (priv->est && priv->est->enable &&
2621 		    priv->est->max_sdu[queue] &&
2622 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2623 			priv->xstats.max_sdu_txq_drop[queue]++;
2624 			continue;
2625 		}
2626 
2627 		if (likely(priv->extend_desc))
2628 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2629 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2630 			tx_desc = &tx_q->dma_entx[entry].basic;
2631 		else
2632 			tx_desc = tx_q->dma_tx + entry;
2633 
2634 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2635 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2636 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2637 
2638 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2639 
2640 		/* To return the XDP buffer to the XSK pool, we simply call
2641 		 * xsk_tx_completed(), so we don't need to fill up
2642 		 * 'buf' and 'xdpf'.
2643 		 */
2644 		tx_q->tx_skbuff_dma[entry].buf = 0;
2645 		tx_q->xdpf[entry] = NULL;
2646 
2647 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2648 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2649 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2650 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2651 
2652 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2653 
2654 		tx_q->tx_count_frames++;
2655 
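		/* Interrupt coalescing: request a TX completion interrupt
		 * only once every tx_coal_frames descriptors.
		 */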
2656 		if (!priv->tx_coal_frames[queue])
2657 			set_ic = false;
2658 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2659 			set_ic = true;
2660 		else
2661 			set_ic = false;
2662 
2663 		meta_req.priv = priv;
2664 		meta_req.tx_desc = tx_desc;
2665 		meta_req.set_ic = &set_ic;
2666 		meta_req.tbs = tx_q->tbs;
2667 		meta_req.edesc = &tx_q->dma_entx[entry];
2668 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2669 					&meta_req);
2670 		if (set_ic) {
2671 			tx_q->tx_count_frames = 0;
2672 			stmmac_set_tx_ic(priv, tx_desc);
2673 			tx_set_ic_bit++;
2674 		}
2675 
2676 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2677 				       csum, priv->mode, true, true,
2678 				       xdp_desc.len);
2679 
2680 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2681 
2682 		xsk_tx_metadata_to_compl(meta,
2683 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2684 
2685 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2686 		entry = tx_q->cur_tx;
2687 	}
2688 	u64_stats_update_begin(&txq_stats->napi_syncp);
2689 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2690 	u64_stats_update_end(&txq_stats->napi_syncp);
2691 
2692 	if (tx_desc) {
2693 		stmmac_flush_tx_descriptors(priv, queue);
2694 		xsk_tx_release(pool);
2695 	}
2696 
2697 	/* Return true only if both conditions are met:
2698 	 *  a) TX budget is still available
2699 	 *  b) work_done is true, i.e. the XSK TX desc peek ran empty (no more
2700 	 *     pending XSK TX frames for transmission)
2701 	 */
2702 	return !!budget && work_done;
2703 }
2704 
2705 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2706 {
2707 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2708 		tc += 64;
2709 
2710 		if (priv->plat->force_thresh_dma_mode)
2711 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2712 		else
2713 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2714 						      chan);
2715 
2716 		priv->xstats.threshold = tc;
2717 	}
2718 }
2719 
2720 /**
2721  * stmmac_tx_clean - to manage the transmission completion
2722  * @priv: driver private structure
2723  * @budget: napi budget limiting this functions packet handling
2724  * @queue: TX queue index
2725  * @pending_packets: signal to arm the TX coal timer
2726  * Description: it reclaims the transmit resources after transmission completes.
2727  * If some packets still need to be handled due to TX coalescing, set
2728  * pending_packets to true to make NAPI arm the TX coalescing timer.
2729  */
2730 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2731 			   bool *pending_packets)
2732 {
2733 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2734 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2735 	unsigned int bytes_compl = 0, pkts_compl = 0;
2736 	unsigned int entry, xmits = 0, count = 0;
2737 	u32 tx_packets = 0, tx_errors = 0;
2738 
2739 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2740 
2741 	tx_q->xsk_frames_done = 0;
2742 
2743 	entry = tx_q->dirty_tx;
2744 
2745 	/* Try to clean all completed TX frames in one shot */
2746 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2747 		struct xdp_frame *xdpf;
2748 		struct sk_buff *skb;
2749 		struct dma_desc *p;
2750 		int status;
2751 
2752 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2753 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2754 			xdpf = tx_q->xdpf[entry];
2755 			skb = NULL;
2756 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2757 			xdpf = NULL;
2758 			skb = tx_q->tx_skbuff[entry];
2759 		} else {
2760 			xdpf = NULL;
2761 			skb = NULL;
2762 		}
2763 
2764 		if (priv->extend_desc)
2765 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2766 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2767 			p = &tx_q->dma_entx[entry].basic;
2768 		else
2769 			p = tx_q->dma_tx + entry;
2770 
2771 		status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2772 		/* Check if the descriptor is owned by the DMA */
2773 		if (unlikely(status & tx_dma_own))
2774 			break;
2775 
2776 		count++;
2777 
2778 		/* Make sure descriptor fields are read after reading
2779 		 * the own bit.
2780 		 */
2781 		dma_rmb();
2782 
2783 		/* Just consider the last segment and ...*/
2784 		if (likely(!(status & tx_not_ls))) {
2785 			/* ... verify the status error condition */
2786 			if (unlikely(status & tx_err)) {
2787 				tx_errors++;
2788 				if (unlikely(status & tx_err_bump_tc))
2789 					stmmac_bump_dma_threshold(priv, queue);
2790 			} else {
2791 				tx_packets++;
2792 			}
2793 			if (skb) {
2794 				stmmac_get_tx_hwtstamp(priv, p, skb);
2795 			} else if (tx_q->xsk_pool &&
2796 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2797 				struct stmmac_xsk_tx_complete tx_compl = {
2798 					.priv = priv,
2799 					.desc = p,
2800 				};
2801 
2802 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2803 							 &stmmac_xsk_tx_metadata_ops,
2804 							 &tx_compl);
2805 			}
2806 		}
2807 
2808 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2809 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2810 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2811 				dma_unmap_page(priv->device,
2812 					       tx_q->tx_skbuff_dma[entry].buf,
2813 					       tx_q->tx_skbuff_dma[entry].len,
2814 					       DMA_TO_DEVICE);
2815 			else
2816 				dma_unmap_single(priv->device,
2817 						 tx_q->tx_skbuff_dma[entry].buf,
2818 						 tx_q->tx_skbuff_dma[entry].len,
2819 						 DMA_TO_DEVICE);
2820 			tx_q->tx_skbuff_dma[entry].buf = 0;
2821 			tx_q->tx_skbuff_dma[entry].len = 0;
2822 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2823 		}
2824 
2825 		stmmac_clean_desc3(priv, tx_q, p);
2826 
2827 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2828 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2829 
2830 		if (xdpf &&
2831 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2832 			xdp_return_frame_rx_napi(xdpf);
2833 			tx_q->xdpf[entry] = NULL;
2834 		}
2835 
2836 		if (xdpf &&
2837 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2838 			xdp_return_frame(xdpf);
2839 			tx_q->xdpf[entry] = NULL;
2840 		}
2841 
2842 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2843 			tx_q->xsk_frames_done++;
2844 
2845 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2846 			if (likely(skb)) {
2847 				pkts_compl++;
2848 				bytes_compl += skb->len;
2849 				dev_consume_skb_any(skb);
2850 				tx_q->tx_skbuff[entry] = NULL;
2851 			}
2852 		}
2853 
2854 		stmmac_release_tx_desc(priv, p, priv->mode);
2855 
2856 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2857 	}
2858 	tx_q->dirty_tx = entry;
2859 
2860 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2861 				  pkts_compl, bytes_compl);
2862 
2863 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2864 								queue))) &&
2865 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2866 
2867 		netif_dbg(priv, tx_done, priv->dev,
2868 			  "%s: restart transmit\n", __func__);
2869 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2870 	}
2871 
2872 	if (tx_q->xsk_pool) {
2873 		bool work_done;
2874 
2875 		if (tx_q->xsk_frames_done)
2876 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2877 
2878 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2879 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2880 
2881 		/* For XSK TX, we try to send as many frames as possible.
2882 		 * If the XSK work is done (XSK TX desc ring empty and budget still
2883 		 * available), return "budget - 1" to re-enable the TX IRQ.
2884 		 * Else, return "budget" to make NAPI continue polling.
2885 		 */
2886 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2887 					       STMMAC_XSK_TX_BUDGET_MAX);
2888 		if (work_done)
2889 			xmits = budget - 1;
2890 		else
2891 			xmits = budget;
2892 	}
2893 
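	/* TX activity was just reclaimed: when the software EEE timer is in
	 * use and the TX path is not already in LPI, re-arm the timer so LPI
	 * entry is deferred until the TX path goes idle.
	 */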
2894 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2895 		stmmac_restart_sw_lpi_timer(priv);
2896 
2897 	/* We still have pending packets: ask the caller to arm the TX coal timer */
2898 	if (tx_q->dirty_tx != tx_q->cur_tx)
2899 		*pending_packets = true;
2900 
2901 	u64_stats_update_begin(&txq_stats->napi_syncp);
2902 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2903 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2904 	u64_stats_inc(&txq_stats->napi.tx_clean);
2905 	u64_stats_update_end(&txq_stats->napi_syncp);
2906 
2907 	priv->xstats.tx_errors += tx_errors;
2908 
2909 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2910 
2911 	/* Combine decisions from TX clean and XSK TX */
2912 	return max(count, xmits);
2913 }
2914 
2915 /**
2916  * stmmac_tx_err - to manage the tx error
2917  * @priv: driver private structure
2918  * @chan: channel index
2919  * Description: it cleans the descriptors and restarts the transmission
2920  * in case of transmission errors.
2921  */
2922 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2923 {
2924 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2925 
2926 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2927 
2928 	stmmac_stop_tx_dma(priv, chan);
2929 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2930 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2931 	stmmac_reset_tx_queue(priv, chan);
2932 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2933 			    tx_q->dma_tx_phy, chan);
2934 	stmmac_start_tx_dma(priv, chan);
2935 
2936 	priv->xstats.tx_errors++;
2937 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2938 }
2939 
2940 /**
2941  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2942  *  @priv: driver private structure
2943  *  @txmode: TX operating mode
2944  *  @rxmode: RX operating mode
2945  *  @chan: channel index
2946  *  Description: it is used for configuring the DMA operation mode at
2947  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2948  *  mode.
2949  */
2950 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2951 					  u32 rxmode, u32 chan)
2952 {
2953 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2954 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2955 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2956 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2957 	int rxfifosz = priv->plat->rx_fifo_size;
2958 	int txfifosz = priv->plat->tx_fifo_size;
2959 
2960 	if (rxfifosz == 0)
2961 		rxfifosz = priv->dma_cap.rx_fifo_size;
2962 	if (txfifosz == 0)
2963 		txfifosz = priv->dma_cap.tx_fifo_size;
2964 
2965 	/* Adjust for real per queue fifo size */
2966 	rxfifosz /= rx_channels_count;
2967 	txfifosz /= tx_channels_count;
2968 
2969 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2970 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2971 }
2972 
2973 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2974 {
2975 	int ret;
2976 
2977 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2978 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2979 	if (ret && (ret != -EINVAL)) {
2980 		stmmac_global_err(priv);
2981 		return true;
2982 	}
2983 
2984 	return false;
2985 }
2986 
2987 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2988 {
2989 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2990 						 &priv->xstats, chan, dir);
2991 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2992 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2993 	struct stmmac_channel *ch = &priv->channel[chan];
2994 	struct napi_struct *rx_napi;
2995 	struct napi_struct *tx_napi;
2996 	unsigned long flags;
2997 
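	/* Queues backed by an XSK pool are serviced by the combined rx/tx
	 * NAPI instead of the separate rx and tx instances.
	 */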
2998 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2999 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3000 
3001 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
3002 		if (napi_schedule_prep(rx_napi)) {
3003 			spin_lock_irqsave(&ch->lock, flags);
3004 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
3005 			spin_unlock_irqrestore(&ch->lock, flags);
3006 			__napi_schedule(rx_napi);
3007 		}
3008 	}
3009 
3010 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
3011 		if (napi_schedule_prep(tx_napi)) {
3012 			spin_lock_irqsave(&ch->lock, flags);
3013 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3014 			spin_unlock_irqrestore(&ch->lock, flags);
3015 			__napi_schedule(tx_napi);
3016 		}
3017 	}
3018 
3019 	return status;
3020 }
3021 
3022 /**
3023  * stmmac_dma_interrupt - DMA ISR
3024  * @priv: driver private structure
3025  * Description: this is the DMA ISR. It is called by the main ISR.
3026  * It calls the dwmac dma routine and schedules the poll method in case
3027  * some work can be done.
3028  */
3029 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
3030 {
3031 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3032 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3033 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
3034 				tx_channel_count : rx_channel_count;
3035 	u32 chan;
3036 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
3037 
3038 	/* Make sure we never check beyond our status buffer. */
3039 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
3040 		channels_to_check = ARRAY_SIZE(status);
3041 
3042 	for (chan = 0; chan < channels_to_check; chan++)
3043 		status[chan] = stmmac_napi_check(priv, chan,
3044 						 DMA_DIR_RXTX);
3045 
3046 	for (chan = 0; chan < tx_channel_count; chan++) {
3047 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
3048 			/* Try to bump up the dma threshold on this failure */
3049 			stmmac_bump_dma_threshold(priv, chan);
3050 		} else if (unlikely(status[chan] == tx_hard_error)) {
3051 			stmmac_tx_err(priv, chan);
3052 		}
3053 	}
3054 }
3055 
3056 /**
3057  * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
3058  * @priv: driver private structure
3059  * Description: this masks the MMC irq; the counters are managed in SW.
3060  */
3061 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3062 {
3063 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3064 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3065 
3066 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3067 
3068 	if (priv->dma_cap.rmon) {
3069 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3070 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3071 	} else
3072 		netdev_info(priv->dev, "No MAC Management Counters available\n");
3073 }
3074 
3075 /**
3076  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3077  * @priv: driver private structure
3078  * Description:
3079  *  newer GMAC chip generations have a register that indicates the
3080  *  presence of the optional features/functions.
3081  *  It can also be used to override the values passed through the
3082  *  platform, which is necessary for old MAC10/100 and GMAC chips.
3083  */
3084 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3085 {
3086 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3087 }
3088 
3089 /**
3090  * stmmac_check_ether_addr - check if the MAC addr is valid
3091  * @priv: driver private structure
3092  * Description:
3093  * it verifies that the MAC address is valid; if it is not, a random
3094  * MAC address is generated
3095  */
3096 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3097 {
3098 	u8 addr[ETH_ALEN];
3099 
3100 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3101 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3102 		if (is_valid_ether_addr(addr))
3103 			eth_hw_addr_set(priv->dev, addr);
3104 		else
3105 			eth_hw_addr_random(priv->dev);
3106 		dev_info(priv->device, "device MAC address %pM\n",
3107 			 priv->dev->dev_addr);
3108 	}
3109 }
3110 
3111 /**
3112  * stmmac_init_dma_engine - DMA init.
3113  * @priv: driver private structure
3114  * Description:
3115  * It initializes the DMA, invoking the specific MAC/GMAC callback.
3116  * Some DMA parameters can be passed from the platform;
3117  * if these are not passed, a default is kept for the MAC or GMAC.
3118  */
3119 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3120 {
3121 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3122 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3123 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3124 	struct stmmac_rx_queue *rx_q;
3125 	struct stmmac_tx_queue *tx_q;
3126 	u32 chan = 0;
3127 	int ret = 0;
3128 
3129 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3130 		netdev_err(priv->dev, "Invalid DMA configuration\n");
3131 		return -EINVAL;
3132 	}
3133 
3134 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3135 		priv->plat->dma_cfg->atds = 1;
3136 
3137 	ret = stmmac_reset(priv, priv->ioaddr);
3138 	if (ret) {
3139 		netdev_err(priv->dev, "Failed to reset the dma\n");
3140 		return ret;
3141 	}
3142 
3143 	/* DMA Configuration */
3144 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3145 
3146 	if (priv->plat->axi)
3147 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3148 
3149 	/* DMA CSR Channel configuration */
3150 	for (chan = 0; chan < dma_csr_ch; chan++) {
3151 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3152 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3153 	}
3154 
3155 	/* DMA RX Channel Configuration */
3156 	for (chan = 0; chan < rx_channels_count; chan++) {
3157 		rx_q = &priv->dma_conf.rx_queue[chan];
3158 
3159 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3160 				    rx_q->dma_rx_phy, chan);
3161 
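		/* Advance the RX tail pointer past the descriptors that have
		 * been populated with buffers.
		 */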
3162 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3163 				     (rx_q->buf_alloc_num *
3164 				      sizeof(struct dma_desc));
3165 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3166 				       rx_q->rx_tail_addr, chan);
3167 	}
3168 
3169 	/* DMA TX Channel Configuration */
3170 	for (chan = 0; chan < tx_channels_count; chan++) {
3171 		tx_q = &priv->dma_conf.tx_queue[chan];
3172 
3173 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3174 				    tx_q->dma_tx_phy, chan);
3175 
3176 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3177 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3178 				       tx_q->tx_tail_addr, chan);
3179 	}
3180 
3181 	return ret;
3182 }
3183 
3184 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3185 {
3186 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3187 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3188 	struct stmmac_channel *ch;
3189 	struct napi_struct *napi;
3190 
3191 	if (!tx_coal_timer)
3192 		return;
3193 
3194 	ch = &priv->channel[tx_q->queue_index];
3195 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3196 
3197 	/* Arm timer only if napi is not already scheduled.
3198 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3199 	 * again in the next scheduled napi.
3200 	 */
3201 	if (unlikely(!napi_is_scheduled(napi)))
3202 		hrtimer_start(&tx_q->txtimer,
3203 			      STMMAC_COAL_TIMER(tx_coal_timer),
3204 			      HRTIMER_MODE_REL);
3205 	else
3206 		hrtimer_try_to_cancel(&tx_q->txtimer);
3207 }
3208 
3209 /**
3210  * @t: pointer to the expired hrtimer
3211  * Description:
3212  * This is the timer handler that schedules NAPI to run stmmac_tx_clean.
3213  * This is the timer handler to directly invoke the stmmac_tx_clean.
3214  */
3215 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3216 {
3217 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3218 	struct stmmac_priv *priv = tx_q->priv_data;
3219 	struct stmmac_channel *ch;
3220 	struct napi_struct *napi;
3221 
3222 	ch = &priv->channel[tx_q->queue_index];
3223 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3224 
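	/* Mirror the interrupt path: mask the TX DMA interrupt for this
	 * channel and let NAPI reclaim the completed descriptors.
	 */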
3225 	if (likely(napi_schedule_prep(napi))) {
3226 		unsigned long flags;
3227 
3228 		spin_lock_irqsave(&ch->lock, flags);
3229 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3230 		spin_unlock_irqrestore(&ch->lock, flags);
3231 		__napi_schedule(napi);
3232 	}
3233 
3234 	return HRTIMER_NORESTART;
3235 }
3236 
3237 /**
3238  * stmmac_init_coalesce - init mitigation options.
3239  * @priv: driver private structure
3240  * Description:
3241  * This initializes the coalescing parameters: i.e. timer rate,
3242  * timer handler and the default threshold used for enabling the
3243  * interrupt-on-completion bit.
3244  */
3245 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3246 {
3247 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3248 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3249 	u32 chan;
3250 
3251 	for (chan = 0; chan < tx_channel_count; chan++) {
3252 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3253 
3254 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3255 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3256 
3257 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3258 	}
3259 
3260 	for (chan = 0; chan < rx_channel_count; chan++)
3261 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3262 }
3263 
3264 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3265 {
3266 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3267 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3268 	u32 chan;
3269 
3270 	/* set TX ring length */
3271 	for (chan = 0; chan < tx_channels_count; chan++)
3272 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3273 				       (priv->dma_conf.dma_tx_size - 1), chan);
3274 
3275 	/* set RX ring length */
3276 	for (chan = 0; chan < rx_channels_count; chan++)
3277 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3278 				       (priv->dma_conf.dma_rx_size - 1), chan);
3279 }
3280 
3281 /**
3282  *  stmmac_set_tx_queue_weight - Set TX queue weight
3283  *  @priv: driver private structure
3284  *  Description: It is used for setting the TX queue weights
3285  */
3286 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3287 {
3288 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3289 	u32 weight;
3290 	u32 queue;
3291 
3292 	for (queue = 0; queue < tx_queues_count; queue++) {
3293 		weight = priv->plat->tx_queues_cfg[queue].weight;
3294 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3295 	}
3296 }
3297 
3298 /**
3299  *  stmmac_configure_cbs - Configure CBS in TX queue
3300  *  @priv: driver private structure
3301  *  Description: It is used for configuring CBS in AVB TX queues
3302  */
3303 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3304 {
3305 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3306 	u32 mode_to_use;
3307 	u32 queue;
3308 
3309 	/* queue 0 is reserved for legacy traffic */
3310 	for (queue = 1; queue < tx_queues_count; queue++) {
3311 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3312 		if (mode_to_use == MTL_QUEUE_DCB)
3313 			continue;
3314 
3315 		stmmac_config_cbs(priv, priv->hw,
3316 				priv->plat->tx_queues_cfg[queue].send_slope,
3317 				priv->plat->tx_queues_cfg[queue].idle_slope,
3318 				priv->plat->tx_queues_cfg[queue].high_credit,
3319 				priv->plat->tx_queues_cfg[queue].low_credit,
3320 				queue);
3321 	}
3322 }
3323 
3324 /**
3325  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3326  *  @priv: driver private structure
3327  *  Description: It is used for mapping RX queues to RX dma channels
3328  */
3329 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3330 {
3331 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3332 	u32 queue;
3333 	u32 chan;
3334 
3335 	for (queue = 0; queue < rx_queues_count; queue++) {
3336 		chan = priv->plat->rx_queues_cfg[queue].chan;
3337 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3338 	}
3339 }
3340 
3341 /**
3342  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3343  *  @priv: driver private structure
3344  *  Description: It is used for configuring the RX Queue Priority
3345  */
3346 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3347 {
3348 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3349 	u32 queue;
3350 	u32 prio;
3351 
3352 	for (queue = 0; queue < rx_queues_count; queue++) {
3353 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3354 			continue;
3355 
3356 		prio = priv->plat->rx_queues_cfg[queue].prio;
3357 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3358 	}
3359 }
3360 
3361 /**
3362  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3363  *  @priv: driver private structure
3364  *  Description: It is used for configuring the TX Queue Priority
3365  */
3366 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3367 {
3368 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3369 	u32 queue;
3370 	u32 prio;
3371 
3372 	for (queue = 0; queue < tx_queues_count; queue++) {
3373 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3374 			continue;
3375 
3376 		prio = priv->plat->tx_queues_cfg[queue].prio;
3377 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3378 	}
3379 }
3380 
3381 /**
3382  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3383  *  @priv: driver private structure
3384  *  Description: It is used for configuring the RX queue routing
3385  */
3386 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3387 {
3388 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3389 	u32 queue;
3390 	u8 packet;
3391 
3392 	for (queue = 0; queue < rx_queues_count; queue++) {
3393 		/* no specific packet type routing specified for the queue */
3394 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3395 			continue;
3396 
3397 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3398 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3399 	}
3400 }
3401 
3402 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3403 {
3404 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3405 		priv->rss.enable = false;
3406 		return;
3407 	}
3408 
3409 	if (priv->dev->features & NETIF_F_RXHASH)
3410 		priv->rss.enable = true;
3411 	else
3412 		priv->rss.enable = false;
3413 
3414 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3415 			     priv->plat->rx_queues_to_use);
3416 }
3417 
3418 /**
3419  *  stmmac_mtl_configuration - Configure MTL
3420  *  @priv: driver private structure
3421  *  Description: It is used for configuring MTL
3422  */
3423 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3424 {
3425 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3426 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3427 
3428 	if (tx_queues_count > 1)
3429 		stmmac_set_tx_queue_weight(priv);
3430 
3431 	/* Configure MTL RX algorithms */
3432 	if (rx_queues_count > 1)
3433 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3434 				priv->plat->rx_sched_algorithm);
3435 
3436 	/* Configure MTL TX algorithms */
3437 	if (tx_queues_count > 1)
3438 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3439 				priv->plat->tx_sched_algorithm);
3440 
3441 	/* Configure CBS in AVB TX queues */
3442 	if (tx_queues_count > 1)
3443 		stmmac_configure_cbs(priv);
3444 
3445 	/* Map RX MTL to DMA channels */
3446 	stmmac_rx_queue_dma_chan_map(priv);
3447 
3448 	/* Enable MAC RX Queues */
3449 	stmmac_mac_enable_rx_queues(priv);
3450 
3451 	/* Set RX priorities */
3452 	if (rx_queues_count > 1)
3453 		stmmac_mac_config_rx_queues_prio(priv);
3454 
3455 	/* Set TX priorities */
3456 	if (tx_queues_count > 1)
3457 		stmmac_mac_config_tx_queues_prio(priv);
3458 
3459 	/* Set RX routing */
3460 	if (rx_queues_count > 1)
3461 		stmmac_mac_config_rx_queues_routing(priv);
3462 
3463 	/* Receive Side Scaling */
3464 	if (rx_queues_count > 1)
3465 		stmmac_mac_config_rss(priv);
3466 }
3467 
3468 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3469 {
3470 	if (priv->dma_cap.asp) {
3471 		netdev_info(priv->dev, "Enabling Safety Features\n");
3472 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3473 					  priv->plat->safety_feat_cfg);
3474 	} else {
3475 		netdev_info(priv->dev, "No Safety Features support found\n");
3476 	}
3477 }
3478 
3479 /**
3480  * stmmac_hw_setup - setup mac in a usable state.
3481  *  @dev : pointer to the device structure.
3482  *  @ptp_register: register PTP if set
3483  *  Description:
3484  *  this is the main function to set up the HW in a usable state: the
3485  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3486  *  checksum features, timers) and the DMA is ready to start receiving
3487  *  and transmitting.
3488  *  Return value:
3489  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3490  *  file on failure.
3491  */
3492 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3493 {
3494 	struct stmmac_priv *priv = netdev_priv(dev);
3495 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3496 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3497 	bool sph_en;
3498 	u32 chan;
3499 	int ret;
3500 
3501 	/* Make sure RX clock is enabled */
3502 	if (priv->hw->phylink_pcs)
3503 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3504 
3505 	/* Note that clk_rx_i must be running for reset to complete. This
3506 	 * clock may also be required when setting the MAC address.
3507 	 *
3508 	 * Block the receive clock stop for LPI mode at the PHY in case
3509 	 * the link is established with EEE mode active.
3510 	 */
3511 	phylink_rx_clk_stop_block(priv->phylink);
3512 
3513 	/* DMA initialization and SW reset */
3514 	ret = stmmac_init_dma_engine(priv);
3515 	if (ret < 0) {
3516 		phylink_rx_clk_stop_unblock(priv->phylink);
3517 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3518 			   __func__);
3519 		return ret;
3520 	}
3521 
3522 	/* Copy the MAC addr into the HW  */
3523 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3524 	phylink_rx_clk_stop_unblock(priv->phylink);
3525 
3526 	/* PS and related bits will be programmed according to the speed */
3527 	if (priv->hw->pcs) {
3528 		int speed = priv->plat->mac_port_sel_speed;
3529 
3530 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3531 		    (speed == SPEED_1000)) {
3532 			priv->hw->ps = speed;
3533 		} else {
3534 			dev_warn(priv->device, "invalid port speed\n");
3535 			priv->hw->ps = 0;
3536 		}
3537 	}
3538 
3539 	/* Initialize the MAC Core */
3540 	stmmac_core_init(priv, priv->hw, dev);
3541 
3542 	/* Initialize MTL */
3543 	stmmac_mtl_configuration(priv);
3544 
3545 	/* Initialize Safety Features */
3546 	stmmac_safety_feat_configuration(priv);
3547 
3548 	ret = stmmac_rx_ipc(priv, priv->hw);
3549 	if (!ret) {
3550 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3551 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3552 		priv->hw->rx_csum = 0;
3553 	}
3554 
3555 	/* Enable the MAC Rx/Tx */
3556 	stmmac_mac_set(priv, priv->ioaddr, true);
3557 
3558 	/* Set the HW DMA mode and the COE */
3559 	stmmac_dma_operation_mode(priv);
3560 
3561 	stmmac_mmc_setup(priv);
3562 
3563 	if (ptp_register) {
3564 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3565 		if (ret < 0)
3566 			netdev_warn(priv->dev,
3567 				    "failed to enable PTP reference clock: %pe\n",
3568 				    ERR_PTR(ret));
3569 	}
3570 
3571 	ret = stmmac_init_ptp(priv);
3572 	if (ret == -EOPNOTSUPP)
3573 		netdev_info(priv->dev, "PTP not supported by HW\n");
3574 	else if (ret)
3575 		netdev_warn(priv->dev, "PTP init failed\n");
3576 	else if (ptp_register)
3577 		stmmac_ptp_register(priv);
3578 
3579 	if (priv->use_riwt) {
3580 		u32 queue;
3581 
3582 		for (queue = 0; queue < rx_cnt; queue++) {
3583 			if (!priv->rx_riwt[queue])
3584 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3585 
3586 			stmmac_rx_watchdog(priv, priv->ioaddr,
3587 					   priv->rx_riwt[queue], queue);
3588 		}
3589 	}
3590 
3591 	if (priv->hw->pcs)
3592 		stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
3593 
3594 	/* set TX and RX rings length */
3595 	stmmac_set_rings_length(priv);
3596 
3597 	/* Enable TSO */
3598 	if (priv->tso) {
3599 		for (chan = 0; chan < tx_cnt; chan++) {
3600 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3601 
3602 			/* TSO and TBS cannot co-exist */
3603 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3604 				continue;
3605 
3606 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3607 		}
3608 	}
3609 
3610 	/* Enable Split Header */
3611 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3612 	for (chan = 0; chan < rx_cnt; chan++)
3613 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3614 
3615 
3616 	/* VLAN Tag Insertion */
3617 	if (priv->dma_cap.vlins)
3618 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3619 
3620 	/* TBS */
3621 	for (chan = 0; chan < tx_cnt; chan++) {
3622 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3623 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3624 
3625 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3626 	}
3627 
3628 	/* Configure real RX and TX queues */
3629 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3630 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3631 
3632 	/* Start the ball rolling... */
3633 	stmmac_start_all_dma(priv);
3634 
3635 	phylink_rx_clk_stop_block(priv->phylink);
3636 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3637 	phylink_rx_clk_stop_unblock(priv->phylink);
3638 
3639 	return 0;
3640 }
3641 
3642 static void stmmac_hw_teardown(struct net_device *dev)
3643 {
3644 	struct stmmac_priv *priv = netdev_priv(dev);
3645 
3646 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3647 }
3648 
3649 static void stmmac_free_irq(struct net_device *dev,
3650 			    enum request_irq_err irq_err, int irq_idx)
3651 {
3652 	struct stmmac_priv *priv = netdev_priv(dev);
3653 	int j;
3654 
3655 	switch (irq_err) {
3656 	case REQ_IRQ_ERR_ALL:
3657 		irq_idx = priv->plat->tx_queues_to_use;
3658 		fallthrough;
3659 	case REQ_IRQ_ERR_TX:
3660 		for (j = irq_idx - 1; j >= 0; j--) {
3661 			if (priv->tx_irq[j] > 0) {
3662 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3663 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3664 			}
3665 		}
3666 		irq_idx = priv->plat->rx_queues_to_use;
3667 		fallthrough;
3668 	case REQ_IRQ_ERR_RX:
3669 		for (j = irq_idx - 1; j >= 0; j--) {
3670 			if (priv->rx_irq[j] > 0) {
3671 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3672 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3673 			}
3674 		}
3675 
3676 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3677 			free_irq(priv->sfty_ue_irq, dev);
3678 		fallthrough;
3679 	case REQ_IRQ_ERR_SFTY_UE:
3680 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3681 			free_irq(priv->sfty_ce_irq, dev);
3682 		fallthrough;
3683 	case REQ_IRQ_ERR_SFTY_CE:
3684 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3685 			free_irq(priv->lpi_irq, dev);
3686 		fallthrough;
3687 	case REQ_IRQ_ERR_LPI:
3688 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3689 			free_irq(priv->wol_irq, dev);
3690 		fallthrough;
3691 	case REQ_IRQ_ERR_SFTY:
3692 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3693 			free_irq(priv->sfty_irq, dev);
3694 		fallthrough;
3695 	case REQ_IRQ_ERR_WOL:
3696 		free_irq(dev->irq, dev);
3697 		fallthrough;
3698 	case REQ_IRQ_ERR_MAC:
3699 	case REQ_IRQ_ERR_NO:
3700 		/* If MAC IRQ request error, no more IRQ to free */
3701 		break;
3702 	}
3703 }
3704 
3705 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3706 {
3707 	struct stmmac_priv *priv = netdev_priv(dev);
3708 	enum request_irq_err irq_err;
3709 	int irq_idx = 0;
3710 	char *int_name;
3711 	int ret;
3712 	int i;
3713 
3714 	/* For common interrupt */
3715 	int_name = priv->int_name_mac;
3716 	sprintf(int_name, "%s:%s", dev->name, "mac");
3717 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3718 			  0, int_name, dev);
3719 	if (unlikely(ret < 0)) {
3720 		netdev_err(priv->dev,
3721 			   "%s: alloc mac MSI %d (error: %d)\n",
3722 			   __func__, dev->irq, ret);
3723 		irq_err = REQ_IRQ_ERR_MAC;
3724 		goto irq_error;
3725 	}
3726 
3727 	/* Request the Wake IRQ in case another line
3728 	 * is used for WoL
3729 	 */
3730 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3731 		int_name = priv->int_name_wol;
3732 		sprintf(int_name, "%s:%s", dev->name, "wol");
3733 		ret = request_irq(priv->wol_irq,
3734 				  stmmac_mac_interrupt,
3735 				  0, int_name, dev);
3736 		if (unlikely(ret < 0)) {
3737 			netdev_err(priv->dev,
3738 				   "%s: alloc wol MSI %d (error: %d)\n",
3739 				   __func__, priv->wol_irq, ret);
3740 			irq_err = REQ_IRQ_ERR_WOL;
3741 			goto irq_error;
3742 		}
3743 	}
3744 
3745 	/* Request the LPI IRQ in case another line
3746 	 * is used for LPI
3747 	 */
3748 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3749 		int_name = priv->int_name_lpi;
3750 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3751 		ret = request_irq(priv->lpi_irq,
3752 				  stmmac_mac_interrupt,
3753 				  0, int_name, dev);
3754 		if (unlikely(ret < 0)) {
3755 			netdev_err(priv->dev,
3756 				   "%s: alloc lpi MSI %d (error: %d)\n",
3757 				   __func__, priv->lpi_irq, ret);
3758 			irq_err = REQ_IRQ_ERR_LPI;
3759 			goto irq_error;
3760 		}
3761 	}
3762 
3763 	/* Request the common Safety Feature Correctable/Uncorrectable
3764 	 * Error line in case another line is used
3765 	 */
3766 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3767 		int_name = priv->int_name_sfty;
3768 		sprintf(int_name, "%s:%s", dev->name, "safety");
3769 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3770 				  0, int_name, dev);
3771 		if (unlikely(ret < 0)) {
3772 			netdev_err(priv->dev,
3773 				   "%s: alloc sfty MSI %d (error: %d)\n",
3774 				   __func__, priv->sfty_irq, ret);
3775 			irq_err = REQ_IRQ_ERR_SFTY;
3776 			goto irq_error;
3777 		}
3778 	}
3779 
3780 	/* Request the Safety Feature Correctable Error line in
3781 	 * case another line is used
3782 	 */
3783 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3784 		int_name = priv->int_name_sfty_ce;
3785 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3786 		ret = request_irq(priv->sfty_ce_irq,
3787 				  stmmac_safety_interrupt,
3788 				  0, int_name, dev);
3789 		if (unlikely(ret < 0)) {
3790 			netdev_err(priv->dev,
3791 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3792 				   __func__, priv->sfty_ce_irq, ret);
3793 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3794 			goto irq_error;
3795 		}
3796 	}
3797 
3798 	/* Request the Safety Feature Uncorrectable Error line in
3799 	 * case another line is used
3800 	 */
3801 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3802 		int_name = priv->int_name_sfty_ue;
3803 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3804 		ret = request_irq(priv->sfty_ue_irq,
3805 				  stmmac_safety_interrupt,
3806 				  0, int_name, dev);
3807 		if (unlikely(ret < 0)) {
3808 			netdev_err(priv->dev,
3809 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3810 				   __func__, priv->sfty_ue_irq, ret);
3811 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3812 			goto irq_error;
3813 		}
3814 	}
3815 
3816 	/* Request Rx MSI irq */
3817 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3818 		if (i >= MTL_MAX_RX_QUEUES)
3819 			break;
3820 		if (priv->rx_irq[i] == 0)
3821 			continue;
3822 
3823 		int_name = priv->int_name_rx_irq[i];
3824 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3825 		ret = request_irq(priv->rx_irq[i],
3826 				  stmmac_msi_intr_rx,
3827 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3828 		if (unlikely(ret < 0)) {
3829 			netdev_err(priv->dev,
3830 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3831 				   __func__, i, priv->rx_irq[i], ret);
3832 			irq_err = REQ_IRQ_ERR_RX;
3833 			irq_idx = i;
3834 			goto irq_error;
3835 		}
3836 		irq_set_affinity_hint(priv->rx_irq[i],
3837 				      cpumask_of(i % num_online_cpus()));
3838 	}
3839 
3840 	/* Request Tx MSI irq */
3841 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3842 		if (i >= MTL_MAX_TX_QUEUES)
3843 			break;
3844 		if (priv->tx_irq[i] == 0)
3845 			continue;
3846 
3847 		int_name = priv->int_name_tx_irq[i];
3848 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3849 		ret = request_irq(priv->tx_irq[i],
3850 				  stmmac_msi_intr_tx,
3851 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3852 		if (unlikely(ret < 0)) {
3853 			netdev_err(priv->dev,
3854 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3855 				   __func__, i, priv->tx_irq[i], ret);
3856 			irq_err = REQ_IRQ_ERR_TX;
3857 			irq_idx = i;
3858 			goto irq_error;
3859 		}
3860 		irq_set_affinity_hint(priv->tx_irq[i],
3861 				      cpumask_of(i % num_online_cpus()));
3862 	}
3863 
3864 	return 0;
3865 
3866 irq_error:
3867 	stmmac_free_irq(dev, irq_err, irq_idx);
3868 	return ret;
3869 }
3870 
3871 static int stmmac_request_irq_single(struct net_device *dev)
3872 {
3873 	struct stmmac_priv *priv = netdev_priv(dev);
3874 	enum request_irq_err irq_err;
3875 	int ret;
3876 
3877 	ret = request_irq(dev->irq, stmmac_interrupt,
3878 			  IRQF_SHARED, dev->name, dev);
3879 	if (unlikely(ret < 0)) {
3880 		netdev_err(priv->dev,
3881 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3882 			   __func__, dev->irq, ret);
3883 		irq_err = REQ_IRQ_ERR_MAC;
3884 		goto irq_error;
3885 	}
3886 
3887 	/* Request the Wake IRQ in case another line
3888 	 * is used for WoL
3889 	 */
3890 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3891 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3892 				  IRQF_SHARED, dev->name, dev);
3893 		if (unlikely(ret < 0)) {
3894 			netdev_err(priv->dev,
3895 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3896 				   __func__, priv->wol_irq, ret);
3897 			irq_err = REQ_IRQ_ERR_WOL;
3898 			goto irq_error;
3899 		}
3900 	}
3901 
3902 	/* Request the LPI IRQ in case another line is used for LPI */
3903 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3904 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3905 				  IRQF_SHARED, dev->name, dev);
3906 		if (unlikely(ret < 0)) {
3907 			netdev_err(priv->dev,
3908 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3909 				   __func__, priv->lpi_irq, ret);
3910 			irq_err = REQ_IRQ_ERR_LPI;
3911 			goto irq_error;
3912 		}
3913 	}
3914 
3915 	/* Request the common Safety Feature Correctable/Uncorrectable
3916 	 * Error line in case another line is used
3917 	 */
3918 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3919 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3920 				  IRQF_SHARED, dev->name, dev);
3921 		if (unlikely(ret < 0)) {
3922 			netdev_err(priv->dev,
3923 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3924 				   __func__, priv->sfty_irq, ret);
3925 			irq_err = REQ_IRQ_ERR_SFTY;
3926 			goto irq_error;
3927 		}
3928 	}
3929 
3930 	return 0;
3931 
3932 irq_error:
3933 	stmmac_free_irq(dev, irq_err, 0);
3934 	return ret;
3935 }
3936 
3937 static int stmmac_request_irq(struct net_device *dev)
3938 {
3939 	struct stmmac_priv *priv = netdev_priv(dev);
3940 	int ret;
3941 
3942 	/* Request the IRQ lines */
3943 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3944 		ret = stmmac_request_irq_multi_msi(dev);
3945 	else
3946 		ret = stmmac_request_irq_single(dev);
3947 
3948 	return ret;
3949 }
3950 
3951 /**
3952  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3953  *  @priv: driver private structure
3954  *  @mtu: MTU to setup the dma queue and buf with
3955  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3956  *  Allocate the Tx/Rx DMA queue and init them.
3957  *  Return value:
3958  *  the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3959  */
3960 static struct stmmac_dma_conf *
3961 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3962 {
3963 	struct stmmac_dma_conf *dma_conf;
3964 	int chan, bfsize, ret;
3965 
3966 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3967 	if (!dma_conf) {
3968 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3969 			   __func__);
3970 		return ERR_PTR(-ENOMEM);
3971 	}
3972 
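	/* Let the ring/chain mode code decide whether a 16KiB buffer is
	 * needed for this MTU; otherwise derive the buffer size directly
	 * from the MTU.
	 */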
3973 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3974 	if (bfsize < 0)
3975 		bfsize = 0;
3976 
3977 	if (bfsize < BUF_SIZE_16KiB)
3978 		bfsize = stmmac_set_bfsize(mtu, 0);
3979 
3980 	dma_conf->dma_buf_sz = bfsize;
3981 	/* Choose the tx/rx ring sizes from the ones already defined in
3982 	 * the priv struct, if any.
3983 	 */
3984 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3985 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3986 
3987 	if (!dma_conf->dma_tx_size)
3988 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3989 	if (!dma_conf->dma_rx_size)
3990 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3991 
3992 	/* Earlier check for TBS */
3993 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3994 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3995 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3996 
3997 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3998 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3999 	}
4000 
4001 	ret = alloc_dma_desc_resources(priv, dma_conf);
4002 	if (ret < 0) {
4003 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
4004 			   __func__);
4005 		goto alloc_error;
4006 	}
4007 
4008 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
4009 	if (ret < 0) {
4010 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
4011 			   __func__);
4012 		goto init_error;
4013 	}
4014 
4015 	return dma_conf;
4016 
4017 init_error:
4018 	free_dma_desc_resources(priv, dma_conf);
4019 alloc_error:
4020 	kfree(dma_conf);
4021 	return ERR_PTR(ret);
4022 }
4023 
4024 /**
4025  *  __stmmac_open - open entry point of the driver
4026  *  @dev : pointer to the device structure.
4027  *  @dma_conf :  structure to take the dma data
4028  *  Description:
4029  *  This function is the open entry point of the driver.
4030  *  Return value:
4031  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4032  *  file on failure.
4033  */
4034 static int __stmmac_open(struct net_device *dev,
4035 			 struct stmmac_dma_conf *dma_conf)
4036 {
4037 	struct stmmac_priv *priv = netdev_priv(dev);
4038 	int mode = priv->plat->phy_interface;
4039 	u32 chan;
4040 	int ret;
4041 
4042 	/* Initialise the tx lpi timer, converting from msec to usec */
4043 	if (!priv->tx_lpi_timer)
4044 		priv->tx_lpi_timer = eee_timer * 1000;
4045 
4046 	ret = pm_runtime_resume_and_get(priv->device);
4047 	if (ret < 0)
4048 		return ret;
4049 
4050 	if ((!priv->hw->xpcs ||
4051 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
4052 		ret = stmmac_init_phy(dev);
4053 		if (ret) {
4054 			netdev_err(priv->dev,
4055 				   "%s: Cannot attach to PHY (error: %d)\n",
4056 				   __func__, ret);
4057 			goto init_phy_error;
4058 		}
4059 	}
4060 
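	/* Carry over any per-queue TBS enable state from the current
	 * configuration so it survives the dma_conf replacement below.
	 */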
4061 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
4062 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
4063 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
4064 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4065 
4066 	stmmac_reset_queues_param(priv);
4067 
4068 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
4069 	    priv->plat->serdes_powerup) {
4070 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4071 		if (ret < 0) {
4072 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4073 				   __func__);
4074 			goto init_error;
4075 		}
4076 	}
4077 
4078 	ret = stmmac_hw_setup(dev, true);
4079 	if (ret < 0) {
4080 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4081 		goto init_error;
4082 	}
4083 
4084 	stmmac_init_coalesce(priv);
4085 
4086 	phylink_start(priv->phylink);
4087 	/* We may have called phylink_speed_down before */
4088 	phylink_speed_up(priv->phylink);
4089 
4090 	ret = stmmac_request_irq(dev);
4091 	if (ret)
4092 		goto irq_error;
4093 
4094 	stmmac_enable_all_queues(priv);
4095 	netif_tx_start_all_queues(priv->dev);
4096 	stmmac_enable_all_dma_irq(priv);
4097 
4098 	return 0;
4099 
4100 irq_error:
4101 	phylink_stop(priv->phylink);
4102 
4103 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4104 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4105 
4106 	stmmac_hw_teardown(dev);
4107 init_error:
4108 	phylink_disconnect_phy(priv->phylink);
4109 init_phy_error:
4110 	pm_runtime_put(priv->device);
4111 	return ret;
4112 }
4113 
4114 static int stmmac_open(struct net_device *dev)
4115 {
4116 	struct stmmac_priv *priv = netdev_priv(dev);
4117 	struct stmmac_dma_conf *dma_conf;
4118 	int ret;
4119 
4120 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4121 	if (IS_ERR(dma_conf))
4122 		return PTR_ERR(dma_conf);
4123 
4124 	ret = __stmmac_open(dev, dma_conf);
4125 	if (ret)
4126 		free_dma_desc_resources(priv, dma_conf);
4127 
4128 	kfree(dma_conf);
4129 	return ret;
4130 }
4131 
4132 /**
4133  *  stmmac_release - close entry point of the driver
4134  *  @dev : device pointer.
4135  *  Description:
4136  *  This is the stop entry point of the driver.
4137  */
4138 static int stmmac_release(struct net_device *dev)
4139 {
4140 	struct stmmac_priv *priv = netdev_priv(dev);
4141 	u32 chan;
4142 
4143 	/* If the PHY or MAC has WoL enabled, then the PHY will not be
4144 	 * suspended when phylink_stop() is called below. Set the PHY
4145 	 * to its slowest speed to save power.
4146 	 */
4147 	if (device_may_wakeup(priv->device))
4148 		phylink_speed_down(priv->phylink, false);
4149 
4150 	/* Stop and disconnect the PHY */
4151 	phylink_stop(priv->phylink);
4152 	phylink_disconnect_phy(priv->phylink);
4153 
4154 	stmmac_disable_all_queues(priv);
4155 
4156 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4157 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4158 
4159 	netif_tx_disable(dev);
4160 
4161 	/* Free the IRQ lines */
4162 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4163 
4164 	/* Stop TX/RX DMA and clear the descriptors */
4165 	stmmac_stop_all_dma(priv);
4166 
4167 	/* Release and free the Rx/Tx resources */
4168 	free_dma_desc_resources(priv, &priv->dma_conf);
4169 
4170 	/* Powerdown Serdes if there is */
4171 	if (priv->plat->serdes_powerdown)
4172 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4173 
4174 	stmmac_release_ptp(priv);
4175 
4176 	if (stmmac_fpe_supported(priv))
4177 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
4178 
4179 	pm_runtime_put(priv->device);
4180 
4181 	return 0;
4182 }
4183 
4184 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4185 			       struct stmmac_tx_queue *tx_q)
4186 {
4187 	u16 tag = 0x0, inner_tag = 0x0;
4188 	u32 inner_type = 0x0;
4189 	struct dma_desc *p;
4190 
4191 	if (!priv->dma_cap.vlins)
4192 		return false;
4193 	if (!skb_vlan_tag_present(skb))
4194 		return false;
4195 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4196 		inner_tag = skb_vlan_tag_get(skb);
4197 		inner_type = STMMAC_VLAN_INSERT;
4198 	}
4199 
4200 	tag = skb_vlan_tag_get(skb);
4201 
4202 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4203 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4204 	else
4205 		p = &tx_q->dma_tx[tx_q->cur_tx];
4206 
4207 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4208 		return false;
4209 
4210 	stmmac_set_tx_owner(priv, p);
4211 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4212 	return true;
4213 }
4214 
4215 /**
4216  *  stmmac_tso_allocator - fill TX descriptors with the TSO payload
4217  *  @priv: driver private structure
4218  *  @des: buffer start address
4219  *  @total_len: total length to fill in descriptors
4220  *  @last_segment: condition for the last descriptor
4221  *  @queue: TX queue index
4222  *  Description:
4223  *  This function fills descriptors and requests new ones according to the
4224  *  remaining buffer length to fill.
4225  */
4226 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4227 				 int total_len, bool last_segment, u32 queue)
4228 {
4229 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4230 	struct dma_desc *desc;
4231 	u32 buff_size;
4232 	int tmp_len;
4233 
4234 	tmp_len = total_len;
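	/* Split the remaining payload into TSO_MAX_BUFF_SIZE chunks, one
	 * descriptor per chunk; only the final chunk of the last segment
	 * has the Last Segment bit set.
	 */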
4235 
4236 	while (tmp_len > 0) {
4237 		dma_addr_t curr_addr;
4238 
4239 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4240 						priv->dma_conf.dma_tx_size);
4241 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4242 
4243 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4244 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4245 		else
4246 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4247 
4248 		curr_addr = des + (total_len - tmp_len);
4249 		stmmac_set_desc_addr(priv, desc, curr_addr);
4250 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4251 			    TSO_MAX_BUFF_SIZE : tmp_len;
4252 
4253 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4254 				0, 1,
4255 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4256 				0, 0);
4257 
4258 		tmp_len -= TSO_MAX_BUFF_SIZE;
4259 	}
4260 }
4261 
4262 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4263 {
4264 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4265 	int desc_size;
4266 
4267 	if (likely(priv->extend_desc))
4268 		desc_size = sizeof(struct dma_extended_desc);
4269 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4270 		desc_size = sizeof(struct dma_edesc);
4271 	else
4272 		desc_size = sizeof(struct dma_desc);
4273 
4274 	/* The own bit must be the last setting done when preparing the
4275 	 * descriptor, and a barrier is then needed to make sure everything
4276 	 * is coherent before granting ownership to the DMA engine.
4277 	 */
4278 	wmb();
4279 
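	/* Writing the tail pointer past the last prepared descriptor tells
	 * the DMA engine that new descriptors are available.
	 */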
4280 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4281 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4282 }
4283 
4284 /**
4285  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4286  *  @skb : the socket buffer
4287  *  @dev : device pointer
4288  *  Description: this is the transmit function that is called on TSO frames
4289  *  (support available on GMAC4 and newer chips).
4290  *  Diagram below show the ring programming in case of TSO frames:
4291  *
4292  *  First Descriptor
4293  *   --------
4294  *   | DES0 |---> buffer1 = L2/L3/L4 header
4295  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4296  *   |      |     width is 32-bit, but we never use it.
4297  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4298  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4299  *   |      |     or 48-bit, and we always use it.
4300  *   | DES2 |---> buffer1 len
4301  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4302  *   --------
4303  *   --------
4304  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4305  *   | DES1 |---> same as the First Descriptor
4306  *   | DES2 |---> buffer1 len
4307  *   | DES3 |
4308  *   --------
4309  *	|
4310  *     ...
4311  *	|
4312  *   --------
4313  *   | DES0 |---> buffer1 = Split TCP Payload
4314  *   | DES1 |---> same as the First Descriptor
4315  *   | DES2 |---> buffer1 len
4316  *   | DES3 |
4317  *   --------
4318  *
4319  * The MSS is normally fixed while TSO is enabled, so the TDES3 ctx field is only programmed when it changes.
4320  */
4321 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4322 {
4323 	struct dma_desc *desc, *first, *mss_desc = NULL;
4324 	struct stmmac_priv *priv = netdev_priv(dev);
4325 	unsigned int first_entry, tx_packets;
4326 	struct stmmac_txq_stats *txq_stats;
4327 	struct stmmac_tx_queue *tx_q;
4328 	u32 pay_len, mss, queue;
4329 	int i, first_tx, nfrags;
4330 	u8 proto_hdr_len, hdr;
4331 	dma_addr_t des;
4332 	bool set_ic;
4333 
4334 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4335 	 *
4336 	 * Never let the HW insert the VLAN tag, since segments split by
4337 	 * the TSO engine would otherwise be left untagged by mistake.
4338 	 */
4339 	if (skb_vlan_tag_present(skb)) {
4340 		skb = __vlan_hwaccel_push_inside(skb);
4341 		if (unlikely(!skb)) {
4342 			priv->xstats.tx_dropped++;
4343 			return NETDEV_TX_OK;
4344 		}
4345 	}
4346 
4347 	nfrags = skb_shinfo(skb)->nr_frags;
4348 	queue = skb_get_queue_mapping(skb);
4349 
4350 	tx_q = &priv->dma_conf.tx_queue[queue];
4351 	txq_stats = &priv->xstats.txq_stats[queue];
4352 	first_tx = tx_q->cur_tx;
4353 
4354 	/* Compute header lengths */
4355 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4356 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4357 		hdr = sizeof(struct udphdr);
4358 	} else {
4359 		proto_hdr_len = skb_tcp_all_headers(skb);
4360 		hdr = tcp_hdrlen(skb);
4361 	}
4362 
4363 	/* Descriptor availability based on the threshold should be safe enough */
4364 	if (unlikely(stmmac_tx_avail(priv, queue) <
4365 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4366 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4367 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4368 								queue));
4369 			/* This is a hard error, log it. */
4370 			netdev_err(priv->dev,
4371 				   "%s: Tx Ring full when queue awake\n",
4372 				   __func__);
4373 		}
4374 		return NETDEV_TX_BUSY;
4375 	}
4376 
4377 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4378 
4379 	mss = skb_shinfo(skb)->gso_size;
4380 
4381 	/* set new MSS value if needed */
4382 	if (mss != tx_q->mss) {
4383 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4384 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4385 		else
4386 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4387 
4388 		stmmac_set_mss(priv, mss_desc, mss);
4389 		tx_q->mss = mss;
4390 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4391 						priv->dma_conf.dma_tx_size);
4392 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4393 	}
4394 
4395 	if (netif_msg_tx_queued(priv)) {
4396 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4397 			__func__, hdr, proto_hdr_len, pay_len, mss);
4398 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4399 			skb->data_len);
4400 	}
4401 
4402 	first_entry = tx_q->cur_tx;
4403 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4404 
4405 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4406 		desc = &tx_q->dma_entx[first_entry].basic;
4407 	else
4408 		desc = &tx_q->dma_tx[first_entry];
4409 	first = desc;
4410 
4411 	/* first descriptor: fill Headers on Buf1 */
4412 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4413 			     DMA_TO_DEVICE);
4414 	if (dma_mapping_error(priv->device, des))
4415 		goto dma_map_err;
4416 
4417 	stmmac_set_desc_addr(priv, first, des);
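	/* The first descriptor carries the L2/L3/L4 headers only; the rest of
	 * the linear payload is spread over additional descriptors below.
	 */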
4418 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4419 			     (nfrags == 0), queue);
4420 
4421 	/* In case two or more DMA transmit descriptors are allocated for this
4422 	 * non-paged SKB data, the DMA buffer address should be saved to
4423 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4424 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4425 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4426 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4427 	 * sooner or later.
4428 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4429 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4430 	 * this DMA buffer right after the DMA engine completely finishes the
4431 	 * full buffer transmission.
4432 	 */
4433 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4434 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4435 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4436 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4437 
4438 	/* Prepare fragments */
4439 	for (i = 0; i < nfrags; i++) {
4440 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4441 
4442 		des = skb_frag_dma_map(priv->device, frag, 0,
4443 				       skb_frag_size(frag),
4444 				       DMA_TO_DEVICE);
4445 		if (dma_mapping_error(priv->device, des))
4446 			goto dma_map_err;
4447 
4448 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4449 				     (i == nfrags - 1), queue);
4450 
4451 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4452 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4453 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4454 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4455 	}
4456 
4457 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4458 
4459 	/* Only the last descriptor gets to point to the skb. */
4460 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4461 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4462 
4463 	/* Manage tx mitigation */
4464 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4465 	tx_q->tx_count_frames += tx_packets;
4466 
4467 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4468 		set_ic = true;
4469 	else if (!priv->tx_coal_frames[queue])
4470 		set_ic = false;
4471 	else if (tx_packets > priv->tx_coal_frames[queue])
4472 		set_ic = true;
4473 	else if ((tx_q->tx_count_frames %
4474 		  priv->tx_coal_frames[queue]) < tx_packets)
4475 		set_ic = true;
4476 	else
4477 		set_ic = false;
4478 
4479 	if (set_ic) {
4480 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4481 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4482 		else
4483 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4484 
4485 		tx_q->tx_count_frames = 0;
4486 		stmmac_set_tx_ic(priv, desc);
4487 	}
4488 
4489 	/* We've used all descriptors we need for this skb, however,
4490 	 * advance cur_tx so that it references a fresh descriptor.
4491 	 * ndo_start_xmit will fill this descriptor the next time it's
4492 	 * called and stmmac_tx_clean may clean up to this descriptor.
4493 	 */
4494 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4495 
4496 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4497 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4498 			  __func__);
4499 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4500 	}
4501 
4502 	u64_stats_update_begin(&txq_stats->q_syncp);
4503 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4504 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4505 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4506 	if (set_ic)
4507 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4508 	u64_stats_update_end(&txq_stats->q_syncp);
4509 
4510 	if (priv->sarc_type)
4511 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4512 
4513 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4514 		     priv->hwts_tx_en)) {
4515 		/* declare that device is doing timestamping */
4516 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4517 		stmmac_enable_tx_timestamp(priv, first);
4518 	}
4519 
4520 	/* Complete the first descriptor before granting the DMA */
4521 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4522 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4523 				   hdr / 4, (skb->len - proto_hdr_len));
4524 
4525 	/* If context desc is used to change MSS */
4526 	if (mss_desc) {
4527 		/* Make sure that first descriptor has been completely
4528 		/* Make sure that the first descriptor has been completely
4529 		 * written, including its own bit. This is because the MSS
4530 		 * descriptor actually precedes the first descriptor, so its
4531 		 * own bit must be the last thing written.
4532 		dma_wmb();
4533 		stmmac_set_tx_owner(priv, mss_desc);
4534 	}
4535 
4536 	if (netif_msg_pktdata(priv)) {
4537 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4538 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4539 			tx_q->cur_tx, first, nfrags);
4540 		pr_info(">>> frame to be transmitted: ");
4541 		print_pkt(skb->data, skb_headlen(skb));
4542 	}
4543 
4544 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4545 	skb_tx_timestamp(skb);
4546 
4547 	stmmac_flush_tx_descriptors(priv, queue);
4548 	stmmac_tx_timer_arm(priv, queue);
4549 
4550 	return NETDEV_TX_OK;
4551 
4552 dma_map_err:
4553 	dev_err(priv->device, "Tx dma map failed\n");
4554 	dev_kfree_skb(skb);
4555 	priv->xstats.tx_dropped++;
4556 	return NETDEV_TX_OK;
4557 }
4558 
4559 /**
4560  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4561  * @skb: socket buffer to check
4562  *
4563  * Check if a packet has an ethertype that will trigger the IP header checks
4564  * and IP/TCP checksum engine of the stmmac core.
4565  *
4566  * Return: true if the ethertype can trigger the checksum engine, false
4567  * otherwise
4568  */
4569 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4570 {
4571 	int depth = 0;
4572 	__be16 proto;
4573 
4574 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4575 				    &depth);
4576 
4577 	return (depth <= ETH_HLEN) &&
4578 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4579 }
4580 
4581 /**
4582  *  stmmac_xmit - Tx entry point of the driver
4583  *  @skb : the socket buffer
4584  *  @dev : device pointer
4585  *  Description : this is the tx entry point of the driver.
4586  *  It programs the chain or the ring and supports oversized frames
4587  *  and SG feature.
4588  */
4589 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4590 {
4591 	unsigned int first_entry, tx_packets, enh_desc;
4592 	struct stmmac_priv *priv = netdev_priv(dev);
4593 	unsigned int nopaged_len = skb_headlen(skb);
4594 	int i, csum_insertion = 0, is_jumbo = 0;
4595 	u32 queue = skb_get_queue_mapping(skb);
4596 	int nfrags = skb_shinfo(skb)->nr_frags;
4597 	int gso = skb_shinfo(skb)->gso_type;
4598 	struct stmmac_txq_stats *txq_stats;
4599 	struct dma_edesc *tbs_desc = NULL;
4600 	struct dma_desc *desc, *first;
4601 	struct stmmac_tx_queue *tx_q;
4602 	bool has_vlan, set_ic;
4603 	int entry, first_tx;
4604 	dma_addr_t des;
4605 
4606 	tx_q = &priv->dma_conf.tx_queue[queue];
4607 	txq_stats = &priv->xstats.txq_stats[queue];
4608 	first_tx = tx_q->cur_tx;
4609 
4610 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4611 		stmmac_stop_sw_lpi(priv);
4612 
4613 	/* Manage oversized TCP frames, and UDP GSO on GMAC4, via the TSO path */
4614 	if (skb_is_gso(skb) && priv->tso) {
4615 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4616 			return stmmac_tso_xmit(skb, dev);
4617 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4618 			return stmmac_tso_xmit(skb, dev);
4619 	}
4620 
4621 	if (priv->est && priv->est->enable &&
4622 	    priv->est->max_sdu[queue] &&
4623 	    skb->len > priv->est->max_sdu[queue]) {
4624 		priv->xstats.max_sdu_txq_drop[queue]++;
4625 		goto max_sdu_err;
4626 	}
4627 
4628 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4629 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4630 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4631 								queue));
4632 			/* This is a hard error, log it. */
4633 			netdev_err(priv->dev,
4634 				   "%s: Tx Ring full when queue awake\n",
4635 				   __func__);
4636 		}
4637 		return NETDEV_TX_BUSY;
4638 	}
4639 
4640 	/* Check if VLAN can be inserted by HW */
4641 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4642 
4643 	entry = tx_q->cur_tx;
4644 	first_entry = entry;
4645 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4646 
4647 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4648 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4649 	 * queues. In that case, checksum offloading for those queues that don't
4650 	 * support tx coe needs to fall back to software checksum calculation.
4651 	 *
4652 	 * Packets that won't trigger the COE, e.g. most DSA-tagged packets, will
4653 	 * also have to be checksummed in software.
4654 	 */
4655 	if (csum_insertion &&
4656 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4657 	     !stmmac_has_ip_ethertype(skb))) {
4658 		if (unlikely(skb_checksum_help(skb)))
4659 			goto dma_map_err;
4660 		csum_insertion = !csum_insertion;
4661 	}
4662 
4663 	if (likely(priv->extend_desc))
4664 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4665 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4666 		desc = &tx_q->dma_entx[entry].basic;
4667 	else
4668 		desc = tx_q->dma_tx + entry;
4669 
4670 	first = desc;
4671 
4672 	if (has_vlan)
4673 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4674 
4675 	enh_desc = priv->plat->enh_desc;
4676 	/* To program the descriptors according to the size of the frame */
4677 	if (enh_desc)
4678 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4679 
4680 	if (unlikely(is_jumbo)) {
4681 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4682 		if (unlikely(entry < 0) && (entry != -EINVAL))
4683 			goto dma_map_err;
4684 	}
4685 
4686 	for (i = 0; i < nfrags; i++) {
4687 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4688 		int len = skb_frag_size(frag);
4689 		bool last_segment = (i == (nfrags - 1));
4690 
4691 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4692 		WARN_ON(tx_q->tx_skbuff[entry]);
4693 
4694 		if (likely(priv->extend_desc))
4695 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4696 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4697 			desc = &tx_q->dma_entx[entry].basic;
4698 		else
4699 			desc = tx_q->dma_tx + entry;
4700 
4701 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4702 				       DMA_TO_DEVICE);
4703 		if (dma_mapping_error(priv->device, des))
4704 			goto dma_map_err; /* should reuse desc w/o issues */
4705 
4706 		tx_q->tx_skbuff_dma[entry].buf = des;
4707 
4708 		stmmac_set_desc_addr(priv, desc, des);
4709 
4710 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4711 		tx_q->tx_skbuff_dma[entry].len = len;
4712 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4713 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4714 
4715 		/* Prepare the descriptor and set the own bit too */
4716 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4717 				priv->mode, 1, last_segment, skb->len);
4718 	}
4719 
4720 	/* Only the last descriptor gets to point to the skb. */
4721 	tx_q->tx_skbuff[entry] = skb;
4722 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4723 
4724 	/* According to the coalesce parameter, the IC bit for the latest
4725 	 * segment is reset and the timer is re-started to clean the tx status.
4726 	 * This approach takes care of the fragments: desc is the first
4727 	 * element in case of no SG.
4728 	 */
4729 	tx_packets = (entry + 1) - first_tx;
4730 	tx_q->tx_count_frames += tx_packets;
4731 
4732 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4733 		set_ic = true;
4734 	else if (!priv->tx_coal_frames[queue])
4735 		set_ic = false;
4736 	else if (tx_packets > priv->tx_coal_frames[queue])
4737 		set_ic = true;
4738 	else if ((tx_q->tx_count_frames %
4739 		  priv->tx_coal_frames[queue]) < tx_packets)
4740 		set_ic = true;
4741 	else
4742 		set_ic = false;
4743 
4744 	if (set_ic) {
4745 		if (likely(priv->extend_desc))
4746 			desc = &tx_q->dma_etx[entry].basic;
4747 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4748 			desc = &tx_q->dma_entx[entry].basic;
4749 		else
4750 			desc = &tx_q->dma_tx[entry];
4751 
4752 		tx_q->tx_count_frames = 0;
4753 		stmmac_set_tx_ic(priv, desc);
4754 	}
4755 
4756 	/* We've used all descriptors we need for this skb, however,
4757 	 * advance cur_tx so that it references a fresh descriptor.
4758 	 * ndo_start_xmit will fill this descriptor the next time it's
4759 	 * called and stmmac_tx_clean may clean up to this descriptor.
4760 	 */
4761 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4762 	tx_q->cur_tx = entry;
4763 
4764 	if (netif_msg_pktdata(priv)) {
4765 		netdev_dbg(priv->dev,
4766 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4767 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4768 			   entry, first, nfrags);
4769 
4770 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4771 		print_pkt(skb->data, skb->len);
4772 	}
4773 
4774 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4775 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4776 			  __func__);
4777 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4778 	}
4779 
4780 	u64_stats_update_begin(&txq_stats->q_syncp);
4781 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4782 	if (set_ic)
4783 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4784 	u64_stats_update_end(&txq_stats->q_syncp);
4785 
4786 	if (priv->sarc_type)
4787 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4788 
4789 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4790 	 * problems because all the descriptors are actually ready to be
4791 	 * passed to the DMA engine.
4792 	 */
4793 	if (likely(!is_jumbo)) {
4794 		bool last_segment = (nfrags == 0);
4795 
4796 		des = dma_map_single(priv->device, skb->data,
4797 				     nopaged_len, DMA_TO_DEVICE);
4798 		if (dma_mapping_error(priv->device, des))
4799 			goto dma_map_err;
4800 
4801 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4802 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4803 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4804 
4805 		stmmac_set_desc_addr(priv, first, des);
4806 
4807 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4808 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4809 
4810 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4811 			     priv->hwts_tx_en)) {
4812 			/* declare that device is doing timestamping */
4813 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4814 			stmmac_enable_tx_timestamp(priv, first);
4815 		}
4816 
4817 		/* Prepare the first descriptor setting the OWN bit too */
4818 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4819 				csum_insertion, priv->mode, 0, last_segment,
4820 				skb->len);
4821 	}
4822 
4823 	if (tx_q->tbs & STMMAC_TBS_EN) {
4824 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4825 
4826 		tbs_desc = &tx_q->dma_entx[first_entry];
4827 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4828 	}
4829 
4830 	stmmac_set_tx_owner(priv, first);
4831 
4832 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4833 
4834 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4835 	skb_tx_timestamp(skb);
4836 	stmmac_flush_tx_descriptors(priv, queue);
4837 	stmmac_tx_timer_arm(priv, queue);
4838 
4839 	return NETDEV_TX_OK;
4840 
4841 dma_map_err:
4842 	netdev_err(priv->dev, "Tx DMA map failed\n");
4843 max_sdu_err:
4844 	dev_kfree_skb(skb);
4845 	priv->xstats.tx_dropped++;
4846 	return NETDEV_TX_OK;
4847 }
4848 
4849 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4850 {
4851 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4852 	__be16 vlan_proto = veth->h_vlan_proto;
4853 	u16 vlanid;
4854 
4855 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4856 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4857 	    (vlan_proto == htons(ETH_P_8021AD) &&
4858 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4859 		/* pop the vlan tag */
4860 		vlanid = ntohs(veth->h_vlan_TCI);
4861 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4862 		skb_pull(skb, VLAN_HLEN);
4863 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4864 	}
4865 }
4866 
4867 /**
4868  * stmmac_rx_refill - refill the used RX buffers
4869  * @priv: driver private structure
4870  * @queue: RX queue index
4871  * Description : this is to reallocate the RX buffers for the reception
4872  * process that is based on zero-copy.
4873  */
4874 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4875 {
4876 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4877 	int dirty = stmmac_rx_dirty(priv, queue);
4878 	unsigned int entry = rx_q->dirty_rx;
4879 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4880 
4881 	if (priv->dma_cap.host_dma_width <= 32)
4882 		gfp |= GFP_DMA32;
4883 
4884 	while (dirty-- > 0) {
4885 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4886 		struct dma_desc *p;
4887 		bool use_rx_wd;
4888 
4889 		if (priv->extend_desc)
4890 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4891 		else
4892 			p = rx_q->dma_rx + entry;
4893 
4894 		if (!buf->page) {
4895 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4896 			if (!buf->page)
4897 				break;
4898 		}
4899 
4900 		if (priv->sph && !buf->sec_page) {
4901 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4902 			if (!buf->sec_page)
4903 				break;
4904 
4905 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4906 		}
4907 
4908 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4909 
4910 		stmmac_set_desc_addr(priv, p, buf->addr);
4911 		if (priv->sph)
4912 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4913 		else
4914 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4915 		stmmac_refill_desc3(priv, rx_q, p);
4916 
4917 		rx_q->rx_count_frames++;
4918 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4919 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4920 			rx_q->rx_count_frames = 0;
4921 
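		/* use_rx_wd decides whether the per-descriptor completion
		 * interrupt can be suppressed in favour of the RX watchdog /
		 * frame coalescing; it is only honoured when RIWT is enabled.
		 */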
4922 		use_rx_wd = !priv->rx_coal_frames[queue];
4923 		use_rx_wd |= rx_q->rx_count_frames > 0;
4924 		if (!priv->use_riwt)
4925 			use_rx_wd = false;
4926 
4927 		dma_wmb();
4928 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4929 
4930 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4931 	}
4932 	rx_q->dirty_rx = entry;
4933 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4934 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4935 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4936 }
4937 
4938 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4939 				       struct dma_desc *p,
4940 				       int status, unsigned int len)
4941 {
4942 	unsigned int plen = 0, hlen = 0;
4943 	int coe = priv->hw->rx_csum;
4944 
4945 	/* Not first descriptor, buffer is always zero */
4946 	if (priv->sph && len)
4947 		return 0;
4948 
4949 	/* First descriptor, get split header length */
4950 	stmmac_get_rx_header_len(priv, p, &hlen);
4951 	if (priv->sph && hlen) {
4952 		priv->xstats.rx_split_hdr_pkt_n++;
4953 		return hlen;
4954 	}
4955 
4956 	/* First descriptor, not last descriptor and not split header */
4957 	if (status & rx_not_ls)
4958 		return priv->dma_conf.dma_buf_sz;
4959 
4960 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4961 
4962 	/* First descriptor and last descriptor and not split header */
4963 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4964 }
4965 
4966 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4967 				       struct dma_desc *p,
4968 				       int status, unsigned int len)
4969 {
4970 	int coe = priv->hw->rx_csum;
4971 	unsigned int plen = 0;
4972 
4973 	/* Not split header, buffer is not available */
4974 	if (!priv->sph)
4975 		return 0;
4976 
4977 	/* Not last descriptor */
4978 	if (status & rx_not_ls)
4979 		return priv->dma_conf.dma_buf_sz;
4980 
4981 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4982 
4983 	/* Last descriptor */
4984 	return plen - len;
4985 }
4986 
4987 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4988 				struct xdp_frame *xdpf, bool dma_map)
4989 {
4990 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4991 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4992 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
4993 	unsigned int entry = tx_q->cur_tx;
4994 	struct dma_desc *tx_desc;
4995 	dma_addr_t dma_addr;
4996 	bool set_ic;
4997 
4998 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4999 		return STMMAC_XDP_CONSUMED;
5000 
5001 	if (priv->est && priv->est->enable &&
5002 	    priv->est->max_sdu[queue] &&
5003 	    xdpf->len > priv->est->max_sdu[queue]) {
5004 		priv->xstats.max_sdu_txq_drop[queue]++;
5005 		return STMMAC_XDP_CONSUMED;
5006 	}
5007 
5008 	if (likely(priv->extend_desc))
5009 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
5010 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
5011 		tx_desc = &tx_q->dma_entx[entry].basic;
5012 	else
5013 		tx_desc = tx_q->dma_tx + entry;
5014 
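	/* Frames coming from ndo_xdp_xmit (dma_map == true) need a fresh DMA
	 * mapping; XDP_TX frames already live in page_pool memory and only
	 * need a cache sync before transmission.
	 */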
5015 	if (dma_map) {
5016 		dma_addr = dma_map_single(priv->device, xdpf->data,
5017 					  xdpf->len, DMA_TO_DEVICE);
5018 		if (dma_mapping_error(priv->device, dma_addr))
5019 			return STMMAC_XDP_CONSUMED;
5020 
5021 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
5022 	} else {
5023 		struct page *page = virt_to_page(xdpf->data);
5024 
5025 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
5026 			   xdpf->headroom;
5027 		dma_sync_single_for_device(priv->device, dma_addr,
5028 					   xdpf->len, DMA_BIDIRECTIONAL);
5029 
5030 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
5031 	}
5032 
5033 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
5034 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
5035 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
5036 	tx_q->tx_skbuff_dma[entry].last_segment = true;
5037 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
5038 
5039 	tx_q->xdpf[entry] = xdpf;
5040 
5041 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
5042 
5043 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
5044 			       csum, priv->mode, true, true,
5045 			       xdpf->len);
5046 
5047 	tx_q->tx_count_frames++;
5048 
5049 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
5050 		set_ic = true;
5051 	else
5052 		set_ic = false;
5053 
5054 	if (set_ic) {
5055 		tx_q->tx_count_frames = 0;
5056 		stmmac_set_tx_ic(priv, tx_desc);
5057 		u64_stats_update_begin(&txq_stats->q_syncp);
5058 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
5059 		u64_stats_update_end(&txq_stats->q_syncp);
5060 	}
5061 
5062 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
5063 
5064 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5065 	tx_q->cur_tx = entry;
5066 
5067 	return STMMAC_XDP_TX;
5068 }
5069 
5070 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5071 				   int cpu)
5072 {
5073 	int index = cpu;
5074 
5075 	if (unlikely(index < 0))
5076 		index = 0;
5077 
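	/* Fold the CPU number onto the range of usable TX queues so that
	 * XDP transmissions are spread across the available queues.
	 */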
5078 	while (index >= priv->plat->tx_queues_to_use)
5079 		index -= priv->plat->tx_queues_to_use;
5080 
5081 	return index;
5082 }
5083 
5084 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5085 				struct xdp_buff *xdp)
5086 {
5087 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5088 	int cpu = smp_processor_id();
5089 	struct netdev_queue *nq;
5090 	int queue;
5091 	int res;
5092 
5093 	if (unlikely(!xdpf))
5094 		return STMMAC_XDP_CONSUMED;
5095 
5096 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5097 	nq = netdev_get_tx_queue(priv->dev, queue);
5098 
5099 	__netif_tx_lock(nq, cpu);
5100 	/* Avoids TX time-out as we are sharing with slow path */
5101 	txq_trans_cond_update(nq);
5102 
5103 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5104 	if (res == STMMAC_XDP_TX)
5105 		stmmac_flush_tx_descriptors(priv, queue);
5106 
5107 	__netif_tx_unlock(nq);
5108 
5109 	return res;
5110 }
5111 
5112 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5113 				 struct bpf_prog *prog,
5114 				 struct xdp_buff *xdp)
5115 {
5116 	u32 act;
5117 	int res;
5118 
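	/* Run the XDP program and translate its verdict into the driver's
	 * STMMAC_XDP_* result flags.
	 */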
5119 	act = bpf_prog_run_xdp(prog, xdp);
5120 	switch (act) {
5121 	case XDP_PASS:
5122 		res = STMMAC_XDP_PASS;
5123 		break;
5124 	case XDP_TX:
5125 		res = stmmac_xdp_xmit_back(priv, xdp);
5126 		break;
5127 	case XDP_REDIRECT:
5128 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5129 			res = STMMAC_XDP_CONSUMED;
5130 		else
5131 			res = STMMAC_XDP_REDIRECT;
5132 		break;
5133 	default:
5134 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5135 		fallthrough;
5136 	case XDP_ABORTED:
5137 		trace_xdp_exception(priv->dev, prog, act);
5138 		fallthrough;
5139 	case XDP_DROP:
5140 		res = STMMAC_XDP_CONSUMED;
5141 		break;
5142 	}
5143 
5144 	return res;
5145 }
5146 
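/* Wrapper for the normal (copy-mode) RX path: the STMMAC_XDP_* result is
 * encoded as ERR_PTR(-res) so the caller can tell an XDP verdict apart
 * from a real skb pointer.
 */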
5147 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5148 					   struct xdp_buff *xdp)
5149 {
5150 	struct bpf_prog *prog;
5151 	int res;
5152 
5153 	prog = READ_ONCE(priv->xdp_prog);
5154 	if (!prog) {
5155 		res = STMMAC_XDP_PASS;
5156 		goto out;
5157 	}
5158 
5159 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5160 out:
5161 	return ERR_PTR(-res);
5162 }
5163 
5164 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5165 				   int xdp_status)
5166 {
5167 	int cpu = smp_processor_id();
5168 	int queue;
5169 
5170 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5171 
5172 	if (xdp_status & STMMAC_XDP_TX)
5173 		stmmac_tx_timer_arm(priv, queue);
5174 
5175 	if (xdp_status & STMMAC_XDP_REDIRECT)
5176 		xdp_do_flush();
5177 }
5178 
5179 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5180 					       struct xdp_buff *xdp)
5181 {
5182 	unsigned int metasize = xdp->data - xdp->data_meta;
5183 	unsigned int datasize = xdp->data_end - xdp->data;
5184 	struct sk_buff *skb;
5185 
5186 	skb = napi_alloc_skb(&ch->rxtx_napi,
5187 			     xdp->data_end - xdp->data_hard_start);
5188 	if (unlikely(!skb))
5189 		return NULL;
5190 
5191 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5192 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5193 	if (metasize)
5194 		skb_metadata_set(skb, metasize);
5195 
5196 	return skb;
5197 }
5198 
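/* XDP_PASS handling on the zero-copy path: copy the XSK buffer into an
 * skb, fill in timestamp/VLAN/checksum/hash metadata and hand it to GRO.
 */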
5199 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5200 				   struct dma_desc *p, struct dma_desc *np,
5201 				   struct xdp_buff *xdp)
5202 {
5203 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5204 	struct stmmac_channel *ch = &priv->channel[queue];
5205 	unsigned int len = xdp->data_end - xdp->data;
5206 	enum pkt_hash_types hash_type;
5207 	int coe = priv->hw->rx_csum;
5208 	struct sk_buff *skb;
5209 	u32 hash;
5210 
5211 	skb = stmmac_construct_skb_zc(ch, xdp);
5212 	if (!skb) {
5213 		priv->xstats.rx_dropped++;
5214 		return;
5215 	}
5216 
5217 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5218 	if (priv->hw->hw_vlan_en)
5219 		/* MAC level stripping. */
5220 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5221 	else
5222 		/* Driver level stripping. */
5223 		stmmac_rx_vlan(priv->dev, skb);
5224 	skb->protocol = eth_type_trans(skb, priv->dev);
5225 
5226 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5227 		skb_checksum_none_assert(skb);
5228 	else
5229 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5230 
5231 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5232 		skb_set_hash(skb, hash, hash_type);
5233 
5234 	skb_record_rx_queue(skb, queue);
5235 	napi_gro_receive(&ch->rxtx_napi, skb);
5236 
5237 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5238 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5239 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5240 	u64_stats_update_end(&rxq_stats->napi_syncp);
5241 }
5242 
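/* Refill the RX ring with buffers taken from the XSK pool. Returns false
 * if the pool runs out of buffers before the requested budget is met.
 */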
5243 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5244 {
5245 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5246 	unsigned int entry = rx_q->dirty_rx;
5247 	struct dma_desc *rx_desc = NULL;
5248 	bool ret = true;
5249 
5250 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5251 
5252 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5253 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5254 		dma_addr_t dma_addr;
5255 		bool use_rx_wd;
5256 
5257 		if (!buf->xdp) {
5258 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5259 			if (!buf->xdp) {
5260 				ret = false;
5261 				break;
5262 			}
5263 		}
5264 
5265 		if (priv->extend_desc)
5266 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5267 		else
5268 			rx_desc = rx_q->dma_rx + entry;
5269 
5270 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5271 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5272 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5273 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5274 
5275 		rx_q->rx_count_frames++;
5276 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5277 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5278 			rx_q->rx_count_frames = 0;
5279 
5280 		use_rx_wd = !priv->rx_coal_frames[queue];
5281 		use_rx_wd |= rx_q->rx_count_frames > 0;
5282 		if (!priv->use_riwt)
5283 			use_rx_wd = false;
5284 
5285 		dma_wmb();
5286 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5287 
5288 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5289 	}
5290 
5291 	if (rx_desc) {
5292 		rx_q->dirty_rx = entry;
5293 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5294 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5295 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5296 	}
5297 
5298 	return ret;
5299 }
5300 
5301 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5302 {
5303 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5304 	 * to represent incoming packet, whereas cb field in the same structure
5305 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5306 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5307 	 */
5308 	return (struct stmmac_xdp_buff *)xdp;
5309 }
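/* Zero-copy (AF_XDP) RX processing for one queue, called from the
 * combined rxtx NAPI poll handler below.
 */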
5310 
5311 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5312 {
5313 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5314 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5315 	unsigned int count = 0, error = 0, len = 0;
5316 	int dirty = stmmac_rx_dirty(priv, queue);
5317 	unsigned int next_entry = rx_q->cur_rx;
5318 	u32 rx_errors = 0, rx_dropped = 0;
5319 	unsigned int desc_size;
5320 	struct bpf_prog *prog;
5321 	bool failure = false;
5322 	int xdp_status = 0;
5323 	int status = 0;
5324 
5325 	if (netif_msg_rx_status(priv)) {
5326 		void *rx_head;
5327 
5328 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5329 		if (priv->extend_desc) {
5330 			rx_head = (void *)rx_q->dma_erx;
5331 			desc_size = sizeof(struct dma_extended_desc);
5332 		} else {
5333 			rx_head = (void *)rx_q->dma_rx;
5334 			desc_size = sizeof(struct dma_desc);
5335 		}
5336 
5337 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5338 				    rx_q->dma_rx_phy, desc_size);
5339 	}
5340 	while (count < limit) {
5341 		struct stmmac_rx_buffer *buf;
5342 		struct stmmac_xdp_buff *ctx;
5343 		unsigned int buf1_len = 0;
5344 		struct dma_desc *np, *p;
5345 		int entry;
5346 		int res;
5347 
5348 		if (!count && rx_q->state_saved) {
5349 			error = rx_q->state.error;
5350 			len = rx_q->state.len;
5351 		} else {
5352 			rx_q->state_saved = false;
5353 			error = 0;
5354 			len = 0;
5355 		}
5356 
5357 		if (count >= limit)
5358 			break;
5359 
5360 read_again:
5361 		buf1_len = 0;
5362 		entry = next_entry;
5363 		buf = &rx_q->buf_pool[entry];
5364 
5365 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5366 			failure = failure ||
5367 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5368 			dirty = 0;
5369 		}
5370 
5371 		if (priv->extend_desc)
5372 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5373 		else
5374 			p = rx_q->dma_rx + entry;
5375 
5376 		/* read the status of the incoming frame */
5377 		status = stmmac_rx_status(priv, &priv->xstats, p);
5378 		/* check if it is still owned by the DMA; otherwise go ahead */
5379 		if (unlikely(status & dma_own))
5380 			break;
5381 
5382 		/* Prefetch the next RX descriptor */
5383 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5384 						priv->dma_conf.dma_rx_size);
5385 		next_entry = rx_q->cur_rx;
5386 
5387 		if (priv->extend_desc)
5388 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5389 		else
5390 			np = rx_q->dma_rx + next_entry;
5391 
5392 		prefetch(np);
5393 
5394 		/* Ensure a valid XSK buffer before proceeding */
5395 		if (!buf->xdp)
5396 			break;
5397 
5398 		if (priv->extend_desc)
5399 			stmmac_rx_extended_status(priv, &priv->xstats,
5400 						  rx_q->dma_erx + entry);
5401 		if (unlikely(status == discard_frame)) {
5402 			xsk_buff_free(buf->xdp);
5403 			buf->xdp = NULL;
5404 			dirty++;
5405 			error = 1;
5406 			if (!priv->hwts_rx_en)
5407 				rx_errors++;
5408 		}
5409 
5410 		if (unlikely(error && (status & rx_not_ls)))
5411 			goto read_again;
5412 		if (unlikely(error)) {
5413 			count++;
5414 			continue;
5415 		}
5416 
5417 		/* The XSK pool expects RX frames mapped 1:1 to XSK buffers */
5418 		if (likely(status & rx_not_ls)) {
5419 			xsk_buff_free(buf->xdp);
5420 			buf->xdp = NULL;
5421 			dirty++;
5422 			count++;
5423 			goto read_again;
5424 		}
5425 
5426 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5427 		ctx->priv = priv;
5428 		ctx->desc = p;
5429 		ctx->ndesc = np;
5430 
5431 		/* XDP ZC frames only support primary buffers for now */
5432 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5433 		len += buf1_len;
5434 
5435 		/* ACS is disabled; strip manually. */
5436 		if (likely(!(status & rx_not_ls))) {
5437 			buf1_len -= ETH_FCS_LEN;
5438 			len -= ETH_FCS_LEN;
5439 		}
5440 
5441 		/* RX buffer is good and fits into an XSK pool buffer */
5442 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5443 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5444 
5445 		prog = READ_ONCE(priv->xdp_prog);
5446 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5447 
5448 		switch (res) {
5449 		case STMMAC_XDP_PASS:
5450 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5451 			xsk_buff_free(buf->xdp);
5452 			break;
5453 		case STMMAC_XDP_CONSUMED:
5454 			xsk_buff_free(buf->xdp);
5455 			rx_dropped++;
5456 			break;
5457 		case STMMAC_XDP_TX:
5458 		case STMMAC_XDP_REDIRECT:
5459 			xdp_status |= res;
5460 			break;
5461 		}
5462 
5463 		buf->xdp = NULL;
5464 		dirty++;
5465 		count++;
5466 	}
5467 
5468 	if (status & rx_not_ls) {
5469 		rx_q->state_saved = true;
5470 		rx_q->state.error = error;
5471 		rx_q->state.len = len;
5472 	}
5473 
5474 	stmmac_finalize_xdp_rx(priv, xdp_status);
5475 
5476 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5477 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5478 	u64_stats_update_end(&rxq_stats->napi_syncp);
5479 
5480 	priv->xstats.rx_dropped += rx_dropped;
5481 	priv->xstats.rx_errors += rx_errors;
5482 
5483 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5484 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5485 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5486 		else
5487 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5488 
5489 		return (int)count;
5490 	}
5491 
5492 	return failure ? limit : (int)count;
5493 }
5494 
5495 /**
5496  * stmmac_rx - manage the receive process
5497  * @priv: driver private structure
5498  * @limit: napi budget
5499  * @queue: RX queue index.
5500  * Description: this is the function called by the napi poll method.
5501  * It gets all the frames inside the ring.
5502  */
5503 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5504 {
5505 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5506 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5507 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5508 	struct stmmac_channel *ch = &priv->channel[queue];
5509 	unsigned int count = 0, error = 0, len = 0;
5510 	int status = 0, coe = priv->hw->rx_csum;
5511 	unsigned int next_entry = rx_q->cur_rx;
5512 	enum dma_data_direction dma_dir;
5513 	unsigned int desc_size;
5514 	struct sk_buff *skb = NULL;
5515 	struct stmmac_xdp_buff ctx;
5516 	int xdp_status = 0;
5517 	int bufsz;
5518 
5519 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5520 	bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5521 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5522 
5523 	if (netif_msg_rx_status(priv)) {
5524 		void *rx_head;
5525 
5526 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5527 		if (priv->extend_desc) {
5528 			rx_head = (void *)rx_q->dma_erx;
5529 			desc_size = sizeof(struct dma_extended_desc);
5530 		} else {
5531 			rx_head = (void *)rx_q->dma_rx;
5532 			desc_size = sizeof(struct dma_desc);
5533 		}
5534 
5535 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5536 				    rx_q->dma_rx_phy, desc_size);
5537 	}
5538 	while (count < limit) {
5539 		unsigned int buf1_len = 0, buf2_len = 0;
5540 		enum pkt_hash_types hash_type;
5541 		struct stmmac_rx_buffer *buf;
5542 		struct dma_desc *np, *p;
5543 		int entry;
5544 		u32 hash;
5545 
5546 		if (!count && rx_q->state_saved) {
5547 			skb = rx_q->state.skb;
5548 			error = rx_q->state.error;
5549 			len = rx_q->state.len;
5550 		} else {
5551 			rx_q->state_saved = false;
5552 			skb = NULL;
5553 			error = 0;
5554 			len = 0;
5555 		}
5556 
5557 read_again:
5558 		if (count >= limit)
5559 			break;
5560 
5561 		buf1_len = 0;
5562 		buf2_len = 0;
5563 		entry = next_entry;
5564 		buf = &rx_q->buf_pool[entry];
5565 
5566 		if (priv->extend_desc)
5567 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5568 		else
5569 			p = rx_q->dma_rx + entry;
5570 
5571 		/* read the status of the incoming frame */
5572 		status = stmmac_rx_status(priv, &priv->xstats, p);
5573 		/* check if it is still owned by the DMA; otherwise go ahead */
5574 		if (unlikely(status & dma_own))
5575 			break;
5576 
5577 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5578 						priv->dma_conf.dma_rx_size);
5579 		next_entry = rx_q->cur_rx;
5580 
5581 		if (priv->extend_desc)
5582 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5583 		else
5584 			np = rx_q->dma_rx + next_entry;
5585 
5586 		prefetch(np);
5587 
5588 		if (priv->extend_desc)
5589 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5590 		if (unlikely(status == discard_frame)) {
5591 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5592 			buf->page = NULL;
5593 			error = 1;
5594 			if (!priv->hwts_rx_en)
5595 				rx_errors++;
5596 		}
5597 
5598 		if (unlikely(error && (status & rx_not_ls)))
5599 			goto read_again;
5600 		if (unlikely(error)) {
5601 			dev_kfree_skb(skb);
5602 			skb = NULL;
5603 			count++;
5604 			continue;
5605 		}
5606 
5607 		/* Buffer is good. Go on. */
5608 
5609 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5610 		len += buf1_len;
5611 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5612 		len += buf2_len;
5613 
5614 		/* ACS is disabled; strip manually. */
5615 		if (likely(!(status & rx_not_ls))) {
5616 			if (buf2_len) {
5617 				buf2_len -= ETH_FCS_LEN;
5618 				len -= ETH_FCS_LEN;
5619 			} else if (buf1_len) {
5620 				buf1_len -= ETH_FCS_LEN;
5621 				len -= ETH_FCS_LEN;
5622 			}
5623 		}
5624 
5625 		if (!skb) {
5626 			unsigned int pre_len, sync_len;
5627 
5628 			dma_sync_single_for_cpu(priv->device, buf->addr,
5629 						buf1_len, dma_dir);
5630 			net_prefetch(page_address(buf->page) +
5631 				     buf->page_offset);
5632 
5633 			xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5634 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5635 					 buf->page_offset, buf1_len, true);
5636 
5637 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5638 				  buf->page_offset;
5639 
5640 			ctx.priv = priv;
5641 			ctx.desc = p;
5642 			ctx.ndesc = np;
5643 
5644 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5645 			/* Due to xdp_adjust_tail: the DMA sync for_device must
5646 			 * cover the maximum length the CPU may have touched
5647 			 */
5648 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5649 				   buf->page_offset;
5650 			sync_len = max(sync_len, pre_len);
5651 
5652 			/* For a non-XDP_PASS verdict */
5653 			if (IS_ERR(skb)) {
5654 				unsigned int xdp_res = -PTR_ERR(skb);
5655 
5656 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5657 					page_pool_put_page(rx_q->page_pool,
5658 							   virt_to_head_page(ctx.xdp.data),
5659 							   sync_len, true);
5660 					buf->page = NULL;
5661 					rx_dropped++;
5662 
5663 					/* Clear skb as it was set as
5664 					 * status by XDP program.
5665 					 */
5666 					skb = NULL;
5667 
5668 					if (unlikely((status & rx_not_ls)))
5669 						goto read_again;
5670 
5671 					count++;
5672 					continue;
5673 				} else if (xdp_res & (STMMAC_XDP_TX |
5674 						      STMMAC_XDP_REDIRECT)) {
5675 					xdp_status |= xdp_res;
5676 					buf->page = NULL;
5677 					skb = NULL;
5678 					count++;
5679 					continue;
5680 				}
5681 			}
5682 		}
5683 
5684 		if (!skb) {
5685 			unsigned int head_pad_len;
5686 
5687 			/* XDP program may expand or reduce tail */
5688 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5689 
5690 			skb = napi_build_skb(page_address(buf->page),
5691 					     rx_q->napi_skb_frag_size);
5692 			if (!skb) {
5693 				page_pool_recycle_direct(rx_q->page_pool,
5694 							 buf->page);
5695 				rx_dropped++;
5696 				count++;
5697 				goto drain_data;
5698 			}
5699 
5700 			/* XDP program may adjust header */
5701 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5702 			skb_reserve(skb, head_pad_len);
5703 			skb_put(skb, buf1_len);
5704 			skb_mark_for_recycle(skb);
5705 			buf->page = NULL;
5706 		} else if (buf1_len) {
5707 			dma_sync_single_for_cpu(priv->device, buf->addr,
5708 						buf1_len, dma_dir);
5709 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5710 					buf->page, buf->page_offset, buf1_len,
5711 					priv->dma_conf.dma_buf_sz);
5712 			buf->page = NULL;
5713 		}
5714 
5715 		if (buf2_len) {
5716 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5717 						buf2_len, dma_dir);
5718 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5719 					buf->sec_page, 0, buf2_len,
5720 					priv->dma_conf.dma_buf_sz);
5721 			buf->sec_page = NULL;
5722 		}
5723 
5724 drain_data:
5725 		if (likely(status & rx_not_ls))
5726 			goto read_again;
5727 		if (!skb)
5728 			continue;
5729 
5730 		/* Got entire packet into SKB. Finish it. */
5731 
5732 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5733 
5734 		if (priv->hw->hw_vlan_en)
5735 			/* MAC level stripping. */
5736 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5737 		else
5738 			/* Driver level stripping. */
5739 			stmmac_rx_vlan(priv->dev, skb);
5740 
5741 		skb->protocol = eth_type_trans(skb, priv->dev);
5742 
5743 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
5744 		    (status & csum_none))
5745 			skb_checksum_none_assert(skb);
5746 		else
5747 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5748 
5749 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5750 			skb_set_hash(skb, hash, hash_type);
5751 
5752 		skb_record_rx_queue(skb, queue);
5753 		napi_gro_receive(&ch->rx_napi, skb);
5754 		skb = NULL;
5755 
5756 		rx_packets++;
5757 		rx_bytes += len;
5758 		count++;
5759 	}
5760 
5761 	if (status & rx_not_ls || skb) {
5762 		rx_q->state_saved = true;
5763 		rx_q->state.skb = skb;
5764 		rx_q->state.error = error;
5765 		rx_q->state.len = len;
5766 	}
5767 
5768 	stmmac_finalize_xdp_rx(priv, xdp_status);
5769 
5770 	stmmac_rx_refill(priv, queue);
5771 
5772 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5773 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5774 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5775 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5776 	u64_stats_update_end(&rxq_stats->napi_syncp);
5777 
5778 	priv->xstats.rx_dropped += rx_dropped;
5779 	priv->xstats.rx_errors += rx_errors;
5780 
5781 	return count;
5782 }
5783 
5784 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5785 {
5786 	struct stmmac_channel *ch =
5787 		container_of(napi, struct stmmac_channel, rx_napi);
5788 	struct stmmac_priv *priv = ch->priv_data;
5789 	struct stmmac_rxq_stats *rxq_stats;
5790 	u32 chan = ch->index;
5791 	int work_done;
5792 
5793 	rxq_stats = &priv->xstats.rxq_stats[chan];
5794 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5795 	u64_stats_inc(&rxq_stats->napi.poll);
5796 	u64_stats_update_end(&rxq_stats->napi_syncp);
5797 
5798 	work_done = stmmac_rx(priv, budget, chan);
5799 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5800 		unsigned long flags;
5801 
5802 		spin_lock_irqsave(&ch->lock, flags);
5803 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5804 		spin_unlock_irqrestore(&ch->lock, flags);
5805 	}
5806 
5807 	return work_done;
5808 }
5809 
5810 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5811 {
5812 	struct stmmac_channel *ch =
5813 		container_of(napi, struct stmmac_channel, tx_napi);
5814 	struct stmmac_priv *priv = ch->priv_data;
5815 	struct stmmac_txq_stats *txq_stats;
5816 	bool pending_packets = false;
5817 	u32 chan = ch->index;
5818 	int work_done;
5819 
5820 	txq_stats = &priv->xstats.txq_stats[chan];
5821 	u64_stats_update_begin(&txq_stats->napi_syncp);
5822 	u64_stats_inc(&txq_stats->napi.poll);
5823 	u64_stats_update_end(&txq_stats->napi_syncp);
5824 
5825 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5826 	work_done = min(work_done, budget);
5827 
5828 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5829 		unsigned long flags;
5830 
5831 		spin_lock_irqsave(&ch->lock, flags);
5832 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5833 		spin_unlock_irqrestore(&ch->lock, flags);
5834 	}
5835 
5836 	/* TX still has packets to handle, check if we need to arm the tx timer */
5837 	if (pending_packets)
5838 		stmmac_tx_timer_arm(priv, chan);
5839 
5840 	return work_done;
5841 }
5842 
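/* Combined RX/TX NAPI poll used for channels running in XSK zero-copy
 * mode: clean the TX ring, process zero-copy RX, and re-enable both DMA
 * IRQs only once all work is complete.
 */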
5843 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5844 {
5845 	struct stmmac_channel *ch =
5846 		container_of(napi, struct stmmac_channel, rxtx_napi);
5847 	struct stmmac_priv *priv = ch->priv_data;
5848 	bool tx_pending_packets = false;
5849 	int rx_done, tx_done, rxtx_done;
5850 	struct stmmac_rxq_stats *rxq_stats;
5851 	struct stmmac_txq_stats *txq_stats;
5852 	u32 chan = ch->index;
5853 
5854 	rxq_stats = &priv->xstats.rxq_stats[chan];
5855 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5856 	u64_stats_inc(&rxq_stats->napi.poll);
5857 	u64_stats_update_end(&rxq_stats->napi_syncp);
5858 
5859 	txq_stats = &priv->xstats.txq_stats[chan];
5860 	u64_stats_update_begin(&txq_stats->napi_syncp);
5861 	u64_stats_inc(&txq_stats->napi.poll);
5862 	u64_stats_update_end(&txq_stats->napi_syncp);
5863 
5864 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5865 	tx_done = min(tx_done, budget);
5866 
5867 	rx_done = stmmac_rx_zc(priv, budget, chan);
5868 
5869 	rxtx_done = max(tx_done, rx_done);
5870 
5871 	/* If either TX or RX work is not complete, return budget
5872 	 * and keep polling
5873 	 */
5874 	if (rxtx_done >= budget)
5875 		return budget;
5876 
5877 	/* all work done, exit the polling mode */
5878 	if (napi_complete_done(napi, rxtx_done)) {
5879 		unsigned long flags;
5880 
5881 		spin_lock_irqsave(&ch->lock, flags);
5882 		/* Both RX and TX work are complete,
5883 		 * so enable both RX & TX IRQs.
5884 		 */
5885 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5886 		spin_unlock_irqrestore(&ch->lock, flags);
5887 	}
5888 
5889 	/* TX still has packets to handle, check if we need to arm the tx timer */
5890 	if (tx_pending_packets)
5891 		stmmac_tx_timer_arm(priv, chan);
5892 
5893 	return min(rxtx_done, budget - 1);
5894 }
5895 
5896 /**
5897  *  stmmac_tx_timeout
5898  *  @dev : Pointer to net device structure
5899  *  @txqueue: the index of the hanging transmit queue
5900  *  Description: this function is called when a packet transmission fails to
5901  *   complete within a reasonable time. The driver will mark the error in the
5902  *   netdev structure and arrange for the device to be reset to a sane state
5903  *   in order to transmit a new packet.
5904  */
5905 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5906 {
5907 	struct stmmac_priv *priv = netdev_priv(dev);
5908 
5909 	stmmac_global_err(priv);
5910 }
5911 
5912 /**
5913  *  stmmac_set_rx_mode - entry point for multicast addressing
5914  *  @dev : pointer to the device structure
5915  *  Description:
5916  *  This function is a driver entry point which gets called by the kernel
5917  *  whenever multicast addresses must be enabled/disabled.
5918  *  Return value:
5919  *  void.
5920  *
5921  *  FIXME: This may need RXC to be running, but it may be called with BH
5922  *  disabled, which means we can't call phylink_rx_clk_stop*().
5923  */
5924 static void stmmac_set_rx_mode(struct net_device *dev)
5925 {
5926 	struct stmmac_priv *priv = netdev_priv(dev);
5927 
5928 	stmmac_set_filter(priv, priv->hw, dev);
5929 }
5930 
5931 /**
5932  *  stmmac_change_mtu - entry point to change MTU size for the device.
5933  *  @dev : device pointer.
5934  *  @new_mtu : the new MTU size for the device.
5935  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5936  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5937  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5938  *  Return value:
5939  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5940  *  file on failure.
5941  */
5942 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5943 {
5944 	struct stmmac_priv *priv = netdev_priv(dev);
5945 	int txfifosz = priv->plat->tx_fifo_size;
5946 	struct stmmac_dma_conf *dma_conf;
5947 	const int mtu = new_mtu;
5948 	int ret;
5949 
5950 	if (txfifosz == 0)
5951 		txfifosz = priv->dma_cap.tx_fifo_size;
5952 
5953 	txfifosz /= priv->plat->tx_queues_to_use;
5954 
5955 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5956 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5957 		return -EINVAL;
5958 	}
5959 
5960 	new_mtu = STMMAC_ALIGN(new_mtu);
5961 
5962 	/* If condition true, FIFO is too small or MTU too large */
5963 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5964 		return -EINVAL;
5965 
5966 	if (netif_running(dev)) {
5967 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5968 		/* Try to allocate the new DMA conf with the new mtu */
5969 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5970 		if (IS_ERR(dma_conf)) {
5971 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5972 				   mtu);
5973 			return PTR_ERR(dma_conf);
5974 		}
5975 
5976 		stmmac_release(dev);
5977 
5978 		ret = __stmmac_open(dev, dma_conf);
5979 		if (ret) {
5980 			free_dma_desc_resources(priv, dma_conf);
5981 			kfree(dma_conf);
5982 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5983 			return ret;
5984 		}
5985 
5986 		kfree(dma_conf);
5987 
5988 		stmmac_set_rx_mode(dev);
5989 	}
5990 
5991 	WRITE_ONCE(dev->mtu, mtu);
5992 	netdev_update_features(dev);
5993 
5994 	return 0;
5995 }
5996 
5997 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5998 					     netdev_features_t features)
5999 {
6000 	struct stmmac_priv *priv = netdev_priv(dev);
6001 
6002 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
6003 		features &= ~NETIF_F_RXCSUM;
6004 
6005 	if (!priv->plat->tx_coe)
6006 		features &= ~NETIF_F_CSUM_MASK;
6007 
6008 	/* Some GMAC devices have a bugged Jumbo frame support that
6009 	 * needs to have the Tx COE disabled for oversized frames
6010 	 * (due to limited buffer sizes). In this case we disable
6011 	 * the TX csum insertion in the TDES and do not use SF.
6012 	 */
6013 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
6014 		features &= ~NETIF_F_CSUM_MASK;
6015 
6016 	/* Disable tso if asked by ethtool */
6017 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
6018 		if (features & NETIF_F_TSO)
6019 			priv->tso = true;
6020 		else
6021 			priv->tso = false;
6022 	}
6023 
6024 	return features;
6025 }
6026 
6027 static int stmmac_set_features(struct net_device *netdev,
6028 			       netdev_features_t features)
6029 {
6030 	struct stmmac_priv *priv = netdev_priv(netdev);
6031 
6032 	/* Keep the COE type in case csum is supported */
6033 	if (features & NETIF_F_RXCSUM)
6034 		priv->hw->rx_csum = priv->plat->rx_coe;
6035 	else
6036 		priv->hw->rx_csum = 0;
6037 	/* No check needed because rx_coe has been set before and it will be
6038 	 * corrected if there is any issue.
6039 	 */
6040 	stmmac_rx_ipc(priv, priv->hw);
6041 
6042 	if (priv->sph_cap) {
6043 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6044 		u32 chan;
6045 
6046 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
6047 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6048 	}
6049 
6050 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
6051 		priv->hw->hw_vlan_en = true;
6052 	else
6053 		priv->hw->hw_vlan_en = false;
6054 
6055 	phylink_rx_clk_stop_block(priv->phylink);
6056 	stmmac_set_hw_vlan_mode(priv, priv->hw);
6057 	phylink_rx_clk_stop_unblock(priv->phylink);
6058 
6059 	return 0;
6060 }
6061 
6062 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6063 {
6064 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6065 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6066 	u32 queues_count;
6067 	u32 queue;
6068 	bool xmac;
6069 
6070 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6071 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6072 
6073 	if (priv->irq_wake)
6074 		pm_wakeup_event(priv->device, 0);
6075 
6076 	if (priv->dma_cap.estsel)
6077 		stmmac_est_irq_status(priv, priv, priv->dev,
6078 				      &priv->xstats, tx_cnt);
6079 
6080 	if (stmmac_fpe_supported(priv))
6081 		stmmac_fpe_irq_status(priv);
6082 
6083 	/* To handle the GMAC's own interrupts */
6084 	if ((priv->plat->has_gmac) || xmac) {
6085 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6086 
6087 		if (unlikely(status)) {
6088 			/* For LPI we need to save the tx status */
6089 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6090 				priv->tx_path_in_lpi_mode = true;
6091 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6092 				priv->tx_path_in_lpi_mode = false;
6093 		}
6094 
6095 		for (queue = 0; queue < queues_count; queue++)
6096 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6097 
6098 		/* PCS link status */
6099 		if (priv->hw->pcs &&
6100 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6101 			if (priv->xstats.pcs_link)
6102 				netif_carrier_on(priv->dev);
6103 			else
6104 				netif_carrier_off(priv->dev);
6105 		}
6106 
6107 		stmmac_timestamp_interrupt(priv, priv);
6108 	}
6109 }
6110 
6111 /**
6112  *  stmmac_interrupt - main ISR
6113  *  @irq: interrupt number.
6114  *  @dev_id: to pass the net device pointer.
6115  *  Description: this is the main driver interrupt service routine.
6116  *  It can call:
6117  *  o DMA service routine (to manage incoming frame reception and transmission
6118  *    status)
6119  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6120  *    interrupts.
6121  */
6122 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6123 {
6124 	struct net_device *dev = (struct net_device *)dev_id;
6125 	struct stmmac_priv *priv = netdev_priv(dev);
6126 
6127 	/* Check if adapter is up */
6128 	if (test_bit(STMMAC_DOWN, &priv->state))
6129 		return IRQ_HANDLED;
6130 
6131 	/* Check ASP error if it isn't delivered via an individual IRQ */
6132 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6133 		return IRQ_HANDLED;
6134 
6135 	/* To handle Common interrupts */
6136 	stmmac_common_interrupt(priv);
6137 
6138 	/* To handle DMA interrupts */
6139 	stmmac_dma_interrupt(priv);
6140 
6141 	return IRQ_HANDLED;
6142 }
6143 
6144 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6145 {
6146 	struct net_device *dev = (struct net_device *)dev_id;
6147 	struct stmmac_priv *priv = netdev_priv(dev);
6148 
6149 	/* Check if adapter is up */
6150 	if (test_bit(STMMAC_DOWN, &priv->state))
6151 		return IRQ_HANDLED;
6152 
6153 	/* To handle Common interrupts */
6154 	stmmac_common_interrupt(priv);
6155 
6156 	return IRQ_HANDLED;
6157 }
6158 
6159 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6160 {
6161 	struct net_device *dev = (struct net_device *)dev_id;
6162 	struct stmmac_priv *priv = netdev_priv(dev);
6163 
6164 	/* Check if adapter is up */
6165 	if (test_bit(STMMAC_DOWN, &priv->state))
6166 		return IRQ_HANDLED;
6167 
6168 	/* Check if a fatal error happened */
6169 	stmmac_safety_feat_interrupt(priv);
6170 
6171 	return IRQ_HANDLED;
6172 }
6173 
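/* Per-channel TX MSI handler: schedule NAPI for this channel and react
 * to TX DMA errors by bumping the threshold or restarting the channel.
 */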
6174 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6175 {
6176 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6177 	struct stmmac_dma_conf *dma_conf;
6178 	int chan = tx_q->queue_index;
6179 	struct stmmac_priv *priv;
6180 	int status;
6181 
6182 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6183 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6184 
6185 	/* Check if adapter is up */
6186 	if (test_bit(STMMAC_DOWN, &priv->state))
6187 		return IRQ_HANDLED;
6188 
6189 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6190 
6191 	if (unlikely(status & tx_hard_error_bump_tc)) {
6192 		/* Try to bump up the dma threshold on this failure */
6193 		stmmac_bump_dma_threshold(priv, chan);
6194 	} else if (unlikely(status == tx_hard_error)) {
6195 		stmmac_tx_err(priv, chan);
6196 	}
6197 
6198 	return IRQ_HANDLED;
6199 }
6200 
6201 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6202 {
6203 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6204 	struct stmmac_dma_conf *dma_conf;
6205 	int chan = rx_q->queue_index;
6206 	struct stmmac_priv *priv;
6207 
6208 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6209 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6210 
6211 	/* Check if adapter is up */
6212 	if (test_bit(STMMAC_DOWN, &priv->state))
6213 		return IRQ_HANDLED;
6214 
6215 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6216 
6217 	return IRQ_HANDLED;
6218 }
6219 
6220 /**
6221  *  stmmac_ioctl - Entry point for the Ioctl
6222  *  @dev: Device pointer.
6223  *  @rq: An IOCTL-specific structure that can contain a pointer to
6224  *  a proprietary structure used to pass information to the driver.
6225  *  @cmd: IOCTL command
6226  *  Description:
6227  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6228  */
6229 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6230 {
6231 	struct stmmac_priv *priv = netdev_priv(dev);
6232 	int ret = -EOPNOTSUPP;
6233 
6234 	if (!netif_running(dev))
6235 		return -EINVAL;
6236 
6237 	switch (cmd) {
6238 	case SIOCGMIIPHY:
6239 	case SIOCGMIIREG:
6240 	case SIOCSMIIREG:
6241 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6242 		break;
6243 	default:
6244 		break;
6245 	}
6246 
6247 	return ret;
6248 }
6249 
6250 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6251 				    void *cb_priv)
6252 {
6253 	struct stmmac_priv *priv = cb_priv;
6254 	int ret = -EOPNOTSUPP;
6255 
6256 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6257 		return ret;
6258 
6259 	__stmmac_disable_all_queues(priv);
6260 
6261 	switch (type) {
6262 	case TC_SETUP_CLSU32:
6263 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6264 		break;
6265 	case TC_SETUP_CLSFLOWER:
6266 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6267 		break;
6268 	default:
6269 		break;
6270 	}
6271 
6272 	stmmac_enable_all_queues(priv);
6273 	return ret;
6274 }
6275 
6276 static LIST_HEAD(stmmac_block_cb_list);
6277 
6278 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6279 			   void *type_data)
6280 {
6281 	struct stmmac_priv *priv = netdev_priv(ndev);
6282 
6283 	switch (type) {
6284 	case TC_QUERY_CAPS:
6285 		return stmmac_tc_query_caps(priv, priv, type_data);
6286 	case TC_SETUP_QDISC_MQPRIO:
6287 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6288 	case TC_SETUP_BLOCK:
6289 		return flow_block_cb_setup_simple(type_data,
6290 						  &stmmac_block_cb_list,
6291 						  stmmac_setup_tc_block_cb,
6292 						  priv, priv, true);
6293 	case TC_SETUP_QDISC_CBS:
6294 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6295 	case TC_SETUP_QDISC_TAPRIO:
6296 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6297 	case TC_SETUP_QDISC_ETF:
6298 		return stmmac_tc_setup_etf(priv, priv, type_data);
6299 	default:
6300 		return -EOPNOTSUPP;
6301 	}
6302 }
6303 
6304 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6305 			       struct net_device *sb_dev)
6306 {
6307 	int gso = skb_shinfo(skb)->gso_type;
6308 
6309 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6310 		/*
6311 		 * There is no way to determine the number of TSO/USO
6312 		 * capable Queues. Let's always use Queue 0
6313 		 * because if TSO/USO is supported then at least this
6314 		 * one will be capable.
6315 		 */
6316 		return 0;
6317 	}
6318 
6319 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6320 }
6321 
6322 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6323 {
6324 	struct stmmac_priv *priv = netdev_priv(ndev);
6325 	int ret = 0;
6326 
6327 	ret = pm_runtime_resume_and_get(priv->device);
6328 	if (ret < 0)
6329 		return ret;
6330 
6331 	ret = eth_mac_addr(ndev, addr);
6332 	if (ret)
6333 		goto set_mac_error;
6334 
6335 	phylink_rx_clk_stop_block(priv->phylink);
6336 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6337 	phylink_rx_clk_stop_unblock(priv->phylink);
6338 
6339 set_mac_error:
6340 	pm_runtime_put(priv->device);
6341 
6342 	return ret;
6343 }
6344 
6345 #ifdef CONFIG_DEBUG_FS
6346 static struct dentry *stmmac_fs_dir;
6347 
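/* Dump one descriptor ring (normal or extended descriptors) to the
 * debugfs seq_file, one descriptor per line.
 */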
6348 static void sysfs_display_ring(void *head, int size, int extend_desc,
6349 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6350 {
6351 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6352 	struct dma_desc *p = (struct dma_desc *)head;
6353 	unsigned int desc_size;
6354 	dma_addr_t dma_addr;
6355 	int i;
6356 
6357 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6358 	for (i = 0; i < size; i++) {
6359 		dma_addr = dma_phy_addr + i * desc_size;
6360 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6361 				i, &dma_addr,
6362 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6363 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6364 		if (extend_desc)
6365 			p = &(++ep)->basic;
6366 		else
6367 			p++;
6368 	}
6369 }
6370 
6371 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6372 {
6373 	struct net_device *dev = seq->private;
6374 	struct stmmac_priv *priv = netdev_priv(dev);
6375 	u32 rx_count = priv->plat->rx_queues_to_use;
6376 	u32 tx_count = priv->plat->tx_queues_to_use;
6377 	u32 queue;
6378 
6379 	if ((dev->flags & IFF_UP) == 0)
6380 		return 0;
6381 
6382 	for (queue = 0; queue < rx_count; queue++) {
6383 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6384 
6385 		seq_printf(seq, "RX Queue %d:\n", queue);
6386 
6387 		if (priv->extend_desc) {
6388 			seq_printf(seq, "Extended descriptor ring:\n");
6389 			sysfs_display_ring((void *)rx_q->dma_erx,
6390 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6391 		} else {
6392 			seq_printf(seq, "Descriptor ring:\n");
6393 			sysfs_display_ring((void *)rx_q->dma_rx,
6394 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6395 		}
6396 	}
6397 
6398 	for (queue = 0; queue < tx_count; queue++) {
6399 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6400 
6401 		seq_printf(seq, "TX Queue %d:\n", queue);
6402 
6403 		if (priv->extend_desc) {
6404 			seq_printf(seq, "Extended descriptor ring:\n");
6405 			sysfs_display_ring((void *)tx_q->dma_etx,
6406 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6407 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6408 			seq_printf(seq, "Descriptor ring:\n");
6409 			sysfs_display_ring((void *)tx_q->dma_tx,
6410 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6411 		}
6412 	}
6413 
6414 	return 0;
6415 }
6416 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6417 
6418 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6419 {
6420 	static const char * const dwxgmac_timestamp_source[] = {
6421 		"None",
6422 		"Internal",
6423 		"External",
6424 		"Both",
6425 	};
6426 	static const char * const dwxgmac_safety_feature_desc[] = {
6427 		"No",
6428 		"All Safety Features with ECC and Parity",
6429 		"All Safety Features without ECC or Parity",
6430 		"All Safety Features with Parity Only",
6431 		"ECC Only",
6432 		"UNDEFINED",
6433 		"UNDEFINED",
6434 		"UNDEFINED",
6435 	};
6436 	struct net_device *dev = seq->private;
6437 	struct stmmac_priv *priv = netdev_priv(dev);
6438 
6439 	if (!priv->hw_cap_support) {
6440 		seq_printf(seq, "DMA HW features not supported\n");
6441 		return 0;
6442 	}
6443 
6444 	seq_printf(seq, "==============================\n");
6445 	seq_printf(seq, "\tDMA HW features\n");
6446 	seq_printf(seq, "==============================\n");
6447 
6448 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6449 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6450 	seq_printf(seq, "\t1000 Mbps: %s\n",
6451 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6452 	seq_printf(seq, "\tHalf duplex: %s\n",
6453 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6454 	if (priv->plat->has_xgmac) {
6455 		seq_printf(seq,
6456 			   "\tNumber of Additional MAC address registers: %d\n",
6457 			   priv->dma_cap.multi_addr);
6458 	} else {
6459 		seq_printf(seq, "\tHash Filter: %s\n",
6460 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6461 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6462 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6463 	}
6464 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6465 		   (priv->dma_cap.pcs) ? "Y" : "N");
6466 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6467 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6468 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6469 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6470 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6471 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6472 	seq_printf(seq, "\tRMON module: %s\n",
6473 		   (priv->dma_cap.rmon) ? "Y" : "N");
6474 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6475 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6476 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6477 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6478 	if (priv->plat->has_xgmac)
6479 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6480 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6481 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6482 		   (priv->dma_cap.eee) ? "Y" : "N");
6483 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6484 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6485 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6486 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6487 	    priv->plat->has_xgmac) {
6488 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6489 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6490 	} else {
6491 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6492 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6493 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6494 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6495 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6496 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6497 	}
6498 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6499 		   priv->dma_cap.number_rx_channel);
6500 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6501 		   priv->dma_cap.number_tx_channel);
6502 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6503 		   priv->dma_cap.number_rx_queues);
6504 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6505 		   priv->dma_cap.number_tx_queues);
6506 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6507 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6508 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6509 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6510 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6511 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6512 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6513 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6514 		   priv->dma_cap.pps_out_num);
6515 	seq_printf(seq, "\tSafety Features: %s\n",
6516 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6517 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6518 		   priv->dma_cap.frpsel ? "Y" : "N");
6519 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6520 		   priv->dma_cap.host_dma_width);
6521 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6522 		   priv->dma_cap.rssen ? "Y" : "N");
6523 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6524 		   priv->dma_cap.vlhash ? "Y" : "N");
6525 	seq_printf(seq, "\tSplit Header: %s\n",
6526 		   priv->dma_cap.sphen ? "Y" : "N");
6527 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6528 		   priv->dma_cap.vlins ? "Y" : "N");
6529 	seq_printf(seq, "\tDouble VLAN: %s\n",
6530 		   priv->dma_cap.dvlan ? "Y" : "N");
6531 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6532 		   priv->dma_cap.l3l4fnum);
6533 	seq_printf(seq, "\tARP Offloading: %s\n",
6534 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6535 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6536 		   priv->dma_cap.estsel ? "Y" : "N");
6537 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6538 		   priv->dma_cap.fpesel ? "Y" : "N");
6539 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6540 		   priv->dma_cap.tbssel ? "Y" : "N");
6541 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6542 		   priv->dma_cap.tbs_ch_num);
6543 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6544 		   priv->dma_cap.sgfsel ? "Y" : "N");
6545 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6546 		   BIT(priv->dma_cap.ttsfd) >> 1);
6547 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6548 		   priv->dma_cap.numtc);
6549 	seq_printf(seq, "\tDCB Feature: %s\n",
6550 		   priv->dma_cap.dcben ? "Y" : "N");
6551 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6552 		   priv->dma_cap.advthword ? "Y" : "N");
6553 	seq_printf(seq, "\tPTP Offload: %s\n",
6554 		   priv->dma_cap.ptoen ? "Y" : "N");
6555 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6556 		   priv->dma_cap.osten ? "Y" : "N");
6557 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6558 		   priv->dma_cap.pfcen ? "Y" : "N");
6559 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6560 		   BIT(priv->dma_cap.frpes) << 6);
6561 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6562 		   BIT(priv->dma_cap.frpbs) << 6);
6563 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6564 		   priv->dma_cap.frppipe_num);
6565 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6566 		   priv->dma_cap.nrvf_num ?
6567 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6568 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6569 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6570 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6571 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6572 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6573 		   priv->dma_cap.cbtisel ? "Y" : "N");
6574 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6575 		   priv->dma_cap.aux_snapshot_n);
6576 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6577 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6578 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6579 		   priv->dma_cap.edma ? "Y" : "N");
6580 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6581 		   priv->dma_cap.ediffc ? "Y" : "N");
6582 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6583 		   priv->dma_cap.vxn ? "Y" : "N");
6584 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6585 		   priv->dma_cap.dbgmem ? "Y" : "N");
6586 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6587 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6588 	return 0;
6589 }
6590 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6591 
6592 /* Use network device events to rename debugfs file entries.
6593  */
6594 static int stmmac_device_event(struct notifier_block *unused,
6595 			       unsigned long event, void *ptr)
6596 {
6597 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6598 	struct stmmac_priv *priv = netdev_priv(dev);
6599 
6600 	if (dev->netdev_ops != &stmmac_netdev_ops)
6601 		goto done;
6602 
6603 	switch (event) {
6604 	case NETDEV_CHANGENAME:
6605 		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6606 		break;
6607 	}
6608 done:
6609 	return NOTIFY_DONE;
6610 }
6611 
6612 static struct notifier_block stmmac_notifier = {
6613 	.notifier_call = stmmac_device_event,
6614 };
6615 
6616 static void stmmac_init_fs(struct net_device *dev)
6617 {
6618 	struct stmmac_priv *priv = netdev_priv(dev);
6619 
6620 	rtnl_lock();
6621 
6622 	/* Create per netdev entries */
6623 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6624 
6625 	/* Entry to report DMA RX/TX rings */
6626 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6627 			    &stmmac_rings_status_fops);
6628 
6629 	/* Entry to report the DMA HW features */
6630 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6631 			    &stmmac_dma_cap_fops);
6632 
6633 	rtnl_unlock();
6634 }
6635 
6636 static void stmmac_exit_fs(struct net_device *dev)
6637 {
6638 	struct stmmac_priv *priv = netdev_priv(dev);
6639 
6640 	debugfs_remove_recursive(priv->dbgfs_dir);
6641 }
6642 #endif /* CONFIG_DEBUG_FS */
6643 
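/* Bit-serial CRC-32 (reflected polynomial 0xEDB88320) over the 12-bit
 * VLAN ID; the caller folds the result into the VLAN hash filter index.
 */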
6644 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6645 {
6646 	unsigned char *data = (unsigned char *)&vid_le;
6647 	unsigned char data_byte = 0;
6648 	u32 crc = ~0x0;
6649 	u32 temp = 0;
6650 	int i, bits;
6651 
6652 	bits = get_bitmask_order(VLAN_VID_MASK);
6653 	for (i = 0; i < bits; i++) {
6654 		if ((i % 8) == 0)
6655 			data_byte = data[i / 8];
6656 
6657 		temp = ((crc & 1) ^ data_byte) & 1;
6658 		crc >>= 1;
6659 		data_byte >>= 1;
6660 
6661 		if (temp)
6662 			crc ^= 0xedb88320;
6663 	}
6664 
6665 	return crc;
6666 }
6667 
6668 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6669 {
6670 	u32 crc, hash = 0;
6671 	u16 pmatch = 0;
6672 	int count = 0;
6673 	u16 vid = 0;
6674 
6675 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6676 		__le16 vid_le = cpu_to_le16(vid);
6677 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6678 		hash |= (1 << crc);
6679 		count++;
6680 	}
6681 
6682 	if (!priv->dma_cap.vlhash) {
6683 		if (count > 2) /* VID = 0 always passes filter */
6684 			return -EOPNOTSUPP;
6685 
6686 		pmatch = vid;
6687 		hash = 0;
6688 	}
6689 
6690 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6691 }
6692 
6693 /* FIXME: This may need RXC to be running, but it may be called with BH
6694  * disabled, which means we can't call phylink_rx_clk_stop*().
6695  */
6696 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6697 {
6698 	struct stmmac_priv *priv = netdev_priv(ndev);
6699 	bool is_double = false;
6700 	int ret;
6701 
6702 	ret = pm_runtime_resume_and_get(priv->device);
6703 	if (ret < 0)
6704 		return ret;
6705 
6706 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6707 		is_double = true;
6708 
6709 	set_bit(vid, priv->active_vlans);
6710 	ret = stmmac_vlan_update(priv, is_double);
6711 	if (ret) {
6712 		clear_bit(vid, priv->active_vlans);
6713 		goto err_pm_put;
6714 	}
6715 
6716 	if (priv->hw->num_vlan) {
6717 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6718 		if (ret)
6719 			goto err_pm_put;
6720 	}
6721 err_pm_put:
6722 	pm_runtime_put(priv->device);
6723 
6724 	return ret;
6725 }
6726 
6727 /* FIXME: This may need RXC to be running, but it may be called with BH
6728  * disabled, which means we can't call phylink_rx_clk_stop*().
6729  */
6730 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6731 {
6732 	struct stmmac_priv *priv = netdev_priv(ndev);
6733 	bool is_double = false;
6734 	int ret;
6735 
6736 	ret = pm_runtime_resume_and_get(priv->device);
6737 	if (ret < 0)
6738 		return ret;
6739 
6740 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6741 		is_double = true;
6742 
6743 	clear_bit(vid, priv->active_vlans);
6744 
6745 	if (priv->hw->num_vlan) {
6746 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6747 		if (ret)
6748 			goto del_vlan_error;
6749 	}
6750 
6751 	ret = stmmac_vlan_update(priv, is_double);
6752 
6753 del_vlan_error:
6754 	pm_runtime_put(priv->device);
6755 
6756 	return ret;
6757 }
6758 
6759 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6760 {
6761 	struct stmmac_priv *priv = netdev_priv(dev);
6762 
6763 	switch (bpf->command) {
6764 	case XDP_SETUP_PROG:
6765 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6766 	case XDP_SETUP_XSK_POOL:
6767 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6768 					     bpf->xsk.queue_id);
6769 	default:
6770 		return -EOPNOTSUPP;
6771 	}
6772 }
6773 
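/* .ndo_xdp_xmit: transmit a batch of XDP frames (e.g. from XDP_REDIRECT
 * on another interface), sharing the TX ring with the slow path under
 * the netdev TX queue lock.
 */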
6774 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6775 			   struct xdp_frame **frames, u32 flags)
6776 {
6777 	struct stmmac_priv *priv = netdev_priv(dev);
6778 	int cpu = smp_processor_id();
6779 	struct netdev_queue *nq;
6780 	int i, nxmit = 0;
6781 	int queue;
6782 
6783 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6784 		return -ENETDOWN;
6785 
6786 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6787 		return -EINVAL;
6788 
6789 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6790 	nq = netdev_get_tx_queue(priv->dev, queue);
6791 
6792 	__netif_tx_lock(nq, cpu);
6793 	/* Avoids TX time-out as we are sharing with slow path */
6794 	txq_trans_cond_update(nq);
6795 
6796 	for (i = 0; i < num_frames; i++) {
6797 		int res;
6798 
6799 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6800 		if (res == STMMAC_XDP_CONSUMED)
6801 			break;
6802 
6803 		nxmit++;
6804 	}
6805 
6806 	if (flags & XDP_XMIT_FLUSH) {
6807 		stmmac_flush_tx_descriptors(priv, queue);
6808 		stmmac_tx_timer_arm(priv, queue);
6809 	}
6810 
6811 	__netif_tx_unlock(nq);
6812 
6813 	return nxmit;
6814 }
6815 
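/* The helpers below tear down and bring back up a single RX or TX queue
 * at runtime, e.g. when an XSK pool is attached to or detached from that
 * queue.
 */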
6816 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6817 {
6818 	struct stmmac_channel *ch = &priv->channel[queue];
6819 	unsigned long flags;
6820 
6821 	spin_lock_irqsave(&ch->lock, flags);
6822 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6823 	spin_unlock_irqrestore(&ch->lock, flags);
6824 
6825 	stmmac_stop_rx_dma(priv, queue);
6826 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6827 }
6828 
6829 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6830 {
6831 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6832 	struct stmmac_channel *ch = &priv->channel[queue];
6833 	unsigned long flags;
6834 	u32 buf_size;
6835 	int ret;
6836 
6837 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6838 	if (ret) {
6839 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6840 		return;
6841 	}
6842 
6843 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6844 	if (ret) {
6845 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6846 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6847 		return;
6848 	}
6849 
6850 	stmmac_reset_rx_queue(priv, queue);
6851 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6852 
6853 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6854 			    rx_q->dma_rx_phy, rx_q->queue_index);
6855 
6856 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6857 			     sizeof(struct dma_desc));
6858 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6859 			       rx_q->rx_tail_addr, rx_q->queue_index);
6860 
6861 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6862 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6863 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6864 				      buf_size,
6865 				      rx_q->queue_index);
6866 	} else {
6867 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6868 				      priv->dma_conf.dma_buf_sz,
6869 				      rx_q->queue_index);
6870 	}
6871 
6872 	stmmac_start_rx_dma(priv, queue);
6873 
6874 	spin_lock_irqsave(&ch->lock, flags);
6875 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6876 	spin_unlock_irqrestore(&ch->lock, flags);
6877 }
6878 
6879 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6880 {
6881 	struct stmmac_channel *ch = &priv->channel[queue];
6882 	unsigned long flags;
6883 
6884 	spin_lock_irqsave(&ch->lock, flags);
6885 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6886 	spin_unlock_irqrestore(&ch->lock, flags);
6887 
6888 	stmmac_stop_tx_dma(priv, queue);
6889 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6890 }
6891 
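/**
 * stmmac_enable_tx_queue - (re)enable a TX queue at runtime
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: allocate and initialize the TX descriptor ring, reconfigure
 * the TX DMA channel (tail pointer and, if available, TBS), restart the
 * TX DMA and unmask the channel TX interrupt.
 */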
6892 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6893 {
6894 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6895 	struct stmmac_channel *ch = &priv->channel[queue];
6896 	unsigned long flags;
6897 	int ret;
6898 
6899 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6900 	if (ret) {
6901 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6902 		return;
6903 	}
6904 
6905 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6906 	if (ret) {
6907 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6908 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6909 		return;
6910 	}
6911 
6912 	stmmac_reset_tx_queue(priv, queue);
6913 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6914 
6915 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6916 			    tx_q->dma_tx_phy, tx_q->queue_index);
6917 
6918 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6919 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6920 
6921 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6922 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6923 			       tx_q->tx_tail_addr, tx_q->queue_index);
6924 
6925 	stmmac_start_tx_dma(priv, queue);
6926 
6927 	spin_lock_irqsave(&ch->lock, flags);
6928 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6929 	spin_unlock_irqrestore(&ch->lock, flags);
6930 }
6931 
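/**
 * stmmac_xdp_release - tear down the data path for an XDP reconfiguration
 * @dev: network device pointer
 * Description: stop the TX queues and NAPI, cancel the TX timers, free the
 * IRQ lines, stop the DMA channels, release the descriptor resources and
 * disable the MAC, leaving the interface ready to be reconfigured.
 */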
6932 void stmmac_xdp_release(struct net_device *dev)
6933 {
6934 	struct stmmac_priv *priv = netdev_priv(dev);
6935 	u32 chan;
6936 
6937 	/* Ensure tx function is not running */
6938 	netif_tx_disable(dev);
6939 
6940 	/* Disable NAPI process */
6941 	stmmac_disable_all_queues(priv);
6942 
6943 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6944 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6945 
6946 	/* Free the IRQ lines */
6947 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6948 
6949 	/* Stop TX/RX DMA channels */
6950 	stmmac_stop_all_dma(priv);
6951 
6952 	/* Release and free the Rx/Tx resources */
6953 	free_dma_desc_resources(priv, &priv->dma_conf);
6954 
6955 	/* Disable the MAC Rx/Tx */
6956 	stmmac_mac_set(priv, priv->ioaddr, false);
6957 
6958 	/* set trans_start so we don't get spurious
6959 	 * watchdogs during reset
6960 	 */
6961 	netif_trans_update(dev);
6962 	netif_carrier_off(dev);
6963 }
6964 
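/**
 * stmmac_xdp_open - bring the data path back up after an XDP reconfiguration
 * @dev: network device pointer
 * Description: counterpart of stmmac_xdp_release(). Reallocate and
 * initialize the descriptor rings, reprogram the DMA channels (including
 * XSK pool buffer sizes and split header), re-enable the MAC, request the
 * IRQ lines and restart NAPI and the TX queues.
 * Return: 0 on success, otherwise a negative errno.
 */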
6965 int stmmac_xdp_open(struct net_device *dev)
6966 {
6967 	struct stmmac_priv *priv = netdev_priv(dev);
6968 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6969 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6970 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6971 	struct stmmac_rx_queue *rx_q;
6972 	struct stmmac_tx_queue *tx_q;
6973 	u32 buf_size;
6974 	bool sph_en;
6975 	u32 chan;
6976 	int ret;
6977 
6978 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6979 	if (ret < 0) {
6980 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6981 			   __func__);
6982 		goto dma_desc_error;
6983 	}
6984 
6985 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6986 	if (ret < 0) {
6987 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6988 			   __func__);
6989 		goto init_error;
6990 	}
6991 
6992 	stmmac_reset_queues_param(priv);
6993 
6994 	/* DMA CSR Channel configuration */
6995 	for (chan = 0; chan < dma_csr_ch; chan++) {
6996 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6997 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6998 	}
6999 
7000 	/* Adjust Split header */
7001 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
7002 
7003 	/* DMA RX Channel Configuration */
7004 	for (chan = 0; chan < rx_cnt; chan++) {
7005 		rx_q = &priv->dma_conf.rx_queue[chan];
7006 
7007 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7008 				    rx_q->dma_rx_phy, chan);
7009 
7010 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
7011 				     (rx_q->buf_alloc_num *
7012 				      sizeof(struct dma_desc));
7013 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
7014 				       rx_q->rx_tail_addr, chan);
7015 
7016 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
7017 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
7018 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7019 					      buf_size,
7020 					      rx_q->queue_index);
7021 		} else {
7022 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7023 					      priv->dma_conf.dma_buf_sz,
7024 					      rx_q->queue_index);
7025 		}
7026 
7027 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
7028 	}
7029 
7030 	/* DMA TX Channel Configuration */
7031 	for (chan = 0; chan < tx_cnt; chan++) {
7032 		tx_q = &priv->dma_conf.tx_queue[chan];
7033 
7034 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7035 				    tx_q->dma_tx_phy, chan);
7036 
7037 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7038 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7039 				       tx_q->tx_tail_addr, chan);
7040 
7041 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7042 	}
7043 
7044 	/* Enable the MAC Rx/Tx */
7045 	stmmac_mac_set(priv, priv->ioaddr, true);
7046 
7047 	/* Start Rx & Tx DMA Channels */
7048 	stmmac_start_all_dma(priv);
7049 
7050 	ret = stmmac_request_irq(dev);
7051 	if (ret)
7052 		goto irq_error;
7053 
7054 	/* Enable NAPI process */
7055 	stmmac_enable_all_queues(priv);
7056 	netif_carrier_on(dev);
7057 	netif_tx_start_all_queues(dev);
7058 	stmmac_enable_all_dma_irq(priv);
7059 
7060 	return 0;
7061 
7062 irq_error:
7063 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7064 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7065 
7066 	stmmac_hw_teardown(dev);
7067 init_error:
7068 	free_dma_desc_resources(priv, &priv->dma_conf);
7069 dma_desc_error:
7070 	return ret;
7071 }
7072 
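/**
 * stmmac_xsk_wakeup - .ndo_xsk_wakeup callback for AF_XDP zero-copy
 * @dev: network device pointer
 * @queue: queue index to kick
 * @flags: XDP_WAKEUP_RX and/or XDP_WAKEUP_TX (unused here)
 * Description: validate that the interface and the queue are usable for
 * AF_XDP and schedule the rxtx NAPI of the channel if it is not already
 * running, so that pending XSK descriptors get processed.
 * Return: 0 on success, otherwise a negative errno.
 */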
7073 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7074 {
7075 	struct stmmac_priv *priv = netdev_priv(dev);
7076 	struct stmmac_rx_queue *rx_q;
7077 	struct stmmac_tx_queue *tx_q;
7078 	struct stmmac_channel *ch;
7079 
7080 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7081 	    !netif_carrier_ok(priv->dev))
7082 		return -ENETDOWN;
7083 
7084 	if (!stmmac_xdp_is_enabled(priv))
7085 		return -EINVAL;
7086 
7087 	if (queue >= priv->plat->rx_queues_to_use ||
7088 	    queue >= priv->plat->tx_queues_to_use)
7089 		return -EINVAL;
7090 
7091 	rx_q = &priv->dma_conf.rx_queue[queue];
7092 	tx_q = &priv->dma_conf.tx_queue[queue];
7093 	ch = &priv->channel[queue];
7094 
7095 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7096 		return -EINVAL;
7097 
7098 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7099 		/* EQoS does not have a per-DMA channel SW interrupt,
7100 		 * so we schedule the RX/TX NAPI straight away.
7101 		 */
7102 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7103 			__napi_schedule(&ch->rxtx_napi);
7104 	}
7105 
7106 	return 0;
7107 }
7108 
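/**
 * stmmac_get_stats64 - .ndo_get_stats64 callback
 * @dev: network device pointer
 * @stats: structure to fill with the accumulated statistics
 * Description: aggregate the per-queue packet/byte counters under their
 * u64_stats syncp sections and copy the global error counters from xstats.
 */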
7109 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7110 {
7111 	struct stmmac_priv *priv = netdev_priv(dev);
7112 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7113 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7114 	unsigned int start;
7115 	int q;
7116 
7117 	for (q = 0; q < tx_cnt; q++) {
7118 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7119 		u64 tx_packets;
7120 		u64 tx_bytes;
7121 
7122 		do {
7123 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7124 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7125 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7126 		do {
7127 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7128 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7129 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7130 
7131 		stats->tx_packets += tx_packets;
7132 		stats->tx_bytes += tx_bytes;
7133 	}
7134 
7135 	for (q = 0; q < rx_cnt; q++) {
7136 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7137 		u64 rx_packets;
7138 		u64 rx_bytes;
7139 
7140 		do {
7141 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7142 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7143 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7144 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7145 
7146 		stats->rx_packets += rx_packets;
7147 		stats->rx_bytes += rx_bytes;
7148 	}
7149 
7150 	stats->rx_dropped = priv->xstats.rx_dropped;
7151 	stats->rx_errors = priv->xstats.rx_errors;
7152 	stats->tx_dropped = priv->xstats.tx_dropped;
7153 	stats->tx_errors = priv->xstats.tx_errors;
7154 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7155 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7156 	stats->rx_length_errors = priv->xstats.rx_length;
7157 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7158 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7159 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7160 }
7161 
7162 static const struct net_device_ops stmmac_netdev_ops = {
7163 	.ndo_open = stmmac_open,
7164 	.ndo_start_xmit = stmmac_xmit,
7165 	.ndo_stop = stmmac_release,
7166 	.ndo_change_mtu = stmmac_change_mtu,
7167 	.ndo_fix_features = stmmac_fix_features,
7168 	.ndo_set_features = stmmac_set_features,
7169 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7170 	.ndo_tx_timeout = stmmac_tx_timeout,
7171 	.ndo_eth_ioctl = stmmac_ioctl,
7172 	.ndo_get_stats64 = stmmac_get_stats64,
7173 	.ndo_setup_tc = stmmac_setup_tc,
7174 	.ndo_select_queue = stmmac_select_queue,
7175 	.ndo_set_mac_address = stmmac_set_mac_address,
7176 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7177 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7178 	.ndo_bpf = stmmac_bpf,
7179 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7180 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7181 	.ndo_hwtstamp_get = stmmac_hwtstamp_get,
7182 	.ndo_hwtstamp_set = stmmac_hwtstamp_set,
7183 };
7184 
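/**
 * stmmac_reset_subtask - restart the interface after a requested reset
 * @priv: driver private structure
 * Description: if a reset was requested and the interface is not already
 * down, close and reopen the device under the rtnl lock to recover the
 * adapter.
 */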
7185 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7186 {
7187 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7188 		return;
7189 	if (test_bit(STMMAC_DOWN, &priv->state))
7190 		return;
7191 
7192 	netdev_err(priv->dev, "Reset adapter.\n");
7193 
7194 	rtnl_lock();
7195 	netif_trans_update(priv->dev);
7196 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7197 		usleep_range(1000, 2000);
7198 
7199 	set_bit(STMMAC_DOWN, &priv->state);
7200 	dev_close(priv->dev);
7201 	dev_open(priv->dev, NULL);
7202 	clear_bit(STMMAC_DOWN, &priv->state);
7203 	clear_bit(STMMAC_RESETING, &priv->state);
7204 	rtnl_unlock();
7205 }
7206 
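/**
 * stmmac_service_task - deferred driver maintenance work
 * @work: work_struct embedded in the driver private structure
 * Description: run the reset subtask and clear the service scheduled flag.
 */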
7207 static void stmmac_service_task(struct work_struct *work)
7208 {
7209 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7210 			service_task);
7211 
7212 	stmmac_reset_subtask(priv);
7213 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7214 }
7215 
7216 /**
7217  *  stmmac_hw_init - Init the MAC device
7218  *  @priv: driver private structure
7219  *  Description: this function is to configure the MAC device according to
7220  *  some platform parameters or the HW capability register. It prepares the
7221  *  driver to use either ring or chain modes and to setup either enhanced or
7222  *  normal descriptors.
7223  */
7224 static int stmmac_hw_init(struct stmmac_priv *priv)
7225 {
7226 	int ret;
7227 
7228 	/* dwmac-sun8i only works in chain mode */
7229 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7230 		chain_mode = 1;
7231 	priv->chain_mode = chain_mode;
7232 
7233 	/* Initialize HW Interface */
7234 	ret = stmmac_hwif_init(priv);
7235 	if (ret)
7236 		return ret;
7237 
7238 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7239 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7240 	if (priv->hw_cap_support) {
7241 		dev_info(priv->device, "DMA HW capability register supported\n");
7242 
7243 		/* Some gmac/dma configuration fields that are passed
7244 		 * through the platform (e.g. enh_desc, tx_coe) can be
7245 		 * overridden with the values from the HW capability
7246 		 * register, if supported.
7247 		 */
7248 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7249 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7250 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7251 		if (priv->dma_cap.hash_tb_sz) {
7252 			priv->hw->multicast_filter_bins =
7253 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7254 			priv->hw->mcast_bits_log2 =
7255 					ilog2(priv->hw->multicast_filter_bins);
7256 		}
7257 
7258 		/* TXCOE doesn't work in thresh DMA mode */
7259 		if (priv->plat->force_thresh_dma_mode)
7260 			priv->plat->tx_coe = 0;
7261 		else
7262 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7263 
7264 		/* In case of GMAC4 rx_coe is from HW cap register. */
7265 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7266 
7267 		if (priv->dma_cap.rx_coe_type2)
7268 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7269 		else if (priv->dma_cap.rx_coe_type1)
7270 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7271 
7272 	} else {
7273 		dev_info(priv->device, "No HW DMA feature register supported\n");
7274 	}
7275 
7276 	if (priv->plat->rx_coe) {
7277 		priv->hw->rx_csum = priv->plat->rx_coe;
7278 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7279 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7280 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7281 	}
7282 	if (priv->plat->tx_coe)
7283 		dev_info(priv->device, "TX Checksum insertion supported\n");
7284 
7285 	if (priv->plat->pmt) {
7286 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7287 		device_set_wakeup_capable(priv->device, 1);
7288 		devm_pm_set_wake_irq(priv->device, priv->wol_irq);
7289 	}
7290 
7291 	if (priv->dma_cap.tsoen)
7292 		dev_info(priv->device, "TSO supported\n");
7293 
7294 	if (priv->dma_cap.number_rx_queues &&
7295 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7296 		dev_warn(priv->device,
7297 			 "Number of Rx queues (%u) exceeds dma capability\n",
7298 			 priv->plat->rx_queues_to_use);
7299 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7300 	}
7301 	if (priv->dma_cap.number_tx_queues &&
7302 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7303 		dev_warn(priv->device,
7304 			 "Number of Tx queues (%u) exceeds dma capability\n",
7305 			 priv->plat->tx_queues_to_use);
7306 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7307 	}
7308 
7309 	if (priv->dma_cap.rx_fifo_size &&
7310 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7311 		dev_warn(priv->device,
7312 			 "Rx FIFO size (%u) exceeds dma capability\n",
7313 			 priv->plat->rx_fifo_size);
7314 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7315 	}
7316 	if (priv->dma_cap.tx_fifo_size &&
7317 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7318 		dev_warn(priv->device,
7319 			 "Tx FIFO size (%u) exceeds dma capability\n",
7320 			 priv->plat->tx_fifo_size);
7321 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7322 	}
7323 
7324 	priv->hw->vlan_fail_q_en =
7325 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7326 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7327 
7328 	/* Run HW quirks, if any */
7329 	if (priv->hwif_quirks) {
7330 		ret = priv->hwif_quirks(priv);
7331 		if (ret)
7332 			return ret;
7333 	}
7334 
7335 	/* Rx Watchdog is available in cores newer than 3.40.
7336 	 * In some cases, for example on buggy HW, this feature
7337 	 * has to be disabled and this can be done by passing the
7338 	 * riwt_off field from the platform.
7339 	 */
7340 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7341 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7342 		priv->use_riwt = 1;
7343 		dev_info(priv->device,
7344 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7345 	}
7346 
7347 	return 0;
7348 }
7349 
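/**
 * stmmac_napi_add - register the NAPI instances for every channel
 * @dev: network device pointer
 * Description: for each DMA channel register the RX, TX and, when the
 * channel handles both directions, the combined RX/TX NAPI contexts.
 */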
7350 static void stmmac_napi_add(struct net_device *dev)
7351 {
7352 	struct stmmac_priv *priv = netdev_priv(dev);
7353 	u32 queue, maxq;
7354 
7355 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7356 
7357 	for (queue = 0; queue < maxq; queue++) {
7358 		struct stmmac_channel *ch = &priv->channel[queue];
7359 
7360 		ch->priv_data = priv;
7361 		ch->index = queue;
7362 		spin_lock_init(&ch->lock);
7363 
7364 		if (queue < priv->plat->rx_queues_to_use) {
7365 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7366 		}
7367 		if (queue < priv->plat->tx_queues_to_use) {
7368 			netif_napi_add_tx(dev, &ch->tx_napi,
7369 					  stmmac_napi_poll_tx);
7370 		}
7371 		if (queue < priv->plat->rx_queues_to_use &&
7372 		    queue < priv->plat->tx_queues_to_use) {
7373 			netif_napi_add(dev, &ch->rxtx_napi,
7374 				       stmmac_napi_poll_rxtx);
7375 		}
7376 	}
7377 }
7378 
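/**
 * stmmac_napi_del - unregister the NAPI instances of every channel
 * @dev: network device pointer
 */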
7379 static void stmmac_napi_del(struct net_device *dev)
7380 {
7381 	struct stmmac_priv *priv = netdev_priv(dev);
7382 	u32 queue, maxq;
7383 
7384 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7385 
7386 	for (queue = 0; queue < maxq; queue++) {
7387 		struct stmmac_channel *ch = &priv->channel[queue];
7388 
7389 		if (queue < priv->plat->rx_queues_to_use)
7390 			netif_napi_del(&ch->rx_napi);
7391 		if (queue < priv->plat->tx_queues_to_use)
7392 			netif_napi_del(&ch->tx_napi);
7393 		if (queue < priv->plat->rx_queues_to_use &&
7394 		    queue < priv->plat->tx_queues_to_use) {
7395 			netif_napi_del(&ch->rxtx_napi);
7396 		}
7397 	}
7398 }
7399 
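/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: network device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: stop the interface if it is running, re-register the NAPI
 * instances for the new queue counts, update the default RSS indirection
 * table when it has not been user configured and reopen the interface.
 * Return: 0 on success, otherwise a negative errno.
 */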
7400 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7401 {
7402 	struct stmmac_priv *priv = netdev_priv(dev);
7403 	int ret = 0, i;
7404 
7405 	if (netif_running(dev))
7406 		stmmac_release(dev);
7407 
7408 	stmmac_napi_del(dev);
7409 
7410 	priv->plat->rx_queues_to_use = rx_cnt;
7411 	priv->plat->tx_queues_to_use = tx_cnt;
7412 	if (!netif_is_rxfh_configured(dev))
7413 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7414 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7415 									rx_cnt);
7416 
7417 	stmmac_napi_add(dev);
7418 
7419 	if (netif_running(dev))
7420 		ret = stmmac_open(dev);
7421 
7422 	return ret;
7423 }
7424 
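/**
 * stmmac_reinit_ringparam - change the RX/TX descriptor ring sizes
 * @dev: network device pointer
 * @rx_size: new RX ring size
 * @tx_size: new TX ring size
 * Description: stop the interface if it is running, update the DMA ring
 * sizes and reopen the interface so the new sizes take effect.
 * Return: 0 on success, otherwise a negative errno.
 */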
7425 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7426 {
7427 	struct stmmac_priv *priv = netdev_priv(dev);
7428 	int ret = 0;
7429 
7430 	if (netif_running(dev))
7431 		stmmac_release(dev);
7432 
7433 	priv->dma_conf.dma_rx_size = rx_size;
7434 	priv->dma_conf.dma_tx_size = tx_size;
7435 
7436 	if (netif_running(dev))
7437 		ret = stmmac_open(dev);
7438 
7439 	return ret;
7440 }
7441 
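/**
 * stmmac_xdp_rx_timestamp - XDP metadata kfunc to read the RX timestamp
 * @_ctx: XDP metadata context, actually a struct stmmac_xdp_buff
 * @timestamp: filled with the (CDC corrected) RX hardware timestamp
 * Description: report the hardware RX timestamp of the current descriptor
 * (or of the context descriptor on GMAC4/XGMAC) to an XDP program.
 * Return: 0 on success, -ENODATA if timestamping is disabled or no
 * timestamp is available.
 */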
7442 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7443 {
7444 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7445 	struct dma_desc *desc_contains_ts = ctx->desc;
7446 	struct stmmac_priv *priv = ctx->priv;
7447 	struct dma_desc *ndesc = ctx->ndesc;
7448 	struct dma_desc *desc = ctx->desc;
7449 	u64 ns = 0;
7450 
7451 	if (!priv->hwts_rx_en)
7452 		return -ENODATA;
7453 
7454 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7455 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7456 		desc_contains_ts = ndesc;
7457 
7458 	/* Check if timestamp is available */
7459 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7460 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7461 		ns -= priv->plat->cdc_error_adj;
7462 		*timestamp = ns_to_ktime(ns);
7463 		return 0;
7464 	}
7465 
7466 	return -ENODATA;
7467 }
7468 
7469 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7470 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7471 };
7472 
7473 /**
7474  * stmmac_dvr_probe
7475  * @device: device pointer
7476  * @plat_dat: platform data pointer
7477  * @res: stmmac resource pointer
7478  * Description: this is the main probe function used to
7479  * call alloc_etherdev and allocate the priv structure.
7480  * Return:
7481  * returns 0 on success, otherwise errno.
7482  */
7483 int stmmac_dvr_probe(struct device *device,
7484 		     struct plat_stmmacenet_data *plat_dat,
7485 		     struct stmmac_resources *res)
7486 {
7487 	struct net_device *ndev = NULL;
7488 	struct stmmac_priv *priv;
7489 	u32 rxq;
7490 	int i, ret = 0;
7491 
7492 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7493 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7494 	if (!ndev)
7495 		return -ENOMEM;
7496 
7497 	SET_NETDEV_DEV(ndev, device);
7498 
7499 	priv = netdev_priv(ndev);
7500 	priv->device = device;
7501 	priv->dev = ndev;
7502 
7503 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7504 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7505 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7506 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7507 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7508 	}
7509 
7510 	priv->xstats.pcpu_stats =
7511 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7512 	if (!priv->xstats.pcpu_stats)
7513 		return -ENOMEM;
7514 
7515 	stmmac_set_ethtool_ops(ndev);
7516 	priv->pause_time = pause;
7517 	priv->plat = plat_dat;
7518 	priv->ioaddr = res->addr;
7519 	priv->dev->base_addr = (unsigned long)res->addr;
7520 	priv->plat->dma_cfg->multi_msi_en =
7521 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7522 
7523 	priv->dev->irq = res->irq;
7524 	priv->wol_irq = res->wol_irq;
7525 	priv->lpi_irq = res->lpi_irq;
7526 	priv->sfty_irq = res->sfty_irq;
7527 	priv->sfty_ce_irq = res->sfty_ce_irq;
7528 	priv->sfty_ue_irq = res->sfty_ue_irq;
7529 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7530 		priv->rx_irq[i] = res->rx_irq[i];
7531 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7532 		priv->tx_irq[i] = res->tx_irq[i];
7533 
7534 	if (!is_zero_ether_addr(res->mac))
7535 		eth_hw_addr_set(priv->dev, res->mac);
7536 
7537 	dev_set_drvdata(device, priv->dev);
7538 
7539 	/* Verify driver arguments */
7540 	stmmac_verify_args();
7541 
7542 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7543 	if (!priv->af_xdp_zc_qps)
7544 		return -ENOMEM;
7545 
7546 	/* Allocate workqueue */
7547 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7548 	if (!priv->wq) {
7549 		dev_err(priv->device, "failed to create workqueue\n");
7550 		ret = -ENOMEM;
7551 		goto error_wq_init;
7552 	}
7553 
7554 	INIT_WORK(&priv->service_task, stmmac_service_task);
7555 
7556 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7557 
7558 	/* Override with kernel parameters if supplied XXX CRS XXX
7559 	 * this needs to have multiple instances
7560 	 */
7561 	if ((phyaddr >= 0) && (phyaddr <= 31))
7562 		priv->plat->phy_addr = phyaddr;
7563 
7564 	if (priv->plat->stmmac_rst) {
7565 		ret = reset_control_assert(priv->plat->stmmac_rst);
7566 		reset_control_deassert(priv->plat->stmmac_rst);
7567 		/* Some reset controllers have only a reset callback instead
7568 		 * of an assert + deassert callback pair.
7569 		 */
7570 		if (ret == -ENOTSUPP)
7571 			reset_control_reset(priv->plat->stmmac_rst);
7572 	}
7573 
7574 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7575 	if (ret == -ENOTSUPP)
7576 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7577 			ERR_PTR(ret));
7578 
7579 	/* Wait a bit for the reset to take effect */
7580 	udelay(10);
7581 
7582 	/* Init MAC and get the capabilities */
7583 	ret = stmmac_hw_init(priv);
7584 	if (ret)
7585 		goto error_hw_init;
7586 
7587 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7588 	 */
7589 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7590 		priv->plat->dma_cfg->dche = false;
7591 
7592 	stmmac_check_ether_addr(priv);
7593 
7594 	ndev->netdev_ops = &stmmac_netdev_ops;
7595 
7596 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7597 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7598 
7599 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7600 			    NETIF_F_RXCSUM;
7601 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7602 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7603 
7604 	ret = stmmac_tc_init(priv, priv);
7605 	if (!ret) {
7606 		ndev->hw_features |= NETIF_F_HW_TC;
7607 	}
7608 
7609 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7610 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7611 		if (priv->plat->has_gmac4)
7612 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7613 		priv->tso = true;
7614 		dev_info(priv->device, "TSO feature enabled\n");
7615 	}
7616 
7617 	if (priv->dma_cap.sphen &&
7618 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7619 		ndev->hw_features |= NETIF_F_GRO;
7620 		priv->sph_cap = true;
7621 		priv->sph = priv->sph_cap;
7622 		dev_info(priv->device, "SPH feature enabled\n");
7623 	}
7624 
7625 	/* Ideally our host DMA address width is the same as for the
7626 	 * device. However, it may differ and then we have to use our
7627 	 * host DMA width for allocation and the device DMA width for
7628 	 * register handling.
7629 	 */
7630 	if (priv->plat->host_dma_width)
7631 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7632 	else
7633 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7634 
7635 	if (priv->dma_cap.host_dma_width) {
7636 		ret = dma_set_mask_and_coherent(device,
7637 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7638 		if (!ret) {
7639 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7640 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7641 
7642 			/*
7643 			 * If more than 32 bits can be addressed, make sure to
7644 			 * enable enhanced addressing mode.
7645 			 */
7646 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7647 				priv->plat->dma_cfg->eame = true;
7648 		} else {
7649 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7650 			if (ret) {
7651 				dev_err(priv->device, "Failed to set DMA Mask\n");
7652 				goto error_hw_init;
7653 			}
7654 
7655 			priv->dma_cap.host_dma_width = 32;
7656 		}
7657 	}
7658 
7659 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7660 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7661 #ifdef STMMAC_VLAN_TAG_USED
7662 	/* Both mac100 and gmac support receive VLAN tag detection */
7663 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7664 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
7665 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7666 		priv->hw->hw_vlan_en = true;
7667 	}
7668 	if (priv->dma_cap.vlhash) {
7669 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7670 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7671 	}
7672 	if (priv->dma_cap.vlins) {
7673 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7674 		if (priv->dma_cap.dvlan)
7675 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7676 	}
7677 #endif
7678 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7679 
7680 	priv->xstats.threshold = tc;
7681 
7682 	/* Initialize RSS */
7683 	rxq = priv->plat->rx_queues_to_use;
7684 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7685 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7686 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7687 
7688 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7689 		ndev->features |= NETIF_F_RXHASH;
7690 
7691 	ndev->vlan_features |= ndev->features;
7692 
7693 	/* MTU range: 46 - hw-specific max */
7694 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7695 	if (priv->plat->has_xgmac)
7696 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7697 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7698 		ndev->max_mtu = JUMBO_LEN;
7699 	else
7700 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7701 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7702 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7703 	 */
7704 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7705 	    (priv->plat->maxmtu >= ndev->min_mtu))
7706 		ndev->max_mtu = priv->plat->maxmtu;
7707 	else if (priv->plat->maxmtu < ndev->min_mtu)
7708 		dev_warn(priv->device,
7709 			 "%s: warning: maxmtu having invalid value (%d)\n",
7710 			 __func__, priv->plat->maxmtu);
7711 
7712 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7713 
7714 	/* Setup channels NAPI */
7715 	stmmac_napi_add(ndev);
7716 
7717 	mutex_init(&priv->lock);
7718 
7719 	stmmac_fpe_init(priv);
7720 
7721 	/* If a specific clk_csr value is passed from the platform
7722 	 * this means that the CSR Clock Range selection cannot be
7723 	 * changed at run-time and is fixed. Otherwise the driver will
7724 	 * try to set the MDC clock dynamically according to the actual
7725 	 * csr clock input.
7726 	 */
7727 	if (priv->plat->clk_csr >= 0)
7728 		priv->clk_csr = priv->plat->clk_csr;
7729 	else
7730 		stmmac_clk_csr_set(priv);
7731 
7732 	stmmac_check_pcs_mode(priv);
7733 
7734 	pm_runtime_get_noresume(device);
7735 	pm_runtime_set_active(device);
7736 	if (!pm_runtime_enabled(device))
7737 		pm_runtime_enable(device);
7738 
7739 	ret = stmmac_mdio_register(ndev);
7740 	if (ret < 0) {
7741 		dev_err_probe(priv->device, ret,
7742 			      "MDIO bus (id: %d) registration failed\n",
7743 			      priv->plat->bus_id);
7744 		goto error_mdio_register;
7745 	}
7746 
7747 	ret = stmmac_pcs_setup(ndev);
7748 	if (ret)
7749 		goto error_pcs_setup;
7750 
7751 	ret = stmmac_phy_setup(priv);
7752 	if (ret) {
7753 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7754 		goto error_phy_setup;
7755 	}
7756 
7757 	ret = register_netdev(ndev);
7758 	if (ret) {
7759 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7760 			__func__, ret);
7761 		goto error_netdev_register;
7762 	}
7763 
7764 #ifdef CONFIG_DEBUG_FS
7765 	stmmac_init_fs(ndev);
7766 #endif
7767 
7768 	if (priv->plat->dump_debug_regs)
7769 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7770 
7771 	/* Let pm_runtime_put() disable the clocks.
7772 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7773 	 */
7774 	pm_runtime_put(device);
7775 
7776 	return ret;
7777 
7778 error_netdev_register:
7779 	phylink_destroy(priv->phylink);
7780 error_phy_setup:
7781 	stmmac_pcs_clean(ndev);
7782 error_pcs_setup:
7783 	stmmac_mdio_unregister(ndev);
7784 error_mdio_register:
7785 	stmmac_napi_del(ndev);
7786 error_hw_init:
7787 	destroy_workqueue(priv->wq);
7788 error_wq_init:
7789 	bitmap_free(priv->af_xdp_zc_qps);
7790 
7791 	return ret;
7792 }
7793 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7794 
7795 /**
7796  * stmmac_dvr_remove
7797  * @dev: device pointer
7798  * Description: this function resets the TX/RX processes, disables the MAC
7799  * RX/TX, changes the link status and releases the DMA descriptor rings.
7800  */
7801 void stmmac_dvr_remove(struct device *dev)
7802 {
7803 	struct net_device *ndev = dev_get_drvdata(dev);
7804 	struct stmmac_priv *priv = netdev_priv(ndev);
7805 
7806 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7807 
7808 	pm_runtime_get_sync(dev);
7809 
7810 	unregister_netdev(ndev);
7811 
7812 #ifdef CONFIG_DEBUG_FS
7813 	stmmac_exit_fs(ndev);
7814 #endif
7815 	phylink_destroy(priv->phylink);
7816 	if (priv->plat->stmmac_rst)
7817 		reset_control_assert(priv->plat->stmmac_rst);
7818 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7819 
7820 	stmmac_pcs_clean(ndev);
7821 	stmmac_mdio_unregister(ndev);
7822 
7823 	destroy_workqueue(priv->wq);
7824 	mutex_destroy(&priv->lock);
7825 	bitmap_free(priv->af_xdp_zc_qps);
7826 
7827 	pm_runtime_disable(dev);
7828 	pm_runtime_put_noidle(dev);
7829 }
7830 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7831 
7832 /**
7833  * stmmac_suspend - suspend callback
7834  * @dev: device pointer
7835  * Description: this is the function to suspend the device and it is called
7836  * by the platform driver to stop the network queue, release the resources,
7837  * program the PMT register (for WoL), clean and release driver resources.
7838  */
7839 int stmmac_suspend(struct device *dev)
7840 {
7841 	struct net_device *ndev = dev_get_drvdata(dev);
7842 	struct stmmac_priv *priv = netdev_priv(ndev);
7843 	u32 chan;
7844 
7845 	if (!ndev || !netif_running(ndev))
7846 		return 0;
7847 
7848 	mutex_lock(&priv->lock);
7849 
7850 	netif_device_detach(ndev);
7851 
7852 	stmmac_disable_all_queues(priv);
7853 
7854 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7855 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7856 
7857 	if (priv->eee_sw_timer_en) {
7858 		priv->tx_path_in_lpi_mode = false;
7859 		timer_delete_sync(&priv->eee_ctrl_timer);
7860 	}
7861 
7862 	/* Stop TX/RX DMA */
7863 	stmmac_stop_all_dma(priv);
7864 
7865 	if (priv->plat->serdes_powerdown)
7866 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7867 
7868 	/* Enable Power down mode by programming the PMT regs */
7869 	if (stmmac_wol_enabled_mac(priv)) {
7870 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7871 		priv->irq_wake = 1;
7872 	} else {
7873 		stmmac_mac_set(priv, priv->ioaddr, false);
7874 		pinctrl_pm_select_sleep_state(priv->device);
7875 	}
7876 
7877 	mutex_unlock(&priv->lock);
7878 
7879 	rtnl_lock();
7880 	if (stmmac_wol_enabled_phy(priv))
7881 		phylink_speed_down(priv->phylink, false);
7882 
7883 	phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv));
7884 	rtnl_unlock();
7885 
7886 	if (stmmac_fpe_supported(priv))
7887 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
7888 
7889 	if (priv->plat->suspend)
7890 		return priv->plat->suspend(dev, priv->plat->bsp_priv);
7891 
7892 	return 0;
7893 }
7894 EXPORT_SYMBOL_GPL(stmmac_suspend);
7895 
7896 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7897 {
7898 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7899 
7900 	rx_q->cur_rx = 0;
7901 	rx_q->dirty_rx = 0;
7902 }
7903 
7904 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7905 {
7906 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7907 
7908 	tx_q->cur_tx = 0;
7909 	tx_q->dirty_tx = 0;
7910 	tx_q->mss = 0;
7911 
7912 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7913 }
7914 
7915 /**
7916  * stmmac_reset_queues_param - reset queue parameters
7917  * @priv: device pointer
7918  */
7919 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7920 {
7921 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7922 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7923 	u32 queue;
7924 
7925 	for (queue = 0; queue < rx_cnt; queue++)
7926 		stmmac_reset_rx_queue(priv, queue);
7927 
7928 	for (queue = 0; queue < tx_cnt; queue++)
7929 		stmmac_reset_tx_queue(priv, queue);
7930 }
7931 
7932 /**
7933  * stmmac_resume - resume callback
7934  * @dev: device pointer
7935  * Description: when resuming, this function is invoked to set up the DMA
7936  * and CORE in a usable state.
7937  */
7938 int stmmac_resume(struct device *dev)
7939 {
7940 	struct net_device *ndev = dev_get_drvdata(dev);
7941 	struct stmmac_priv *priv = netdev_priv(ndev);
7942 	int ret;
7943 
7944 	if (priv->plat->resume) {
7945 		ret = priv->plat->resume(dev, priv->plat->bsp_priv);
7946 		if (ret)
7947 			return ret;
7948 	}
7949 
7950 	if (!netif_running(ndev))
7951 		return 0;
7952 
7953 	/* The Power Down bit in the PM register is cleared
7954 	 * automatically as soon as a magic packet or a Wake-up frame
7955 	 * is received. Even so, it's better to manually clear
7956 	 * this bit because it can generate problems while resuming
7957 	 * from other devices (e.g. serial console).
7958 	 */
7959 	if (stmmac_wol_enabled_mac(priv)) {
7960 		mutex_lock(&priv->lock);
7961 		stmmac_pmt(priv, priv->hw, 0);
7962 		mutex_unlock(&priv->lock);
7963 		priv->irq_wake = 0;
7964 	} else {
7965 		pinctrl_pm_select_default_state(priv->device);
7966 		/* reset the phy so that it's ready */
7967 		if (priv->mii)
7968 			stmmac_mdio_reset(priv->mii);
7969 	}
7970 
7971 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7972 	    priv->plat->serdes_powerup) {
7973 		ret = priv->plat->serdes_powerup(ndev,
7974 						 priv->plat->bsp_priv);
7975 
7976 		if (ret < 0)
7977 			return ret;
7978 	}
7979 
7980 	rtnl_lock();
7981 
7982 	/* Prepare the PHY to resume, ensuring that the clocks it needs
7983 	 * for the MAC DMA reset to complete are running.
7984 	 */
7985 	phylink_prepare_resume(priv->phylink);
7986 
7987 	mutex_lock(&priv->lock);
7988 
7989 	stmmac_reset_queues_param(priv);
7990 
7991 	stmmac_free_tx_skbufs(priv);
7992 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7993 
7994 	ret = stmmac_hw_setup(ndev, false);
7995 	if (ret < 0) {
7996 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
7997 		mutex_unlock(&priv->lock);
7998 		rtnl_unlock();
7999 		return ret;
8000 	}
8001 
8002 	stmmac_init_coalesce(priv);
8003 	phylink_rx_clk_stop_block(priv->phylink);
8004 	stmmac_set_rx_mode(ndev);
8005 
8006 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8007 	phylink_rx_clk_stop_unblock(priv->phylink);
8008 
8009 	stmmac_enable_all_queues(priv);
8010 	stmmac_enable_all_dma_irq(priv);
8011 
8012 	mutex_unlock(&priv->lock);
8013 
8014 	/* phylink_resume() must be called after the hardware has been
8015 	 * initialised because it may bring the link up immediately in a
8016 	 * workqueue thread, which will race with initialisation.
8017 	 */
8018 	phylink_resume(priv->phylink);
8019 	if (stmmac_wol_enabled_phy(priv))
8020 		phylink_speed_up(priv->phylink);
8021 
8022 	rtnl_unlock();
8023 
8024 	netif_device_attach(ndev);
8025 
8026 	return 0;
8027 }
8028 EXPORT_SYMBOL_GPL(stmmac_resume);
8029 
8030 /* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
8031 DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
8032 EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
8033 
8034 #ifndef MODULE
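/**
 * stmmac_cmdline_opt - parse the built-in "stmmaceth=" boot options
 * @str: comma-separated "option:value" list from the kernel command line
 * Description: override the module parameters when the driver is built in,
 * e.g. (illustrative values): stmmaceth=debug:16,phyaddr:1,chain_mode:1
 */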
8035 static int __init stmmac_cmdline_opt(char *str)
8036 {
8037 	char *opt;
8038 
8039 	if (!str || !*str)
8040 		return 1;
8041 	while ((opt = strsep(&str, ",")) != NULL) {
8042 		if (!strncmp(opt, "debug:", 6)) {
8043 			if (kstrtoint(opt + 6, 0, &debug))
8044 				goto err;
8045 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8046 			if (kstrtoint(opt + 8, 0, &phyaddr))
8047 				goto err;
8048 		} else if (!strncmp(opt, "tc:", 3)) {
8049 			if (kstrtoint(opt + 3, 0, &tc))
8050 				goto err;
8051 		} else if (!strncmp(opt, "watchdog:", 9)) {
8052 			if (kstrtoint(opt + 9, 0, &watchdog))
8053 				goto err;
8054 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8055 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8056 				goto err;
8057 		} else if (!strncmp(opt, "pause:", 6)) {
8058 			if (kstrtoint(opt + 6, 0, &pause))
8059 				goto err;
8060 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8061 			if (kstrtoint(opt + 10, 0, &eee_timer))
8062 				goto err;
8063 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8064 			if (kstrtoint(opt + 11, 0, &chain_mode))
8065 				goto err;
8066 		}
8067 	}
8068 	return 1;
8069 
8070 err:
8071 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
8072 	return 1;
8073 }
8074 
8075 __setup("stmmaceth=", stmmac_cmdline_opt);
8076 #endif /* MODULE */
8077 
8078 static int __init stmmac_init(void)
8079 {
8080 #ifdef CONFIG_DEBUG_FS
8081 	/* Create debugfs main directory if it doesn't exist yet */
8082 	if (!stmmac_fs_dir)
8083 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8084 	register_netdevice_notifier(&stmmac_notifier);
8085 #endif
8086 
8087 	return 0;
8088 }
8089 
8090 static void __exit stmmac_exit(void)
8091 {
8092 #ifdef CONFIG_DEBUG_FS
8093 	unregister_netdevice_notifier(&stmmac_notifier);
8094 	debugfs_remove_recursive(stmmac_fs_dir);
8095 #endif
8096 }
8097 
8098 module_init(stmmac_init)
8099 module_exit(stmmac_exit)
8100 
8101 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8102 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8103 MODULE_LICENSE("GPL");
8104