xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 6bab77ced3ffbce3d6c5b5bcce17da7c8a3f8266)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54 
55 /* As long as the interface is active, we keep the timestamping counter enabled
56  * with fine resolution and binary rollover. This avoid non-monotonic behavior
57  * (clock jumps) when changing timestamping settings at runtime.
58  */
59 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 				 PTP_TCR_TSCTRLSSR)
61 
62 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
64 
65 /* Module parameters */
66 #define TX_TIMEO	5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70 
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74 
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78 
79 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = 0xdead;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 /* This is unused */
105 #define	DEFAULT_BUFSIZE	1536
106 static int buf_sz = DEFAULT_BUFSIZE;
107 module_param(buf_sz, int, 0644);
108 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
109 
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
112 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113 
114 #define STMMAC_DEFAULT_LPI_TIMER	1000
115 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, uint, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119 
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
121  * but allow user to force to use the chain instead of the ring
122  */
123 static unsigned int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
126 
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
139 					  u32 rxmode, u32 chan);
140 
141 #ifdef CONFIG_DEBUG_FS
142 static const struct net_device_ops stmmac_netdev_ops;
143 static void stmmac_init_fs(struct net_device *dev);
144 static void stmmac_exit_fs(struct net_device *dev);
145 #endif
146 
147 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
148 
149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
150 {
151 	int ret = 0;
152 
153 	if (enabled) {
154 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
155 		if (ret)
156 			return ret;
157 		ret = clk_prepare_enable(priv->plat->pclk);
158 		if (ret) {
159 			clk_disable_unprepare(priv->plat->stmmac_clk);
160 			return ret;
161 		}
162 		if (priv->plat->clks_config) {
163 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
164 			if (ret) {
165 				clk_disable_unprepare(priv->plat->stmmac_clk);
166 				clk_disable_unprepare(priv->plat->pclk);
167 				return ret;
168 			}
169 		}
170 	} else {
171 		clk_disable_unprepare(priv->plat->stmmac_clk);
172 		clk_disable_unprepare(priv->plat->pclk);
173 		if (priv->plat->clks_config)
174 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
175 	}
176 
177 	return ret;
178 }
179 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
180 
181 /**
182  * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
183  * @bsp_priv: BSP private data structure (unused)
184  * @clk_tx_i: the transmit clock
185  * @interface: the selected interface mode
186  * @speed: the speed that the MAC will be operating at
187  *
188  * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
189  * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
190  * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
191  * the plat_data->set_clk_tx_rate method directly, call it via their own
192  * implementation, or implement their own method should they have more
193  * complex requirements. It is intended to only be used in this method.
194  *
195  * plat_data->clk_tx_i must be filled in.
196  */
197 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
198 			   phy_interface_t interface, int speed)
199 {
200 	long rate = rgmii_clock(speed);
201 
202 	/* Silently ignore unsupported speeds as rgmii_clock() only
203 	 * supports 10, 100 and 1000Mbps. We do not want to spit
204 	 * errors for 2500 and higher speeds here.
205 	 */
206 	if (rate < 0)
207 		return 0;
208 
209 	return clk_set_rate(clk_tx_i, rate);
210 }
211 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
212 
213 /**
214  * stmmac_verify_args - verify the driver parameters.
215  * Description: it checks the driver parameters and set a default in case of
216  * errors.
217  */
218 static void stmmac_verify_args(void)
219 {
220 	if (unlikely(watchdog < 0))
221 		watchdog = TX_TIMEO;
222 	if (unlikely((pause < 0) || (pause > 0xffff)))
223 		pause = PAUSE_TIME;
224 
225 	if (flow_ctrl != 0xdead)
226 		pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
227 }
228 
229 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
230 {
231 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
232 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
233 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
234 	u32 queue;
235 
236 	for (queue = 0; queue < maxq; queue++) {
237 		struct stmmac_channel *ch = &priv->channel[queue];
238 
239 		if (stmmac_xdp_is_enabled(priv) &&
240 		    test_bit(queue, priv->af_xdp_zc_qps)) {
241 			napi_disable(&ch->rxtx_napi);
242 			continue;
243 		}
244 
245 		if (queue < rx_queues_cnt)
246 			napi_disable(&ch->rx_napi);
247 		if (queue < tx_queues_cnt)
248 			napi_disable(&ch->tx_napi);
249 	}
250 }
251 
252 /**
253  * stmmac_disable_all_queues - Disable all queues
254  * @priv: driver private structure
255  */
256 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
257 {
258 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
259 	struct stmmac_rx_queue *rx_q;
260 	u32 queue;
261 
262 	/* synchronize_rcu() needed for pending XDP buffers to drain */
263 	for (queue = 0; queue < rx_queues_cnt; queue++) {
264 		rx_q = &priv->dma_conf.rx_queue[queue];
265 		if (rx_q->xsk_pool) {
266 			synchronize_rcu();
267 			break;
268 		}
269 	}
270 
271 	__stmmac_disable_all_queues(priv);
272 }
273 
274 /**
275  * stmmac_enable_all_queues - Enable all queues
276  * @priv: driver private structure
277  */
278 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
279 {
280 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
281 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
282 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
283 	u32 queue;
284 
285 	for (queue = 0; queue < maxq; queue++) {
286 		struct stmmac_channel *ch = &priv->channel[queue];
287 
288 		if (stmmac_xdp_is_enabled(priv) &&
289 		    test_bit(queue, priv->af_xdp_zc_qps)) {
290 			napi_enable(&ch->rxtx_napi);
291 			continue;
292 		}
293 
294 		if (queue < rx_queues_cnt)
295 			napi_enable(&ch->rx_napi);
296 		if (queue < tx_queues_cnt)
297 			napi_enable(&ch->tx_napi);
298 	}
299 }
300 
301 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
302 {
303 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
304 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
305 		queue_work(priv->wq, &priv->service_task);
306 }
307 
308 static void stmmac_global_err(struct stmmac_priv *priv)
309 {
310 	netif_carrier_off(priv->dev);
311 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
312 	stmmac_service_event_schedule(priv);
313 }
314 
315 /**
316  * stmmac_clk_csr_set - dynamically set the MDC clock
317  * @priv: driver private structure
318  * Description: this is to dynamically set the MDC clock according to the csr
319  * clock input.
320  * Note:
321  *	If a specific clk_csr value is passed from the platform
322  *	this means that the CSR Clock Range selection cannot be
323  *	changed at run-time and it is fixed (as reported in the driver
324  *	documentation). Viceversa the driver will try to set the MDC
325  *	clock dynamically according to the actual clock input.
326  */
327 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
328 {
329 	unsigned long clk_rate;
330 
331 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
332 
333 	/* Platform provided default clk_csr would be assumed valid
334 	 * for all other cases except for the below mentioned ones.
335 	 * For values higher than the IEEE 802.3 specified frequency
336 	 * we can not estimate the proper divider as it is not known
337 	 * the frequency of clk_csr_i. So we do not change the default
338 	 * divider.
339 	 */
340 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
341 		if (clk_rate < CSR_F_35M)
342 			priv->clk_csr = STMMAC_CSR_20_35M;
343 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
344 			priv->clk_csr = STMMAC_CSR_35_60M;
345 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
346 			priv->clk_csr = STMMAC_CSR_60_100M;
347 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
348 			priv->clk_csr = STMMAC_CSR_100_150M;
349 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
350 			priv->clk_csr = STMMAC_CSR_150_250M;
351 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
352 			priv->clk_csr = STMMAC_CSR_250_300M;
353 		else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
354 			priv->clk_csr = STMMAC_CSR_300_500M;
355 		else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
356 			priv->clk_csr = STMMAC_CSR_500_800M;
357 	}
358 
359 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
360 		if (clk_rate > 160000000)
361 			priv->clk_csr = 0x03;
362 		else if (clk_rate > 80000000)
363 			priv->clk_csr = 0x02;
364 		else if (clk_rate > 40000000)
365 			priv->clk_csr = 0x01;
366 		else
367 			priv->clk_csr = 0;
368 	}
369 
370 	if (priv->plat->has_xgmac) {
371 		if (clk_rate > 400000000)
372 			priv->clk_csr = 0x5;
373 		else if (clk_rate > 350000000)
374 			priv->clk_csr = 0x4;
375 		else if (clk_rate > 300000000)
376 			priv->clk_csr = 0x3;
377 		else if (clk_rate > 250000000)
378 			priv->clk_csr = 0x2;
379 		else if (clk_rate > 150000000)
380 			priv->clk_csr = 0x1;
381 		else
382 			priv->clk_csr = 0x0;
383 	}
384 }
385 
386 static void print_pkt(unsigned char *buf, int len)
387 {
388 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
389 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
390 }
391 
392 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
393 {
394 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
395 	u32 avail;
396 
397 	if (tx_q->dirty_tx > tx_q->cur_tx)
398 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
399 	else
400 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
401 
402 	return avail;
403 }
404 
405 /**
406  * stmmac_rx_dirty - Get RX queue dirty
407  * @priv: driver private structure
408  * @queue: RX queue index
409  */
410 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
411 {
412 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
413 	u32 dirty;
414 
415 	if (rx_q->dirty_rx <= rx_q->cur_rx)
416 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
417 	else
418 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
419 
420 	return dirty;
421 }
422 
423 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
424 {
425 	u32 tx_cnt = priv->plat->tx_queues_to_use;
426 	u32 queue;
427 
428 	/* check if all TX queues have the work finished */
429 	for (queue = 0; queue < tx_cnt; queue++) {
430 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
431 
432 		if (tx_q->dirty_tx != tx_q->cur_tx)
433 			return true; /* still unfinished work */
434 	}
435 
436 	return false;
437 }
438 
439 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
440 {
441 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
442 }
443 
444 /**
445  * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
446  * @priv: driver private structure
447  * Description: this function is to verify and enter in LPI mode in case of
448  * EEE.
449  */
450 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
451 {
452 	if (stmmac_eee_tx_busy(priv)) {
453 		stmmac_restart_sw_lpi_timer(priv);
454 		return;
455 	}
456 
457 	/* Check and enter in LPI mode */
458 	if (!priv->tx_path_in_lpi_mode)
459 		stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
460 				    priv->tx_lpi_clk_stop, 0);
461 }
462 
463 /**
464  * stmmac_stop_sw_lpi - stop transmitting LPI
465  * @priv: driver private structure
466  * Description: When using software-controlled LPI, stop transmitting LPI state.
467  */
468 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
469 {
470 	timer_delete_sync(&priv->eee_ctrl_timer);
471 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
472 	priv->tx_path_in_lpi_mode = false;
473 }
474 
475 /**
476  * stmmac_eee_ctrl_timer - EEE TX SW timer.
477  * @t:  timer_list struct containing private info
478  * Description:
479  *  if there is no data transfer and if we are not in LPI state,
480  *  then MAC Transmitter can be moved to LPI state.
481  */
482 static void stmmac_eee_ctrl_timer(struct timer_list *t)
483 {
484 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
485 
486 	stmmac_try_to_start_sw_lpi(priv);
487 }
488 
489 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
490  * @priv: driver private structure
491  * @p : descriptor pointer
492  * @skb : the socket buffer
493  * Description :
494  * This function will read timestamp from the descriptor & pass it to stack.
495  * and also perform some sanity checks.
496  */
497 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
498 				   struct dma_desc *p, struct sk_buff *skb)
499 {
500 	struct skb_shared_hwtstamps shhwtstamp;
501 	bool found = false;
502 	u64 ns = 0;
503 
504 	if (!priv->hwts_tx_en)
505 		return;
506 
507 	/* exit if skb doesn't support hw tstamp */
508 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
509 		return;
510 
511 	/* check tx tstamp status */
512 	if (stmmac_get_tx_timestamp_status(priv, p)) {
513 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
514 		found = true;
515 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
516 		found = true;
517 	}
518 
519 	if (found) {
520 		ns -= priv->plat->cdc_error_adj;
521 
522 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
523 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
524 
525 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
526 		/* pass tstamp to stack */
527 		skb_tstamp_tx(skb, &shhwtstamp);
528 	}
529 }
530 
531 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
532  * @priv: driver private structure
533  * @p : descriptor pointer
534  * @np : next descriptor pointer
535  * @skb : the socket buffer
536  * Description :
537  * This function will read received packet's timestamp from the descriptor
538  * and pass it to stack. It also perform some sanity checks.
539  */
540 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
541 				   struct dma_desc *np, struct sk_buff *skb)
542 {
543 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
544 	struct dma_desc *desc = p;
545 	u64 ns = 0;
546 
547 	if (!priv->hwts_rx_en)
548 		return;
549 	/* For GMAC4, the valid timestamp is from CTX next desc. */
550 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
551 		desc = np;
552 
553 	/* Check if timestamp is available */
554 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
555 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
556 
557 		ns -= priv->plat->cdc_error_adj;
558 
559 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
560 		shhwtstamp = skb_hwtstamps(skb);
561 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
563 	} else  {
564 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
565 	}
566 }
567 
568 /**
569  *  stmmac_hwtstamp_set - control hardware timestamping.
570  *  @dev: device pointer.
571  *  @config: the timestamping configuration.
572  *  @extack: netlink extended ack structure for error reporting.
573  *  Description:
574  *  This function configures the MAC to enable/disable both outgoing(TX)
575  *  and incoming(RX) packets time stamping based on user input.
576  *  Return Value:
577  *  0 on success and an appropriate -ve integer on failure.
578  */
579 static int stmmac_hwtstamp_set(struct net_device *dev,
580 			       struct kernel_hwtstamp_config *config,
581 			       struct netlink_ext_ack *extack)
582 {
583 	struct stmmac_priv *priv = netdev_priv(dev);
584 	u32 ptp_v2 = 0;
585 	u32 tstamp_all = 0;
586 	u32 ptp_over_ipv4_udp = 0;
587 	u32 ptp_over_ipv6_udp = 0;
588 	u32 ptp_over_ethernet = 0;
589 	u32 snap_type_sel = 0;
590 	u32 ts_master_en = 0;
591 	u32 ts_event_en = 0;
592 
593 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
594 		NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
595 		priv->hwts_tx_en = 0;
596 		priv->hwts_rx_en = 0;
597 
598 		return -EOPNOTSUPP;
599 	}
600 
601 	if (!netif_running(dev)) {
602 		NL_SET_ERR_MSG_MOD(extack,
603 				   "Cannot change timestamping configuration while down");
604 		return -ENODEV;
605 	}
606 
607 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
608 		   __func__, config->flags, config->tx_type, config->rx_filter);
609 
610 	if (config->tx_type != HWTSTAMP_TX_OFF &&
611 	    config->tx_type != HWTSTAMP_TX_ON)
612 		return -ERANGE;
613 
614 	if (priv->adv_ts) {
615 		switch (config->rx_filter) {
616 		case HWTSTAMP_FILTER_NONE:
617 			/* time stamp no incoming packet at all */
618 			config->rx_filter = HWTSTAMP_FILTER_NONE;
619 			break;
620 
621 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
622 			/* PTP v1, UDP, any kind of event packet */
623 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
624 			/* 'xmac' hardware can support Sync, Pdelay_Req and
625 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
626 			 * This leaves Delay_Req timestamps out.
627 			 * Enable all events *and* general purpose message
628 			 * timestamping
629 			 */
630 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
631 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
632 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
633 			break;
634 
635 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
636 			/* PTP v1, UDP, Sync packet */
637 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
638 			/* take time stamp for SYNC messages only */
639 			ts_event_en = PTP_TCR_TSEVNTENA;
640 
641 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643 			break;
644 
645 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
646 			/* PTP v1, UDP, Delay_req packet */
647 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
648 			/* take time stamp for Delay_Req messages only */
649 			ts_master_en = PTP_TCR_TSMSTRENA;
650 			ts_event_en = PTP_TCR_TSEVNTENA;
651 
652 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
653 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
654 			break;
655 
656 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
657 			/* PTP v2, UDP, any kind of event packet */
658 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
659 			ptp_v2 = PTP_TCR_TSVER2ENA;
660 			/* take time stamp for all event messages */
661 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
662 
663 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
664 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
665 			break;
666 
667 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
668 			/* PTP v2, UDP, Sync packet */
669 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
670 			ptp_v2 = PTP_TCR_TSVER2ENA;
671 			/* take time stamp for SYNC messages only */
672 			ts_event_en = PTP_TCR_TSEVNTENA;
673 
674 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
675 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
676 			break;
677 
678 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
679 			/* PTP v2, UDP, Delay_req packet */
680 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
681 			ptp_v2 = PTP_TCR_TSVER2ENA;
682 			/* take time stamp for Delay_Req messages only */
683 			ts_master_en = PTP_TCR_TSMSTRENA;
684 			ts_event_en = PTP_TCR_TSEVNTENA;
685 
686 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
687 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
688 			break;
689 
690 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
691 			/* PTP v2/802.AS1 any layer, any kind of event packet */
692 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
693 			ptp_v2 = PTP_TCR_TSVER2ENA;
694 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
695 			if (priv->synopsys_id < DWMAC_CORE_4_10)
696 				ts_event_en = PTP_TCR_TSEVNTENA;
697 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
698 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
699 			ptp_over_ethernet = PTP_TCR_TSIPENA;
700 			break;
701 
702 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
703 			/* PTP v2/802.AS1, any layer, Sync packet */
704 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
705 			ptp_v2 = PTP_TCR_TSVER2ENA;
706 			/* take time stamp for SYNC messages only */
707 			ts_event_en = PTP_TCR_TSEVNTENA;
708 
709 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
710 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
711 			ptp_over_ethernet = PTP_TCR_TSIPENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
715 			/* PTP v2/802.AS1, any layer, Delay_req packet */
716 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			ptp_over_ethernet = PTP_TCR_TSIPENA;
725 			break;
726 
727 		case HWTSTAMP_FILTER_NTP_ALL:
728 		case HWTSTAMP_FILTER_ALL:
729 			/* time stamp any incoming packet */
730 			config->rx_filter = HWTSTAMP_FILTER_ALL;
731 			tstamp_all = PTP_TCR_TSENALL;
732 			break;
733 
734 		default:
735 			return -ERANGE;
736 		}
737 	} else {
738 		switch (config->rx_filter) {
739 		case HWTSTAMP_FILTER_NONE:
740 			config->rx_filter = HWTSTAMP_FILTER_NONE;
741 			break;
742 		default:
743 			/* PTP v1, UDP, any kind of event packet */
744 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
745 			break;
746 		}
747 	}
748 	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
749 	priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
750 
751 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
752 
753 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
754 		priv->systime_flags |= tstamp_all | ptp_v2 |
755 				       ptp_over_ethernet | ptp_over_ipv6_udp |
756 				       ptp_over_ipv4_udp | ts_event_en |
757 				       ts_master_en | snap_type_sel;
758 	}
759 
760 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
761 
762 	priv->tstamp_config = *config;
763 
764 	return 0;
765 }
766 
767 /**
768  *  stmmac_hwtstamp_get - read hardware timestamping.
769  *  @dev: device pointer.
770  *  @config: the timestamping configuration.
771  *  Description:
772  *  This function obtain the current hardware timestamping settings
773  *  as requested.
774  */
775 static int stmmac_hwtstamp_get(struct net_device *dev,
776 			       struct kernel_hwtstamp_config *config)
777 {
778 	struct stmmac_priv *priv = netdev_priv(dev);
779 
780 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
781 		return -EOPNOTSUPP;
782 
783 	*config = priv->tstamp_config;
784 
785 	return 0;
786 }
787 
788 /**
789  * stmmac_init_tstamp_counter - init hardware timestamping counter
790  * @priv: driver private structure
791  * @systime_flags: timestamping flags
792  * Description:
793  * Initialize hardware counter for packet timestamping.
794  * This is valid as long as the interface is open and not suspended.
795  * Will be rerun after resuming from suspend, case in which the timestamping
796  * flags updated by stmmac_hwtstamp_set() also need to be restored.
797  */
798 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
799 {
800 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
801 	struct timespec64 now;
802 	u32 sec_inc = 0;
803 	u64 temp = 0;
804 
805 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
806 		return -EOPNOTSUPP;
807 
808 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
809 	priv->systime_flags = systime_flags;
810 
811 	/* program Sub Second Increment reg */
812 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
813 					   priv->plat->clk_ptp_rate,
814 					   xmac, &sec_inc);
815 	temp = div_u64(1000000000ULL, sec_inc);
816 
817 	/* Store sub second increment for later use */
818 	priv->sub_second_inc = sec_inc;
819 
820 	/* calculate default added value:
821 	 * formula is :
822 	 * addend = (2^32)/freq_div_ratio;
823 	 * where, freq_div_ratio = 1e9ns/sec_inc
824 	 */
825 	temp = (u64)(temp << 32);
826 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
827 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
828 
829 	/* initialize system time */
830 	ktime_get_real_ts64(&now);
831 
832 	/* lower 32 bits of tv_sec are safe until y2106 */
833 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
834 
835 	return 0;
836 }
837 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
838 
839 /**
840  * stmmac_init_ptp - init PTP
841  * @priv: driver private structure
842  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
843  * This is done by looking at the HW cap. register.
844  * This function also registers the ptp driver.
845  */
846 static int stmmac_init_ptp(struct stmmac_priv *priv)
847 {
848 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
849 	int ret;
850 
851 	if (priv->plat->ptp_clk_freq_config)
852 		priv->plat->ptp_clk_freq_config(priv);
853 
854 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
855 	if (ret)
856 		return ret;
857 
858 	priv->adv_ts = 0;
859 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
860 	if (xmac && priv->dma_cap.atime_stamp)
861 		priv->adv_ts = 1;
862 	/* Dwmac 3.x core with extend_desc can support adv_ts */
863 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
864 		priv->adv_ts = 1;
865 
866 	if (priv->dma_cap.time_stamp)
867 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
868 
869 	if (priv->adv_ts)
870 		netdev_info(priv->dev,
871 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
872 
873 	priv->hwts_tx_en = 0;
874 	priv->hwts_rx_en = 0;
875 
876 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
877 		stmmac_hwtstamp_correct_latency(priv, priv);
878 
879 	return 0;
880 }
881 
882 static void stmmac_release_ptp(struct stmmac_priv *priv)
883 {
884 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
885 	stmmac_ptp_unregister(priv);
886 }
887 
888 /**
889  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
890  *  @priv: driver private structure
891  *  @duplex: duplex passed to the next function
892  *  @flow_ctrl: desired flow control modes
893  *  Description: It is used for configuring the flow control in all queues
894  */
895 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
896 				 unsigned int flow_ctrl)
897 {
898 	u32 tx_cnt = priv->plat->tx_queues_to_use;
899 
900 	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
901 			 tx_cnt);
902 }
903 
904 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
905 					 phy_interface_t interface)
906 {
907 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
908 
909 	/* Refresh the MAC-specific capabilities */
910 	stmmac_mac_update_caps(priv);
911 
912 	config->mac_capabilities = priv->hw->link.caps;
913 
914 	if (priv->plat->max_speed)
915 		phylink_limit_mac_speed(config, priv->plat->max_speed);
916 
917 	return config->mac_capabilities;
918 }
919 
920 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
921 						 phy_interface_t interface)
922 {
923 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
924 	struct phylink_pcs *pcs;
925 
926 	if (priv->plat->select_pcs) {
927 		pcs = priv->plat->select_pcs(priv, interface);
928 		if (!IS_ERR(pcs))
929 			return pcs;
930 	}
931 
932 	return NULL;
933 }
934 
935 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
936 			      const struct phylink_link_state *state)
937 {
938 	/* Nothing to do, xpcs_config() handles everything */
939 }
940 
941 static void stmmac_mac_link_down(struct phylink_config *config,
942 				 unsigned int mode, phy_interface_t interface)
943 {
944 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
945 
946 	stmmac_mac_set(priv, priv->ioaddr, false);
947 	if (priv->dma_cap.eee)
948 		stmmac_set_eee_pls(priv, priv->hw, false);
949 
950 	if (stmmac_fpe_supported(priv))
951 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
952 }
953 
954 static void stmmac_mac_link_up(struct phylink_config *config,
955 			       struct phy_device *phy,
956 			       unsigned int mode, phy_interface_t interface,
957 			       int speed, int duplex,
958 			       bool tx_pause, bool rx_pause)
959 {
960 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
961 	unsigned int flow_ctrl;
962 	u32 old_ctrl, ctrl;
963 	int ret;
964 
965 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
966 	    priv->plat->serdes_powerup)
967 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
968 
969 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
970 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
971 
972 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
973 		switch (speed) {
974 		case SPEED_10000:
975 			ctrl |= priv->hw->link.xgmii.speed10000;
976 			break;
977 		case SPEED_5000:
978 			ctrl |= priv->hw->link.xgmii.speed5000;
979 			break;
980 		case SPEED_2500:
981 			ctrl |= priv->hw->link.xgmii.speed2500;
982 			break;
983 		default:
984 			return;
985 		}
986 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
987 		switch (speed) {
988 		case SPEED_100000:
989 			ctrl |= priv->hw->link.xlgmii.speed100000;
990 			break;
991 		case SPEED_50000:
992 			ctrl |= priv->hw->link.xlgmii.speed50000;
993 			break;
994 		case SPEED_40000:
995 			ctrl |= priv->hw->link.xlgmii.speed40000;
996 			break;
997 		case SPEED_25000:
998 			ctrl |= priv->hw->link.xlgmii.speed25000;
999 			break;
1000 		case SPEED_10000:
1001 			ctrl |= priv->hw->link.xgmii.speed10000;
1002 			break;
1003 		case SPEED_2500:
1004 			ctrl |= priv->hw->link.speed2500;
1005 			break;
1006 		case SPEED_1000:
1007 			ctrl |= priv->hw->link.speed1000;
1008 			break;
1009 		default:
1010 			return;
1011 		}
1012 	} else {
1013 		switch (speed) {
1014 		case SPEED_2500:
1015 			ctrl |= priv->hw->link.speed2500;
1016 			break;
1017 		case SPEED_1000:
1018 			ctrl |= priv->hw->link.speed1000;
1019 			break;
1020 		case SPEED_100:
1021 			ctrl |= priv->hw->link.speed100;
1022 			break;
1023 		case SPEED_10:
1024 			ctrl |= priv->hw->link.speed10;
1025 			break;
1026 		default:
1027 			return;
1028 		}
1029 	}
1030 
1031 	if (priv->plat->fix_mac_speed)
1032 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1033 
1034 	if (!duplex)
1035 		ctrl &= ~priv->hw->link.duplex;
1036 	else
1037 		ctrl |= priv->hw->link.duplex;
1038 
1039 	/* Flow Control operation */
1040 	if (rx_pause && tx_pause)
1041 		flow_ctrl = FLOW_AUTO;
1042 	else if (rx_pause && !tx_pause)
1043 		flow_ctrl = FLOW_RX;
1044 	else if (!rx_pause && tx_pause)
1045 		flow_ctrl = FLOW_TX;
1046 	else
1047 		flow_ctrl = FLOW_OFF;
1048 
1049 	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
1050 
1051 	if (ctrl != old_ctrl)
1052 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1053 
1054 	if (priv->plat->set_clk_tx_rate) {
1055 		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
1056 						priv->plat->clk_tx_i,
1057 						interface, speed);
1058 		if (ret < 0)
1059 			netdev_err(priv->dev,
1060 				   "failed to configure transmit clock for %dMbps: %pe\n",
1061 				   speed, ERR_PTR(ret));
1062 	}
1063 
1064 	stmmac_mac_set(priv, priv->ioaddr, true);
1065 	if (priv->dma_cap.eee)
1066 		stmmac_set_eee_pls(priv, priv->hw, true);
1067 
1068 	if (stmmac_fpe_supported(priv))
1069 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
1070 
1071 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1072 		stmmac_hwtstamp_correct_latency(priv, priv);
1073 }
1074 
1075 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1076 {
1077 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1078 
1079 	priv->eee_active = false;
1080 
1081 	mutex_lock(&priv->lock);
1082 
1083 	priv->eee_enabled = false;
1084 
1085 	netdev_dbg(priv->dev, "disable EEE\n");
1086 	priv->eee_sw_timer_en = false;
1087 	timer_delete_sync(&priv->eee_ctrl_timer);
1088 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1089 	priv->tx_path_in_lpi_mode = false;
1090 
1091 	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1092 	mutex_unlock(&priv->lock);
1093 }
1094 
1095 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1096 				    bool tx_clk_stop)
1097 {
1098 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1099 	int ret;
1100 
1101 	priv->tx_lpi_timer = timer;
1102 	priv->eee_active = true;
1103 
1104 	mutex_lock(&priv->lock);
1105 
1106 	priv->eee_enabled = true;
1107 
1108 	/* Update the transmit clock stop according to PHY capability if
1109 	 * the platform allows
1110 	 */
1111 	if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1112 		priv->tx_lpi_clk_stop = tx_clk_stop;
1113 
1114 	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1115 			     STMMAC_DEFAULT_TWT_LS);
1116 
1117 	/* Try to cnfigure the hardware timer. */
1118 	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1119 				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1120 
1121 	if (ret) {
1122 		/* Hardware timer mode not supported, or value out of range.
1123 		 * Fall back to using software LPI mode
1124 		 */
1125 		priv->eee_sw_timer_en = true;
1126 		stmmac_restart_sw_lpi_timer(priv);
1127 	}
1128 
1129 	mutex_unlock(&priv->lock);
1130 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1131 
1132 	return 0;
1133 }
1134 
1135 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
1136 			     phy_interface_t interface)
1137 {
1138 	struct net_device *ndev = to_net_dev(config->dev);
1139 	struct stmmac_priv *priv = netdev_priv(ndev);
1140 
1141 	if (priv->plat->mac_finish)
1142 		priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
1143 
1144 	return 0;
1145 }
1146 
1147 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1148 	.mac_get_caps = stmmac_mac_get_caps,
1149 	.mac_select_pcs = stmmac_mac_select_pcs,
1150 	.mac_config = stmmac_mac_config,
1151 	.mac_link_down = stmmac_mac_link_down,
1152 	.mac_link_up = stmmac_mac_link_up,
1153 	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1154 	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1155 	.mac_finish = stmmac_mac_finish,
1156 };
1157 
1158 /**
1159  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1160  * @priv: driver private structure
1161  * Description: this is to verify if the HW supports the PCS.
1162  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1163  * configured for the TBI, RTBI, or SGMII PHY interface.
1164  */
1165 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1166 {
1167 	int interface = priv->plat->mac_interface;
1168 
1169 	if (priv->dma_cap.pcs) {
1170 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1171 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1172 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1173 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1174 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1175 			priv->hw->pcs = STMMAC_PCS_RGMII;
1176 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1177 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1178 			priv->hw->pcs = STMMAC_PCS_SGMII;
1179 		}
1180 	}
1181 }
1182 
1183 /**
1184  * stmmac_init_phy - PHY initialization
1185  * @dev: net device structure
1186  * Description: it initializes the driver's PHY state, and attaches the PHY
1187  * to the mac driver.
1188  *  Return value:
1189  *  0 on success
1190  */
1191 static int stmmac_init_phy(struct net_device *dev)
1192 {
1193 	struct stmmac_priv *priv = netdev_priv(dev);
1194 	struct fwnode_handle *phy_fwnode;
1195 	struct fwnode_handle *fwnode;
1196 	int ret;
1197 
1198 	if (!phylink_expects_phy(priv->phylink))
1199 		return 0;
1200 
1201 	fwnode = priv->plat->port_node;
1202 	if (!fwnode)
1203 		fwnode = dev_fwnode(priv->device);
1204 
1205 	if (fwnode)
1206 		phy_fwnode = fwnode_get_phy_node(fwnode);
1207 	else
1208 		phy_fwnode = NULL;
1209 
1210 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1211 	 * manually parse it
1212 	 */
1213 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1214 		int addr = priv->plat->phy_addr;
1215 		struct phy_device *phydev;
1216 
1217 		if (addr < 0) {
1218 			netdev_err(priv->dev, "no phy found\n");
1219 			return -ENODEV;
1220 		}
1221 
1222 		phydev = mdiobus_get_phy(priv->mii, addr);
1223 		if (!phydev) {
1224 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1225 			return -ENODEV;
1226 		}
1227 
1228 		ret = phylink_connect_phy(priv->phylink, phydev);
1229 	} else {
1230 		fwnode_handle_put(phy_fwnode);
1231 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1232 	}
1233 
1234 	if (ret == 0) {
1235 		struct ethtool_keee eee;
1236 
1237 		/* Configure phylib's copy of the LPI timer. Normally,
1238 		 * phylink_config.lpi_timer_default would do this, but there is
1239 		 * a chance that userspace could change the eee_timer setting
1240 		 * via sysfs before the first open. Thus, preserve existing
1241 		 * behaviour.
1242 		 */
1243 		if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1244 			eee.tx_lpi_timer = priv->tx_lpi_timer;
1245 			phylink_ethtool_set_eee(priv->phylink, &eee);
1246 		}
1247 	}
1248 
1249 	if (!priv->plat->pmt) {
1250 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1251 
1252 		phylink_ethtool_get_wol(priv->phylink, &wol);
1253 		device_set_wakeup_capable(priv->device, !!wol.supported);
1254 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1255 	}
1256 
1257 	return ret;
1258 }
1259 
1260 static int stmmac_phy_setup(struct stmmac_priv *priv)
1261 {
1262 	struct stmmac_mdio_bus_data *mdio_bus_data;
1263 	struct phylink_config *config;
1264 	struct fwnode_handle *fwnode;
1265 	struct phylink_pcs *pcs;
1266 	struct phylink *phylink;
1267 
1268 	config = &priv->phylink_config;
1269 
1270 	config->dev = &priv->dev->dev;
1271 	config->type = PHYLINK_NETDEV;
1272 	config->mac_managed_pm = true;
1273 
1274 	/* Stmmac always requires an RX clock for hardware initialization */
1275 	config->mac_requires_rxc = true;
1276 
1277 	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1278 		config->eee_rx_clk_stop_enable = true;
1279 
1280 	/* Set the default transmit clock stop bit based on the platform glue */
1281 	priv->tx_lpi_clk_stop = priv->plat->flags &
1282 				STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1283 
1284 	mdio_bus_data = priv->plat->mdio_bus_data;
1285 	if (mdio_bus_data)
1286 		config->default_an_inband = mdio_bus_data->default_an_inband;
1287 
1288 	/* Get the PHY interface modes (at the PHY end of the link) that
1289 	 * are supported by the platform.
1290 	 */
1291 	if (priv->plat->get_interfaces)
1292 		priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
1293 					   config->supported_interfaces);
1294 
1295 	/* Set the platform/firmware specified interface mode if the
1296 	 * supported interfaces have not already been provided using
1297 	 * phy_interface as a last resort.
1298 	 */
1299 	if (phy_interface_empty(config->supported_interfaces))
1300 		__set_bit(priv->plat->phy_interface,
1301 			  config->supported_interfaces);
1302 
1303 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1304 	if (priv->hw->xpcs)
1305 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1306 	else
1307 		pcs = priv->hw->phylink_pcs;
1308 
1309 	if (pcs)
1310 		phy_interface_or(config->supported_interfaces,
1311 				 config->supported_interfaces,
1312 				 pcs->supported_interfaces);
1313 
1314 	if (priv->dma_cap.eee) {
1315 		/* Assume all supported interfaces also support LPI */
1316 		memcpy(config->lpi_interfaces, config->supported_interfaces,
1317 		       sizeof(config->lpi_interfaces));
1318 
1319 		/* All full duplex speeds above 100Mbps are supported */
1320 		config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
1321 		config->lpi_timer_default = eee_timer * 1000;
1322 		config->eee_enabled_default = true;
1323 	}
1324 
1325 	fwnode = priv->plat->port_node;
1326 	if (!fwnode)
1327 		fwnode = dev_fwnode(priv->device);
1328 
1329 	phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
1330 				 &stmmac_phylink_mac_ops);
1331 	if (IS_ERR(phylink))
1332 		return PTR_ERR(phylink);
1333 
1334 	priv->phylink = phylink;
1335 	return 0;
1336 }
1337 
1338 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1339 				    struct stmmac_dma_conf *dma_conf)
1340 {
1341 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1342 	unsigned int desc_size;
1343 	void *head_rx;
1344 	u32 queue;
1345 
1346 	/* Display RX rings */
1347 	for (queue = 0; queue < rx_cnt; queue++) {
1348 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1349 
1350 		pr_info("\tRX Queue %u rings\n", queue);
1351 
1352 		if (priv->extend_desc) {
1353 			head_rx = (void *)rx_q->dma_erx;
1354 			desc_size = sizeof(struct dma_extended_desc);
1355 		} else {
1356 			head_rx = (void *)rx_q->dma_rx;
1357 			desc_size = sizeof(struct dma_desc);
1358 		}
1359 
1360 		/* Display RX ring */
1361 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1362 				    rx_q->dma_rx_phy, desc_size);
1363 	}
1364 }
1365 
1366 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1367 				    struct stmmac_dma_conf *dma_conf)
1368 {
1369 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1370 	unsigned int desc_size;
1371 	void *head_tx;
1372 	u32 queue;
1373 
1374 	/* Display TX rings */
1375 	for (queue = 0; queue < tx_cnt; queue++) {
1376 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1377 
1378 		pr_info("\tTX Queue %d rings\n", queue);
1379 
1380 		if (priv->extend_desc) {
1381 			head_tx = (void *)tx_q->dma_etx;
1382 			desc_size = sizeof(struct dma_extended_desc);
1383 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1384 			head_tx = (void *)tx_q->dma_entx;
1385 			desc_size = sizeof(struct dma_edesc);
1386 		} else {
1387 			head_tx = (void *)tx_q->dma_tx;
1388 			desc_size = sizeof(struct dma_desc);
1389 		}
1390 
1391 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1392 				    tx_q->dma_tx_phy, desc_size);
1393 	}
1394 }
1395 
1396 static void stmmac_display_rings(struct stmmac_priv *priv,
1397 				 struct stmmac_dma_conf *dma_conf)
1398 {
1399 	/* Display RX ring */
1400 	stmmac_display_rx_rings(priv, dma_conf);
1401 
1402 	/* Display TX ring */
1403 	stmmac_display_tx_rings(priv, dma_conf);
1404 }
1405 
1406 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1407 {
1408 	if (stmmac_xdp_is_enabled(priv))
1409 		return XDP_PACKET_HEADROOM;
1410 
1411 	return NET_SKB_PAD;
1412 }
1413 
1414 static int stmmac_set_bfsize(int mtu, int bufsize)
1415 {
1416 	int ret = bufsize;
1417 
1418 	if (mtu >= BUF_SIZE_8KiB)
1419 		ret = BUF_SIZE_16KiB;
1420 	else if (mtu >= BUF_SIZE_4KiB)
1421 		ret = BUF_SIZE_8KiB;
1422 	else if (mtu >= BUF_SIZE_2KiB)
1423 		ret = BUF_SIZE_4KiB;
1424 	else if (mtu > DEFAULT_BUFSIZE)
1425 		ret = BUF_SIZE_2KiB;
1426 	else
1427 		ret = DEFAULT_BUFSIZE;
1428 
1429 	return ret;
1430 }
1431 
1432 /**
1433  * stmmac_clear_rx_descriptors - clear RX descriptors
1434  * @priv: driver private structure
1435  * @dma_conf: structure to take the dma data
1436  * @queue: RX queue index
1437  * Description: this function is called to clear the RX descriptors
1438  * in case of both basic and extended descriptors are used.
1439  */
1440 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1441 					struct stmmac_dma_conf *dma_conf,
1442 					u32 queue)
1443 {
1444 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1445 	int i;
1446 
1447 	/* Clear the RX descriptors */
1448 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1449 		if (priv->extend_desc)
1450 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1451 					priv->use_riwt, priv->mode,
1452 					(i == dma_conf->dma_rx_size - 1),
1453 					dma_conf->dma_buf_sz);
1454 		else
1455 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1456 					priv->use_riwt, priv->mode,
1457 					(i == dma_conf->dma_rx_size - 1),
1458 					dma_conf->dma_buf_sz);
1459 }
1460 
1461 /**
1462  * stmmac_clear_tx_descriptors - clear tx descriptors
1463  * @priv: driver private structure
1464  * @dma_conf: structure to take the dma data
1465  * @queue: TX queue index.
1466  * Description: this function is called to clear the TX descriptors
1467  * in case of both basic and extended descriptors are used.
1468  */
1469 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1470 					struct stmmac_dma_conf *dma_conf,
1471 					u32 queue)
1472 {
1473 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1474 	int i;
1475 
1476 	/* Clear the TX descriptors */
1477 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1478 		int last = (i == (dma_conf->dma_tx_size - 1));
1479 		struct dma_desc *p;
1480 
1481 		if (priv->extend_desc)
1482 			p = &tx_q->dma_etx[i].basic;
1483 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1484 			p = &tx_q->dma_entx[i].basic;
1485 		else
1486 			p = &tx_q->dma_tx[i];
1487 
1488 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1489 	}
1490 }
1491 
1492 /**
1493  * stmmac_clear_descriptors - clear descriptors
1494  * @priv: driver private structure
1495  * @dma_conf: structure to take the dma data
1496  * Description: this function is called to clear the TX and RX descriptors
1497  * in case of both basic and extended descriptors are used.
1498  */
1499 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1500 				     struct stmmac_dma_conf *dma_conf)
1501 {
1502 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1503 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1504 	u32 queue;
1505 
1506 	/* Clear the RX descriptors */
1507 	for (queue = 0; queue < rx_queue_cnt; queue++)
1508 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1509 
1510 	/* Clear the TX descriptors */
1511 	for (queue = 0; queue < tx_queue_cnt; queue++)
1512 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1513 }
1514 
1515 /**
1516  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1517  * @priv: driver private structure
1518  * @dma_conf: structure to take the dma data
1519  * @p: descriptor pointer
1520  * @i: descriptor index
1521  * @flags: gfp flag
1522  * @queue: RX queue index
1523  * Description: this function is called to allocate a receive buffer, perform
1524  * the DMA mapping and init the descriptor.
1525  */
1526 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1527 				  struct stmmac_dma_conf *dma_conf,
1528 				  struct dma_desc *p,
1529 				  int i, gfp_t flags, u32 queue)
1530 {
1531 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1532 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1533 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1534 
1535 	if (priv->dma_cap.host_dma_width <= 32)
1536 		gfp |= GFP_DMA32;
1537 
1538 	if (!buf->page) {
1539 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1540 		if (!buf->page)
1541 			return -ENOMEM;
1542 		buf->page_offset = stmmac_rx_offset(priv);
1543 	}
1544 
1545 	if (priv->sph && !buf->sec_page) {
1546 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1547 		if (!buf->sec_page)
1548 			return -ENOMEM;
1549 
1550 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1551 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1552 	} else {
1553 		buf->sec_page = NULL;
1554 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1555 	}
1556 
1557 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1558 
1559 	stmmac_set_desc_addr(priv, p, buf->addr);
1560 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1561 		stmmac_init_desc3(priv, p);
1562 
1563 	return 0;
1564 }
1565 
1566 /**
1567  * stmmac_free_rx_buffer - free RX dma buffers
1568  * @priv: private structure
1569  * @rx_q: RX queue
1570  * @i: buffer index.
1571  */
1572 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1573 				  struct stmmac_rx_queue *rx_q,
1574 				  int i)
1575 {
1576 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1577 
1578 	if (buf->page)
1579 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1580 	buf->page = NULL;
1581 
1582 	if (buf->sec_page)
1583 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1584 	buf->sec_page = NULL;
1585 }
1586 
1587 /**
1588  * stmmac_free_tx_buffer - free RX dma buffers
1589  * @priv: private structure
1590  * @dma_conf: structure to take the dma data
1591  * @queue: RX queue index
1592  * @i: buffer index.
1593  */
1594 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1595 				  struct stmmac_dma_conf *dma_conf,
1596 				  u32 queue, int i)
1597 {
1598 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1599 
1600 	if (tx_q->tx_skbuff_dma[i].buf &&
1601 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1602 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1603 			dma_unmap_page(priv->device,
1604 				       tx_q->tx_skbuff_dma[i].buf,
1605 				       tx_q->tx_skbuff_dma[i].len,
1606 				       DMA_TO_DEVICE);
1607 		else
1608 			dma_unmap_single(priv->device,
1609 					 tx_q->tx_skbuff_dma[i].buf,
1610 					 tx_q->tx_skbuff_dma[i].len,
1611 					 DMA_TO_DEVICE);
1612 	}
1613 
1614 	if (tx_q->xdpf[i] &&
1615 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1616 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1617 		xdp_return_frame(tx_q->xdpf[i]);
1618 		tx_q->xdpf[i] = NULL;
1619 	}
1620 
1621 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1622 		tx_q->xsk_frames_done++;
1623 
1624 	if (tx_q->tx_skbuff[i] &&
1625 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1626 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1627 		tx_q->tx_skbuff[i] = NULL;
1628 	}
1629 
1630 	tx_q->tx_skbuff_dma[i].buf = 0;
1631 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1632 }
1633 
1634 /**
1635  * dma_free_rx_skbufs - free RX dma buffers
1636  * @priv: private structure
1637  * @dma_conf: structure to take the dma data
1638  * @queue: RX queue index
1639  */
1640 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1641 			       struct stmmac_dma_conf *dma_conf,
1642 			       u32 queue)
1643 {
1644 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1645 	int i;
1646 
1647 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1648 		stmmac_free_rx_buffer(priv, rx_q, i);
1649 }
1650 
1651 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1652 				   struct stmmac_dma_conf *dma_conf,
1653 				   u32 queue, gfp_t flags)
1654 {
1655 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1656 	int i;
1657 
1658 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1659 		struct dma_desc *p;
1660 		int ret;
1661 
1662 		if (priv->extend_desc)
1663 			p = &((rx_q->dma_erx + i)->basic);
1664 		else
1665 			p = rx_q->dma_rx + i;
1666 
1667 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1668 					     queue);
1669 		if (ret)
1670 			return ret;
1671 
1672 		rx_q->buf_alloc_num++;
1673 	}
1674 
1675 	return 0;
1676 }
1677 
1678 /**
1679  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1680  * @priv: private structure
1681  * @dma_conf: structure to take the dma data
1682  * @queue: RX queue index
1683  */
1684 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1685 				struct stmmac_dma_conf *dma_conf,
1686 				u32 queue)
1687 {
1688 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1689 	int i;
1690 
1691 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1692 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1693 
1694 		if (!buf->xdp)
1695 			continue;
1696 
1697 		xsk_buff_free(buf->xdp);
1698 		buf->xdp = NULL;
1699 	}
1700 }
1701 
1702 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1703 				      struct stmmac_dma_conf *dma_conf,
1704 				      u32 queue)
1705 {
1706 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1707 	int i;
1708 
1709 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1710 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1711 	 * use this macro to make sure no size violations.
1712 	 */
1713 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1714 
1715 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1716 		struct stmmac_rx_buffer *buf;
1717 		dma_addr_t dma_addr;
1718 		struct dma_desc *p;
1719 
1720 		if (priv->extend_desc)
1721 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1722 		else
1723 			p = rx_q->dma_rx + i;
1724 
1725 		buf = &rx_q->buf_pool[i];
1726 
1727 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1728 		if (!buf->xdp)
1729 			return -ENOMEM;
1730 
1731 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1732 		stmmac_set_desc_addr(priv, p, dma_addr);
1733 		rx_q->buf_alloc_num++;
1734 	}
1735 
1736 	return 0;
1737 }
1738 
1739 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1740 {
1741 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1742 		return NULL;
1743 
1744 	return xsk_get_pool_from_qid(priv->dev, queue);
1745 }
1746 
1747 /**
1748  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1749  * @priv: driver private structure
1750  * @dma_conf: structure to take the dma data
1751  * @queue: RX queue index
1752  * @flags: gfp flag.
1753  * Description: this function initializes the DMA RX descriptors
1754  * and allocates the socket buffers. It supports the chained and ring
1755  * modes.
1756  */
1757 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1758 				    struct stmmac_dma_conf *dma_conf,
1759 				    u32 queue, gfp_t flags)
1760 {
1761 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1762 	int ret;
1763 
1764 	netif_dbg(priv, probe, priv->dev,
1765 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1766 		  (u32)rx_q->dma_rx_phy);
1767 
1768 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1769 
1770 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1771 
1772 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1773 
1774 	if (rx_q->xsk_pool) {
1775 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1776 						   MEM_TYPE_XSK_BUFF_POOL,
1777 						   NULL));
1778 		netdev_info(priv->dev,
1779 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1780 			    rx_q->queue_index);
1781 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1782 	} else {
1783 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1784 						   MEM_TYPE_PAGE_POOL,
1785 						   rx_q->page_pool));
1786 		netdev_info(priv->dev,
1787 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1788 			    rx_q->queue_index);
1789 	}
1790 
1791 	if (rx_q->xsk_pool) {
1792 		/* RX XDP ZC buffer pool may not be populated, e.g.
1793 		 * xdpsock TX-only.
1794 		 */
1795 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1796 	} else {
1797 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1798 		if (ret < 0)
1799 			return -ENOMEM;
1800 	}
1801 
1802 	/* Setup the chained descriptor addresses */
1803 	if (priv->mode == STMMAC_CHAIN_MODE) {
1804 		if (priv->extend_desc)
1805 			stmmac_mode_init(priv, rx_q->dma_erx,
1806 					 rx_q->dma_rx_phy,
1807 					 dma_conf->dma_rx_size, 1);
1808 		else
1809 			stmmac_mode_init(priv, rx_q->dma_rx,
1810 					 rx_q->dma_rx_phy,
1811 					 dma_conf->dma_rx_size, 0);
1812 	}
1813 
1814 	return 0;
1815 }
1816 
1817 static int init_dma_rx_desc_rings(struct net_device *dev,
1818 				  struct stmmac_dma_conf *dma_conf,
1819 				  gfp_t flags)
1820 {
1821 	struct stmmac_priv *priv = netdev_priv(dev);
1822 	u32 rx_count = priv->plat->rx_queues_to_use;
1823 	int queue;
1824 	int ret;
1825 
1826 	/* RX INITIALIZATION */
1827 	netif_dbg(priv, probe, priv->dev,
1828 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1829 
1830 	for (queue = 0; queue < rx_count; queue++) {
1831 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1832 		if (ret)
1833 			goto err_init_rx_buffers;
1834 	}
1835 
1836 	return 0;
1837 
1838 err_init_rx_buffers:
1839 	while (queue >= 0) {
1840 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1841 
1842 		if (rx_q->xsk_pool)
1843 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1844 		else
1845 			dma_free_rx_skbufs(priv, dma_conf, queue);
1846 
1847 		rx_q->buf_alloc_num = 0;
1848 		rx_q->xsk_pool = NULL;
1849 
1850 		queue--;
1851 	}
1852 
1853 	return ret;
1854 }
1855 
1856 /**
1857  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1858  * @priv: driver private structure
1859  * @dma_conf: structure to take the dma data
1860  * @queue: TX queue index
1861  * Description: this function initializes the DMA TX descriptors
1862  * and allocates the socket buffers. It supports the chained and ring
1863  * modes.
1864  */
1865 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1866 				    struct stmmac_dma_conf *dma_conf,
1867 				    u32 queue)
1868 {
1869 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1870 	int i;
1871 
1872 	netif_dbg(priv, probe, priv->dev,
1873 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1874 		  (u32)tx_q->dma_tx_phy);
1875 
1876 	/* Setup the chained descriptor addresses */
1877 	if (priv->mode == STMMAC_CHAIN_MODE) {
1878 		if (priv->extend_desc)
1879 			stmmac_mode_init(priv, tx_q->dma_etx,
1880 					 tx_q->dma_tx_phy,
1881 					 dma_conf->dma_tx_size, 1);
1882 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1883 			stmmac_mode_init(priv, tx_q->dma_tx,
1884 					 tx_q->dma_tx_phy,
1885 					 dma_conf->dma_tx_size, 0);
1886 	}
1887 
1888 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1889 
1890 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1891 		struct dma_desc *p;
1892 
1893 		if (priv->extend_desc)
1894 			p = &((tx_q->dma_etx + i)->basic);
1895 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1896 			p = &((tx_q->dma_entx + i)->basic);
1897 		else
1898 			p = tx_q->dma_tx + i;
1899 
1900 		stmmac_clear_desc(priv, p);
1901 
1902 		tx_q->tx_skbuff_dma[i].buf = 0;
1903 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1904 		tx_q->tx_skbuff_dma[i].len = 0;
1905 		tx_q->tx_skbuff_dma[i].last_segment = false;
1906 		tx_q->tx_skbuff[i] = NULL;
1907 	}
1908 
1909 	return 0;
1910 }
1911 
1912 static int init_dma_tx_desc_rings(struct net_device *dev,
1913 				  struct stmmac_dma_conf *dma_conf)
1914 {
1915 	struct stmmac_priv *priv = netdev_priv(dev);
1916 	u32 tx_queue_cnt;
1917 	u32 queue;
1918 
1919 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1920 
1921 	for (queue = 0; queue < tx_queue_cnt; queue++)
1922 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1923 
1924 	return 0;
1925 }
1926 
1927 /**
1928  * init_dma_desc_rings - init the RX/TX descriptor rings
1929  * @dev: net device structure
1930  * @dma_conf: structure to take the dma data
1931  * @flags: gfp flag.
1932  * Description: this function initializes the DMA RX/TX descriptors
1933  * and allocates the socket buffers. It supports the chained and ring
1934  * modes.
1935  */
1936 static int init_dma_desc_rings(struct net_device *dev,
1937 			       struct stmmac_dma_conf *dma_conf,
1938 			       gfp_t flags)
1939 {
1940 	struct stmmac_priv *priv = netdev_priv(dev);
1941 	int ret;
1942 
1943 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1944 	if (ret)
1945 		return ret;
1946 
1947 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1948 
1949 	stmmac_clear_descriptors(priv, dma_conf);
1950 
1951 	if (netif_msg_hw(priv))
1952 		stmmac_display_rings(priv, dma_conf);
1953 
1954 	return ret;
1955 }
1956 
1957 /**
1958  * dma_free_tx_skbufs - free TX dma buffers
1959  * @priv: private structure
1960  * @dma_conf: structure to take the dma data
1961  * @queue: TX queue index
1962  */
1963 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1964 			       struct stmmac_dma_conf *dma_conf,
1965 			       u32 queue)
1966 {
1967 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1968 	int i;
1969 
1970 	tx_q->xsk_frames_done = 0;
1971 
1972 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1973 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1974 
1975 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1976 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1977 		tx_q->xsk_frames_done = 0;
1978 		tx_q->xsk_pool = NULL;
1979 	}
1980 }
1981 
1982 /**
1983  * stmmac_free_tx_skbufs - free TX skb buffers
1984  * @priv: private structure
1985  */
1986 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1987 {
1988 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1989 	u32 queue;
1990 
1991 	for (queue = 0; queue < tx_queue_cnt; queue++)
1992 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1993 }
1994 
1995 /**
1996  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1997  * @priv: private structure
1998  * @dma_conf: structure to take the dma data
1999  * @queue: RX queue index
2000  */
2001 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
2002 					 struct stmmac_dma_conf *dma_conf,
2003 					 u32 queue)
2004 {
2005 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2006 
2007 	/* Release the DMA RX socket buffers */
2008 	if (rx_q->xsk_pool)
2009 		dma_free_rx_xskbufs(priv, dma_conf, queue);
2010 	else
2011 		dma_free_rx_skbufs(priv, dma_conf, queue);
2012 
2013 	rx_q->buf_alloc_num = 0;
2014 	rx_q->xsk_pool = NULL;
2015 
2016 	/* Free DMA regions of consistent memory previously allocated */
2017 	if (!priv->extend_desc)
2018 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2019 				  sizeof(struct dma_desc),
2020 				  rx_q->dma_rx, rx_q->dma_rx_phy);
2021 	else
2022 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2023 				  sizeof(struct dma_extended_desc),
2024 				  rx_q->dma_erx, rx_q->dma_rx_phy);
2025 
2026 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
2027 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
2028 
2029 	kfree(rx_q->buf_pool);
2030 	if (rx_q->page_pool)
2031 		page_pool_destroy(rx_q->page_pool);
2032 }
2033 
2034 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2035 				       struct stmmac_dma_conf *dma_conf)
2036 {
2037 	u32 rx_count = priv->plat->rx_queues_to_use;
2038 	u32 queue;
2039 
2040 	/* Free RX queue resources */
2041 	for (queue = 0; queue < rx_count; queue++)
2042 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
2043 }
2044 
2045 /**
2046  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2047  * @priv: private structure
2048  * @dma_conf: structure to take the dma data
2049  * @queue: TX queue index
2050  */
2051 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2052 					 struct stmmac_dma_conf *dma_conf,
2053 					 u32 queue)
2054 {
2055 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2056 	size_t size;
2057 	void *addr;
2058 
2059 	/* Release the DMA TX socket buffers */
2060 	dma_free_tx_skbufs(priv, dma_conf, queue);
2061 
2062 	if (priv->extend_desc) {
2063 		size = sizeof(struct dma_extended_desc);
2064 		addr = tx_q->dma_etx;
2065 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2066 		size = sizeof(struct dma_edesc);
2067 		addr = tx_q->dma_entx;
2068 	} else {
2069 		size = sizeof(struct dma_desc);
2070 		addr = tx_q->dma_tx;
2071 	}
2072 
2073 	size *= dma_conf->dma_tx_size;
2074 
2075 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2076 
2077 	kfree(tx_q->tx_skbuff_dma);
2078 	kfree(tx_q->tx_skbuff);
2079 }
2080 
2081 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2082 				       struct stmmac_dma_conf *dma_conf)
2083 {
2084 	u32 tx_count = priv->plat->tx_queues_to_use;
2085 	u32 queue;
2086 
2087 	/* Free TX queue resources */
2088 	for (queue = 0; queue < tx_count; queue++)
2089 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2090 }
2091 
2092 /**
2093  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2094  * @priv: private structure
2095  * @dma_conf: structure to take the dma data
2096  * @queue: RX queue index
2097  * Description: according to which descriptor can be used (extend or basic)
2098  * this function allocates the resources for TX and RX paths. In case of
2099  * reception, for example, it pre-allocated the RX socket buffer in order to
2100  * allow zero-copy mechanism.
2101  */
2102 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2103 					 struct stmmac_dma_conf *dma_conf,
2104 					 u32 queue)
2105 {
2106 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2107 	struct stmmac_channel *ch = &priv->channel[queue];
2108 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2109 	struct page_pool_params pp_params = { 0 };
2110 	unsigned int dma_buf_sz_pad, num_pages;
2111 	unsigned int napi_id;
2112 	int ret;
2113 
2114 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2115 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2116 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2117 
2118 	rx_q->queue_index = queue;
2119 	rx_q->priv_data = priv;
2120 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2121 
2122 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2123 	pp_params.pool_size = dma_conf->dma_rx_size;
2124 	pp_params.order = order_base_2(num_pages);
2125 	pp_params.nid = dev_to_node(priv->device);
2126 	pp_params.dev = priv->device;
2127 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2128 	pp_params.offset = stmmac_rx_offset(priv);
2129 	pp_params.max_len = dma_conf->dma_buf_sz;
2130 
2131 	if (priv->sph) {
2132 		pp_params.offset = 0;
2133 		pp_params.max_len += stmmac_rx_offset(priv);
2134 	}
2135 
2136 	rx_q->page_pool = page_pool_create(&pp_params);
2137 	if (IS_ERR(rx_q->page_pool)) {
2138 		ret = PTR_ERR(rx_q->page_pool);
2139 		rx_q->page_pool = NULL;
2140 		return ret;
2141 	}
2142 
2143 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2144 				 sizeof(*rx_q->buf_pool),
2145 				 GFP_KERNEL);
2146 	if (!rx_q->buf_pool)
2147 		return -ENOMEM;
2148 
2149 	if (priv->extend_desc) {
2150 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2151 						   dma_conf->dma_rx_size *
2152 						   sizeof(struct dma_extended_desc),
2153 						   &rx_q->dma_rx_phy,
2154 						   GFP_KERNEL);
2155 		if (!rx_q->dma_erx)
2156 			return -ENOMEM;
2157 
2158 	} else {
2159 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2160 						  dma_conf->dma_rx_size *
2161 						  sizeof(struct dma_desc),
2162 						  &rx_q->dma_rx_phy,
2163 						  GFP_KERNEL);
2164 		if (!rx_q->dma_rx)
2165 			return -ENOMEM;
2166 	}
2167 
2168 	if (stmmac_xdp_is_enabled(priv) &&
2169 	    test_bit(queue, priv->af_xdp_zc_qps))
2170 		napi_id = ch->rxtx_napi.napi_id;
2171 	else
2172 		napi_id = ch->rx_napi.napi_id;
2173 
2174 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2175 			       rx_q->queue_index,
2176 			       napi_id);
2177 	if (ret) {
2178 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2179 		return -EINVAL;
2180 	}
2181 
2182 	return 0;
2183 }
2184 
2185 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2186 				       struct stmmac_dma_conf *dma_conf)
2187 {
2188 	u32 rx_count = priv->plat->rx_queues_to_use;
2189 	u32 queue;
2190 	int ret;
2191 
2192 	/* RX queues buffers and DMA */
2193 	for (queue = 0; queue < rx_count; queue++) {
2194 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2195 		if (ret)
2196 			goto err_dma;
2197 	}
2198 
2199 	return 0;
2200 
2201 err_dma:
2202 	free_dma_rx_desc_resources(priv, dma_conf);
2203 
2204 	return ret;
2205 }
2206 
2207 /**
2208  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2209  * @priv: private structure
2210  * @dma_conf: structure to take the dma data
2211  * @queue: TX queue index
2212  * Description: according to which descriptor can be used (extend or basic)
2213  * this function allocates the resources for TX and RX paths. In case of
2214  * reception, for example, it pre-allocated the RX socket buffer in order to
2215  * allow zero-copy mechanism.
2216  */
2217 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2218 					 struct stmmac_dma_conf *dma_conf,
2219 					 u32 queue)
2220 {
2221 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2222 	size_t size;
2223 	void *addr;
2224 
2225 	tx_q->queue_index = queue;
2226 	tx_q->priv_data = priv;
2227 
2228 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2229 				      sizeof(*tx_q->tx_skbuff_dma),
2230 				      GFP_KERNEL);
2231 	if (!tx_q->tx_skbuff_dma)
2232 		return -ENOMEM;
2233 
2234 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2235 				  sizeof(struct sk_buff *),
2236 				  GFP_KERNEL);
2237 	if (!tx_q->tx_skbuff)
2238 		return -ENOMEM;
2239 
2240 	if (priv->extend_desc)
2241 		size = sizeof(struct dma_extended_desc);
2242 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2243 		size = sizeof(struct dma_edesc);
2244 	else
2245 		size = sizeof(struct dma_desc);
2246 
2247 	size *= dma_conf->dma_tx_size;
2248 
2249 	addr = dma_alloc_coherent(priv->device, size,
2250 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2251 	if (!addr)
2252 		return -ENOMEM;
2253 
2254 	if (priv->extend_desc)
2255 		tx_q->dma_etx = addr;
2256 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2257 		tx_q->dma_entx = addr;
2258 	else
2259 		tx_q->dma_tx = addr;
2260 
2261 	return 0;
2262 }
2263 
2264 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2265 				       struct stmmac_dma_conf *dma_conf)
2266 {
2267 	u32 tx_count = priv->plat->tx_queues_to_use;
2268 	u32 queue;
2269 	int ret;
2270 
2271 	/* TX queues buffers and DMA */
2272 	for (queue = 0; queue < tx_count; queue++) {
2273 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2274 		if (ret)
2275 			goto err_dma;
2276 	}
2277 
2278 	return 0;
2279 
2280 err_dma:
2281 	free_dma_tx_desc_resources(priv, dma_conf);
2282 	return ret;
2283 }
2284 
2285 /**
2286  * alloc_dma_desc_resources - alloc TX/RX resources.
2287  * @priv: private structure
2288  * @dma_conf: structure to take the dma data
2289  * Description: according to which descriptor can be used (extend or basic)
2290  * this function allocates the resources for TX and RX paths. In case of
2291  * reception, for example, it pre-allocated the RX socket buffer in order to
2292  * allow zero-copy mechanism.
2293  */
2294 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2295 				    struct stmmac_dma_conf *dma_conf)
2296 {
2297 	/* RX Allocation */
2298 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2299 
2300 	if (ret)
2301 		return ret;
2302 
2303 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2304 
2305 	return ret;
2306 }
2307 
2308 /**
2309  * free_dma_desc_resources - free dma desc resources
2310  * @priv: private structure
2311  * @dma_conf: structure to take the dma data
2312  */
2313 static void free_dma_desc_resources(struct stmmac_priv *priv,
2314 				    struct stmmac_dma_conf *dma_conf)
2315 {
2316 	/* Release the DMA TX socket buffers */
2317 	free_dma_tx_desc_resources(priv, dma_conf);
2318 
2319 	/* Release the DMA RX socket buffers later
2320 	 * to ensure all pending XDP_TX buffers are returned.
2321 	 */
2322 	free_dma_rx_desc_resources(priv, dma_conf);
2323 }
2324 
2325 /**
2326  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2327  *  @priv: driver private structure
2328  *  Description: It is used for enabling the rx queues in the MAC
2329  */
2330 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2331 {
2332 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2333 	int queue;
2334 	u8 mode;
2335 
2336 	for (queue = 0; queue < rx_queues_count; queue++) {
2337 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2338 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2339 	}
2340 }
2341 
2342 /**
2343  * stmmac_start_rx_dma - start RX DMA channel
2344  * @priv: driver private structure
2345  * @chan: RX channel index
2346  * Description:
2347  * This starts a RX DMA channel
2348  */
2349 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2350 {
2351 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2352 	stmmac_start_rx(priv, priv->ioaddr, chan);
2353 }
2354 
2355 /**
2356  * stmmac_start_tx_dma - start TX DMA channel
2357  * @priv: driver private structure
2358  * @chan: TX channel index
2359  * Description:
2360  * This starts a TX DMA channel
2361  */
2362 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2363 {
2364 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2365 	stmmac_start_tx(priv, priv->ioaddr, chan);
2366 }
2367 
2368 /**
2369  * stmmac_stop_rx_dma - stop RX DMA channel
2370  * @priv: driver private structure
2371  * @chan: RX channel index
2372  * Description:
2373  * This stops a RX DMA channel
2374  */
2375 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2376 {
2377 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2378 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2379 }
2380 
2381 /**
2382  * stmmac_stop_tx_dma - stop TX DMA channel
2383  * @priv: driver private structure
2384  * @chan: TX channel index
2385  * Description:
2386  * This stops a TX DMA channel
2387  */
2388 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2389 {
2390 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2391 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2392 }
2393 
2394 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2395 {
2396 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2397 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2398 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2399 	u32 chan;
2400 
2401 	for (chan = 0; chan < dma_csr_ch; chan++) {
2402 		struct stmmac_channel *ch = &priv->channel[chan];
2403 		unsigned long flags;
2404 
2405 		spin_lock_irqsave(&ch->lock, flags);
2406 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2407 		spin_unlock_irqrestore(&ch->lock, flags);
2408 	}
2409 }
2410 
2411 /**
2412  * stmmac_start_all_dma - start all RX and TX DMA channels
2413  * @priv: driver private structure
2414  * Description:
2415  * This starts all the RX and TX DMA channels
2416  */
2417 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2418 {
2419 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2420 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2421 	u32 chan = 0;
2422 
2423 	for (chan = 0; chan < rx_channels_count; chan++)
2424 		stmmac_start_rx_dma(priv, chan);
2425 
2426 	for (chan = 0; chan < tx_channels_count; chan++)
2427 		stmmac_start_tx_dma(priv, chan);
2428 }
2429 
2430 /**
2431  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2432  * @priv: driver private structure
2433  * Description:
2434  * This stops the RX and TX DMA channels
2435  */
2436 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2437 {
2438 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2439 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2440 	u32 chan = 0;
2441 
2442 	for (chan = 0; chan < rx_channels_count; chan++)
2443 		stmmac_stop_rx_dma(priv, chan);
2444 
2445 	for (chan = 0; chan < tx_channels_count; chan++)
2446 		stmmac_stop_tx_dma(priv, chan);
2447 }
2448 
2449 /**
2450  *  stmmac_dma_operation_mode - HW DMA operation mode
2451  *  @priv: driver private structure
2452  *  Description: it is used for configuring the DMA operation mode register in
2453  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2454  */
2455 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2456 {
2457 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2458 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2459 	int rxfifosz = priv->plat->rx_fifo_size;
2460 	int txfifosz = priv->plat->tx_fifo_size;
2461 	u32 txmode = 0;
2462 	u32 rxmode = 0;
2463 	u32 chan = 0;
2464 	u8 qmode = 0;
2465 
2466 	if (rxfifosz == 0)
2467 		rxfifosz = priv->dma_cap.rx_fifo_size;
2468 	if (txfifosz == 0)
2469 		txfifosz = priv->dma_cap.tx_fifo_size;
2470 
2471 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2472 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2473 		rxfifosz /= rx_channels_count;
2474 		txfifosz /= tx_channels_count;
2475 	}
2476 
2477 	if (priv->plat->force_thresh_dma_mode) {
2478 		txmode = tc;
2479 		rxmode = tc;
2480 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2481 		/*
2482 		 * In case of GMAC, SF mode can be enabled
2483 		 * to perform the TX COE in HW. This depends on:
2484 		 * 1) TX COE if actually supported
2485 		 * 2) There is no bugged Jumbo frame support
2486 		 *    that needs to not insert csum in the TDES.
2487 		 */
2488 		txmode = SF_DMA_MODE;
2489 		rxmode = SF_DMA_MODE;
2490 		priv->xstats.threshold = SF_DMA_MODE;
2491 	} else {
2492 		txmode = tc;
2493 		rxmode = SF_DMA_MODE;
2494 	}
2495 
2496 	/* configure all channels */
2497 	for (chan = 0; chan < rx_channels_count; chan++) {
2498 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2499 		u32 buf_size;
2500 
2501 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2502 
2503 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2504 				rxfifosz, qmode);
2505 
2506 		if (rx_q->xsk_pool) {
2507 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2508 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2509 					      buf_size,
2510 					      chan);
2511 		} else {
2512 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2513 					      priv->dma_conf.dma_buf_sz,
2514 					      chan);
2515 		}
2516 	}
2517 
2518 	for (chan = 0; chan < tx_channels_count; chan++) {
2519 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2520 
2521 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2522 				txfifosz, qmode);
2523 	}
2524 }
2525 
2526 static void stmmac_xsk_request_timestamp(void *_priv)
2527 {
2528 	struct stmmac_metadata_request *meta_req = _priv;
2529 
2530 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2531 	*meta_req->set_ic = true;
2532 }
2533 
2534 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2535 {
2536 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2537 	struct stmmac_priv *priv = tx_compl->priv;
2538 	struct dma_desc *desc = tx_compl->desc;
2539 	bool found = false;
2540 	u64 ns = 0;
2541 
2542 	if (!priv->hwts_tx_en)
2543 		return 0;
2544 
2545 	/* check tx tstamp status */
2546 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2547 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2548 		found = true;
2549 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2550 		found = true;
2551 	}
2552 
2553 	if (found) {
2554 		ns -= priv->plat->cdc_error_adj;
2555 		return ns_to_ktime(ns);
2556 	}
2557 
2558 	return 0;
2559 }
2560 
2561 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2562 {
2563 	struct timespec64 ts = ns_to_timespec64(launch_time);
2564 	struct stmmac_metadata_request *meta_req = _priv;
2565 
2566 	if (meta_req->tbs & STMMAC_TBS_EN)
2567 		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2568 				    ts.tv_nsec);
2569 }
2570 
2571 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2572 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2573 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2574 	.tmo_request_launch_time	= stmmac_xsk_request_launch_time,
2575 };
2576 
2577 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2578 {
2579 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2580 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2581 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2582 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2583 	unsigned int entry = tx_q->cur_tx;
2584 	struct dma_desc *tx_desc = NULL;
2585 	struct xdp_desc xdp_desc;
2586 	bool work_done = true;
2587 	u32 tx_set_ic_bit = 0;
2588 
2589 	/* Avoids TX time-out as we are sharing with slow path */
2590 	txq_trans_cond_update(nq);
2591 
2592 	budget = min(budget, stmmac_tx_avail(priv, queue));
2593 
2594 	while (budget-- > 0) {
2595 		struct stmmac_metadata_request meta_req;
2596 		struct xsk_tx_metadata *meta = NULL;
2597 		dma_addr_t dma_addr;
2598 		bool set_ic;
2599 
2600 		/* We are sharing with slow path and stop XSK TX desc submission when
2601 		 * available TX ring is less than threshold.
2602 		 */
2603 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2604 		    !netif_carrier_ok(priv->dev)) {
2605 			work_done = false;
2606 			break;
2607 		}
2608 
2609 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2610 			break;
2611 
2612 		if (priv->est && priv->est->enable &&
2613 		    priv->est->max_sdu[queue] &&
2614 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2615 			priv->xstats.max_sdu_txq_drop[queue]++;
2616 			continue;
2617 		}
2618 
2619 		if (likely(priv->extend_desc))
2620 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2621 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2622 			tx_desc = &tx_q->dma_entx[entry].basic;
2623 		else
2624 			tx_desc = tx_q->dma_tx + entry;
2625 
2626 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2627 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2628 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2629 
2630 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2631 
2632 		/* To return XDP buffer to XSK pool, we simple call
2633 		 * xsk_tx_completed(), so we don't need to fill up
2634 		 * 'buf' and 'xdpf'.
2635 		 */
2636 		tx_q->tx_skbuff_dma[entry].buf = 0;
2637 		tx_q->xdpf[entry] = NULL;
2638 
2639 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2640 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2641 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2642 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2643 
2644 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2645 
2646 		tx_q->tx_count_frames++;
2647 
2648 		if (!priv->tx_coal_frames[queue])
2649 			set_ic = false;
2650 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2651 			set_ic = true;
2652 		else
2653 			set_ic = false;
2654 
2655 		meta_req.priv = priv;
2656 		meta_req.tx_desc = tx_desc;
2657 		meta_req.set_ic = &set_ic;
2658 		meta_req.tbs = tx_q->tbs;
2659 		meta_req.edesc = &tx_q->dma_entx[entry];
2660 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2661 					&meta_req);
2662 		if (set_ic) {
2663 			tx_q->tx_count_frames = 0;
2664 			stmmac_set_tx_ic(priv, tx_desc);
2665 			tx_set_ic_bit++;
2666 		}
2667 
2668 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2669 				       true, priv->mode, true, true,
2670 				       xdp_desc.len);
2671 
2672 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2673 
2674 		xsk_tx_metadata_to_compl(meta,
2675 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2676 
2677 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2678 		entry = tx_q->cur_tx;
2679 	}
2680 	u64_stats_update_begin(&txq_stats->napi_syncp);
2681 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2682 	u64_stats_update_end(&txq_stats->napi_syncp);
2683 
2684 	if (tx_desc) {
2685 		stmmac_flush_tx_descriptors(priv, queue);
2686 		xsk_tx_release(pool);
2687 	}
2688 
2689 	/* Return true if all of the 3 conditions are met
2690 	 *  a) TX Budget is still available
2691 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2692 	 *     pending XSK TX for transmission)
2693 	 */
2694 	return !!budget && work_done;
2695 }
2696 
2697 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2698 {
2699 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2700 		tc += 64;
2701 
2702 		if (priv->plat->force_thresh_dma_mode)
2703 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2704 		else
2705 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2706 						      chan);
2707 
2708 		priv->xstats.threshold = tc;
2709 	}
2710 }
2711 
2712 /**
2713  * stmmac_tx_clean - to manage the transmission completion
2714  * @priv: driver private structure
2715  * @budget: napi budget limiting this functions packet handling
2716  * @queue: TX queue index
2717  * @pending_packets: signal to arm the TX coal timer
2718  * Description: it reclaims the transmit resources after transmission completes.
2719  * If some packets still needs to be handled, due to TX coalesce, set
2720  * pending_packets to true to make NAPI arm the TX coal timer.
2721  */
2722 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2723 			   bool *pending_packets)
2724 {
2725 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2726 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2727 	unsigned int bytes_compl = 0, pkts_compl = 0;
2728 	unsigned int entry, xmits = 0, count = 0;
2729 	u32 tx_packets = 0, tx_errors = 0;
2730 
2731 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2732 
2733 	tx_q->xsk_frames_done = 0;
2734 
2735 	entry = tx_q->dirty_tx;
2736 
2737 	/* Try to clean all TX complete frame in 1 shot */
2738 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2739 		struct xdp_frame *xdpf;
2740 		struct sk_buff *skb;
2741 		struct dma_desc *p;
2742 		int status;
2743 
2744 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2745 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2746 			xdpf = tx_q->xdpf[entry];
2747 			skb = NULL;
2748 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2749 			xdpf = NULL;
2750 			skb = tx_q->tx_skbuff[entry];
2751 		} else {
2752 			xdpf = NULL;
2753 			skb = NULL;
2754 		}
2755 
2756 		if (priv->extend_desc)
2757 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2758 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2759 			p = &tx_q->dma_entx[entry].basic;
2760 		else
2761 			p = tx_q->dma_tx + entry;
2762 
2763 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2764 		/* Check if the descriptor is owned by the DMA */
2765 		if (unlikely(status & tx_dma_own))
2766 			break;
2767 
2768 		count++;
2769 
2770 		/* Make sure descriptor fields are read after reading
2771 		 * the own bit.
2772 		 */
2773 		dma_rmb();
2774 
2775 		/* Just consider the last segment and ...*/
2776 		if (likely(!(status & tx_not_ls))) {
2777 			/* ... verify the status error condition */
2778 			if (unlikely(status & tx_err)) {
2779 				tx_errors++;
2780 				if (unlikely(status & tx_err_bump_tc))
2781 					stmmac_bump_dma_threshold(priv, queue);
2782 			} else {
2783 				tx_packets++;
2784 			}
2785 			if (skb) {
2786 				stmmac_get_tx_hwtstamp(priv, p, skb);
2787 			} else if (tx_q->xsk_pool &&
2788 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2789 				struct stmmac_xsk_tx_complete tx_compl = {
2790 					.priv = priv,
2791 					.desc = p,
2792 				};
2793 
2794 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2795 							 &stmmac_xsk_tx_metadata_ops,
2796 							 &tx_compl);
2797 			}
2798 		}
2799 
2800 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2801 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2802 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2803 				dma_unmap_page(priv->device,
2804 					       tx_q->tx_skbuff_dma[entry].buf,
2805 					       tx_q->tx_skbuff_dma[entry].len,
2806 					       DMA_TO_DEVICE);
2807 			else
2808 				dma_unmap_single(priv->device,
2809 						 tx_q->tx_skbuff_dma[entry].buf,
2810 						 tx_q->tx_skbuff_dma[entry].len,
2811 						 DMA_TO_DEVICE);
2812 			tx_q->tx_skbuff_dma[entry].buf = 0;
2813 			tx_q->tx_skbuff_dma[entry].len = 0;
2814 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2815 		}
2816 
2817 		stmmac_clean_desc3(priv, tx_q, p);
2818 
2819 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2820 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2821 
2822 		if (xdpf &&
2823 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2824 			xdp_return_frame_rx_napi(xdpf);
2825 			tx_q->xdpf[entry] = NULL;
2826 		}
2827 
2828 		if (xdpf &&
2829 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2830 			xdp_return_frame(xdpf);
2831 			tx_q->xdpf[entry] = NULL;
2832 		}
2833 
2834 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2835 			tx_q->xsk_frames_done++;
2836 
2837 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2838 			if (likely(skb)) {
2839 				pkts_compl++;
2840 				bytes_compl += skb->len;
2841 				dev_consume_skb_any(skb);
2842 				tx_q->tx_skbuff[entry] = NULL;
2843 			}
2844 		}
2845 
2846 		stmmac_release_tx_desc(priv, p, priv->mode);
2847 
2848 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2849 	}
2850 	tx_q->dirty_tx = entry;
2851 
2852 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2853 				  pkts_compl, bytes_compl);
2854 
2855 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2856 								queue))) &&
2857 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2858 
2859 		netif_dbg(priv, tx_done, priv->dev,
2860 			  "%s: restart transmit\n", __func__);
2861 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2862 	}
2863 
2864 	if (tx_q->xsk_pool) {
2865 		bool work_done;
2866 
2867 		if (tx_q->xsk_frames_done)
2868 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2869 
2870 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2871 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2872 
2873 		/* For XSK TX, we try to send as many as possible.
2874 		 * If XSK work done (XSK TX desc empty and budget still
2875 		 * available), return "budget - 1" to reenable TX IRQ.
2876 		 * Else, return "budget" to make NAPI continue polling.
2877 		 */
2878 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2879 					       STMMAC_XSK_TX_BUDGET_MAX);
2880 		if (work_done)
2881 			xmits = budget - 1;
2882 		else
2883 			xmits = budget;
2884 	}
2885 
2886 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2887 		stmmac_restart_sw_lpi_timer(priv);
2888 
2889 	/* We still have pending packets, let's call for a new scheduling */
2890 	if (tx_q->dirty_tx != tx_q->cur_tx)
2891 		*pending_packets = true;
2892 
2893 	u64_stats_update_begin(&txq_stats->napi_syncp);
2894 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2895 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2896 	u64_stats_inc(&txq_stats->napi.tx_clean);
2897 	u64_stats_update_end(&txq_stats->napi_syncp);
2898 
2899 	priv->xstats.tx_errors += tx_errors;
2900 
2901 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2902 
2903 	/* Combine decisions from TX clean and XSK TX */
2904 	return max(count, xmits);
2905 }
2906 
2907 /**
2908  * stmmac_tx_err - to manage the tx error
2909  * @priv: driver private structure
2910  * @chan: channel index
2911  * Description: it cleans the descriptors and restarts the transmission
2912  * in case of transmission errors.
2913  */
2914 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2915 {
2916 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2917 
2918 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2919 
2920 	stmmac_stop_tx_dma(priv, chan);
2921 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2922 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2923 	stmmac_reset_tx_queue(priv, chan);
2924 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2925 			    tx_q->dma_tx_phy, chan);
2926 	stmmac_start_tx_dma(priv, chan);
2927 
2928 	priv->xstats.tx_errors++;
2929 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2930 }
2931 
2932 /**
2933  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2934  *  @priv: driver private structure
2935  *  @txmode: TX operating mode
2936  *  @rxmode: RX operating mode
2937  *  @chan: channel index
2938  *  Description: it is used for configuring of the DMA operation mode in
2939  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2940  *  mode.
2941  */
2942 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2943 					  u32 rxmode, u32 chan)
2944 {
2945 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2946 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2947 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2948 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2949 	int rxfifosz = priv->plat->rx_fifo_size;
2950 	int txfifosz = priv->plat->tx_fifo_size;
2951 
2952 	if (rxfifosz == 0)
2953 		rxfifosz = priv->dma_cap.rx_fifo_size;
2954 	if (txfifosz == 0)
2955 		txfifosz = priv->dma_cap.tx_fifo_size;
2956 
2957 	/* Adjust for real per queue fifo size */
2958 	rxfifosz /= rx_channels_count;
2959 	txfifosz /= tx_channels_count;
2960 
2961 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2962 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2963 }
2964 
2965 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2966 {
2967 	int ret;
2968 
2969 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2970 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2971 	if (ret && (ret != -EINVAL)) {
2972 		stmmac_global_err(priv);
2973 		return true;
2974 	}
2975 
2976 	return false;
2977 }
2978 
2979 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2980 {
2981 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2982 						 &priv->xstats, chan, dir);
2983 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2984 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2985 	struct stmmac_channel *ch = &priv->channel[chan];
2986 	struct napi_struct *rx_napi;
2987 	struct napi_struct *tx_napi;
2988 	unsigned long flags;
2989 
2990 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2991 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2992 
2993 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2994 		if (napi_schedule_prep(rx_napi)) {
2995 			spin_lock_irqsave(&ch->lock, flags);
2996 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2997 			spin_unlock_irqrestore(&ch->lock, flags);
2998 			__napi_schedule(rx_napi);
2999 		}
3000 	}
3001 
3002 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
3003 		if (napi_schedule_prep(tx_napi)) {
3004 			spin_lock_irqsave(&ch->lock, flags);
3005 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3006 			spin_unlock_irqrestore(&ch->lock, flags);
3007 			__napi_schedule(tx_napi);
3008 		}
3009 	}
3010 
3011 	return status;
3012 }
3013 
3014 /**
3015  * stmmac_dma_interrupt - DMA ISR
3016  * @priv: driver private structure
3017  * Description: this is the DMA ISR. It is called by the main ISR.
3018  * It calls the dwmac dma routine and schedule poll method in case of some
3019  * work can be done.
3020  */
3021 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
3022 {
3023 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3024 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3025 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
3026 				tx_channel_count : rx_channel_count;
3027 	u32 chan;
3028 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
3029 
3030 	/* Make sure we never check beyond our status buffer. */
3031 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
3032 		channels_to_check = ARRAY_SIZE(status);
3033 
3034 	for (chan = 0; chan < channels_to_check; chan++)
3035 		status[chan] = stmmac_napi_check(priv, chan,
3036 						 DMA_DIR_RXTX);
3037 
3038 	for (chan = 0; chan < tx_channel_count; chan++) {
3039 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
3040 			/* Try to bump up the dma threshold on this failure */
3041 			stmmac_bump_dma_threshold(priv, chan);
3042 		} else if (unlikely(status[chan] == tx_hard_error)) {
3043 			stmmac_tx_err(priv, chan);
3044 		}
3045 	}
3046 }
3047 
3048 /**
3049  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
3050  * @priv: driver private structure
3051  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
3052  */
3053 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3054 {
3055 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3056 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3057 
3058 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3059 
3060 	if (priv->dma_cap.rmon) {
3061 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3062 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3063 	} else
3064 		netdev_info(priv->dev, "No MAC Management Counters available\n");
3065 }
3066 
3067 /**
3068  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3069  * @priv: driver private structure
3070  * Description:
3071  *  new GMAC chip generations have a new register to indicate the
3072  *  presence of the optional feature/functions.
3073  *  This can be also used to override the value passed through the
3074  *  platform and necessary for old MAC10/100 and GMAC chips.
3075  */
3076 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3077 {
3078 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3079 }
3080 
3081 /**
3082  * stmmac_check_ether_addr - check if the MAC addr is valid
3083  * @priv: driver private structure
3084  * Description:
3085  * it is to verify if the MAC address is valid, in case of failures it
3086  * generates a random MAC address
3087  */
3088 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3089 {
3090 	u8 addr[ETH_ALEN];
3091 
3092 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3093 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3094 		if (is_valid_ether_addr(addr))
3095 			eth_hw_addr_set(priv->dev, addr);
3096 		else
3097 			eth_hw_addr_random(priv->dev);
3098 		dev_info(priv->device, "device MAC address %pM\n",
3099 			 priv->dev->dev_addr);
3100 	}
3101 }
3102 
3103 /**
3104  * stmmac_init_dma_engine - DMA init.
3105  * @priv: driver private structure
3106  * Description:
3107  * It inits the DMA invoking the specific MAC/GMAC callback.
3108  * Some DMA parameters can be passed from the platform;
3109  * in case of these are not passed a default is kept for the MAC or GMAC.
3110  */
3111 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3112 {
3113 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3114 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3115 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3116 	struct stmmac_rx_queue *rx_q;
3117 	struct stmmac_tx_queue *tx_q;
3118 	u32 chan = 0;
3119 	int ret = 0;
3120 
3121 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3122 		netdev_err(priv->dev, "Invalid DMA configuration\n");
3123 		return -EINVAL;
3124 	}
3125 
3126 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3127 		priv->plat->dma_cfg->atds = 1;
3128 
3129 	ret = stmmac_reset(priv, priv->ioaddr);
3130 	if (ret) {
3131 		netdev_err(priv->dev, "Failed to reset the dma\n");
3132 		return ret;
3133 	}
3134 
3135 	/* DMA Configuration */
3136 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3137 
3138 	if (priv->plat->axi)
3139 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3140 
3141 	/* DMA CSR Channel configuration */
3142 	for (chan = 0; chan < dma_csr_ch; chan++) {
3143 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3144 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3145 	}
3146 
3147 	/* DMA RX Channel Configuration */
3148 	for (chan = 0; chan < rx_channels_count; chan++) {
3149 		rx_q = &priv->dma_conf.rx_queue[chan];
3150 
3151 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3152 				    rx_q->dma_rx_phy, chan);
3153 
3154 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3155 				     (rx_q->buf_alloc_num *
3156 				      sizeof(struct dma_desc));
3157 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3158 				       rx_q->rx_tail_addr, chan);
3159 	}
3160 
3161 	/* DMA TX Channel Configuration */
3162 	for (chan = 0; chan < tx_channels_count; chan++) {
3163 		tx_q = &priv->dma_conf.tx_queue[chan];
3164 
3165 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3166 				    tx_q->dma_tx_phy, chan);
3167 
3168 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3169 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3170 				       tx_q->tx_tail_addr, chan);
3171 	}
3172 
3173 	return ret;
3174 }
3175 
3176 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3177 {
3178 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3179 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3180 	struct stmmac_channel *ch;
3181 	struct napi_struct *napi;
3182 
3183 	if (!tx_coal_timer)
3184 		return;
3185 
3186 	ch = &priv->channel[tx_q->queue_index];
3187 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3188 
3189 	/* Arm timer only if napi is not already scheduled.
3190 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3191 	 * again in the next scheduled napi.
3192 	 */
3193 	if (unlikely(!napi_is_scheduled(napi)))
3194 		hrtimer_start(&tx_q->txtimer,
3195 			      STMMAC_COAL_TIMER(tx_coal_timer),
3196 			      HRTIMER_MODE_REL);
3197 	else
3198 		hrtimer_try_to_cancel(&tx_q->txtimer);
3199 }
3200 
3201 /**
3202  * stmmac_tx_timer - mitigation sw timer for tx.
3203  * @t: data pointer
3204  * Description:
3205  * This is the timer handler to directly invoke the stmmac_tx_clean.
3206  */
3207 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3208 {
3209 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3210 	struct stmmac_priv *priv = tx_q->priv_data;
3211 	struct stmmac_channel *ch;
3212 	struct napi_struct *napi;
3213 
3214 	ch = &priv->channel[tx_q->queue_index];
3215 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3216 
3217 	if (likely(napi_schedule_prep(napi))) {
3218 		unsigned long flags;
3219 
3220 		spin_lock_irqsave(&ch->lock, flags);
3221 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3222 		spin_unlock_irqrestore(&ch->lock, flags);
3223 		__napi_schedule(napi);
3224 	}
3225 
3226 	return HRTIMER_NORESTART;
3227 }
3228 
3229 /**
3230  * stmmac_init_coalesce - init mitigation options.
3231  * @priv: driver private structure
3232  * Description:
3233  * This inits the coalesce parameters: i.e. timer rate,
3234  * timer handler and default threshold used for enabling the
3235  * interrupt on completion bit.
3236  */
3237 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3238 {
3239 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3240 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3241 	u32 chan;
3242 
3243 	for (chan = 0; chan < tx_channel_count; chan++) {
3244 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3245 
3246 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3247 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3248 
3249 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3250 	}
3251 
3252 	for (chan = 0; chan < rx_channel_count; chan++)
3253 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3254 }
3255 
3256 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3257 {
3258 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3259 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3260 	u32 chan;
3261 
3262 	/* set TX ring length */
3263 	for (chan = 0; chan < tx_channels_count; chan++)
3264 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3265 				       (priv->dma_conf.dma_tx_size - 1), chan);
3266 
3267 	/* set RX ring length */
3268 	for (chan = 0; chan < rx_channels_count; chan++)
3269 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3270 				       (priv->dma_conf.dma_rx_size - 1), chan);
3271 }
3272 
3273 /**
3274  *  stmmac_set_tx_queue_weight - Set TX queue weight
3275  *  @priv: driver private structure
3276  *  Description: It is used for setting TX queues weight
3277  */
3278 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3279 {
3280 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3281 	u32 weight;
3282 	u32 queue;
3283 
3284 	for (queue = 0; queue < tx_queues_count; queue++) {
3285 		weight = priv->plat->tx_queues_cfg[queue].weight;
3286 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3287 	}
3288 }
3289 
3290 /**
3291  *  stmmac_configure_cbs - Configure CBS in TX queue
3292  *  @priv: driver private structure
3293  *  Description: It is used for configuring CBS in AVB TX queues
3294  */
3295 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3296 {
3297 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3298 	u32 mode_to_use;
3299 	u32 queue;
3300 
3301 	/* queue 0 is reserved for legacy traffic */
3302 	for (queue = 1; queue < tx_queues_count; queue++) {
3303 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3304 		if (mode_to_use == MTL_QUEUE_DCB)
3305 			continue;
3306 
3307 		stmmac_config_cbs(priv, priv->hw,
3308 				priv->plat->tx_queues_cfg[queue].send_slope,
3309 				priv->plat->tx_queues_cfg[queue].idle_slope,
3310 				priv->plat->tx_queues_cfg[queue].high_credit,
3311 				priv->plat->tx_queues_cfg[queue].low_credit,
3312 				queue);
3313 	}
3314 }
3315 
3316 /**
3317  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3318  *  @priv: driver private structure
3319  *  Description: It is used for mapping RX queues to RX dma channels
3320  */
3321 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3322 {
3323 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3324 	u32 queue;
3325 	u32 chan;
3326 
3327 	for (queue = 0; queue < rx_queues_count; queue++) {
3328 		chan = priv->plat->rx_queues_cfg[queue].chan;
3329 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3330 	}
3331 }
3332 
3333 /**
3334  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3335  *  @priv: driver private structure
3336  *  Description: It is used for configuring the RX Queue Priority
3337  */
3338 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3339 {
3340 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3341 	u32 queue;
3342 	u32 prio;
3343 
3344 	for (queue = 0; queue < rx_queues_count; queue++) {
3345 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3346 			continue;
3347 
3348 		prio = priv->plat->rx_queues_cfg[queue].prio;
3349 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3350 	}
3351 }
3352 
3353 /**
3354  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3355  *  @priv: driver private structure
3356  *  Description: It is used for configuring the TX Queue Priority
3357  */
3358 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3359 {
3360 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3361 	u32 queue;
3362 	u32 prio;
3363 
3364 	for (queue = 0; queue < tx_queues_count; queue++) {
3365 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3366 			continue;
3367 
3368 		prio = priv->plat->tx_queues_cfg[queue].prio;
3369 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3370 	}
3371 }
3372 
3373 /**
3374  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3375  *  @priv: driver private structure
3376  *  Description: It is used for configuring the RX queue routing
3377  */
3378 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3379 {
3380 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3381 	u32 queue;
3382 	u8 packet;
3383 
3384 	for (queue = 0; queue < rx_queues_count; queue++) {
3385 		/* no specific packet type routing specified for the queue */
3386 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3387 			continue;
3388 
3389 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3390 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3391 	}
3392 }
3393 
3394 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3395 {
3396 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3397 		priv->rss.enable = false;
3398 		return;
3399 	}
3400 
3401 	if (priv->dev->features & NETIF_F_RXHASH)
3402 		priv->rss.enable = true;
3403 	else
3404 		priv->rss.enable = false;
3405 
3406 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3407 			     priv->plat->rx_queues_to_use);
3408 }
3409 
3410 /**
3411  *  stmmac_mtl_configuration - Configure MTL
3412  *  @priv: driver private structure
3413  *  Description: It is used for configurring MTL
3414  */
3415 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3416 {
3417 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3418 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3419 
3420 	if (tx_queues_count > 1)
3421 		stmmac_set_tx_queue_weight(priv);
3422 
3423 	/* Configure MTL RX algorithms */
3424 	if (rx_queues_count > 1)
3425 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3426 				priv->plat->rx_sched_algorithm);
3427 
3428 	/* Configure MTL TX algorithms */
3429 	if (tx_queues_count > 1)
3430 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3431 				priv->plat->tx_sched_algorithm);
3432 
3433 	/* Configure CBS in AVB TX queues */
3434 	if (tx_queues_count > 1)
3435 		stmmac_configure_cbs(priv);
3436 
3437 	/* Map RX MTL to DMA channels */
3438 	stmmac_rx_queue_dma_chan_map(priv);
3439 
3440 	/* Enable MAC RX Queues */
3441 	stmmac_mac_enable_rx_queues(priv);
3442 
3443 	/* Set RX priorities */
3444 	if (rx_queues_count > 1)
3445 		stmmac_mac_config_rx_queues_prio(priv);
3446 
3447 	/* Set TX priorities */
3448 	if (tx_queues_count > 1)
3449 		stmmac_mac_config_tx_queues_prio(priv);
3450 
3451 	/* Set RX routing */
3452 	if (rx_queues_count > 1)
3453 		stmmac_mac_config_rx_queues_routing(priv);
3454 
3455 	/* Receive Side Scaling */
3456 	if (rx_queues_count > 1)
3457 		stmmac_mac_config_rss(priv);
3458 }
3459 
3460 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3461 {
3462 	if (priv->dma_cap.asp) {
3463 		netdev_info(priv->dev, "Enabling Safety Features\n");
3464 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3465 					  priv->plat->safety_feat_cfg);
3466 	} else {
3467 		netdev_info(priv->dev, "No Safety Features support found\n");
3468 	}
3469 }
3470 
3471 /**
3472  * stmmac_hw_setup - setup mac in a usable state.
3473  *  @dev : pointer to the device structure.
3474  *  @ptp_register: register PTP if set
3475  *  Description:
3476  *  this is the main function to setup the HW in a usable state because the
3477  *  dma engine is reset, the core registers are configured (e.g. AXI,
3478  *  Checksum features, timers). The DMA is ready to start receiving and
3479  *  transmitting.
3480  *  Return value:
3481  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3482  *  file on failure.
3483  */
3484 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3485 {
3486 	struct stmmac_priv *priv = netdev_priv(dev);
3487 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3488 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3489 	bool sph_en;
3490 	u32 chan;
3491 	int ret;
3492 
3493 	/* Make sure RX clock is enabled */
3494 	if (priv->hw->phylink_pcs)
3495 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3496 
3497 	/* Note that clk_rx_i must be running for reset to complete. This
3498 	 * clock may also be required when setting the MAC address.
3499 	 *
3500 	 * Block the receive clock stop for LPI mode at the PHY in case
3501 	 * the link is established with EEE mode active.
3502 	 */
3503 	phylink_rx_clk_stop_block(priv->phylink);
3504 
3505 	/* DMA initialization and SW reset */
3506 	ret = stmmac_init_dma_engine(priv);
3507 	if (ret < 0) {
3508 		phylink_rx_clk_stop_unblock(priv->phylink);
3509 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3510 			   __func__);
3511 		return ret;
3512 	}
3513 
3514 	/* Copy the MAC addr into the HW  */
3515 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3516 	phylink_rx_clk_stop_unblock(priv->phylink);
3517 
3518 	/* PS and related bits will be programmed according to the speed */
3519 	if (priv->hw->pcs) {
3520 		int speed = priv->plat->mac_port_sel_speed;
3521 
3522 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3523 		    (speed == SPEED_1000)) {
3524 			priv->hw->ps = speed;
3525 		} else {
3526 			dev_warn(priv->device, "invalid port speed\n");
3527 			priv->hw->ps = 0;
3528 		}
3529 	}
3530 
3531 	/* Initialize the MAC Core */
3532 	stmmac_core_init(priv, priv->hw, dev);
3533 
3534 	/* Initialize MTL*/
3535 	stmmac_mtl_configuration(priv);
3536 
3537 	/* Initialize Safety Features */
3538 	stmmac_safety_feat_configuration(priv);
3539 
3540 	ret = stmmac_rx_ipc(priv, priv->hw);
3541 	if (!ret) {
3542 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3543 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3544 		priv->hw->rx_csum = 0;
3545 	}
3546 
3547 	/* Enable the MAC Rx/Tx */
3548 	stmmac_mac_set(priv, priv->ioaddr, true);
3549 
3550 	/* Set the HW DMA mode and the COE */
3551 	stmmac_dma_operation_mode(priv);
3552 
3553 	stmmac_mmc_setup(priv);
3554 
3555 	if (ptp_register) {
3556 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3557 		if (ret < 0)
3558 			netdev_warn(priv->dev,
3559 				    "failed to enable PTP reference clock: %pe\n",
3560 				    ERR_PTR(ret));
3561 	}
3562 
3563 	ret = stmmac_init_ptp(priv);
3564 	if (ret == -EOPNOTSUPP)
3565 		netdev_info(priv->dev, "PTP not supported by HW\n");
3566 	else if (ret)
3567 		netdev_warn(priv->dev, "PTP init failed\n");
3568 	else if (ptp_register)
3569 		stmmac_ptp_register(priv);
3570 
3571 	if (priv->use_riwt) {
3572 		u32 queue;
3573 
3574 		for (queue = 0; queue < rx_cnt; queue++) {
3575 			if (!priv->rx_riwt[queue])
3576 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3577 
3578 			stmmac_rx_watchdog(priv, priv->ioaddr,
3579 					   priv->rx_riwt[queue], queue);
3580 		}
3581 	}
3582 
3583 	if (priv->hw->pcs)
3584 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3585 
3586 	/* set TX and RX rings length */
3587 	stmmac_set_rings_length(priv);
3588 
3589 	/* Enable TSO */
3590 	if (priv->tso) {
3591 		for (chan = 0; chan < tx_cnt; chan++) {
3592 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3593 
3594 			/* TSO and TBS cannot co-exist */
3595 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3596 				continue;
3597 
3598 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3599 		}
3600 	}
3601 
3602 	/* Enable Split Header */
3603 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3604 	for (chan = 0; chan < rx_cnt; chan++)
3605 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3606 
3607 
3608 	/* VLAN Tag Insertion */
3609 	if (priv->dma_cap.vlins)
3610 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3611 
3612 	/* TBS */
3613 	for (chan = 0; chan < tx_cnt; chan++) {
3614 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3615 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3616 
3617 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3618 	}
3619 
3620 	/* Configure real RX and TX queues */
3621 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3622 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3623 
3624 	/* Start the ball rolling... */
3625 	stmmac_start_all_dma(priv);
3626 
3627 	phylink_rx_clk_stop_block(priv->phylink);
3628 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3629 	phylink_rx_clk_stop_unblock(priv->phylink);
3630 
3631 	return 0;
3632 }
3633 
3634 static void stmmac_hw_teardown(struct net_device *dev)
3635 {
3636 	struct stmmac_priv *priv = netdev_priv(dev);
3637 
3638 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3639 }
3640 
3641 static void stmmac_free_irq(struct net_device *dev,
3642 			    enum request_irq_err irq_err, int irq_idx)
3643 {
3644 	struct stmmac_priv *priv = netdev_priv(dev);
3645 	int j;
3646 
3647 	switch (irq_err) {
3648 	case REQ_IRQ_ERR_ALL:
3649 		irq_idx = priv->plat->tx_queues_to_use;
3650 		fallthrough;
3651 	case REQ_IRQ_ERR_TX:
3652 		for (j = irq_idx - 1; j >= 0; j--) {
3653 			if (priv->tx_irq[j] > 0) {
3654 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3655 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3656 			}
3657 		}
3658 		irq_idx = priv->plat->rx_queues_to_use;
3659 		fallthrough;
3660 	case REQ_IRQ_ERR_RX:
3661 		for (j = irq_idx - 1; j >= 0; j--) {
3662 			if (priv->rx_irq[j] > 0) {
3663 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3664 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3665 			}
3666 		}
3667 
3668 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3669 			free_irq(priv->sfty_ue_irq, dev);
3670 		fallthrough;
3671 	case REQ_IRQ_ERR_SFTY_UE:
3672 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3673 			free_irq(priv->sfty_ce_irq, dev);
3674 		fallthrough;
3675 	case REQ_IRQ_ERR_SFTY_CE:
3676 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3677 			free_irq(priv->lpi_irq, dev);
3678 		fallthrough;
3679 	case REQ_IRQ_ERR_LPI:
3680 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3681 			free_irq(priv->wol_irq, dev);
3682 		fallthrough;
3683 	case REQ_IRQ_ERR_SFTY:
3684 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3685 			free_irq(priv->sfty_irq, dev);
3686 		fallthrough;
3687 	case REQ_IRQ_ERR_WOL:
3688 		free_irq(dev->irq, dev);
3689 		fallthrough;
3690 	case REQ_IRQ_ERR_MAC:
3691 	case REQ_IRQ_ERR_NO:
3692 		/* If MAC IRQ request error, no more IRQ to free */
3693 		break;
3694 	}
3695 }
3696 
3697 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3698 {
3699 	struct stmmac_priv *priv = netdev_priv(dev);
3700 	enum request_irq_err irq_err;
3701 	int irq_idx = 0;
3702 	char *int_name;
3703 	int ret;
3704 	int i;
3705 
3706 	/* For common interrupt */
3707 	int_name = priv->int_name_mac;
3708 	sprintf(int_name, "%s:%s", dev->name, "mac");
3709 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3710 			  0, int_name, dev);
3711 	if (unlikely(ret < 0)) {
3712 		netdev_err(priv->dev,
3713 			   "%s: alloc mac MSI %d (error: %d)\n",
3714 			   __func__, dev->irq, ret);
3715 		irq_err = REQ_IRQ_ERR_MAC;
3716 		goto irq_error;
3717 	}
3718 
3719 	/* Request the Wake IRQ in case of another line
3720 	 * is used for WoL
3721 	 */
3722 	priv->wol_irq_disabled = true;
3723 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3724 		int_name = priv->int_name_wol;
3725 		sprintf(int_name, "%s:%s", dev->name, "wol");
3726 		ret = request_irq(priv->wol_irq,
3727 				  stmmac_mac_interrupt,
3728 				  0, int_name, dev);
3729 		if (unlikely(ret < 0)) {
3730 			netdev_err(priv->dev,
3731 				   "%s: alloc wol MSI %d (error: %d)\n",
3732 				   __func__, priv->wol_irq, ret);
3733 			irq_err = REQ_IRQ_ERR_WOL;
3734 			goto irq_error;
3735 		}
3736 	}
3737 
3738 	/* Request the LPI IRQ in case of another line
3739 	 * is used for LPI
3740 	 */
3741 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3742 		int_name = priv->int_name_lpi;
3743 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3744 		ret = request_irq(priv->lpi_irq,
3745 				  stmmac_mac_interrupt,
3746 				  0, int_name, dev);
3747 		if (unlikely(ret < 0)) {
3748 			netdev_err(priv->dev,
3749 				   "%s: alloc lpi MSI %d (error: %d)\n",
3750 				   __func__, priv->lpi_irq, ret);
3751 			irq_err = REQ_IRQ_ERR_LPI;
3752 			goto irq_error;
3753 		}
3754 	}
3755 
3756 	/* Request the common Safety Feature Correctible/Uncorrectible
3757 	 * Error line in case of another line is used
3758 	 */
3759 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3760 		int_name = priv->int_name_sfty;
3761 		sprintf(int_name, "%s:%s", dev->name, "safety");
3762 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3763 				  0, int_name, dev);
3764 		if (unlikely(ret < 0)) {
3765 			netdev_err(priv->dev,
3766 				   "%s: alloc sfty MSI %d (error: %d)\n",
3767 				   __func__, priv->sfty_irq, ret);
3768 			irq_err = REQ_IRQ_ERR_SFTY;
3769 			goto irq_error;
3770 		}
3771 	}
3772 
3773 	/* Request the Safety Feature Correctible Error line in
3774 	 * case of another line is used
3775 	 */
3776 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3777 		int_name = priv->int_name_sfty_ce;
3778 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3779 		ret = request_irq(priv->sfty_ce_irq,
3780 				  stmmac_safety_interrupt,
3781 				  0, int_name, dev);
3782 		if (unlikely(ret < 0)) {
3783 			netdev_err(priv->dev,
3784 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3785 				   __func__, priv->sfty_ce_irq, ret);
3786 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3787 			goto irq_error;
3788 		}
3789 	}
3790 
3791 	/* Request the Safety Feature Uncorrectible Error line in
3792 	 * case of another line is used
3793 	 */
3794 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3795 		int_name = priv->int_name_sfty_ue;
3796 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3797 		ret = request_irq(priv->sfty_ue_irq,
3798 				  stmmac_safety_interrupt,
3799 				  0, int_name, dev);
3800 		if (unlikely(ret < 0)) {
3801 			netdev_err(priv->dev,
3802 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3803 				   __func__, priv->sfty_ue_irq, ret);
3804 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3805 			goto irq_error;
3806 		}
3807 	}
3808 
3809 	/* Request Rx MSI irq */
3810 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3811 		if (i >= MTL_MAX_RX_QUEUES)
3812 			break;
3813 		if (priv->rx_irq[i] == 0)
3814 			continue;
3815 
3816 		int_name = priv->int_name_rx_irq[i];
3817 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3818 		ret = request_irq(priv->rx_irq[i],
3819 				  stmmac_msi_intr_rx,
3820 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3821 		if (unlikely(ret < 0)) {
3822 			netdev_err(priv->dev,
3823 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3824 				   __func__, i, priv->rx_irq[i], ret);
3825 			irq_err = REQ_IRQ_ERR_RX;
3826 			irq_idx = i;
3827 			goto irq_error;
3828 		}
3829 		irq_set_affinity_hint(priv->rx_irq[i],
3830 				      cpumask_of(i % num_online_cpus()));
3831 	}
3832 
3833 	/* Request Tx MSI irq */
3834 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3835 		if (i >= MTL_MAX_TX_QUEUES)
3836 			break;
3837 		if (priv->tx_irq[i] == 0)
3838 			continue;
3839 
3840 		int_name = priv->int_name_tx_irq[i];
3841 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3842 		ret = request_irq(priv->tx_irq[i],
3843 				  stmmac_msi_intr_tx,
3844 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3845 		if (unlikely(ret < 0)) {
3846 			netdev_err(priv->dev,
3847 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3848 				   __func__, i, priv->tx_irq[i], ret);
3849 			irq_err = REQ_IRQ_ERR_TX;
3850 			irq_idx = i;
3851 			goto irq_error;
3852 		}
3853 		irq_set_affinity_hint(priv->tx_irq[i],
3854 				      cpumask_of(i % num_online_cpus()));
3855 	}
3856 
3857 	return 0;
3858 
3859 irq_error:
3860 	stmmac_free_irq(dev, irq_err, irq_idx);
3861 	return ret;
3862 }
3863 
3864 static int stmmac_request_irq_single(struct net_device *dev)
3865 {
3866 	struct stmmac_priv *priv = netdev_priv(dev);
3867 	enum request_irq_err irq_err;
3868 	int ret;
3869 
3870 	ret = request_irq(dev->irq, stmmac_interrupt,
3871 			  IRQF_SHARED, dev->name, dev);
3872 	if (unlikely(ret < 0)) {
3873 		netdev_err(priv->dev,
3874 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3875 			   __func__, dev->irq, ret);
3876 		irq_err = REQ_IRQ_ERR_MAC;
3877 		goto irq_error;
3878 	}
3879 
3880 	/* Request the Wake IRQ in case of another line
3881 	 * is used for WoL
3882 	 */
3883 	priv->wol_irq_disabled = true;
3884 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3885 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3886 				  IRQF_SHARED, dev->name, dev);
3887 		if (unlikely(ret < 0)) {
3888 			netdev_err(priv->dev,
3889 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3890 				   __func__, priv->wol_irq, ret);
3891 			irq_err = REQ_IRQ_ERR_WOL;
3892 			goto irq_error;
3893 		}
3894 	}
3895 
3896 	/* Request the IRQ lines */
3897 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3898 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3899 				  IRQF_SHARED, dev->name, dev);
3900 		if (unlikely(ret < 0)) {
3901 			netdev_err(priv->dev,
3902 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3903 				   __func__, priv->lpi_irq, ret);
3904 			irq_err = REQ_IRQ_ERR_LPI;
3905 			goto irq_error;
3906 		}
3907 	}
3908 
3909 	/* Request the common Safety Feature Correctible/Uncorrectible
3910 	 * Error line in case of another line is used
3911 	 */
3912 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3913 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3914 				  IRQF_SHARED, dev->name, dev);
3915 		if (unlikely(ret < 0)) {
3916 			netdev_err(priv->dev,
3917 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3918 				   __func__, priv->sfty_irq, ret);
3919 			irq_err = REQ_IRQ_ERR_SFTY;
3920 			goto irq_error;
3921 		}
3922 	}
3923 
3924 	return 0;
3925 
3926 irq_error:
3927 	stmmac_free_irq(dev, irq_err, 0);
3928 	return ret;
3929 }
3930 
3931 static int stmmac_request_irq(struct net_device *dev)
3932 {
3933 	struct stmmac_priv *priv = netdev_priv(dev);
3934 	int ret;
3935 
3936 	/* Request the IRQ lines */
3937 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3938 		ret = stmmac_request_irq_multi_msi(dev);
3939 	else
3940 		ret = stmmac_request_irq_single(dev);
3941 
3942 	return ret;
3943 }
3944 
3945 /**
3946  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3947  *  @priv: driver private structure
3948  *  @mtu: MTU to setup the dma queue and buf with
3949  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3950  *  Allocate the Tx/Rx DMA queue and init them.
3951  *  Return value:
3952  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3953  */
3954 static struct stmmac_dma_conf *
3955 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3956 {
3957 	struct stmmac_dma_conf *dma_conf;
3958 	int chan, bfsize, ret;
3959 
3960 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3961 	if (!dma_conf) {
3962 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3963 			   __func__);
3964 		return ERR_PTR(-ENOMEM);
3965 	}
3966 
3967 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3968 	if (bfsize < 0)
3969 		bfsize = 0;
3970 
3971 	if (bfsize < BUF_SIZE_16KiB)
3972 		bfsize = stmmac_set_bfsize(mtu, 0);
3973 
3974 	dma_conf->dma_buf_sz = bfsize;
3975 	/* Chose the tx/rx size from the already defined one in the
3976 	 * priv struct. (if defined)
3977 	 */
3978 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3979 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3980 
3981 	if (!dma_conf->dma_tx_size)
3982 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3983 	if (!dma_conf->dma_rx_size)
3984 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3985 
3986 	/* Earlier check for TBS */
3987 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3988 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3989 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3990 
3991 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3992 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3993 	}
3994 
3995 	ret = alloc_dma_desc_resources(priv, dma_conf);
3996 	if (ret < 0) {
3997 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3998 			   __func__);
3999 		goto alloc_error;
4000 	}
4001 
4002 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
4003 	if (ret < 0) {
4004 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
4005 			   __func__);
4006 		goto init_error;
4007 	}
4008 
4009 	return dma_conf;
4010 
4011 init_error:
4012 	free_dma_desc_resources(priv, dma_conf);
4013 alloc_error:
4014 	kfree(dma_conf);
4015 	return ERR_PTR(ret);
4016 }
4017 
4018 /**
4019  *  __stmmac_open - open entry point of the driver
4020  *  @dev : pointer to the device structure.
4021  *  @dma_conf :  structure to take the dma data
4022  *  Description:
4023  *  This function is the open entry point of the driver.
4024  *  Return value:
4025  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4026  *  file on failure.
4027  */
4028 static int __stmmac_open(struct net_device *dev,
4029 			 struct stmmac_dma_conf *dma_conf)
4030 {
4031 	struct stmmac_priv *priv = netdev_priv(dev);
4032 	int mode = priv->plat->phy_interface;
4033 	u32 chan;
4034 	int ret;
4035 
4036 	/* Initialise the tx lpi timer, converting from msec to usec */
4037 	if (!priv->tx_lpi_timer)
4038 		priv->tx_lpi_timer = eee_timer * 1000;
4039 
4040 	ret = pm_runtime_resume_and_get(priv->device);
4041 	if (ret < 0)
4042 		return ret;
4043 
4044 	if ((!priv->hw->xpcs ||
4045 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
4046 		ret = stmmac_init_phy(dev);
4047 		if (ret) {
4048 			netdev_err(priv->dev,
4049 				   "%s: Cannot attach to PHY (error: %d)\n",
4050 				   __func__, ret);
4051 			goto init_phy_error;
4052 		}
4053 	}
4054 
4055 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
4056 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
4057 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
4058 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4059 
4060 	stmmac_reset_queues_param(priv);
4061 
4062 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
4063 	    priv->plat->serdes_powerup) {
4064 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4065 		if (ret < 0) {
4066 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4067 				   __func__);
4068 			goto init_error;
4069 		}
4070 	}
4071 
4072 	ret = stmmac_hw_setup(dev, true);
4073 	if (ret < 0) {
4074 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4075 		goto init_error;
4076 	}
4077 
4078 	stmmac_init_coalesce(priv);
4079 
4080 	phylink_start(priv->phylink);
4081 	/* We may have called phylink_speed_down before */
4082 	phylink_speed_up(priv->phylink);
4083 
4084 	ret = stmmac_request_irq(dev);
4085 	if (ret)
4086 		goto irq_error;
4087 
4088 	stmmac_enable_all_queues(priv);
4089 	netif_tx_start_all_queues(priv->dev);
4090 	stmmac_enable_all_dma_irq(priv);
4091 
4092 	return 0;
4093 
4094 irq_error:
4095 	phylink_stop(priv->phylink);
4096 
4097 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4098 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4099 
4100 	stmmac_hw_teardown(dev);
4101 init_error:
4102 	phylink_disconnect_phy(priv->phylink);
4103 init_phy_error:
4104 	pm_runtime_put(priv->device);
4105 	return ret;
4106 }
4107 
4108 static int stmmac_open(struct net_device *dev)
4109 {
4110 	struct stmmac_priv *priv = netdev_priv(dev);
4111 	struct stmmac_dma_conf *dma_conf;
4112 	int ret;
4113 
4114 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4115 	if (IS_ERR(dma_conf))
4116 		return PTR_ERR(dma_conf);
4117 
4118 	ret = __stmmac_open(dev, dma_conf);
4119 	if (ret)
4120 		free_dma_desc_resources(priv, dma_conf);
4121 
4122 	kfree(dma_conf);
4123 	return ret;
4124 }
4125 
4126 /**
4127  *  stmmac_release - close entry point of the driver
4128  *  @dev : device pointer.
4129  *  Description:
4130  *  This is the stop entry point of the driver.
4131  */
4132 static int stmmac_release(struct net_device *dev)
4133 {
4134 	struct stmmac_priv *priv = netdev_priv(dev);
4135 	u32 chan;
4136 
4137 	if (device_may_wakeup(priv->device))
4138 		phylink_speed_down(priv->phylink, false);
4139 	/* Stop and disconnect the PHY */
4140 	phylink_stop(priv->phylink);
4141 	phylink_disconnect_phy(priv->phylink);
4142 
4143 	stmmac_disable_all_queues(priv);
4144 
4145 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4146 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4147 
4148 	netif_tx_disable(dev);
4149 
4150 	/* Free the IRQ lines */
4151 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4152 
4153 	/* Stop TX/RX DMA and clear the descriptors */
4154 	stmmac_stop_all_dma(priv);
4155 
4156 	/* Release and free the Rx/Tx resources */
4157 	free_dma_desc_resources(priv, &priv->dma_conf);
4158 
4159 	/* Powerdown Serdes if there is */
4160 	if (priv->plat->serdes_powerdown)
4161 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4162 
4163 	stmmac_release_ptp(priv);
4164 
4165 	if (stmmac_fpe_supported(priv))
4166 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
4167 
4168 	pm_runtime_put(priv->device);
4169 
4170 	return 0;
4171 }
4172 
4173 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4174 			       struct stmmac_tx_queue *tx_q)
4175 {
4176 	u16 tag = 0x0, inner_tag = 0x0;
4177 	u32 inner_type = 0x0;
4178 	struct dma_desc *p;
4179 
4180 	if (!priv->dma_cap.vlins)
4181 		return false;
4182 	if (!skb_vlan_tag_present(skb))
4183 		return false;
4184 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4185 		inner_tag = skb_vlan_tag_get(skb);
4186 		inner_type = STMMAC_VLAN_INSERT;
4187 	}
4188 
4189 	tag = skb_vlan_tag_get(skb);
4190 
4191 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4192 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4193 	else
4194 		p = &tx_q->dma_tx[tx_q->cur_tx];
4195 
4196 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4197 		return false;
4198 
4199 	stmmac_set_tx_owner(priv, p);
4200 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4201 	return true;
4202 }
4203 
4204 /**
4205  *  stmmac_tso_allocator - close entry point of the driver
4206  *  @priv: driver private structure
4207  *  @des: buffer start address
4208  *  @total_len: total length to fill in descriptors
4209  *  @last_segment: condition for the last descriptor
4210  *  @queue: TX queue index
4211  *  Description:
4212  *  This function fills descriptor and request new descriptors according to
4213  *  buffer length to fill
4214  */
4215 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4216 				 int total_len, bool last_segment, u32 queue)
4217 {
4218 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4219 	struct dma_desc *desc;
4220 	u32 buff_size;
4221 	int tmp_len;
4222 
4223 	tmp_len = total_len;
4224 
4225 	while (tmp_len > 0) {
4226 		dma_addr_t curr_addr;
4227 
4228 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4229 						priv->dma_conf.dma_tx_size);
4230 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4231 
4232 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4233 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4234 		else
4235 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4236 
4237 		curr_addr = des + (total_len - tmp_len);
4238 		stmmac_set_desc_addr(priv, desc, curr_addr);
4239 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4240 			    TSO_MAX_BUFF_SIZE : tmp_len;
4241 
4242 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4243 				0, 1,
4244 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4245 				0, 0);
4246 
4247 		tmp_len -= TSO_MAX_BUFF_SIZE;
4248 	}
4249 }
4250 
4251 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4252 {
4253 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4254 	int desc_size;
4255 
4256 	if (likely(priv->extend_desc))
4257 		desc_size = sizeof(struct dma_extended_desc);
4258 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4259 		desc_size = sizeof(struct dma_edesc);
4260 	else
4261 		desc_size = sizeof(struct dma_desc);
4262 
4263 	/* The own bit must be the latest setting done when prepare the
4264 	 * descriptor and then barrier is needed to make sure that
4265 	 * all is coherent before granting the DMA engine.
4266 	 */
4267 	wmb();
4268 
4269 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4270 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4271 }
4272 
4273 /**
4274  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4275  *  @skb : the socket buffer
4276  *  @dev : device pointer
4277  *  Description: this is the transmit function that is called on TSO frames
4278  *  (support available on GMAC4 and newer chips).
4279  *  Diagram below show the ring programming in case of TSO frames:
4280  *
4281  *  First Descriptor
4282  *   --------
4283  *   | DES0 |---> buffer1 = L2/L3/L4 header
4284  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4285  *   |      |     width is 32-bit, but we never use it.
4286  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4287  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4288  *   |      |     or 48-bit, and we always use it.
4289  *   | DES2 |---> buffer1 len
4290  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4291  *   --------
4292  *   --------
4293  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4294  *   | DES1 |---> same as the First Descriptor
4295  *   | DES2 |---> buffer1 len
4296  *   | DES3 |
4297  *   --------
4298  *	|
4299  *     ...
4300  *	|
4301  *   --------
4302  *   | DES0 |---> buffer1 = Split TCP Payload
4303  *   | DES1 |---> same as the First Descriptor
4304  *   | DES2 |---> buffer1 len
4305  *   | DES3 |
4306  *   --------
4307  *
4308  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4309  */
4310 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4311 {
4312 	struct dma_desc *desc, *first, *mss_desc = NULL;
4313 	struct stmmac_priv *priv = netdev_priv(dev);
4314 	unsigned int first_entry, tx_packets;
4315 	struct stmmac_txq_stats *txq_stats;
4316 	struct stmmac_tx_queue *tx_q;
4317 	u32 pay_len, mss, queue;
4318 	int i, first_tx, nfrags;
4319 	u8 proto_hdr_len, hdr;
4320 	dma_addr_t des;
4321 	bool set_ic;
4322 
4323 	/* Always insert VLAN tag to SKB payload for TSO frames.
4324 	 *
4325 	 * Never insert VLAN tag by HW, since segments splited by
4326 	 * TSO engine will be un-tagged by mistake.
4327 	 */
4328 	if (skb_vlan_tag_present(skb)) {
4329 		skb = __vlan_hwaccel_push_inside(skb);
4330 		if (unlikely(!skb)) {
4331 			priv->xstats.tx_dropped++;
4332 			return NETDEV_TX_OK;
4333 		}
4334 	}
4335 
4336 	nfrags = skb_shinfo(skb)->nr_frags;
4337 	queue = skb_get_queue_mapping(skb);
4338 
4339 	tx_q = &priv->dma_conf.tx_queue[queue];
4340 	txq_stats = &priv->xstats.txq_stats[queue];
4341 	first_tx = tx_q->cur_tx;
4342 
4343 	/* Compute header lengths */
4344 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4345 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4346 		hdr = sizeof(struct udphdr);
4347 	} else {
4348 		proto_hdr_len = skb_tcp_all_headers(skb);
4349 		hdr = tcp_hdrlen(skb);
4350 	}
4351 
4352 	/* Desc availability based on threshold should be enough safe */
4353 	if (unlikely(stmmac_tx_avail(priv, queue) <
4354 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4355 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4356 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4357 								queue));
4358 			/* This is a hard error, log it. */
4359 			netdev_err(priv->dev,
4360 				   "%s: Tx Ring full when queue awake\n",
4361 				   __func__);
4362 		}
4363 		return NETDEV_TX_BUSY;
4364 	}
4365 
4366 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4367 
4368 	mss = skb_shinfo(skb)->gso_size;
4369 
4370 	/* set new MSS value if needed */
4371 	if (mss != tx_q->mss) {
4372 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4373 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4374 		else
4375 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4376 
4377 		stmmac_set_mss(priv, mss_desc, mss);
4378 		tx_q->mss = mss;
4379 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4380 						priv->dma_conf.dma_tx_size);
4381 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4382 	}
4383 
4384 	if (netif_msg_tx_queued(priv)) {
4385 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4386 			__func__, hdr, proto_hdr_len, pay_len, mss);
4387 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4388 			skb->data_len);
4389 	}
4390 
4391 	first_entry = tx_q->cur_tx;
4392 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4393 
4394 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4395 		desc = &tx_q->dma_entx[first_entry].basic;
4396 	else
4397 		desc = &tx_q->dma_tx[first_entry];
4398 	first = desc;
4399 
4400 	/* first descriptor: fill Headers on Buf1 */
4401 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4402 			     DMA_TO_DEVICE);
4403 	if (dma_mapping_error(priv->device, des))
4404 		goto dma_map_err;
4405 
4406 	stmmac_set_desc_addr(priv, first, des);
4407 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4408 			     (nfrags == 0), queue);
4409 
4410 	/* In case two or more DMA transmit descriptors are allocated for this
4411 	 * non-paged SKB data, the DMA buffer address should be saved to
4412 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4413 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4414 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4415 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4416 	 * sooner or later.
4417 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4418 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4419 	 * this DMA buffer right after the DMA engine completely finishes the
4420 	 * full buffer transmission.
4421 	 */
4422 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4423 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4424 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4425 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4426 
4427 	/* Prepare fragments */
4428 	for (i = 0; i < nfrags; i++) {
4429 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4430 
4431 		des = skb_frag_dma_map(priv->device, frag, 0,
4432 				       skb_frag_size(frag),
4433 				       DMA_TO_DEVICE);
4434 		if (dma_mapping_error(priv->device, des))
4435 			goto dma_map_err;
4436 
4437 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4438 				     (i == nfrags - 1), queue);
4439 
4440 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4441 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4442 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4443 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4444 	}
4445 
4446 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4447 
4448 	/* Only the last descriptor gets to point to the skb. */
4449 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4450 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4451 
4452 	/* Manage tx mitigation */
4453 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4454 	tx_q->tx_count_frames += tx_packets;
4455 
4456 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4457 		set_ic = true;
4458 	else if (!priv->tx_coal_frames[queue])
4459 		set_ic = false;
4460 	else if (tx_packets > priv->tx_coal_frames[queue])
4461 		set_ic = true;
4462 	else if ((tx_q->tx_count_frames %
4463 		  priv->tx_coal_frames[queue]) < tx_packets)
4464 		set_ic = true;
4465 	else
4466 		set_ic = false;
4467 
4468 	if (set_ic) {
4469 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4470 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4471 		else
4472 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4473 
4474 		tx_q->tx_count_frames = 0;
4475 		stmmac_set_tx_ic(priv, desc);
4476 	}
4477 
4478 	/* We've used all descriptors we need for this skb, however,
4479 	 * advance cur_tx so that it references a fresh descriptor.
4480 	 * ndo_start_xmit will fill this descriptor the next time it's
4481 	 * called and stmmac_tx_clean may clean up to this descriptor.
4482 	 */
4483 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4484 
4485 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4486 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4487 			  __func__);
4488 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4489 	}
4490 
4491 	u64_stats_update_begin(&txq_stats->q_syncp);
4492 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4493 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4494 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4495 	if (set_ic)
4496 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4497 	u64_stats_update_end(&txq_stats->q_syncp);
4498 
4499 	if (priv->sarc_type)
4500 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4501 
4502 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4503 		     priv->hwts_tx_en)) {
4504 		/* declare that device is doing timestamping */
4505 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4506 		stmmac_enable_tx_timestamp(priv, first);
4507 	}
4508 
4509 	/* Complete the first descriptor before granting the DMA */
4510 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4511 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4512 				   hdr / 4, (skb->len - proto_hdr_len));
4513 
4514 	/* If context desc is used to change MSS */
4515 	if (mss_desc) {
4516 		/* Make sure that first descriptor has been completely
4517 		 * written, including its own bit. This is because MSS is
4518 		 * actually before first descriptor, so we need to make
4519 		 * sure that MSS's own bit is the last thing written.
4520 		 */
4521 		dma_wmb();
4522 		stmmac_set_tx_owner(priv, mss_desc);
4523 	}
4524 
4525 	if (netif_msg_pktdata(priv)) {
4526 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4527 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4528 			tx_q->cur_tx, first, nfrags);
4529 		pr_info(">>> frame to be transmitted: ");
4530 		print_pkt(skb->data, skb_headlen(skb));
4531 	}
4532 
4533 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4534 	skb_tx_timestamp(skb);
4535 
4536 	stmmac_flush_tx_descriptors(priv, queue);
4537 	stmmac_tx_timer_arm(priv, queue);
4538 
4539 	return NETDEV_TX_OK;
4540 
4541 dma_map_err:
4542 	dev_err(priv->device, "Tx dma map failed\n");
4543 	dev_kfree_skb(skb);
4544 	priv->xstats.tx_dropped++;
4545 	return NETDEV_TX_OK;
4546 }
4547 
4548 /**
4549  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4550  * @skb: socket buffer to check
4551  *
4552  * Check if a packet has an ethertype that will trigger the IP header checks
4553  * and IP/TCP checksum engine of the stmmac core.
4554  *
4555  * Return: true if the ethertype can trigger the checksum engine, false
4556  * otherwise
4557  */
4558 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4559 {
4560 	int depth = 0;
4561 	__be16 proto;
4562 
4563 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4564 				    &depth);
4565 
4566 	return (depth <= ETH_HLEN) &&
4567 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4568 }
4569 
4570 /**
4571  *  stmmac_xmit - Tx entry point of the driver
4572  *  @skb : the socket buffer
4573  *  @dev : device pointer
4574  *  Description : this is the tx entry point of the driver.
4575  *  It programs the chain or the ring and supports oversized frames
4576  *  and SG feature.
4577  */
4578 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4579 {
4580 	unsigned int first_entry, tx_packets, enh_desc;
4581 	struct stmmac_priv *priv = netdev_priv(dev);
4582 	unsigned int nopaged_len = skb_headlen(skb);
4583 	int i, csum_insertion = 0, is_jumbo = 0;
4584 	u32 queue = skb_get_queue_mapping(skb);
4585 	int nfrags = skb_shinfo(skb)->nr_frags;
4586 	int gso = skb_shinfo(skb)->gso_type;
4587 	struct stmmac_txq_stats *txq_stats;
4588 	struct dma_edesc *tbs_desc = NULL;
4589 	struct dma_desc *desc, *first;
4590 	struct stmmac_tx_queue *tx_q;
4591 	bool has_vlan, set_ic;
4592 	int entry, first_tx;
4593 	dma_addr_t des;
4594 
4595 	tx_q = &priv->dma_conf.tx_queue[queue];
4596 	txq_stats = &priv->xstats.txq_stats[queue];
4597 	first_tx = tx_q->cur_tx;
4598 
4599 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4600 		stmmac_stop_sw_lpi(priv);
4601 
4602 	/* Manage oversized TCP frames for GMAC4 device */
4603 	if (skb_is_gso(skb) && priv->tso) {
4604 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4605 			return stmmac_tso_xmit(skb, dev);
4606 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4607 			return stmmac_tso_xmit(skb, dev);
4608 	}
4609 
4610 	if (priv->est && priv->est->enable &&
4611 	    priv->est->max_sdu[queue] &&
4612 	    skb->len > priv->est->max_sdu[queue]){
4613 		priv->xstats.max_sdu_txq_drop[queue]++;
4614 		goto max_sdu_err;
4615 	}
4616 
4617 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4618 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4619 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4620 								queue));
4621 			/* This is a hard error, log it. */
4622 			netdev_err(priv->dev,
4623 				   "%s: Tx Ring full when queue awake\n",
4624 				   __func__);
4625 		}
4626 		return NETDEV_TX_BUSY;
4627 	}
4628 
4629 	/* Check if VLAN can be inserted by HW */
4630 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4631 
4632 	entry = tx_q->cur_tx;
4633 	first_entry = entry;
4634 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4635 
4636 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4637 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4638 	 * queues. In that case, checksum offloading for those queues that don't
4639 	 * support tx coe needs to fallback to software checksum calculation.
4640 	 *
4641 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4642 	 * also have to be checksummed in software.
4643 	 */
4644 	if (csum_insertion &&
4645 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4646 	     !stmmac_has_ip_ethertype(skb))) {
4647 		if (unlikely(skb_checksum_help(skb)))
4648 			goto dma_map_err;
4649 		csum_insertion = !csum_insertion;
4650 	}
4651 
4652 	if (likely(priv->extend_desc))
4653 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4654 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4655 		desc = &tx_q->dma_entx[entry].basic;
4656 	else
4657 		desc = tx_q->dma_tx + entry;
4658 
4659 	first = desc;
4660 
4661 	if (has_vlan)
4662 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4663 
4664 	enh_desc = priv->plat->enh_desc;
4665 	/* To program the descriptors according to the size of the frame */
4666 	if (enh_desc)
4667 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4668 
4669 	if (unlikely(is_jumbo)) {
4670 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4671 		if (unlikely(entry < 0) && (entry != -EINVAL))
4672 			goto dma_map_err;
4673 	}
4674 
4675 	for (i = 0; i < nfrags; i++) {
4676 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4677 		int len = skb_frag_size(frag);
4678 		bool last_segment = (i == (nfrags - 1));
4679 
4680 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4681 		WARN_ON(tx_q->tx_skbuff[entry]);
4682 
4683 		if (likely(priv->extend_desc))
4684 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4685 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4686 			desc = &tx_q->dma_entx[entry].basic;
4687 		else
4688 			desc = tx_q->dma_tx + entry;
4689 
4690 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4691 				       DMA_TO_DEVICE);
4692 		if (dma_mapping_error(priv->device, des))
4693 			goto dma_map_err; /* should reuse desc w/o issues */
4694 
4695 		tx_q->tx_skbuff_dma[entry].buf = des;
4696 
4697 		stmmac_set_desc_addr(priv, desc, des);
4698 
4699 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4700 		tx_q->tx_skbuff_dma[entry].len = len;
4701 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4702 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4703 
4704 		/* Prepare the descriptor and set the own bit too */
4705 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4706 				priv->mode, 1, last_segment, skb->len);
4707 	}
4708 
4709 	/* Only the last descriptor gets to point to the skb. */
4710 	tx_q->tx_skbuff[entry] = skb;
4711 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4712 
4713 	/* According to the coalesce parameter the IC bit for the latest
4714 	 * segment is reset and the timer re-started to clean the tx status.
4715 	 * This approach takes care about the fragments: desc is the first
4716 	 * element in case of no SG.
4717 	 */
4718 	tx_packets = (entry + 1) - first_tx;
4719 	tx_q->tx_count_frames += tx_packets;
4720 
4721 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4722 		set_ic = true;
4723 	else if (!priv->tx_coal_frames[queue])
4724 		set_ic = false;
4725 	else if (tx_packets > priv->tx_coal_frames[queue])
4726 		set_ic = true;
4727 	else if ((tx_q->tx_count_frames %
4728 		  priv->tx_coal_frames[queue]) < tx_packets)
4729 		set_ic = true;
4730 	else
4731 		set_ic = false;
4732 
4733 	if (set_ic) {
4734 		if (likely(priv->extend_desc))
4735 			desc = &tx_q->dma_etx[entry].basic;
4736 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4737 			desc = &tx_q->dma_entx[entry].basic;
4738 		else
4739 			desc = &tx_q->dma_tx[entry];
4740 
4741 		tx_q->tx_count_frames = 0;
4742 		stmmac_set_tx_ic(priv, desc);
4743 	}
4744 
4745 	/* We've used all descriptors we need for this skb, however,
4746 	 * advance cur_tx so that it references a fresh descriptor.
4747 	 * ndo_start_xmit will fill this descriptor the next time it's
4748 	 * called and stmmac_tx_clean may clean up to this descriptor.
4749 	 */
4750 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4751 	tx_q->cur_tx = entry;
4752 
4753 	if (netif_msg_pktdata(priv)) {
4754 		netdev_dbg(priv->dev,
4755 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4756 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4757 			   entry, first, nfrags);
4758 
4759 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4760 		print_pkt(skb->data, skb->len);
4761 	}
4762 
4763 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4764 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4765 			  __func__);
4766 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4767 	}
4768 
4769 	u64_stats_update_begin(&txq_stats->q_syncp);
4770 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4771 	if (set_ic)
4772 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4773 	u64_stats_update_end(&txq_stats->q_syncp);
4774 
4775 	if (priv->sarc_type)
4776 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4777 
4778 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4779 	 * problems because all the descriptors are actually ready to be
4780 	 * passed to the DMA engine.
4781 	 */
4782 	if (likely(!is_jumbo)) {
4783 		bool last_segment = (nfrags == 0);
4784 
4785 		des = dma_map_single(priv->device, skb->data,
4786 				     nopaged_len, DMA_TO_DEVICE);
4787 		if (dma_mapping_error(priv->device, des))
4788 			goto dma_map_err;
4789 
4790 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4791 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4792 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4793 
4794 		stmmac_set_desc_addr(priv, first, des);
4795 
4796 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4797 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4798 
4799 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4800 			     priv->hwts_tx_en)) {
4801 			/* declare that device is doing timestamping */
4802 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4803 			stmmac_enable_tx_timestamp(priv, first);
4804 		}
4805 
4806 		/* Prepare the first descriptor setting the OWN bit too */
4807 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4808 				csum_insertion, priv->mode, 0, last_segment,
4809 				skb->len);
4810 	}
4811 
4812 	if (tx_q->tbs & STMMAC_TBS_EN) {
4813 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4814 
4815 		tbs_desc = &tx_q->dma_entx[first_entry];
4816 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4817 	}
4818 
4819 	stmmac_set_tx_owner(priv, first);
4820 
4821 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4822 
4823 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4824 	skb_tx_timestamp(skb);
4825 	stmmac_flush_tx_descriptors(priv, queue);
4826 	stmmac_tx_timer_arm(priv, queue);
4827 
4828 	return NETDEV_TX_OK;
4829 
4830 dma_map_err:
4831 	netdev_err(priv->dev, "Tx DMA map failed\n");
4832 max_sdu_err:
4833 	dev_kfree_skb(skb);
4834 	priv->xstats.tx_dropped++;
4835 	return NETDEV_TX_OK;
4836 }
4837 
4838 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4839 {
4840 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4841 	__be16 vlan_proto = veth->h_vlan_proto;
4842 	u16 vlanid;
4843 
4844 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4845 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4846 	    (vlan_proto == htons(ETH_P_8021AD) &&
4847 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4848 		/* pop the vlan tag */
4849 		vlanid = ntohs(veth->h_vlan_TCI);
4850 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4851 		skb_pull(skb, VLAN_HLEN);
4852 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4853 	}
4854 }
4855 
4856 /**
4857  * stmmac_rx_refill - refill used skb preallocated buffers
4858  * @priv: driver private structure
4859  * @queue: RX queue index
4860  * Description : this is to reallocate the skb for the reception process
4861  * that is based on zero-copy.
4862  */
4863 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4864 {
4865 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4866 	int dirty = stmmac_rx_dirty(priv, queue);
4867 	unsigned int entry = rx_q->dirty_rx;
4868 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4869 
4870 	if (priv->dma_cap.host_dma_width <= 32)
4871 		gfp |= GFP_DMA32;
4872 
4873 	while (dirty-- > 0) {
4874 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4875 		struct dma_desc *p;
4876 		bool use_rx_wd;
4877 
4878 		if (priv->extend_desc)
4879 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4880 		else
4881 			p = rx_q->dma_rx + entry;
4882 
4883 		if (!buf->page) {
4884 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4885 			if (!buf->page)
4886 				break;
4887 		}
4888 
4889 		if (priv->sph && !buf->sec_page) {
4890 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4891 			if (!buf->sec_page)
4892 				break;
4893 
4894 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4895 		}
4896 
4897 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4898 
4899 		stmmac_set_desc_addr(priv, p, buf->addr);
4900 		if (priv->sph)
4901 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4902 		else
4903 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4904 		stmmac_refill_desc3(priv, rx_q, p);
4905 
4906 		rx_q->rx_count_frames++;
4907 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4908 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4909 			rx_q->rx_count_frames = 0;
4910 
4911 		use_rx_wd = !priv->rx_coal_frames[queue];
4912 		use_rx_wd |= rx_q->rx_count_frames > 0;
4913 		if (!priv->use_riwt)
4914 			use_rx_wd = false;
4915 
4916 		dma_wmb();
4917 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4918 
4919 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4920 	}
4921 	rx_q->dirty_rx = entry;
4922 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4923 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4924 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4925 }
4926 
4927 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4928 				       struct dma_desc *p,
4929 				       int status, unsigned int len)
4930 {
4931 	unsigned int plen = 0, hlen = 0;
4932 	int coe = priv->hw->rx_csum;
4933 
4934 	/* Not first descriptor, buffer is always zero */
4935 	if (priv->sph && len)
4936 		return 0;
4937 
4938 	/* First descriptor, get split header length */
4939 	stmmac_get_rx_header_len(priv, p, &hlen);
4940 	if (priv->sph && hlen) {
4941 		priv->xstats.rx_split_hdr_pkt_n++;
4942 		return hlen;
4943 	}
4944 
4945 	/* First descriptor, not last descriptor and not split header */
4946 	if (status & rx_not_ls)
4947 		return priv->dma_conf.dma_buf_sz;
4948 
4949 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4950 
4951 	/* First descriptor and last descriptor and not split header */
4952 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4953 }
4954 
4955 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4956 				       struct dma_desc *p,
4957 				       int status, unsigned int len)
4958 {
4959 	int coe = priv->hw->rx_csum;
4960 	unsigned int plen = 0;
4961 
4962 	/* Not split header, buffer is not available */
4963 	if (!priv->sph)
4964 		return 0;
4965 
4966 	/* Not last descriptor */
4967 	if (status & rx_not_ls)
4968 		return priv->dma_conf.dma_buf_sz;
4969 
4970 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4971 
4972 	/* Last descriptor */
4973 	return plen - len;
4974 }
4975 
4976 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4977 				struct xdp_frame *xdpf, bool dma_map)
4978 {
4979 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4980 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4981 	unsigned int entry = tx_q->cur_tx;
4982 	struct dma_desc *tx_desc;
4983 	dma_addr_t dma_addr;
4984 	bool set_ic;
4985 
4986 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4987 		return STMMAC_XDP_CONSUMED;
4988 
4989 	if (priv->est && priv->est->enable &&
4990 	    priv->est->max_sdu[queue] &&
4991 	    xdpf->len > priv->est->max_sdu[queue]) {
4992 		priv->xstats.max_sdu_txq_drop[queue]++;
4993 		return STMMAC_XDP_CONSUMED;
4994 	}
4995 
4996 	if (likely(priv->extend_desc))
4997 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4998 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4999 		tx_desc = &tx_q->dma_entx[entry].basic;
5000 	else
5001 		tx_desc = tx_q->dma_tx + entry;
5002 
5003 	if (dma_map) {
5004 		dma_addr = dma_map_single(priv->device, xdpf->data,
5005 					  xdpf->len, DMA_TO_DEVICE);
5006 		if (dma_mapping_error(priv->device, dma_addr))
5007 			return STMMAC_XDP_CONSUMED;
5008 
5009 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
5010 	} else {
5011 		struct page *page = virt_to_page(xdpf->data);
5012 
5013 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
5014 			   xdpf->headroom;
5015 		dma_sync_single_for_device(priv->device, dma_addr,
5016 					   xdpf->len, DMA_BIDIRECTIONAL);
5017 
5018 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
5019 	}
5020 
5021 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
5022 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
5023 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
5024 	tx_q->tx_skbuff_dma[entry].last_segment = true;
5025 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
5026 
5027 	tx_q->xdpf[entry] = xdpf;
5028 
5029 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
5030 
5031 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
5032 			       true, priv->mode, true, true,
5033 			       xdpf->len);
5034 
5035 	tx_q->tx_count_frames++;
5036 
5037 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
5038 		set_ic = true;
5039 	else
5040 		set_ic = false;
5041 
5042 	if (set_ic) {
5043 		tx_q->tx_count_frames = 0;
5044 		stmmac_set_tx_ic(priv, tx_desc);
5045 		u64_stats_update_begin(&txq_stats->q_syncp);
5046 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
5047 		u64_stats_update_end(&txq_stats->q_syncp);
5048 	}
5049 
5050 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
5051 
5052 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5053 	tx_q->cur_tx = entry;
5054 
5055 	return STMMAC_XDP_TX;
5056 }
5057 
5058 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5059 				   int cpu)
5060 {
5061 	int index = cpu;
5062 
5063 	if (unlikely(index < 0))
5064 		index = 0;
5065 
5066 	while (index >= priv->plat->tx_queues_to_use)
5067 		index -= priv->plat->tx_queues_to_use;
5068 
5069 	return index;
5070 }
5071 
5072 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5073 				struct xdp_buff *xdp)
5074 {
5075 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5076 	int cpu = smp_processor_id();
5077 	struct netdev_queue *nq;
5078 	int queue;
5079 	int res;
5080 
5081 	if (unlikely(!xdpf))
5082 		return STMMAC_XDP_CONSUMED;
5083 
5084 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5085 	nq = netdev_get_tx_queue(priv->dev, queue);
5086 
5087 	__netif_tx_lock(nq, cpu);
5088 	/* Avoids TX time-out as we are sharing with slow path */
5089 	txq_trans_cond_update(nq);
5090 
5091 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5092 	if (res == STMMAC_XDP_TX)
5093 		stmmac_flush_tx_descriptors(priv, queue);
5094 
5095 	__netif_tx_unlock(nq);
5096 
5097 	return res;
5098 }
5099 
5100 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5101 				 struct bpf_prog *prog,
5102 				 struct xdp_buff *xdp)
5103 {
5104 	u32 act;
5105 	int res;
5106 
5107 	act = bpf_prog_run_xdp(prog, xdp);
5108 	switch (act) {
5109 	case XDP_PASS:
5110 		res = STMMAC_XDP_PASS;
5111 		break;
5112 	case XDP_TX:
5113 		res = stmmac_xdp_xmit_back(priv, xdp);
5114 		break;
5115 	case XDP_REDIRECT:
5116 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5117 			res = STMMAC_XDP_CONSUMED;
5118 		else
5119 			res = STMMAC_XDP_REDIRECT;
5120 		break;
5121 	default:
5122 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5123 		fallthrough;
5124 	case XDP_ABORTED:
5125 		trace_xdp_exception(priv->dev, prog, act);
5126 		fallthrough;
5127 	case XDP_DROP:
5128 		res = STMMAC_XDP_CONSUMED;
5129 		break;
5130 	}
5131 
5132 	return res;
5133 }
5134 
5135 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5136 					   struct xdp_buff *xdp)
5137 {
5138 	struct bpf_prog *prog;
5139 	int res;
5140 
5141 	prog = READ_ONCE(priv->xdp_prog);
5142 	if (!prog) {
5143 		res = STMMAC_XDP_PASS;
5144 		goto out;
5145 	}
5146 
5147 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5148 out:
5149 	return ERR_PTR(-res);
5150 }
5151 
5152 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5153 				   int xdp_status)
5154 {
5155 	int cpu = smp_processor_id();
5156 	int queue;
5157 
5158 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5159 
5160 	if (xdp_status & STMMAC_XDP_TX)
5161 		stmmac_tx_timer_arm(priv, queue);
5162 
5163 	if (xdp_status & STMMAC_XDP_REDIRECT)
5164 		xdp_do_flush();
5165 }
5166 
5167 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5168 					       struct xdp_buff *xdp)
5169 {
5170 	unsigned int metasize = xdp->data - xdp->data_meta;
5171 	unsigned int datasize = xdp->data_end - xdp->data;
5172 	struct sk_buff *skb;
5173 
5174 	skb = napi_alloc_skb(&ch->rxtx_napi,
5175 			     xdp->data_end - xdp->data_hard_start);
5176 	if (unlikely(!skb))
5177 		return NULL;
5178 
5179 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5180 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5181 	if (metasize)
5182 		skb_metadata_set(skb, metasize);
5183 
5184 	return skb;
5185 }
5186 
5187 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5188 				   struct dma_desc *p, struct dma_desc *np,
5189 				   struct xdp_buff *xdp)
5190 {
5191 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5192 	struct stmmac_channel *ch = &priv->channel[queue];
5193 	unsigned int len = xdp->data_end - xdp->data;
5194 	enum pkt_hash_types hash_type;
5195 	int coe = priv->hw->rx_csum;
5196 	struct sk_buff *skb;
5197 	u32 hash;
5198 
5199 	skb = stmmac_construct_skb_zc(ch, xdp);
5200 	if (!skb) {
5201 		priv->xstats.rx_dropped++;
5202 		return;
5203 	}
5204 
5205 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5206 	if (priv->hw->hw_vlan_en)
5207 		/* MAC level stripping. */
5208 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5209 	else
5210 		/* Driver level stripping. */
5211 		stmmac_rx_vlan(priv->dev, skb);
5212 	skb->protocol = eth_type_trans(skb, priv->dev);
5213 
5214 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5215 		skb_checksum_none_assert(skb);
5216 	else
5217 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5218 
5219 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5220 		skb_set_hash(skb, hash, hash_type);
5221 
5222 	skb_record_rx_queue(skb, queue);
5223 	napi_gro_receive(&ch->rxtx_napi, skb);
5224 
5225 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5226 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5227 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5228 	u64_stats_update_end(&rxq_stats->napi_syncp);
5229 }
5230 
5231 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5232 {
5233 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5234 	unsigned int entry = rx_q->dirty_rx;
5235 	struct dma_desc *rx_desc = NULL;
5236 	bool ret = true;
5237 
5238 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5239 
5240 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5241 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5242 		dma_addr_t dma_addr;
5243 		bool use_rx_wd;
5244 
5245 		if (!buf->xdp) {
5246 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5247 			if (!buf->xdp) {
5248 				ret = false;
5249 				break;
5250 			}
5251 		}
5252 
5253 		if (priv->extend_desc)
5254 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5255 		else
5256 			rx_desc = rx_q->dma_rx + entry;
5257 
5258 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5259 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5260 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5261 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5262 
5263 		rx_q->rx_count_frames++;
5264 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5265 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5266 			rx_q->rx_count_frames = 0;
5267 
5268 		use_rx_wd = !priv->rx_coal_frames[queue];
5269 		use_rx_wd |= rx_q->rx_count_frames > 0;
5270 		if (!priv->use_riwt)
5271 			use_rx_wd = false;
5272 
5273 		dma_wmb();
5274 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5275 
5276 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5277 	}
5278 
5279 	if (rx_desc) {
5280 		rx_q->dirty_rx = entry;
5281 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5282 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5283 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5284 	}
5285 
5286 	return ret;
5287 }
5288 
5289 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5290 {
5291 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5292 	 * to represent incoming packet, whereas cb field in the same structure
5293 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5294 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5295 	 */
5296 	return (struct stmmac_xdp_buff *)xdp;
5297 }
5298 
5299 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5300 {
5301 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5302 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5303 	unsigned int count = 0, error = 0, len = 0;
5304 	int dirty = stmmac_rx_dirty(priv, queue);
5305 	unsigned int next_entry = rx_q->cur_rx;
5306 	u32 rx_errors = 0, rx_dropped = 0;
5307 	unsigned int desc_size;
5308 	struct bpf_prog *prog;
5309 	bool failure = false;
5310 	int xdp_status = 0;
5311 	int status = 0;
5312 
5313 	if (netif_msg_rx_status(priv)) {
5314 		void *rx_head;
5315 
5316 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5317 		if (priv->extend_desc) {
5318 			rx_head = (void *)rx_q->dma_erx;
5319 			desc_size = sizeof(struct dma_extended_desc);
5320 		} else {
5321 			rx_head = (void *)rx_q->dma_rx;
5322 			desc_size = sizeof(struct dma_desc);
5323 		}
5324 
5325 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5326 				    rx_q->dma_rx_phy, desc_size);
5327 	}
5328 	while (count < limit) {
5329 		struct stmmac_rx_buffer *buf;
5330 		struct stmmac_xdp_buff *ctx;
5331 		unsigned int buf1_len = 0;
5332 		struct dma_desc *np, *p;
5333 		int entry;
5334 		int res;
5335 
5336 		if (!count && rx_q->state_saved) {
5337 			error = rx_q->state.error;
5338 			len = rx_q->state.len;
5339 		} else {
5340 			rx_q->state_saved = false;
5341 			error = 0;
5342 			len = 0;
5343 		}
5344 
5345 		if (count >= limit)
5346 			break;
5347 
5348 read_again:
5349 		buf1_len = 0;
5350 		entry = next_entry;
5351 		buf = &rx_q->buf_pool[entry];
5352 
5353 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5354 			failure = failure ||
5355 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5356 			dirty = 0;
5357 		}
5358 
5359 		if (priv->extend_desc)
5360 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5361 		else
5362 			p = rx_q->dma_rx + entry;
5363 
5364 		/* read the status of the incoming frame */
5365 		status = stmmac_rx_status(priv, &priv->xstats, p);
5366 		/* check if managed by the DMA otherwise go ahead */
5367 		if (unlikely(status & dma_own))
5368 			break;
5369 
5370 		/* Prefetch the next RX descriptor */
5371 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5372 						priv->dma_conf.dma_rx_size);
5373 		next_entry = rx_q->cur_rx;
5374 
5375 		if (priv->extend_desc)
5376 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5377 		else
5378 			np = rx_q->dma_rx + next_entry;
5379 
5380 		prefetch(np);
5381 
5382 		/* Ensure a valid XSK buffer before proceed */
5383 		if (!buf->xdp)
5384 			break;
5385 
5386 		if (priv->extend_desc)
5387 			stmmac_rx_extended_status(priv, &priv->xstats,
5388 						  rx_q->dma_erx + entry);
5389 		if (unlikely(status == discard_frame)) {
5390 			xsk_buff_free(buf->xdp);
5391 			buf->xdp = NULL;
5392 			dirty++;
5393 			error = 1;
5394 			if (!priv->hwts_rx_en)
5395 				rx_errors++;
5396 		}
5397 
5398 		if (unlikely(error && (status & rx_not_ls)))
5399 			goto read_again;
5400 		if (unlikely(error)) {
5401 			count++;
5402 			continue;
5403 		}
5404 
5405 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5406 		if (likely(status & rx_not_ls)) {
5407 			xsk_buff_free(buf->xdp);
5408 			buf->xdp = NULL;
5409 			dirty++;
5410 			count++;
5411 			goto read_again;
5412 		}
5413 
5414 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5415 		ctx->priv = priv;
5416 		ctx->desc = p;
5417 		ctx->ndesc = np;
5418 
5419 		/* XDP ZC Frame only support primary buffers for now */
5420 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5421 		len += buf1_len;
5422 
5423 		/* ACS is disabled; strip manually. */
5424 		if (likely(!(status & rx_not_ls))) {
5425 			buf1_len -= ETH_FCS_LEN;
5426 			len -= ETH_FCS_LEN;
5427 		}
5428 
5429 		/* RX buffer is good and fit into a XSK pool buffer */
5430 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5431 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5432 
5433 		prog = READ_ONCE(priv->xdp_prog);
5434 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5435 
5436 		switch (res) {
5437 		case STMMAC_XDP_PASS:
5438 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5439 			xsk_buff_free(buf->xdp);
5440 			break;
5441 		case STMMAC_XDP_CONSUMED:
5442 			xsk_buff_free(buf->xdp);
5443 			rx_dropped++;
5444 			break;
5445 		case STMMAC_XDP_TX:
5446 		case STMMAC_XDP_REDIRECT:
5447 			xdp_status |= res;
5448 			break;
5449 		}
5450 
5451 		buf->xdp = NULL;
5452 		dirty++;
5453 		count++;
5454 	}
5455 
5456 	if (status & rx_not_ls) {
5457 		rx_q->state_saved = true;
5458 		rx_q->state.error = error;
5459 		rx_q->state.len = len;
5460 	}
5461 
5462 	stmmac_finalize_xdp_rx(priv, xdp_status);
5463 
5464 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5465 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5466 	u64_stats_update_end(&rxq_stats->napi_syncp);
5467 
5468 	priv->xstats.rx_dropped += rx_dropped;
5469 	priv->xstats.rx_errors += rx_errors;
5470 
5471 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5472 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5473 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5474 		else
5475 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5476 
5477 		return (int)count;
5478 	}
5479 
5480 	return failure ? limit : (int)count;
5481 }
5482 
5483 /**
5484  * stmmac_rx - manage the receive process
5485  * @priv: driver private structure
5486  * @limit: napi bugget
5487  * @queue: RX queue index.
5488  * Description :  this the function called by the napi poll method.
5489  * It gets all the frames inside the ring.
5490  */
5491 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5492 {
5493 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5494 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5495 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5496 	struct stmmac_channel *ch = &priv->channel[queue];
5497 	unsigned int count = 0, error = 0, len = 0;
5498 	int status = 0, coe = priv->hw->rx_csum;
5499 	unsigned int next_entry = rx_q->cur_rx;
5500 	enum dma_data_direction dma_dir;
5501 	unsigned int desc_size;
5502 	struct sk_buff *skb = NULL;
5503 	struct stmmac_xdp_buff ctx;
5504 	int xdp_status = 0;
5505 	int bufsz;
5506 
5507 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5508 	bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5509 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5510 
5511 	if (netif_msg_rx_status(priv)) {
5512 		void *rx_head;
5513 
5514 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5515 		if (priv->extend_desc) {
5516 			rx_head = (void *)rx_q->dma_erx;
5517 			desc_size = sizeof(struct dma_extended_desc);
5518 		} else {
5519 			rx_head = (void *)rx_q->dma_rx;
5520 			desc_size = sizeof(struct dma_desc);
5521 		}
5522 
5523 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5524 				    rx_q->dma_rx_phy, desc_size);
5525 	}
5526 	while (count < limit) {
5527 		unsigned int buf1_len = 0, buf2_len = 0;
5528 		enum pkt_hash_types hash_type;
5529 		struct stmmac_rx_buffer *buf;
5530 		struct dma_desc *np, *p;
5531 		int entry;
5532 		u32 hash;
5533 
5534 		if (!count && rx_q->state_saved) {
5535 			skb = rx_q->state.skb;
5536 			error = rx_q->state.error;
5537 			len = rx_q->state.len;
5538 		} else {
5539 			rx_q->state_saved = false;
5540 			skb = NULL;
5541 			error = 0;
5542 			len = 0;
5543 		}
5544 
5545 read_again:
5546 		if (count >= limit)
5547 			break;
5548 
5549 		buf1_len = 0;
5550 		buf2_len = 0;
5551 		entry = next_entry;
5552 		buf = &rx_q->buf_pool[entry];
5553 
5554 		if (priv->extend_desc)
5555 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5556 		else
5557 			p = rx_q->dma_rx + entry;
5558 
5559 		/* read the status of the incoming frame */
5560 		status = stmmac_rx_status(priv, &priv->xstats, p);
5561 		/* check if managed by the DMA otherwise go ahead */
5562 		if (unlikely(status & dma_own))
5563 			break;
5564 
5565 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5566 						priv->dma_conf.dma_rx_size);
5567 		next_entry = rx_q->cur_rx;
5568 
5569 		if (priv->extend_desc)
5570 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5571 		else
5572 			np = rx_q->dma_rx + next_entry;
5573 
5574 		prefetch(np);
5575 
5576 		if (priv->extend_desc)
5577 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5578 		if (unlikely(status == discard_frame)) {
5579 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5580 			buf->page = NULL;
5581 			error = 1;
5582 			if (!priv->hwts_rx_en)
5583 				rx_errors++;
5584 		}
5585 
5586 		if (unlikely(error && (status & rx_not_ls)))
5587 			goto read_again;
5588 		if (unlikely(error)) {
5589 			dev_kfree_skb(skb);
5590 			skb = NULL;
5591 			count++;
5592 			continue;
5593 		}
5594 
5595 		/* Buffer is good. Go on. */
5596 
5597 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5598 		len += buf1_len;
5599 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5600 		len += buf2_len;
5601 
5602 		/* ACS is disabled; strip manually. */
5603 		if (likely(!(status & rx_not_ls))) {
5604 			if (buf2_len) {
5605 				buf2_len -= ETH_FCS_LEN;
5606 				len -= ETH_FCS_LEN;
5607 			} else if (buf1_len) {
5608 				buf1_len -= ETH_FCS_LEN;
5609 				len -= ETH_FCS_LEN;
5610 			}
5611 		}
5612 
5613 		if (!skb) {
5614 			unsigned int pre_len, sync_len;
5615 
5616 			dma_sync_single_for_cpu(priv->device, buf->addr,
5617 						buf1_len, dma_dir);
5618 			net_prefetch(page_address(buf->page) +
5619 				     buf->page_offset);
5620 
5621 			xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5622 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5623 					 buf->page_offset, buf1_len, true);
5624 
5625 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5626 				  buf->page_offset;
5627 
5628 			ctx.priv = priv;
5629 			ctx.desc = p;
5630 			ctx.ndesc = np;
5631 
5632 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5633 			/* Due xdp_adjust_tail: DMA sync for_device
5634 			 * cover max len CPU touch
5635 			 */
5636 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5637 				   buf->page_offset;
5638 			sync_len = max(sync_len, pre_len);
5639 
5640 			/* For Not XDP_PASS verdict */
5641 			if (IS_ERR(skb)) {
5642 				unsigned int xdp_res = -PTR_ERR(skb);
5643 
5644 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5645 					page_pool_put_page(rx_q->page_pool,
5646 							   virt_to_head_page(ctx.xdp.data),
5647 							   sync_len, true);
5648 					buf->page = NULL;
5649 					rx_dropped++;
5650 
5651 					/* Clear skb as it was set as
5652 					 * status by XDP program.
5653 					 */
5654 					skb = NULL;
5655 
5656 					if (unlikely((status & rx_not_ls)))
5657 						goto read_again;
5658 
5659 					count++;
5660 					continue;
5661 				} else if (xdp_res & (STMMAC_XDP_TX |
5662 						      STMMAC_XDP_REDIRECT)) {
5663 					xdp_status |= xdp_res;
5664 					buf->page = NULL;
5665 					skb = NULL;
5666 					count++;
5667 					continue;
5668 				}
5669 			}
5670 		}
5671 
5672 		if (!skb) {
5673 			unsigned int head_pad_len;
5674 
5675 			/* XDP program may expand or reduce tail */
5676 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5677 
5678 			skb = napi_build_skb(page_address(buf->page),
5679 					     rx_q->napi_skb_frag_size);
5680 			if (!skb) {
5681 				page_pool_recycle_direct(rx_q->page_pool,
5682 							 buf->page);
5683 				rx_dropped++;
5684 				count++;
5685 				goto drain_data;
5686 			}
5687 
5688 			/* XDP program may adjust header */
5689 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5690 			skb_reserve(skb, head_pad_len);
5691 			skb_put(skb, buf1_len);
5692 			skb_mark_for_recycle(skb);
5693 			buf->page = NULL;
5694 		} else if (buf1_len) {
5695 			dma_sync_single_for_cpu(priv->device, buf->addr,
5696 						buf1_len, dma_dir);
5697 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5698 					buf->page, buf->page_offset, buf1_len,
5699 					priv->dma_conf.dma_buf_sz);
5700 			buf->page = NULL;
5701 		}
5702 
5703 		if (buf2_len) {
5704 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5705 						buf2_len, dma_dir);
5706 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5707 					buf->sec_page, 0, buf2_len,
5708 					priv->dma_conf.dma_buf_sz);
5709 			buf->sec_page = NULL;
5710 		}
5711 
5712 drain_data:
5713 		if (likely(status & rx_not_ls))
5714 			goto read_again;
5715 		if (!skb)
5716 			continue;
5717 
5718 		/* Got entire packet into SKB. Finish it. */
5719 
5720 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5721 
5722 		if (priv->hw->hw_vlan_en)
5723 			/* MAC level stripping. */
5724 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5725 		else
5726 			/* Driver level stripping. */
5727 			stmmac_rx_vlan(priv->dev, skb);
5728 
5729 		skb->protocol = eth_type_trans(skb, priv->dev);
5730 
5731 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5732 			skb_checksum_none_assert(skb);
5733 		else
5734 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5735 
5736 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5737 			skb_set_hash(skb, hash, hash_type);
5738 
5739 		skb_record_rx_queue(skb, queue);
5740 		napi_gro_receive(&ch->rx_napi, skb);
5741 		skb = NULL;
5742 
5743 		rx_packets++;
5744 		rx_bytes += len;
5745 		count++;
5746 	}
5747 
5748 	if (status & rx_not_ls || skb) {
5749 		rx_q->state_saved = true;
5750 		rx_q->state.skb = skb;
5751 		rx_q->state.error = error;
5752 		rx_q->state.len = len;
5753 	}
5754 
5755 	stmmac_finalize_xdp_rx(priv, xdp_status);
5756 
5757 	stmmac_rx_refill(priv, queue);
5758 
5759 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5760 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5761 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5762 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5763 	u64_stats_update_end(&rxq_stats->napi_syncp);
5764 
5765 	priv->xstats.rx_dropped += rx_dropped;
5766 	priv->xstats.rx_errors += rx_errors;
5767 
5768 	return count;
5769 }
5770 
5771 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5772 {
5773 	struct stmmac_channel *ch =
5774 		container_of(napi, struct stmmac_channel, rx_napi);
5775 	struct stmmac_priv *priv = ch->priv_data;
5776 	struct stmmac_rxq_stats *rxq_stats;
5777 	u32 chan = ch->index;
5778 	int work_done;
5779 
5780 	rxq_stats = &priv->xstats.rxq_stats[chan];
5781 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5782 	u64_stats_inc(&rxq_stats->napi.poll);
5783 	u64_stats_update_end(&rxq_stats->napi_syncp);
5784 
5785 	work_done = stmmac_rx(priv, budget, chan);
5786 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5787 		unsigned long flags;
5788 
5789 		spin_lock_irqsave(&ch->lock, flags);
5790 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5791 		spin_unlock_irqrestore(&ch->lock, flags);
5792 	}
5793 
5794 	return work_done;
5795 }
5796 
5797 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5798 {
5799 	struct stmmac_channel *ch =
5800 		container_of(napi, struct stmmac_channel, tx_napi);
5801 	struct stmmac_priv *priv = ch->priv_data;
5802 	struct stmmac_txq_stats *txq_stats;
5803 	bool pending_packets = false;
5804 	u32 chan = ch->index;
5805 	int work_done;
5806 
5807 	txq_stats = &priv->xstats.txq_stats[chan];
5808 	u64_stats_update_begin(&txq_stats->napi_syncp);
5809 	u64_stats_inc(&txq_stats->napi.poll);
5810 	u64_stats_update_end(&txq_stats->napi_syncp);
5811 
5812 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5813 	work_done = min(work_done, budget);
5814 
5815 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5816 		unsigned long flags;
5817 
5818 		spin_lock_irqsave(&ch->lock, flags);
5819 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5820 		spin_unlock_irqrestore(&ch->lock, flags);
5821 	}
5822 
5823 	/* TX still have packet to handle, check if we need to arm tx timer */
5824 	if (pending_packets)
5825 		stmmac_tx_timer_arm(priv, chan);
5826 
5827 	return work_done;
5828 }
5829 
5830 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5831 {
5832 	struct stmmac_channel *ch =
5833 		container_of(napi, struct stmmac_channel, rxtx_napi);
5834 	struct stmmac_priv *priv = ch->priv_data;
5835 	bool tx_pending_packets = false;
5836 	int rx_done, tx_done, rxtx_done;
5837 	struct stmmac_rxq_stats *rxq_stats;
5838 	struct stmmac_txq_stats *txq_stats;
5839 	u32 chan = ch->index;
5840 
5841 	rxq_stats = &priv->xstats.rxq_stats[chan];
5842 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5843 	u64_stats_inc(&rxq_stats->napi.poll);
5844 	u64_stats_update_end(&rxq_stats->napi_syncp);
5845 
5846 	txq_stats = &priv->xstats.txq_stats[chan];
5847 	u64_stats_update_begin(&txq_stats->napi_syncp);
5848 	u64_stats_inc(&txq_stats->napi.poll);
5849 	u64_stats_update_end(&txq_stats->napi_syncp);
5850 
5851 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5852 	tx_done = min(tx_done, budget);
5853 
5854 	rx_done = stmmac_rx_zc(priv, budget, chan);
5855 
5856 	rxtx_done = max(tx_done, rx_done);
5857 
5858 	/* If either TX or RX work is not complete, return budget
5859 	 * and keep pooling
5860 	 */
5861 	if (rxtx_done >= budget)
5862 		return budget;
5863 
5864 	/* all work done, exit the polling mode */
5865 	if (napi_complete_done(napi, rxtx_done)) {
5866 		unsigned long flags;
5867 
5868 		spin_lock_irqsave(&ch->lock, flags);
5869 		/* Both RX and TX work done are compelte,
5870 		 * so enable both RX & TX IRQs.
5871 		 */
5872 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5873 		spin_unlock_irqrestore(&ch->lock, flags);
5874 	}
5875 
5876 	/* TX still have packet to handle, check if we need to arm tx timer */
5877 	if (tx_pending_packets)
5878 		stmmac_tx_timer_arm(priv, chan);
5879 
5880 	return min(rxtx_done, budget - 1);
5881 }
5882 
5883 /**
5884  *  stmmac_tx_timeout
5885  *  @dev : Pointer to net device structure
5886  *  @txqueue: the index of the hanging transmit queue
5887  *  Description: this function is called when a packet transmission fails to
5888  *   complete within a reasonable time. The driver will mark the error in the
5889  *   netdev structure and arrange for the device to be reset to a sane state
5890  *   in order to transmit a new packet.
5891  */
5892 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5893 {
5894 	struct stmmac_priv *priv = netdev_priv(dev);
5895 
5896 	stmmac_global_err(priv);
5897 }
5898 
5899 /**
5900  *  stmmac_set_rx_mode - entry point for multicast addressing
5901  *  @dev : pointer to the device structure
5902  *  Description:
5903  *  This function is a driver entry point which gets called by the kernel
5904  *  whenever multicast addresses must be enabled/disabled.
5905  *  Return value:
5906  *  void.
5907  *
5908  *  FIXME: This may need RXC to be running, but it may be called with BH
5909  *  disabled, which means we can't call phylink_rx_clk_stop*().
5910  */
5911 static void stmmac_set_rx_mode(struct net_device *dev)
5912 {
5913 	struct stmmac_priv *priv = netdev_priv(dev);
5914 
5915 	stmmac_set_filter(priv, priv->hw, dev);
5916 }
5917 
5918 /**
5919  *  stmmac_change_mtu - entry point to change MTU size for the device.
5920  *  @dev : device pointer.
5921  *  @new_mtu : the new MTU size for the device.
5922  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5923  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5924  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5925  *  Return value:
5926  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5927  *  file on failure.
5928  */
5929 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5930 {
5931 	struct stmmac_priv *priv = netdev_priv(dev);
5932 	int txfifosz = priv->plat->tx_fifo_size;
5933 	struct stmmac_dma_conf *dma_conf;
5934 	const int mtu = new_mtu;
5935 	int ret;
5936 
5937 	if (txfifosz == 0)
5938 		txfifosz = priv->dma_cap.tx_fifo_size;
5939 
5940 	txfifosz /= priv->plat->tx_queues_to_use;
5941 
5942 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5943 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5944 		return -EINVAL;
5945 	}
5946 
5947 	new_mtu = STMMAC_ALIGN(new_mtu);
5948 
5949 	/* If condition true, FIFO is too small or MTU too large */
5950 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5951 		return -EINVAL;
5952 
5953 	if (netif_running(dev)) {
5954 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5955 		/* Try to allocate the new DMA conf with the new mtu */
5956 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5957 		if (IS_ERR(dma_conf)) {
5958 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5959 				   mtu);
5960 			return PTR_ERR(dma_conf);
5961 		}
5962 
5963 		stmmac_release(dev);
5964 
5965 		ret = __stmmac_open(dev, dma_conf);
5966 		if (ret) {
5967 			free_dma_desc_resources(priv, dma_conf);
5968 			kfree(dma_conf);
5969 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5970 			return ret;
5971 		}
5972 
5973 		kfree(dma_conf);
5974 
5975 		stmmac_set_rx_mode(dev);
5976 	}
5977 
5978 	WRITE_ONCE(dev->mtu, mtu);
5979 	netdev_update_features(dev);
5980 
5981 	return 0;
5982 }
5983 
5984 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5985 					     netdev_features_t features)
5986 {
5987 	struct stmmac_priv *priv = netdev_priv(dev);
5988 
5989 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5990 		features &= ~NETIF_F_RXCSUM;
5991 
5992 	if (!priv->plat->tx_coe)
5993 		features &= ~NETIF_F_CSUM_MASK;
5994 
5995 	/* Some GMAC devices have a bugged Jumbo frame support that
5996 	 * needs to have the Tx COE disabled for oversized frames
5997 	 * (due to limited buffer sizes). In this case we disable
5998 	 * the TX csum insertion in the TDES and not use SF.
5999 	 */
6000 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
6001 		features &= ~NETIF_F_CSUM_MASK;
6002 
6003 	/* Disable tso if asked by ethtool */
6004 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
6005 		if (features & NETIF_F_TSO)
6006 			priv->tso = true;
6007 		else
6008 			priv->tso = false;
6009 	}
6010 
6011 	return features;
6012 }
6013 
6014 static int stmmac_set_features(struct net_device *netdev,
6015 			       netdev_features_t features)
6016 {
6017 	struct stmmac_priv *priv = netdev_priv(netdev);
6018 
6019 	/* Keep the COE Type in case of csum is supporting */
6020 	if (features & NETIF_F_RXCSUM)
6021 		priv->hw->rx_csum = priv->plat->rx_coe;
6022 	else
6023 		priv->hw->rx_csum = 0;
6024 	/* No check needed because rx_coe has been set before and it will be
6025 	 * fixed in case of issue.
6026 	 */
6027 	stmmac_rx_ipc(priv, priv->hw);
6028 
6029 	if (priv->sph_cap) {
6030 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6031 		u32 chan;
6032 
6033 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
6034 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6035 	}
6036 
6037 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
6038 		priv->hw->hw_vlan_en = true;
6039 	else
6040 		priv->hw->hw_vlan_en = false;
6041 
6042 	phylink_rx_clk_stop_block(priv->phylink);
6043 	stmmac_set_hw_vlan_mode(priv, priv->hw);
6044 	phylink_rx_clk_stop_unblock(priv->phylink);
6045 
6046 	return 0;
6047 }
6048 
6049 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6050 {
6051 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6052 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6053 	u32 queues_count;
6054 	u32 queue;
6055 	bool xmac;
6056 
6057 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6058 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6059 
6060 	if (priv->irq_wake)
6061 		pm_wakeup_event(priv->device, 0);
6062 
6063 	if (priv->dma_cap.estsel)
6064 		stmmac_est_irq_status(priv, priv, priv->dev,
6065 				      &priv->xstats, tx_cnt);
6066 
6067 	if (stmmac_fpe_supported(priv))
6068 		stmmac_fpe_irq_status(priv);
6069 
6070 	/* To handle GMAC own interrupts */
6071 	if ((priv->plat->has_gmac) || xmac) {
6072 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6073 
6074 		if (unlikely(status)) {
6075 			/* For LPI we need to save the tx status */
6076 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6077 				priv->tx_path_in_lpi_mode = true;
6078 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6079 				priv->tx_path_in_lpi_mode = false;
6080 		}
6081 
6082 		for (queue = 0; queue < queues_count; queue++)
6083 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6084 
6085 		/* PCS link status */
6086 		if (priv->hw->pcs &&
6087 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6088 			if (priv->xstats.pcs_link)
6089 				netif_carrier_on(priv->dev);
6090 			else
6091 				netif_carrier_off(priv->dev);
6092 		}
6093 
6094 		stmmac_timestamp_interrupt(priv, priv);
6095 	}
6096 }
6097 
6098 /**
6099  *  stmmac_interrupt - main ISR
6100  *  @irq: interrupt number.
6101  *  @dev_id: to pass the net device pointer.
6102  *  Description: this is the main driver interrupt service routine.
6103  *  It can call:
6104  *  o DMA service routine (to manage incoming frame reception and transmission
6105  *    status)
6106  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6107  *    interrupts.
6108  */
6109 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6110 {
6111 	struct net_device *dev = (struct net_device *)dev_id;
6112 	struct stmmac_priv *priv = netdev_priv(dev);
6113 
6114 	/* Check if adapter is up */
6115 	if (test_bit(STMMAC_DOWN, &priv->state))
6116 		return IRQ_HANDLED;
6117 
6118 	/* Check ASP error if it isn't delivered via an individual IRQ */
6119 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6120 		return IRQ_HANDLED;
6121 
6122 	/* To handle Common interrupts */
6123 	stmmac_common_interrupt(priv);
6124 
6125 	/* To handle DMA interrupts */
6126 	stmmac_dma_interrupt(priv);
6127 
6128 	return IRQ_HANDLED;
6129 }
6130 
6131 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6132 {
6133 	struct net_device *dev = (struct net_device *)dev_id;
6134 	struct stmmac_priv *priv = netdev_priv(dev);
6135 
6136 	/* Check if adapter is up */
6137 	if (test_bit(STMMAC_DOWN, &priv->state))
6138 		return IRQ_HANDLED;
6139 
6140 	/* To handle Common interrupts */
6141 	stmmac_common_interrupt(priv);
6142 
6143 	return IRQ_HANDLED;
6144 }
6145 
6146 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6147 {
6148 	struct net_device *dev = (struct net_device *)dev_id;
6149 	struct stmmac_priv *priv = netdev_priv(dev);
6150 
6151 	/* Check if adapter is up */
6152 	if (test_bit(STMMAC_DOWN, &priv->state))
6153 		return IRQ_HANDLED;
6154 
6155 	/* Check if a fatal error happened */
6156 	stmmac_safety_feat_interrupt(priv);
6157 
6158 	return IRQ_HANDLED;
6159 }
6160 
6161 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6162 {
6163 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6164 	struct stmmac_dma_conf *dma_conf;
6165 	int chan = tx_q->queue_index;
6166 	struct stmmac_priv *priv;
6167 	int status;
6168 
6169 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6170 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6171 
6172 	/* Check if adapter is up */
6173 	if (test_bit(STMMAC_DOWN, &priv->state))
6174 		return IRQ_HANDLED;
6175 
6176 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6177 
6178 	if (unlikely(status & tx_hard_error_bump_tc)) {
6179 		/* Try to bump up the dma threshold on this failure */
6180 		stmmac_bump_dma_threshold(priv, chan);
6181 	} else if (unlikely(status == tx_hard_error)) {
6182 		stmmac_tx_err(priv, chan);
6183 	}
6184 
6185 	return IRQ_HANDLED;
6186 }
6187 
6188 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6189 {
6190 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6191 	struct stmmac_dma_conf *dma_conf;
6192 	int chan = rx_q->queue_index;
6193 	struct stmmac_priv *priv;
6194 
6195 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6196 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6197 
6198 	/* Check if adapter is up */
6199 	if (test_bit(STMMAC_DOWN, &priv->state))
6200 		return IRQ_HANDLED;
6201 
6202 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6203 
6204 	return IRQ_HANDLED;
6205 }
6206 
6207 /**
6208  *  stmmac_ioctl - Entry point for the Ioctl
6209  *  @dev: Device pointer.
6210  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6211  *  a proprietary structure used to pass information to the driver.
6212  *  @cmd: IOCTL command
6213  *  Description:
6214  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6215  */
6216 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6217 {
6218 	struct stmmac_priv *priv = netdev_priv (dev);
6219 	int ret = -EOPNOTSUPP;
6220 
6221 	if (!netif_running(dev))
6222 		return -EINVAL;
6223 
6224 	switch (cmd) {
6225 	case SIOCGMIIPHY:
6226 	case SIOCGMIIREG:
6227 	case SIOCSMIIREG:
6228 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6229 		break;
6230 	default:
6231 		break;
6232 	}
6233 
6234 	return ret;
6235 }
6236 
6237 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6238 				    void *cb_priv)
6239 {
6240 	struct stmmac_priv *priv = cb_priv;
6241 	int ret = -EOPNOTSUPP;
6242 
6243 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6244 		return ret;
6245 
6246 	__stmmac_disable_all_queues(priv);
6247 
6248 	switch (type) {
6249 	case TC_SETUP_CLSU32:
6250 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6251 		break;
6252 	case TC_SETUP_CLSFLOWER:
6253 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6254 		break;
6255 	default:
6256 		break;
6257 	}
6258 
6259 	stmmac_enable_all_queues(priv);
6260 	return ret;
6261 }
6262 
6263 static LIST_HEAD(stmmac_block_cb_list);
6264 
6265 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6266 			   void *type_data)
6267 {
6268 	struct stmmac_priv *priv = netdev_priv(ndev);
6269 
6270 	switch (type) {
6271 	case TC_QUERY_CAPS:
6272 		return stmmac_tc_query_caps(priv, priv, type_data);
6273 	case TC_SETUP_QDISC_MQPRIO:
6274 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6275 	case TC_SETUP_BLOCK:
6276 		return flow_block_cb_setup_simple(type_data,
6277 						  &stmmac_block_cb_list,
6278 						  stmmac_setup_tc_block_cb,
6279 						  priv, priv, true);
6280 	case TC_SETUP_QDISC_CBS:
6281 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6282 	case TC_SETUP_QDISC_TAPRIO:
6283 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6284 	case TC_SETUP_QDISC_ETF:
6285 		return stmmac_tc_setup_etf(priv, priv, type_data);
6286 	default:
6287 		return -EOPNOTSUPP;
6288 	}
6289 }
6290 
6291 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6292 			       struct net_device *sb_dev)
6293 {
6294 	int gso = skb_shinfo(skb)->gso_type;
6295 
6296 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6297 		/*
6298 		 * There is no way to determine the number of TSO/USO
6299 		 * capable Queues. Let's use always the Queue 0
6300 		 * because if TSO/USO is supported then at least this
6301 		 * one will be capable.
6302 		 */
6303 		return 0;
6304 	}
6305 
6306 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6307 }
6308 
6309 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6310 {
6311 	struct stmmac_priv *priv = netdev_priv(ndev);
6312 	int ret = 0;
6313 
6314 	ret = pm_runtime_resume_and_get(priv->device);
6315 	if (ret < 0)
6316 		return ret;
6317 
6318 	ret = eth_mac_addr(ndev, addr);
6319 	if (ret)
6320 		goto set_mac_error;
6321 
6322 	phylink_rx_clk_stop_block(priv->phylink);
6323 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6324 	phylink_rx_clk_stop_unblock(priv->phylink);
6325 
6326 set_mac_error:
6327 	pm_runtime_put(priv->device);
6328 
6329 	return ret;
6330 }
6331 
6332 #ifdef CONFIG_DEBUG_FS
6333 static struct dentry *stmmac_fs_dir;
6334 
6335 static void sysfs_display_ring(void *head, int size, int extend_desc,
6336 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6337 {
6338 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6339 	struct dma_desc *p = (struct dma_desc *)head;
6340 	unsigned int desc_size;
6341 	dma_addr_t dma_addr;
6342 	int i;
6343 
6344 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6345 	for (i = 0; i < size; i++) {
6346 		dma_addr = dma_phy_addr + i * desc_size;
6347 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6348 				i, &dma_addr,
6349 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6350 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6351 		if (extend_desc)
6352 			p = &(++ep)->basic;
6353 		else
6354 			p++;
6355 	}
6356 }
6357 
6358 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6359 {
6360 	struct net_device *dev = seq->private;
6361 	struct stmmac_priv *priv = netdev_priv(dev);
6362 	u32 rx_count = priv->plat->rx_queues_to_use;
6363 	u32 tx_count = priv->plat->tx_queues_to_use;
6364 	u32 queue;
6365 
6366 	if ((dev->flags & IFF_UP) == 0)
6367 		return 0;
6368 
6369 	for (queue = 0; queue < rx_count; queue++) {
6370 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6371 
6372 		seq_printf(seq, "RX Queue %d:\n", queue);
6373 
6374 		if (priv->extend_desc) {
6375 			seq_printf(seq, "Extended descriptor ring:\n");
6376 			sysfs_display_ring((void *)rx_q->dma_erx,
6377 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6378 		} else {
6379 			seq_printf(seq, "Descriptor ring:\n");
6380 			sysfs_display_ring((void *)rx_q->dma_rx,
6381 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6382 		}
6383 	}
6384 
6385 	for (queue = 0; queue < tx_count; queue++) {
6386 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6387 
6388 		seq_printf(seq, "TX Queue %d:\n", queue);
6389 
6390 		if (priv->extend_desc) {
6391 			seq_printf(seq, "Extended descriptor ring:\n");
6392 			sysfs_display_ring((void *)tx_q->dma_etx,
6393 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6394 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6395 			seq_printf(seq, "Descriptor ring:\n");
6396 			sysfs_display_ring((void *)tx_q->dma_tx,
6397 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6398 		}
6399 	}
6400 
6401 	return 0;
6402 }
6403 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6404 
6405 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6406 {
6407 	static const char * const dwxgmac_timestamp_source[] = {
6408 		"None",
6409 		"Internal",
6410 		"External",
6411 		"Both",
6412 	};
6413 	static const char * const dwxgmac_safety_feature_desc[] = {
6414 		"No",
6415 		"All Safety Features with ECC and Parity",
6416 		"All Safety Features without ECC or Parity",
6417 		"All Safety Features with Parity Only",
6418 		"ECC Only",
6419 		"UNDEFINED",
6420 		"UNDEFINED",
6421 		"UNDEFINED",
6422 	};
6423 	struct net_device *dev = seq->private;
6424 	struct stmmac_priv *priv = netdev_priv(dev);
6425 
6426 	if (!priv->hw_cap_support) {
6427 		seq_printf(seq, "DMA HW features not supported\n");
6428 		return 0;
6429 	}
6430 
6431 	seq_printf(seq, "==============================\n");
6432 	seq_printf(seq, "\tDMA HW features\n");
6433 	seq_printf(seq, "==============================\n");
6434 
6435 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6436 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6437 	seq_printf(seq, "\t1000 Mbps: %s\n",
6438 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6439 	seq_printf(seq, "\tHalf duplex: %s\n",
6440 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6441 	if (priv->plat->has_xgmac) {
6442 		seq_printf(seq,
6443 			   "\tNumber of Additional MAC address registers: %d\n",
6444 			   priv->dma_cap.multi_addr);
6445 	} else {
6446 		seq_printf(seq, "\tHash Filter: %s\n",
6447 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6448 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6449 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6450 	}
6451 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6452 		   (priv->dma_cap.pcs) ? "Y" : "N");
6453 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6454 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6455 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6456 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6457 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6458 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6459 	seq_printf(seq, "\tRMON module: %s\n",
6460 		   (priv->dma_cap.rmon) ? "Y" : "N");
6461 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6462 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6463 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6464 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6465 	if (priv->plat->has_xgmac)
6466 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6467 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6468 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6469 		   (priv->dma_cap.eee) ? "Y" : "N");
6470 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6471 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6472 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6473 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6474 	    priv->plat->has_xgmac) {
6475 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6476 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6477 	} else {
6478 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6479 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6480 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6481 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6482 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6483 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6484 	}
6485 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6486 		   priv->dma_cap.number_rx_channel);
6487 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6488 		   priv->dma_cap.number_tx_channel);
6489 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6490 		   priv->dma_cap.number_rx_queues);
6491 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6492 		   priv->dma_cap.number_tx_queues);
6493 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6494 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6495 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6496 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6497 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6498 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6499 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6500 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6501 		   priv->dma_cap.pps_out_num);
6502 	seq_printf(seq, "\tSafety Features: %s\n",
6503 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6504 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6505 		   priv->dma_cap.frpsel ? "Y" : "N");
6506 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6507 		   priv->dma_cap.host_dma_width);
6508 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6509 		   priv->dma_cap.rssen ? "Y" : "N");
6510 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6511 		   priv->dma_cap.vlhash ? "Y" : "N");
6512 	seq_printf(seq, "\tSplit Header: %s\n",
6513 		   priv->dma_cap.sphen ? "Y" : "N");
6514 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6515 		   priv->dma_cap.vlins ? "Y" : "N");
6516 	seq_printf(seq, "\tDouble VLAN: %s\n",
6517 		   priv->dma_cap.dvlan ? "Y" : "N");
6518 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6519 		   priv->dma_cap.l3l4fnum);
6520 	seq_printf(seq, "\tARP Offloading: %s\n",
6521 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6522 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6523 		   priv->dma_cap.estsel ? "Y" : "N");
6524 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6525 		   priv->dma_cap.fpesel ? "Y" : "N");
6526 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6527 		   priv->dma_cap.tbssel ? "Y" : "N");
6528 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6529 		   priv->dma_cap.tbs_ch_num);
6530 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6531 		   priv->dma_cap.sgfsel ? "Y" : "N");
6532 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6533 		   BIT(priv->dma_cap.ttsfd) >> 1);
6534 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6535 		   priv->dma_cap.numtc);
6536 	seq_printf(seq, "\tDCB Feature: %s\n",
6537 		   priv->dma_cap.dcben ? "Y" : "N");
6538 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6539 		   priv->dma_cap.advthword ? "Y" : "N");
6540 	seq_printf(seq, "\tPTP Offload: %s\n",
6541 		   priv->dma_cap.ptoen ? "Y" : "N");
6542 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6543 		   priv->dma_cap.osten ? "Y" : "N");
6544 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6545 		   priv->dma_cap.pfcen ? "Y" : "N");
6546 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6547 		   BIT(priv->dma_cap.frpes) << 6);
6548 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6549 		   BIT(priv->dma_cap.frpbs) << 6);
6550 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6551 		   priv->dma_cap.frppipe_num);
6552 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6553 		   priv->dma_cap.nrvf_num ?
6554 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6555 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6556 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6557 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6558 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6559 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6560 		   priv->dma_cap.cbtisel ? "Y" : "N");
6561 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6562 		   priv->dma_cap.aux_snapshot_n);
6563 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6564 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6565 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6566 		   priv->dma_cap.edma ? "Y" : "N");
6567 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6568 		   priv->dma_cap.ediffc ? "Y" : "N");
6569 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6570 		   priv->dma_cap.vxn ? "Y" : "N");
6571 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6572 		   priv->dma_cap.dbgmem ? "Y" : "N");
6573 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6574 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6575 	return 0;
6576 }
6577 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6578 
6579 /* Use network device events to rename debugfs file entries.
6580  */
6581 static int stmmac_device_event(struct notifier_block *unused,
6582 			       unsigned long event, void *ptr)
6583 {
6584 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6585 	struct stmmac_priv *priv = netdev_priv(dev);
6586 
6587 	if (dev->netdev_ops != &stmmac_netdev_ops)
6588 		goto done;
6589 
6590 	switch (event) {
6591 	case NETDEV_CHANGENAME:
6592 		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6593 		break;
6594 	}
6595 done:
6596 	return NOTIFY_DONE;
6597 }
6598 
6599 static struct notifier_block stmmac_notifier = {
6600 	.notifier_call = stmmac_device_event,
6601 };
6602 
6603 static void stmmac_init_fs(struct net_device *dev)
6604 {
6605 	struct stmmac_priv *priv = netdev_priv(dev);
6606 
6607 	rtnl_lock();
6608 
6609 	/* Create per netdev entries */
6610 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6611 
6612 	/* Entry to report DMA RX/TX rings */
6613 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6614 			    &stmmac_rings_status_fops);
6615 
6616 	/* Entry to report the DMA HW features */
6617 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6618 			    &stmmac_dma_cap_fops);
6619 
6620 	rtnl_unlock();
6621 }
6622 
6623 static void stmmac_exit_fs(struct net_device *dev)
6624 {
6625 	struct stmmac_priv *priv = netdev_priv(dev);
6626 
6627 	debugfs_remove_recursive(priv->dbgfs_dir);
6628 }
6629 #endif /* CONFIG_DEBUG_FS */
6630 
6631 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6632 {
6633 	unsigned char *data = (unsigned char *)&vid_le;
6634 	unsigned char data_byte = 0;
6635 	u32 crc = ~0x0;
6636 	u32 temp = 0;
6637 	int i, bits;
6638 
6639 	bits = get_bitmask_order(VLAN_VID_MASK);
6640 	for (i = 0; i < bits; i++) {
6641 		if ((i % 8) == 0)
6642 			data_byte = data[i / 8];
6643 
6644 		temp = ((crc & 1) ^ data_byte) & 1;
6645 		crc >>= 1;
6646 		data_byte >>= 1;
6647 
6648 		if (temp)
6649 			crc ^= 0xedb88320;
6650 	}
6651 
6652 	return crc;
6653 }
6654 
6655 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6656 {
6657 	u32 crc, hash = 0;
6658 	u16 pmatch = 0;
6659 	int count = 0;
6660 	u16 vid = 0;
6661 
6662 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6663 		__le16 vid_le = cpu_to_le16(vid);
6664 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6665 		hash |= (1 << crc);
6666 		count++;
6667 	}
6668 
6669 	if (!priv->dma_cap.vlhash) {
6670 		if (count > 2) /* VID = 0 always passes filter */
6671 			return -EOPNOTSUPP;
6672 
6673 		pmatch = vid;
6674 		hash = 0;
6675 	}
6676 
6677 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6678 }
6679 
6680 /* FIXME: This may need RXC to be running, but it may be called with BH
6681  * disabled, which means we can't call phylink_rx_clk_stop*().
6682  */
6683 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6684 {
6685 	struct stmmac_priv *priv = netdev_priv(ndev);
6686 	bool is_double = false;
6687 	int ret;
6688 
6689 	ret = pm_runtime_resume_and_get(priv->device);
6690 	if (ret < 0)
6691 		return ret;
6692 
6693 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6694 		is_double = true;
6695 
6696 	set_bit(vid, priv->active_vlans);
6697 	ret = stmmac_vlan_update(priv, is_double);
6698 	if (ret) {
6699 		clear_bit(vid, priv->active_vlans);
6700 		goto err_pm_put;
6701 	}
6702 
6703 	if (priv->hw->num_vlan) {
6704 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6705 		if (ret)
6706 			goto err_pm_put;
6707 	}
6708 err_pm_put:
6709 	pm_runtime_put(priv->device);
6710 
6711 	return ret;
6712 }
6713 
6714 /* FIXME: This may need RXC to be running, but it may be called with BH
6715  * disabled, which means we can't call phylink_rx_clk_stop*().
6716  */
6717 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6718 {
6719 	struct stmmac_priv *priv = netdev_priv(ndev);
6720 	bool is_double = false;
6721 	int ret;
6722 
6723 	ret = pm_runtime_resume_and_get(priv->device);
6724 	if (ret < 0)
6725 		return ret;
6726 
6727 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6728 		is_double = true;
6729 
6730 	clear_bit(vid, priv->active_vlans);
6731 
6732 	if (priv->hw->num_vlan) {
6733 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6734 		if (ret)
6735 			goto del_vlan_error;
6736 	}
6737 
6738 	ret = stmmac_vlan_update(priv, is_double);
6739 
6740 del_vlan_error:
6741 	pm_runtime_put(priv->device);
6742 
6743 	return ret;
6744 }
6745 
6746 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6747 {
6748 	struct stmmac_priv *priv = netdev_priv(dev);
6749 
6750 	switch (bpf->command) {
6751 	case XDP_SETUP_PROG:
6752 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6753 	case XDP_SETUP_XSK_POOL:
6754 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6755 					     bpf->xsk.queue_id);
6756 	default:
6757 		return -EOPNOTSUPP;
6758 	}
6759 }
6760 
6761 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6762 			   struct xdp_frame **frames, u32 flags)
6763 {
6764 	struct stmmac_priv *priv = netdev_priv(dev);
6765 	int cpu = smp_processor_id();
6766 	struct netdev_queue *nq;
6767 	int i, nxmit = 0;
6768 	int queue;
6769 
6770 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6771 		return -ENETDOWN;
6772 
6773 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6774 		return -EINVAL;
6775 
6776 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6777 	nq = netdev_get_tx_queue(priv->dev, queue);
6778 
6779 	__netif_tx_lock(nq, cpu);
6780 	/* Avoids TX time-out as we are sharing with slow path */
6781 	txq_trans_cond_update(nq);
6782 
6783 	for (i = 0; i < num_frames; i++) {
6784 		int res;
6785 
6786 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6787 		if (res == STMMAC_XDP_CONSUMED)
6788 			break;
6789 
6790 		nxmit++;
6791 	}
6792 
6793 	if (flags & XDP_XMIT_FLUSH) {
6794 		stmmac_flush_tx_descriptors(priv, queue);
6795 		stmmac_tx_timer_arm(priv, queue);
6796 	}
6797 
6798 	__netif_tx_unlock(nq);
6799 
6800 	return nxmit;
6801 }
6802 
6803 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6804 {
6805 	struct stmmac_channel *ch = &priv->channel[queue];
6806 	unsigned long flags;
6807 
6808 	spin_lock_irqsave(&ch->lock, flags);
6809 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6810 	spin_unlock_irqrestore(&ch->lock, flags);
6811 
6812 	stmmac_stop_rx_dma(priv, queue);
6813 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6814 }
6815 
6816 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6817 {
6818 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6819 	struct stmmac_channel *ch = &priv->channel[queue];
6820 	unsigned long flags;
6821 	u32 buf_size;
6822 	int ret;
6823 
6824 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6825 	if (ret) {
6826 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6827 		return;
6828 	}
6829 
6830 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6831 	if (ret) {
6832 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6833 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6834 		return;
6835 	}
6836 
6837 	stmmac_reset_rx_queue(priv, queue);
6838 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6839 
6840 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6841 			    rx_q->dma_rx_phy, rx_q->queue_index);
6842 
6843 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6844 			     sizeof(struct dma_desc));
6845 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6846 			       rx_q->rx_tail_addr, rx_q->queue_index);
6847 
6848 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6849 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6850 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6851 				      buf_size,
6852 				      rx_q->queue_index);
6853 	} else {
6854 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6855 				      priv->dma_conf.dma_buf_sz,
6856 				      rx_q->queue_index);
6857 	}
6858 
6859 	stmmac_start_rx_dma(priv, queue);
6860 
6861 	spin_lock_irqsave(&ch->lock, flags);
6862 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6863 	spin_unlock_irqrestore(&ch->lock, flags);
6864 }
6865 
6866 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6867 {
6868 	struct stmmac_channel *ch = &priv->channel[queue];
6869 	unsigned long flags;
6870 
6871 	spin_lock_irqsave(&ch->lock, flags);
6872 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6873 	spin_unlock_irqrestore(&ch->lock, flags);
6874 
6875 	stmmac_stop_tx_dma(priv, queue);
6876 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6877 }
6878 
6879 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6880 {
6881 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6882 	struct stmmac_channel *ch = &priv->channel[queue];
6883 	unsigned long flags;
6884 	int ret;
6885 
6886 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6887 	if (ret) {
6888 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6889 		return;
6890 	}
6891 
6892 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6893 	if (ret) {
6894 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6895 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6896 		return;
6897 	}
6898 
6899 	stmmac_reset_tx_queue(priv, queue);
6900 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6901 
6902 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6903 			    tx_q->dma_tx_phy, tx_q->queue_index);
6904 
6905 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6906 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6907 
6908 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6909 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6910 			       tx_q->tx_tail_addr, tx_q->queue_index);
6911 
6912 	stmmac_start_tx_dma(priv, queue);
6913 
6914 	spin_lock_irqsave(&ch->lock, flags);
6915 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6916 	spin_unlock_irqrestore(&ch->lock, flags);
6917 }
6918 
6919 void stmmac_xdp_release(struct net_device *dev)
6920 {
6921 	struct stmmac_priv *priv = netdev_priv(dev);
6922 	u32 chan;
6923 
6924 	/* Ensure tx function is not running */
6925 	netif_tx_disable(dev);
6926 
6927 	/* Disable NAPI process */
6928 	stmmac_disable_all_queues(priv);
6929 
6930 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6931 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6932 
6933 	/* Free the IRQ lines */
6934 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6935 
6936 	/* Stop TX/RX DMA channels */
6937 	stmmac_stop_all_dma(priv);
6938 
6939 	/* Release and free the Rx/Tx resources */
6940 	free_dma_desc_resources(priv, &priv->dma_conf);
6941 
6942 	/* Disable the MAC Rx/Tx */
6943 	stmmac_mac_set(priv, priv->ioaddr, false);
6944 
6945 	/* set trans_start so we don't get spurious
6946 	 * watchdogs during reset
6947 	 */
6948 	netif_trans_update(dev);
6949 	netif_carrier_off(dev);
6950 }
6951 
6952 int stmmac_xdp_open(struct net_device *dev)
6953 {
6954 	struct stmmac_priv *priv = netdev_priv(dev);
6955 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6956 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6957 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6958 	struct stmmac_rx_queue *rx_q;
6959 	struct stmmac_tx_queue *tx_q;
6960 	u32 buf_size;
6961 	bool sph_en;
6962 	u32 chan;
6963 	int ret;
6964 
6965 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6966 	if (ret < 0) {
6967 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6968 			   __func__);
6969 		goto dma_desc_error;
6970 	}
6971 
6972 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6973 	if (ret < 0) {
6974 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6975 			   __func__);
6976 		goto init_error;
6977 	}
6978 
6979 	stmmac_reset_queues_param(priv);
6980 
6981 	/* DMA CSR Channel configuration */
6982 	for (chan = 0; chan < dma_csr_ch; chan++) {
6983 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6984 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6985 	}
6986 
6987 	/* Adjust Split header */
6988 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6989 
6990 	/* DMA RX Channel Configuration */
6991 	for (chan = 0; chan < rx_cnt; chan++) {
6992 		rx_q = &priv->dma_conf.rx_queue[chan];
6993 
6994 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6995 				    rx_q->dma_rx_phy, chan);
6996 
6997 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6998 				     (rx_q->buf_alloc_num *
6999 				      sizeof(struct dma_desc));
7000 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
7001 				       rx_q->rx_tail_addr, chan);
7002 
7003 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
7004 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
7005 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7006 					      buf_size,
7007 					      rx_q->queue_index);
7008 		} else {
7009 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7010 					      priv->dma_conf.dma_buf_sz,
7011 					      rx_q->queue_index);
7012 		}
7013 
7014 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
7015 	}
7016 
7017 	/* DMA TX Channel Configuration */
7018 	for (chan = 0; chan < tx_cnt; chan++) {
7019 		tx_q = &priv->dma_conf.tx_queue[chan];
7020 
7021 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7022 				    tx_q->dma_tx_phy, chan);
7023 
7024 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7025 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7026 				       tx_q->tx_tail_addr, chan);
7027 
7028 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7029 	}
7030 
7031 	/* Enable the MAC Rx/Tx */
7032 	stmmac_mac_set(priv, priv->ioaddr, true);
7033 
7034 	/* Start Rx & Tx DMA Channels */
7035 	stmmac_start_all_dma(priv);
7036 
7037 	ret = stmmac_request_irq(dev);
7038 	if (ret)
7039 		goto irq_error;
7040 
7041 	/* Enable NAPI process*/
7042 	stmmac_enable_all_queues(priv);
7043 	netif_carrier_on(dev);
7044 	netif_tx_start_all_queues(dev);
7045 	stmmac_enable_all_dma_irq(priv);
7046 
7047 	return 0;
7048 
7049 irq_error:
7050 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7051 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7052 
7053 	stmmac_hw_teardown(dev);
7054 init_error:
7055 	free_dma_desc_resources(priv, &priv->dma_conf);
7056 dma_desc_error:
7057 	return ret;
7058 }
7059 
7060 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7061 {
7062 	struct stmmac_priv *priv = netdev_priv(dev);
7063 	struct stmmac_rx_queue *rx_q;
7064 	struct stmmac_tx_queue *tx_q;
7065 	struct stmmac_channel *ch;
7066 
7067 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7068 	    !netif_carrier_ok(priv->dev))
7069 		return -ENETDOWN;
7070 
7071 	if (!stmmac_xdp_is_enabled(priv))
7072 		return -EINVAL;
7073 
7074 	if (queue >= priv->plat->rx_queues_to_use ||
7075 	    queue >= priv->plat->tx_queues_to_use)
7076 		return -EINVAL;
7077 
7078 	rx_q = &priv->dma_conf.rx_queue[queue];
7079 	tx_q = &priv->dma_conf.tx_queue[queue];
7080 	ch = &priv->channel[queue];
7081 
7082 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7083 		return -EINVAL;
7084 
7085 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7086 		/* EQoS does not have per-DMA channel SW interrupt,
7087 		 * so we schedule RX Napi straight-away.
7088 		 */
7089 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7090 			__napi_schedule(&ch->rxtx_napi);
7091 	}
7092 
7093 	return 0;
7094 }
7095 
7096 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7097 {
7098 	struct stmmac_priv *priv = netdev_priv(dev);
7099 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7100 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7101 	unsigned int start;
7102 	int q;
7103 
7104 	for (q = 0; q < tx_cnt; q++) {
7105 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7106 		u64 tx_packets;
7107 		u64 tx_bytes;
7108 
7109 		do {
7110 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7111 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7112 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7113 		do {
7114 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7115 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7116 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7117 
7118 		stats->tx_packets += tx_packets;
7119 		stats->tx_bytes += tx_bytes;
7120 	}
7121 
7122 	for (q = 0; q < rx_cnt; q++) {
7123 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7124 		u64 rx_packets;
7125 		u64 rx_bytes;
7126 
7127 		do {
7128 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7129 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7130 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7131 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7132 
7133 		stats->rx_packets += rx_packets;
7134 		stats->rx_bytes += rx_bytes;
7135 	}
7136 
7137 	stats->rx_dropped = priv->xstats.rx_dropped;
7138 	stats->rx_errors = priv->xstats.rx_errors;
7139 	stats->tx_dropped = priv->xstats.tx_dropped;
7140 	stats->tx_errors = priv->xstats.tx_errors;
7141 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7142 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7143 	stats->rx_length_errors = priv->xstats.rx_length;
7144 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7145 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7146 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7147 }
7148 
7149 static const struct net_device_ops stmmac_netdev_ops = {
7150 	.ndo_open = stmmac_open,
7151 	.ndo_start_xmit = stmmac_xmit,
7152 	.ndo_stop = stmmac_release,
7153 	.ndo_change_mtu = stmmac_change_mtu,
7154 	.ndo_fix_features = stmmac_fix_features,
7155 	.ndo_set_features = stmmac_set_features,
7156 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7157 	.ndo_tx_timeout = stmmac_tx_timeout,
7158 	.ndo_eth_ioctl = stmmac_ioctl,
7159 	.ndo_get_stats64 = stmmac_get_stats64,
7160 	.ndo_setup_tc = stmmac_setup_tc,
7161 	.ndo_select_queue = stmmac_select_queue,
7162 	.ndo_set_mac_address = stmmac_set_mac_address,
7163 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7164 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7165 	.ndo_bpf = stmmac_bpf,
7166 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7167 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7168 	.ndo_hwtstamp_get = stmmac_hwtstamp_get,
7169 	.ndo_hwtstamp_set = stmmac_hwtstamp_set,
7170 };
7171 
7172 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7173 {
7174 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7175 		return;
7176 	if (test_bit(STMMAC_DOWN, &priv->state))
7177 		return;
7178 
7179 	netdev_err(priv->dev, "Reset adapter.\n");
7180 
7181 	rtnl_lock();
7182 	netif_trans_update(priv->dev);
7183 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7184 		usleep_range(1000, 2000);
7185 
7186 	set_bit(STMMAC_DOWN, &priv->state);
7187 	dev_close(priv->dev);
7188 	dev_open(priv->dev, NULL);
7189 	clear_bit(STMMAC_DOWN, &priv->state);
7190 	clear_bit(STMMAC_RESETING, &priv->state);
7191 	rtnl_unlock();
7192 }
7193 
7194 static void stmmac_service_task(struct work_struct *work)
7195 {
7196 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7197 			service_task);
7198 
7199 	stmmac_reset_subtask(priv);
7200 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7201 }
7202 
7203 /**
7204  *  stmmac_hw_init - Init the MAC device
7205  *  @priv: driver private structure
7206  *  Description: this function is to configure the MAC device according to
7207  *  some platform parameters or the HW capability register. It prepares the
7208  *  driver to use either ring or chain modes and to setup either enhanced or
7209  *  normal descriptors.
7210  */
7211 static int stmmac_hw_init(struct stmmac_priv *priv)
7212 {
7213 	int ret;
7214 
7215 	/* dwmac-sun8i only work in chain mode */
7216 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7217 		chain_mode = 1;
7218 	priv->chain_mode = chain_mode;
7219 
7220 	/* Initialize HW Interface */
7221 	ret = stmmac_hwif_init(priv);
7222 	if (ret)
7223 		return ret;
7224 
7225 	/* Get the HW capability (new GMAC newer than 3.50a) */
7226 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7227 	if (priv->hw_cap_support) {
7228 		dev_info(priv->device, "DMA HW capability register supported\n");
7229 
7230 		/* We can override some gmac/dma configuration fields: e.g.
7231 		 * enh_desc, tx_coe (e.g. that are passed through the
7232 		 * platform) with the values from the HW capability
7233 		 * register (if supported).
7234 		 */
7235 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7236 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7237 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7238 		priv->hw->pmt = priv->plat->pmt;
7239 		if (priv->dma_cap.hash_tb_sz) {
7240 			priv->hw->multicast_filter_bins =
7241 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7242 			priv->hw->mcast_bits_log2 =
7243 					ilog2(priv->hw->multicast_filter_bins);
7244 		}
7245 
7246 		/* TXCOE doesn't work in thresh DMA mode */
7247 		if (priv->plat->force_thresh_dma_mode)
7248 			priv->plat->tx_coe = 0;
7249 		else
7250 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7251 
7252 		/* In case of GMAC4 rx_coe is from HW cap register. */
7253 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7254 
7255 		if (priv->dma_cap.rx_coe_type2)
7256 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7257 		else if (priv->dma_cap.rx_coe_type1)
7258 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7259 
7260 	} else {
7261 		dev_info(priv->device, "No HW DMA feature register supported\n");
7262 	}
7263 
7264 	if (priv->plat->rx_coe) {
7265 		priv->hw->rx_csum = priv->plat->rx_coe;
7266 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7267 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7268 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7269 	}
7270 	if (priv->plat->tx_coe)
7271 		dev_info(priv->device, "TX Checksum insertion supported\n");
7272 
7273 	if (priv->plat->pmt) {
7274 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7275 		device_set_wakeup_capable(priv->device, 1);
7276 	}
7277 
7278 	if (priv->dma_cap.tsoen)
7279 		dev_info(priv->device, "TSO supported\n");
7280 
7281 	if (priv->dma_cap.number_rx_queues &&
7282 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7283 		dev_warn(priv->device,
7284 			 "Number of Rx queues (%u) exceeds dma capability\n",
7285 			 priv->plat->rx_queues_to_use);
7286 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7287 	}
7288 	if (priv->dma_cap.number_tx_queues &&
7289 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7290 		dev_warn(priv->device,
7291 			 "Number of Tx queues (%u) exceeds dma capability\n",
7292 			 priv->plat->tx_queues_to_use);
7293 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7294 	}
7295 
7296 	if (priv->dma_cap.rx_fifo_size &&
7297 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7298 		dev_warn(priv->device,
7299 			 "Rx FIFO size (%u) exceeds dma capability\n",
7300 			 priv->plat->rx_fifo_size);
7301 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7302 	}
7303 	if (priv->dma_cap.tx_fifo_size &&
7304 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7305 		dev_warn(priv->device,
7306 			 "Tx FIFO size (%u) exceeds dma capability\n",
7307 			 priv->plat->tx_fifo_size);
7308 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7309 	}
7310 
7311 	priv->hw->vlan_fail_q_en =
7312 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7313 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7314 
7315 	/* Run HW quirks, if any */
7316 	if (priv->hwif_quirks) {
7317 		ret = priv->hwif_quirks(priv);
7318 		if (ret)
7319 			return ret;
7320 	}
7321 
7322 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7323 	 * In some case, for example on bugged HW this feature
7324 	 * has to be disable and this can be done by passing the
7325 	 * riwt_off field from the platform.
7326 	 */
7327 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7328 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7329 		priv->use_riwt = 1;
7330 		dev_info(priv->device,
7331 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7332 	}
7333 
7334 	return 0;
7335 }
7336 
7337 static void stmmac_napi_add(struct net_device *dev)
7338 {
7339 	struct stmmac_priv *priv = netdev_priv(dev);
7340 	u32 queue, maxq;
7341 
7342 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7343 
7344 	for (queue = 0; queue < maxq; queue++) {
7345 		struct stmmac_channel *ch = &priv->channel[queue];
7346 
7347 		ch->priv_data = priv;
7348 		ch->index = queue;
7349 		spin_lock_init(&ch->lock);
7350 
7351 		if (queue < priv->plat->rx_queues_to_use) {
7352 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7353 		}
7354 		if (queue < priv->plat->tx_queues_to_use) {
7355 			netif_napi_add_tx(dev, &ch->tx_napi,
7356 					  stmmac_napi_poll_tx);
7357 		}
7358 		if (queue < priv->plat->rx_queues_to_use &&
7359 		    queue < priv->plat->tx_queues_to_use) {
7360 			netif_napi_add(dev, &ch->rxtx_napi,
7361 				       stmmac_napi_poll_rxtx);
7362 		}
7363 	}
7364 }
7365 
7366 static void stmmac_napi_del(struct net_device *dev)
7367 {
7368 	struct stmmac_priv *priv = netdev_priv(dev);
7369 	u32 queue, maxq;
7370 
7371 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7372 
7373 	for (queue = 0; queue < maxq; queue++) {
7374 		struct stmmac_channel *ch = &priv->channel[queue];
7375 
7376 		if (queue < priv->plat->rx_queues_to_use)
7377 			netif_napi_del(&ch->rx_napi);
7378 		if (queue < priv->plat->tx_queues_to_use)
7379 			netif_napi_del(&ch->tx_napi);
7380 		if (queue < priv->plat->rx_queues_to_use &&
7381 		    queue < priv->plat->tx_queues_to_use) {
7382 			netif_napi_del(&ch->rxtx_napi);
7383 		}
7384 	}
7385 }
7386 
7387 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7388 {
7389 	struct stmmac_priv *priv = netdev_priv(dev);
7390 	int ret = 0, i;
7391 
7392 	if (netif_running(dev))
7393 		stmmac_release(dev);
7394 
7395 	stmmac_napi_del(dev);
7396 
7397 	priv->plat->rx_queues_to_use = rx_cnt;
7398 	priv->plat->tx_queues_to_use = tx_cnt;
7399 	if (!netif_is_rxfh_configured(dev))
7400 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7401 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7402 									rx_cnt);
7403 
7404 	stmmac_napi_add(dev);
7405 
7406 	if (netif_running(dev))
7407 		ret = stmmac_open(dev);
7408 
7409 	return ret;
7410 }
7411 
7412 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7413 {
7414 	struct stmmac_priv *priv = netdev_priv(dev);
7415 	int ret = 0;
7416 
7417 	if (netif_running(dev))
7418 		stmmac_release(dev);
7419 
7420 	priv->dma_conf.dma_rx_size = rx_size;
7421 	priv->dma_conf.dma_tx_size = tx_size;
7422 
7423 	if (netif_running(dev))
7424 		ret = stmmac_open(dev);
7425 
7426 	return ret;
7427 }
7428 
7429 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7430 {
7431 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7432 	struct dma_desc *desc_contains_ts = ctx->desc;
7433 	struct stmmac_priv *priv = ctx->priv;
7434 	struct dma_desc *ndesc = ctx->ndesc;
7435 	struct dma_desc *desc = ctx->desc;
7436 	u64 ns = 0;
7437 
7438 	if (!priv->hwts_rx_en)
7439 		return -ENODATA;
7440 
7441 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7442 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7443 		desc_contains_ts = ndesc;
7444 
7445 	/* Check if timestamp is available */
7446 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7447 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7448 		ns -= priv->plat->cdc_error_adj;
7449 		*timestamp = ns_to_ktime(ns);
7450 		return 0;
7451 	}
7452 
7453 	return -ENODATA;
7454 }
7455 
7456 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7457 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7458 };
7459 
7460 /**
7461  * stmmac_dvr_probe
7462  * @device: device pointer
7463  * @plat_dat: platform data pointer
7464  * @res: stmmac resource pointer
7465  * Description: this is the main probe function used to
7466  * call the alloc_etherdev, allocate the priv structure.
7467  * Return:
7468  * returns 0 on success, otherwise errno.
7469  */
7470 int stmmac_dvr_probe(struct device *device,
7471 		     struct plat_stmmacenet_data *plat_dat,
7472 		     struct stmmac_resources *res)
7473 {
7474 	struct net_device *ndev = NULL;
7475 	struct stmmac_priv *priv;
7476 	u32 rxq;
7477 	int i, ret = 0;
7478 
7479 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7480 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7481 	if (!ndev)
7482 		return -ENOMEM;
7483 
7484 	SET_NETDEV_DEV(ndev, device);
7485 
7486 	priv = netdev_priv(ndev);
7487 	priv->device = device;
7488 	priv->dev = ndev;
7489 
7490 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7491 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7492 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7493 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7494 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7495 	}
7496 
7497 	priv->xstats.pcpu_stats =
7498 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7499 	if (!priv->xstats.pcpu_stats)
7500 		return -ENOMEM;
7501 
7502 	stmmac_set_ethtool_ops(ndev);
7503 	priv->pause_time = pause;
7504 	priv->plat = plat_dat;
7505 	priv->ioaddr = res->addr;
7506 	priv->dev->base_addr = (unsigned long)res->addr;
7507 	priv->plat->dma_cfg->multi_msi_en =
7508 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7509 
7510 	priv->dev->irq = res->irq;
7511 	priv->wol_irq = res->wol_irq;
7512 	priv->lpi_irq = res->lpi_irq;
7513 	priv->sfty_irq = res->sfty_irq;
7514 	priv->sfty_ce_irq = res->sfty_ce_irq;
7515 	priv->sfty_ue_irq = res->sfty_ue_irq;
7516 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7517 		priv->rx_irq[i] = res->rx_irq[i];
7518 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7519 		priv->tx_irq[i] = res->tx_irq[i];
7520 
7521 	if (!is_zero_ether_addr(res->mac))
7522 		eth_hw_addr_set(priv->dev, res->mac);
7523 
7524 	dev_set_drvdata(device, priv->dev);
7525 
7526 	/* Verify driver arguments */
7527 	stmmac_verify_args();
7528 
7529 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7530 	if (!priv->af_xdp_zc_qps)
7531 		return -ENOMEM;
7532 
7533 	/* Allocate workqueue */
7534 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7535 	if (!priv->wq) {
7536 		dev_err(priv->device, "failed to create workqueue\n");
7537 		ret = -ENOMEM;
7538 		goto error_wq_init;
7539 	}
7540 
7541 	INIT_WORK(&priv->service_task, stmmac_service_task);
7542 
7543 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7544 
7545 	/* Override with kernel parameters if supplied XXX CRS XXX
7546 	 * this needs to have multiple instances
7547 	 */
7548 	if ((phyaddr >= 0) && (phyaddr <= 31))
7549 		priv->plat->phy_addr = phyaddr;
7550 
7551 	if (priv->plat->stmmac_rst) {
7552 		ret = reset_control_assert(priv->plat->stmmac_rst);
7553 		reset_control_deassert(priv->plat->stmmac_rst);
7554 		/* Some reset controllers have only reset callback instead of
7555 		 * assert + deassert callbacks pair.
7556 		 */
7557 		if (ret == -ENOTSUPP)
7558 			reset_control_reset(priv->plat->stmmac_rst);
7559 	}
7560 
7561 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7562 	if (ret == -ENOTSUPP)
7563 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7564 			ERR_PTR(ret));
7565 
7566 	/* Wait a bit for the reset to take effect */
7567 	udelay(10);
7568 
7569 	/* Init MAC and get the capabilities */
7570 	ret = stmmac_hw_init(priv);
7571 	if (ret)
7572 		goto error_hw_init;
7573 
7574 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7575 	 */
7576 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7577 		priv->plat->dma_cfg->dche = false;
7578 
7579 	stmmac_check_ether_addr(priv);
7580 
7581 	ndev->netdev_ops = &stmmac_netdev_ops;
7582 
7583 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7584 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7585 
7586 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7587 			    NETIF_F_RXCSUM;
7588 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7589 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7590 
7591 	ret = stmmac_tc_init(priv, priv);
7592 	if (!ret) {
7593 		ndev->hw_features |= NETIF_F_HW_TC;
7594 	}
7595 
7596 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7597 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7598 		if (priv->plat->has_gmac4)
7599 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7600 		priv->tso = true;
7601 		dev_info(priv->device, "TSO feature enabled\n");
7602 	}
7603 
7604 	if (priv->dma_cap.sphen &&
7605 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7606 		ndev->hw_features |= NETIF_F_GRO;
7607 		priv->sph_cap = true;
7608 		priv->sph = priv->sph_cap;
7609 		dev_info(priv->device, "SPH feature enabled\n");
7610 	}
7611 
7612 	/* Ideally our host DMA address width is the same as for the
7613 	 * device. However, it may differ and then we have to use our
7614 	 * host DMA width for allocation and the device DMA width for
7615 	 * register handling.
7616 	 */
7617 	if (priv->plat->host_dma_width)
7618 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7619 	else
7620 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7621 
7622 	if (priv->dma_cap.host_dma_width) {
7623 		ret = dma_set_mask_and_coherent(device,
7624 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7625 		if (!ret) {
7626 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7627 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7628 
7629 			/*
7630 			 * If more than 32 bits can be addressed, make sure to
7631 			 * enable enhanced addressing mode.
7632 			 */
7633 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7634 				priv->plat->dma_cfg->eame = true;
7635 		} else {
7636 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7637 			if (ret) {
7638 				dev_err(priv->device, "Failed to set DMA Mask\n");
7639 				goto error_hw_init;
7640 			}
7641 
7642 			priv->dma_cap.host_dma_width = 32;
7643 		}
7644 	}
7645 
7646 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7647 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7648 #ifdef STMMAC_VLAN_TAG_USED
7649 	/* Both mac100 and gmac support receive VLAN tag detection */
7650 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7651 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
7652 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7653 		priv->hw->hw_vlan_en = true;
7654 	}
7655 	if (priv->dma_cap.vlhash) {
7656 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7657 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7658 	}
7659 	if (priv->dma_cap.vlins) {
7660 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7661 		if (priv->dma_cap.dvlan)
7662 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7663 	}
7664 #endif
7665 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7666 
7667 	priv->xstats.threshold = tc;
7668 
7669 	/* Initialize RSS */
7670 	rxq = priv->plat->rx_queues_to_use;
7671 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7672 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7673 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7674 
7675 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7676 		ndev->features |= NETIF_F_RXHASH;
7677 
7678 	ndev->vlan_features |= ndev->features;
7679 
7680 	/* MTU range: 46 - hw-specific max */
7681 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7682 	if (priv->plat->has_xgmac)
7683 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7684 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7685 		ndev->max_mtu = JUMBO_LEN;
7686 	else
7687 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7688 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7689 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7690 	 */
7691 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7692 	    (priv->plat->maxmtu >= ndev->min_mtu))
7693 		ndev->max_mtu = priv->plat->maxmtu;
7694 	else if (priv->plat->maxmtu < ndev->min_mtu)
7695 		dev_warn(priv->device,
7696 			 "%s: warning: maxmtu having invalid value (%d)\n",
7697 			 __func__, priv->plat->maxmtu);
7698 
7699 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7700 
7701 	/* Setup channels NAPI */
7702 	stmmac_napi_add(ndev);
7703 
7704 	mutex_init(&priv->lock);
7705 
7706 	stmmac_fpe_init(priv);
7707 
7708 	/* If a specific clk_csr value is passed from the platform
7709 	 * this means that the CSR Clock Range selection cannot be
7710 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7711 	 * set the MDC clock dynamically according to the csr actual
7712 	 * clock input.
7713 	 */
7714 	if (priv->plat->clk_csr >= 0)
7715 		priv->clk_csr = priv->plat->clk_csr;
7716 	else
7717 		stmmac_clk_csr_set(priv);
7718 
7719 	stmmac_check_pcs_mode(priv);
7720 
7721 	pm_runtime_get_noresume(device);
7722 	pm_runtime_set_active(device);
7723 	if (!pm_runtime_enabled(device))
7724 		pm_runtime_enable(device);
7725 
7726 	ret = stmmac_mdio_register(ndev);
7727 	if (ret < 0) {
7728 		dev_err_probe(priv->device, ret,
7729 			      "MDIO bus (id: %d) registration failed\n",
7730 			      priv->plat->bus_id);
7731 		goto error_mdio_register;
7732 	}
7733 
7734 	ret = stmmac_pcs_setup(ndev);
7735 	if (ret)
7736 		goto error_pcs_setup;
7737 
7738 	ret = stmmac_phy_setup(priv);
7739 	if (ret) {
7740 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7741 		goto error_phy_setup;
7742 	}
7743 
7744 	ret = register_netdev(ndev);
7745 	if (ret) {
7746 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7747 			__func__, ret);
7748 		goto error_netdev_register;
7749 	}
7750 
7751 #ifdef CONFIG_DEBUG_FS
7752 	stmmac_init_fs(ndev);
7753 #endif
7754 
7755 	if (priv->plat->dump_debug_regs)
7756 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7757 
7758 	/* Let pm_runtime_put() disable the clocks.
7759 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7760 	 */
7761 	pm_runtime_put(device);
7762 
7763 	return ret;
7764 
7765 error_netdev_register:
7766 	phylink_destroy(priv->phylink);
7767 error_phy_setup:
7768 	stmmac_pcs_clean(ndev);
7769 error_pcs_setup:
7770 	stmmac_mdio_unregister(ndev);
7771 error_mdio_register:
7772 	stmmac_napi_del(ndev);
7773 error_hw_init:
7774 	destroy_workqueue(priv->wq);
7775 error_wq_init:
7776 	bitmap_free(priv->af_xdp_zc_qps);
7777 
7778 	return ret;
7779 }
7780 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7781 
7782 /**
7783  * stmmac_dvr_remove
7784  * @dev: device pointer
7785  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7786  * changes the link status, releases the DMA descriptor rings.
7787  */
7788 void stmmac_dvr_remove(struct device *dev)
7789 {
7790 	struct net_device *ndev = dev_get_drvdata(dev);
7791 	struct stmmac_priv *priv = netdev_priv(ndev);
7792 
7793 	netdev_info(priv->dev, "%s: removing driver", __func__);
7794 
7795 	pm_runtime_get_sync(dev);
7796 
7797 	unregister_netdev(ndev);
7798 
7799 #ifdef CONFIG_DEBUG_FS
7800 	stmmac_exit_fs(ndev);
7801 #endif
7802 	phylink_destroy(priv->phylink);
7803 	if (priv->plat->stmmac_rst)
7804 		reset_control_assert(priv->plat->stmmac_rst);
7805 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7806 
7807 	stmmac_pcs_clean(ndev);
7808 	stmmac_mdio_unregister(ndev);
7809 
7810 	destroy_workqueue(priv->wq);
7811 	mutex_destroy(&priv->lock);
7812 	bitmap_free(priv->af_xdp_zc_qps);
7813 
7814 	pm_runtime_disable(dev);
7815 	pm_runtime_put_noidle(dev);
7816 }
7817 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7818 
7819 /**
7820  * stmmac_suspend - suspend callback
7821  * @dev: device pointer
7822  * Description: this is the function to suspend the device and it is called
7823  * by the platform driver to stop the network queue, release the resources,
7824  * program the PMT register (for WoL), clean and release driver resources.
7825  */
7826 int stmmac_suspend(struct device *dev)
7827 {
7828 	struct net_device *ndev = dev_get_drvdata(dev);
7829 	struct stmmac_priv *priv = netdev_priv(ndev);
7830 	u32 chan;
7831 
7832 	if (!ndev || !netif_running(ndev))
7833 		return 0;
7834 
7835 	mutex_lock(&priv->lock);
7836 
7837 	netif_device_detach(ndev);
7838 
7839 	stmmac_disable_all_queues(priv);
7840 
7841 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7842 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7843 
7844 	if (priv->eee_sw_timer_en) {
7845 		priv->tx_path_in_lpi_mode = false;
7846 		timer_delete_sync(&priv->eee_ctrl_timer);
7847 	}
7848 
7849 	/* Stop TX/RX DMA */
7850 	stmmac_stop_all_dma(priv);
7851 
7852 	if (priv->plat->serdes_powerdown)
7853 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7854 
7855 	/* Enable Power down mode by programming the PMT regs */
7856 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7857 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7858 		priv->irq_wake = 1;
7859 	} else {
7860 		stmmac_mac_set(priv, priv->ioaddr, false);
7861 		pinctrl_pm_select_sleep_state(priv->device);
7862 	}
7863 
7864 	mutex_unlock(&priv->lock);
7865 
7866 	rtnl_lock();
7867 	if (device_may_wakeup(priv->device) && !priv->plat->pmt)
7868 		phylink_speed_down(priv->phylink, false);
7869 
7870 	phylink_suspend(priv->phylink,
7871 			device_may_wakeup(priv->device) && priv->plat->pmt);
7872 	rtnl_unlock();
7873 
7874 	if (stmmac_fpe_supported(priv))
7875 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
7876 
7877 	return 0;
7878 }
7879 EXPORT_SYMBOL_GPL(stmmac_suspend);
7880 
7881 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7882 {
7883 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7884 
7885 	rx_q->cur_rx = 0;
7886 	rx_q->dirty_rx = 0;
7887 }
7888 
7889 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7890 {
7891 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7892 
7893 	tx_q->cur_tx = 0;
7894 	tx_q->dirty_tx = 0;
7895 	tx_q->mss = 0;
7896 
7897 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7898 }
7899 
7900 /**
7901  * stmmac_reset_queues_param - reset queue parameters
7902  * @priv: device pointer
7903  */
7904 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7905 {
7906 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7907 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7908 	u32 queue;
7909 
7910 	for (queue = 0; queue < rx_cnt; queue++)
7911 		stmmac_reset_rx_queue(priv, queue);
7912 
7913 	for (queue = 0; queue < tx_cnt; queue++)
7914 		stmmac_reset_tx_queue(priv, queue);
7915 }
7916 
7917 /**
7918  * stmmac_resume - resume callback
7919  * @dev: device pointer
7920  * Description: when resume this function is invoked to setup the DMA and CORE
7921  * in a usable state.
7922  */
7923 int stmmac_resume(struct device *dev)
7924 {
7925 	struct net_device *ndev = dev_get_drvdata(dev);
7926 	struct stmmac_priv *priv = netdev_priv(ndev);
7927 	int ret;
7928 
7929 	if (!netif_running(ndev))
7930 		return 0;
7931 
7932 	/* Power Down bit, into the PM register, is cleared
7933 	 * automatically as soon as a magic packet or a Wake-up frame
7934 	 * is received. Anyway, it's better to manually clear
7935 	 * this bit because it can generate problems while resuming
7936 	 * from another devices (e.g. serial console).
7937 	 */
7938 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7939 		mutex_lock(&priv->lock);
7940 		stmmac_pmt(priv, priv->hw, 0);
7941 		mutex_unlock(&priv->lock);
7942 		priv->irq_wake = 0;
7943 	} else {
7944 		pinctrl_pm_select_default_state(priv->device);
7945 		/* reset the phy so that it's ready */
7946 		if (priv->mii)
7947 			stmmac_mdio_reset(priv->mii);
7948 	}
7949 
7950 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7951 	    priv->plat->serdes_powerup) {
7952 		ret = priv->plat->serdes_powerup(ndev,
7953 						 priv->plat->bsp_priv);
7954 
7955 		if (ret < 0)
7956 			return ret;
7957 	}
7958 
7959 	rtnl_lock();
7960 
7961 	/* Prepare the PHY to resume, ensuring that its clocks which are
7962 	 * necessary for the MAC DMA reset to complete are running
7963 	 */
7964 	phylink_prepare_resume(priv->phylink);
7965 
7966 	mutex_lock(&priv->lock);
7967 
7968 	stmmac_reset_queues_param(priv);
7969 
7970 	stmmac_free_tx_skbufs(priv);
7971 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7972 
7973 	stmmac_hw_setup(ndev, false);
7974 	stmmac_init_coalesce(priv);
7975 	phylink_rx_clk_stop_block(priv->phylink);
7976 	stmmac_set_rx_mode(ndev);
7977 
7978 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7979 	phylink_rx_clk_stop_unblock(priv->phylink);
7980 
7981 	stmmac_enable_all_queues(priv);
7982 	stmmac_enable_all_dma_irq(priv);
7983 
7984 	mutex_unlock(&priv->lock);
7985 
7986 	/* phylink_resume() must be called after the hardware has been
7987 	 * initialised because it may bring the link up immediately in a
7988 	 * workqueue thread, which will race with initialisation.
7989 	 */
7990 	phylink_resume(priv->phylink);
7991 	if (device_may_wakeup(priv->device) && !priv->plat->pmt)
7992 		phylink_speed_up(priv->phylink);
7993 
7994 	rtnl_unlock();
7995 
7996 	netif_device_attach(ndev);
7997 
7998 	return 0;
7999 }
8000 EXPORT_SYMBOL_GPL(stmmac_resume);
8001 
8002 #ifndef MODULE
8003 static int __init stmmac_cmdline_opt(char *str)
8004 {
8005 	char *opt;
8006 
8007 	if (!str || !*str)
8008 		return 1;
8009 	while ((opt = strsep(&str, ",")) != NULL) {
8010 		if (!strncmp(opt, "debug:", 6)) {
8011 			if (kstrtoint(opt + 6, 0, &debug))
8012 				goto err;
8013 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8014 			if (kstrtoint(opt + 8, 0, &phyaddr))
8015 				goto err;
8016 		} else if (!strncmp(opt, "tc:", 3)) {
8017 			if (kstrtoint(opt + 3, 0, &tc))
8018 				goto err;
8019 		} else if (!strncmp(opt, "watchdog:", 9)) {
8020 			if (kstrtoint(opt + 9, 0, &watchdog))
8021 				goto err;
8022 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8023 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8024 				goto err;
8025 		} else if (!strncmp(opt, "pause:", 6)) {
8026 			if (kstrtoint(opt + 6, 0, &pause))
8027 				goto err;
8028 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8029 			if (kstrtoint(opt + 10, 0, &eee_timer))
8030 				goto err;
8031 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8032 			if (kstrtoint(opt + 11, 0, &chain_mode))
8033 				goto err;
8034 		}
8035 	}
8036 	return 1;
8037 
8038 err:
8039 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8040 	return 1;
8041 }
8042 
8043 __setup("stmmaceth=", stmmac_cmdline_opt);
8044 #endif /* MODULE */
8045 
8046 static int __init stmmac_init(void)
8047 {
8048 #ifdef CONFIG_DEBUG_FS
8049 	/* Create debugfs main directory if it doesn't exist yet */
8050 	if (!stmmac_fs_dir)
8051 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8052 	register_netdevice_notifier(&stmmac_notifier);
8053 #endif
8054 
8055 	return 0;
8056 }
8057 
8058 static void __exit stmmac_exit(void)
8059 {
8060 #ifdef CONFIG_DEBUG_FS
8061 	unregister_netdevice_notifier(&stmmac_notifier);
8062 	debugfs_remove_recursive(stmmac_fs_dir);
8063 #endif
8064 }
8065 
8066 module_init(stmmac_init)
8067 module_exit(stmmac_exit)
8068 
8069 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8070 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8071 MODULE_LICENSE("GPL");
8072