1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allows the user to force the use of chain mode instead of ring mode.
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
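
/* Usage note (illustrative, not part of the original source): assuming the
 * usual stmmac.ko module name, these parameters can be set at load time, e.g.
 *
 *	modprobe stmmac eee_timer=2000 chain_mode=1
 *
 * or, when the driver is built in, on the kernel command line as
 * "stmmac.eee_timer=2000". Parameters declared with 0644 permissions can also
 * be written later through /sys/module/stmmac/parameters/.
 */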
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
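
/* Illustrative call pattern (a sketch, not taken from this file): callers
 * such as the platform runtime-PM hooks are expected to pair the enable and
 * disable calls around register accesses, e.g.
 *
 *	ret = stmmac_bus_clks_config(priv, true);
 *	if (ret)
 *		return ret;
 *	// ... access MAC/DMA registers ...
 *	stmmac_bus_clks_config(priv, false);
 */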
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
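
/* Example (illustrative): loading the driver with buf_sz=100 or watchdog=-1
 * does not fail; the out-of-range values are silently reset here to
 * DEFAULT_BUFSIZE and TX_TIMEO respectively.
 */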
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  * documentation). Otherwise, the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* The platform-provided default clk_csr is assumed valid
308 	 * for all cases except the ones mentioned below.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we cannot estimate the proper divider because the frequency of
311 	 * clk_csr_i is not known, so we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
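
/* Example (illustrative): with a 125 MHz csr clock the 100-150 MHz branch
 * above is taken and priv->clk_csr becomes STMMAC_CSR_100_150M, so the core
 * divides the csr clock down to an MDC rate at or below the 2.5 MHz maximum
 * that IEEE 802.3 specifies for MDIO.
 */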
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
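
/* Worked example (illustrative): with dma_tx_size = 512, cur_tx = 10 and
 * dirty_tx = 5 the else branch applies and avail = 512 - 10 + 5 - 1 = 506.
 * The first branch covers the case where cur_tx has wrapped around and
 * dirty_tx is numerically larger. One slot is always kept unused so that a
 * full ring can be distinguished from an empty one.
 */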
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter LPI mode
405  * @priv: driver private structure
406  * Description: verify that all TX queues have completed their work and,
407  * if so, enter LPI mode when EEE is enabled.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: exit and disable EEE if the TX path is currently in the
433  * LPI state. This is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then the MAC transmitter can be moved to the LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts the
468  *  related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* When using the PCS we cannot deal with the PHY registers at this
475 	 * stage, so we do not support extra features like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
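
/* Behavioural note (summary of the logic above, not from the original
 * source): on GMAC4 cores with tx_lpi_timer <= STMMAC_ET_MAX the hardware
 * LPI entry timer is programmed and the MAC enters LPI autonomously;
 * otherwise the driver falls back to the software eee_ctrl_timer, whose
 * callback enters LPI only once all TX queues are idle and re-arms itself
 * (see STMMAC_LPI_T) while work is still pending.
 */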
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function reads the timestamp from the descriptor, performs some
534  * sanity checks and passes it to the stack.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function will read received packet's timestamp from the descriptor
577  * and pass it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure, that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing (TX)
614  *  and incoming (RX) packet timestamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate negative integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.1AS, any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.1AS, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.1AS, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
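
/* Userspace reaches this handler through the SIOCSHWTSTAMP ioctl. A minimal
 * (illustrative) caller, assuming an interface named "eth0" and an already
 * open socket fd, would look like:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */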
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure, that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * Will be rerun after resuming from suspend, in which case the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* Calculate the default addend value:
858 	 * addend = (2^32) / freq_div_ratio,
859 	 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc), i.e. the
860 	 * ratio of the PTP input clock to the desired tick rate.
861 	 */
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
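
/* Illustrative numbers (not from the source): with sec_inc = 10 ns (a
 * 100 MHz tick rate) and clk_ptp_rate = 200 MHz, freq_div_ratio is
 * 200000000 / 100000000 = 2, so default_addend = 2^32 / 2 = 0x80000000.
 * The 32-bit accumulator then wraps once every two 5 ns input cycles,
 * advancing the system time by one 10 ns sub-second increment per wrap.
 */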
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify whether the HW supports PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968 					MPACKET_VERIFY);
969 	} else {
970 		*lo_state = FPE_STATE_OFF;
971 		*lp_state = FPE_STATE_OFF;
972 	}
973 }
974 
975 static void stmmac_mac_link_down(struct phylink_config *config,
976 				 unsigned int mode, phy_interface_t interface)
977 {
978 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
979 
980 	stmmac_mac_set(priv, priv->ioaddr, false);
981 	priv->eee_active = false;
982 	priv->tx_lpi_enabled = false;
983 	priv->eee_enabled = stmmac_eee_init(priv);
984 	stmmac_set_eee_pls(priv, priv->hw, false);
985 
986 	if (priv->dma_cap.fpesel)
987 		stmmac_fpe_link_state_handle(priv, false);
988 }
989 
990 static void stmmac_mac_link_up(struct phylink_config *config,
991 			       struct phy_device *phy,
992 			       unsigned int mode, phy_interface_t interface,
993 			       int speed, int duplex,
994 			       bool tx_pause, bool rx_pause)
995 {
996 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997 	u32 old_ctrl, ctrl;
998 
999 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000 	    priv->plat->serdes_powerup)
1001 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002 
1003 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1005 
1006 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1007 		switch (speed) {
1008 		case SPEED_10000:
1009 			ctrl |= priv->hw->link.xgmii.speed10000;
1010 			break;
1011 		case SPEED_5000:
1012 			ctrl |= priv->hw->link.xgmii.speed5000;
1013 			break;
1014 		case SPEED_2500:
1015 			ctrl |= priv->hw->link.xgmii.speed2500;
1016 			break;
1017 		default:
1018 			return;
1019 		}
1020 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1021 		switch (speed) {
1022 		case SPEED_100000:
1023 			ctrl |= priv->hw->link.xlgmii.speed100000;
1024 			break;
1025 		case SPEED_50000:
1026 			ctrl |= priv->hw->link.xlgmii.speed50000;
1027 			break;
1028 		case SPEED_40000:
1029 			ctrl |= priv->hw->link.xlgmii.speed40000;
1030 			break;
1031 		case SPEED_25000:
1032 			ctrl |= priv->hw->link.xlgmii.speed25000;
1033 			break;
1034 		case SPEED_10000:
1035 			ctrl |= priv->hw->link.xgmii.speed10000;
1036 			break;
1037 		case SPEED_2500:
1038 			ctrl |= priv->hw->link.speed2500;
1039 			break;
1040 		case SPEED_1000:
1041 			ctrl |= priv->hw->link.speed1000;
1042 			break;
1043 		default:
1044 			return;
1045 		}
1046 	} else {
1047 		switch (speed) {
1048 		case SPEED_2500:
1049 			ctrl |= priv->hw->link.speed2500;
1050 			break;
1051 		case SPEED_1000:
1052 			ctrl |= priv->hw->link.speed1000;
1053 			break;
1054 		case SPEED_100:
1055 			ctrl |= priv->hw->link.speed100;
1056 			break;
1057 		case SPEED_10:
1058 			ctrl |= priv->hw->link.speed10;
1059 			break;
1060 		default:
1061 			return;
1062 		}
1063 	}
1064 
1065 	priv->speed = speed;
1066 
1067 	if (priv->plat->fix_mac_speed)
1068 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1069 
1070 	if (!duplex)
1071 		ctrl &= ~priv->hw->link.duplex;
1072 	else
1073 		ctrl |= priv->hw->link.duplex;
1074 
1075 	/* Flow Control operation */
1076 	if (rx_pause && tx_pause)
1077 		priv->flow_ctrl = FLOW_AUTO;
1078 	else if (rx_pause && !tx_pause)
1079 		priv->flow_ctrl = FLOW_RX;
1080 	else if (!rx_pause && tx_pause)
1081 		priv->flow_ctrl = FLOW_TX;
1082 	else
1083 		priv->flow_ctrl = FLOW_OFF;
1084 
1085 	stmmac_mac_flow_ctrl(priv, duplex);
1086 
1087 	if (ctrl != old_ctrl)
1088 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089 
1090 	stmmac_mac_set(priv, priv->ioaddr, true);
1091 	if (phy && priv->dma_cap.eee) {
1092 		priv->eee_active =
1093 			phy_init_eee(phy, !(priv->plat->flags &
1094 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1095 		priv->eee_enabled = stmmac_eee_init(priv);
1096 		priv->tx_lpi_enabled = priv->eee_enabled;
1097 		stmmac_set_eee_pls(priv, priv->hw, true);
1098 	}
1099 
1100 	if (priv->dma_cap.fpesel)
1101 		stmmac_fpe_link_state_handle(priv, true);
1102 
1103 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1104 		stmmac_hwtstamp_correct_latency(priv, priv);
1105 }
1106 
1107 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1108 	.mac_select_pcs = stmmac_mac_select_pcs,
1109 	.mac_config = stmmac_mac_config,
1110 	.mac_link_down = stmmac_mac_link_down,
1111 	.mac_link_up = stmmac_mac_link_up,
1112 };
1113 
1114 /**
1115  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116  * @priv: driver private structure
1117  * Description: this is to verify whether the HW supports the PCS, i.e. the
1118  * Physical Coding Sublayer interface that can be used when the MAC is
1119  * configured for the TBI, RTBI, or SGMII PHY interface.
1120  */
1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122 {
1123 	int interface = priv->plat->mac_interface;
1124 
1125 	if (priv->dma_cap.pcs) {
1126 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_RGMII;
1132 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134 			priv->hw->pcs = STMMAC_PCS_SGMII;
1135 		}
1136 	}
1137 }
1138 
1139 /**
1140  * stmmac_init_phy - PHY initialization
1141  * @dev: net device structure
1142  * Description: it initializes the driver's PHY state, and attaches the PHY
1143  * to the mac driver.
1144  *  Return value:
1145  *  0 on success
1146  */
1147 static int stmmac_init_phy(struct net_device *dev)
1148 {
1149 	struct stmmac_priv *priv = netdev_priv(dev);
1150 	struct fwnode_handle *phy_fwnode;
1151 	struct fwnode_handle *fwnode;
1152 	int ret;
1153 
1154 	if (!phylink_expects_phy(priv->phylink))
1155 		return 0;
1156 
1157 	fwnode = priv->plat->port_node;
1158 	if (!fwnode)
1159 		fwnode = dev_fwnode(priv->device);
1160 
1161 	if (fwnode)
1162 		phy_fwnode = fwnode_get_phy_node(fwnode);
1163 	else
1164 		phy_fwnode = NULL;
1165 
1166 	/* Some DT bindings do not set up the PHY handle. Let's try to
1167 	 * parse it manually.
1168 	 */
1169 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170 		int addr = priv->plat->phy_addr;
1171 		struct phy_device *phydev;
1172 
1173 		if (addr < 0) {
1174 			netdev_err(priv->dev, "no phy found\n");
1175 			return -ENODEV;
1176 		}
1177 
1178 		phydev = mdiobus_get_phy(priv->mii, addr);
1179 		if (!phydev) {
1180 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181 			return -ENODEV;
1182 		}
1183 
1184 		ret = phylink_connect_phy(priv->phylink, phydev);
1185 	} else {
1186 		fwnode_handle_put(phy_fwnode);
1187 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 	}
1189 
1190 	if (!priv->plat->pmt) {
1191 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192 
1193 		phylink_ethtool_get_wol(priv->phylink, &wol);
1194 		device_set_wakeup_capable(priv->device, !!wol.supported);
1195 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 	}
1197 
1198 	return ret;
1199 }
1200 
1201 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1202 {
1203 	/* Half-duplex can only work with a single TX queue */
1204 	if (priv->plat->tx_queues_to_use > 1)
1205 		priv->phylink_config.mac_capabilities &=
1206 			~(MAC_10HD | MAC_100HD | MAC_1000HD);
1207 	else
1208 		priv->phylink_config.mac_capabilities |=
1209 			(MAC_10HD | MAC_100HD | MAC_1000HD);
1210 }
1211 
1212 static int stmmac_phy_setup(struct stmmac_priv *priv)
1213 {
1214 	struct stmmac_mdio_bus_data *mdio_bus_data;
1215 	int mode = priv->plat->phy_interface;
1216 	struct fwnode_handle *fwnode;
1217 	struct phylink *phylink;
1218 	int max_speed;
1219 
1220 	priv->phylink_config.dev = &priv->dev->dev;
1221 	priv->phylink_config.type = PHYLINK_NETDEV;
1222 	priv->phylink_config.mac_managed_pm = true;
1223 
1224 	mdio_bus_data = priv->plat->mdio_bus_data;
1225 	if (mdio_bus_data)
1226 		priv->phylink_config.ovr_an_inband =
1227 			mdio_bus_data->xpcs_an_inband;
1228 
1229 	/* Set the platform/firmware specified interface mode. Note, phylink
1230 	 * deals with the PHY interface mode, not the MAC interface mode.
1231 	 */
1232 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1233 
1234 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1235 	if (priv->hw->xpcs)
1236 		xpcs_get_interfaces(priv->hw->xpcs,
1237 				    priv->phylink_config.supported_interfaces);
1238 
1239 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1240 						MAC_10FD | MAC_100FD |
1241 						MAC_1000FD;
1242 
1243 	stmmac_set_half_duplex(priv);
1244 
1245 	/* Get the MAC specific capabilities */
1246 	stmmac_mac_phylink_get_caps(priv);
1247 
1248 	max_speed = priv->plat->max_speed;
1249 	if (max_speed)
1250 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1251 
1252 	fwnode = priv->plat->port_node;
1253 	if (!fwnode)
1254 		fwnode = dev_fwnode(priv->device);
1255 
1256 	phylink = phylink_create(&priv->phylink_config, fwnode,
1257 				 mode, &stmmac_phylink_mac_ops);
1258 	if (IS_ERR(phylink))
1259 		return PTR_ERR(phylink);
1260 
1261 	priv->phylink = phylink;
1262 	return 0;
1263 }
1264 
1265 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1266 				    struct stmmac_dma_conf *dma_conf)
1267 {
1268 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1269 	unsigned int desc_size;
1270 	void *head_rx;
1271 	u32 queue;
1272 
1273 	/* Display RX rings */
1274 	for (queue = 0; queue < rx_cnt; queue++) {
1275 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1276 
1277 		pr_info("\tRX Queue %u rings\n", queue);
1278 
1279 		if (priv->extend_desc) {
1280 			head_rx = (void *)rx_q->dma_erx;
1281 			desc_size = sizeof(struct dma_extended_desc);
1282 		} else {
1283 			head_rx = (void *)rx_q->dma_rx;
1284 			desc_size = sizeof(struct dma_desc);
1285 		}
1286 
1287 		/* Display RX ring */
1288 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1289 				    rx_q->dma_rx_phy, desc_size);
1290 	}
1291 }
1292 
1293 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1294 				    struct stmmac_dma_conf *dma_conf)
1295 {
1296 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1297 	unsigned int desc_size;
1298 	void *head_tx;
1299 	u32 queue;
1300 
1301 	/* Display TX rings */
1302 	for (queue = 0; queue < tx_cnt; queue++) {
1303 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1304 
1305 		pr_info("\tTX Queue %d rings\n", queue);
1306 
1307 		if (priv->extend_desc) {
1308 			head_tx = (void *)tx_q->dma_etx;
1309 			desc_size = sizeof(struct dma_extended_desc);
1310 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1311 			head_tx = (void *)tx_q->dma_entx;
1312 			desc_size = sizeof(struct dma_edesc);
1313 		} else {
1314 			head_tx = (void *)tx_q->dma_tx;
1315 			desc_size = sizeof(struct dma_desc);
1316 		}
1317 
1318 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1319 				    tx_q->dma_tx_phy, desc_size);
1320 	}
1321 }
1322 
1323 static void stmmac_display_rings(struct stmmac_priv *priv,
1324 				 struct stmmac_dma_conf *dma_conf)
1325 {
1326 	/* Display RX ring */
1327 	stmmac_display_rx_rings(priv, dma_conf);
1328 
1329 	/* Display TX ring */
1330 	stmmac_display_tx_rings(priv, dma_conf);
1331 }
1332 
1333 static int stmmac_set_bfsize(int mtu, int bufsize)
1334 {
1335 	int ret = bufsize;
1336 
1337 	if (mtu >= BUF_SIZE_8KiB)
1338 		ret = BUF_SIZE_16KiB;
1339 	else if (mtu >= BUF_SIZE_4KiB)
1340 		ret = BUF_SIZE_8KiB;
1341 	else if (mtu >= BUF_SIZE_2KiB)
1342 		ret = BUF_SIZE_4KiB;
1343 	else if (mtu > DEFAULT_BUFSIZE)
1344 		ret = BUF_SIZE_2KiB;
1345 	else
1346 		ret = DEFAULT_BUFSIZE;
1347 
1348 	return ret;
1349 }
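
/* Illustrative mapping: a standard 1500-byte MTU keeps the 1536-byte
 * DEFAULT_BUFSIZE, while jumbo MTUs bump the DMA buffer size up to the next
 * BUF_SIZE_* step large enough to hold a full frame (e.g. an MTU just above
 * BUF_SIZE_2KiB selects BUF_SIZE_4KiB).
 */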
1350 
1351 /**
1352  * stmmac_clear_rx_descriptors - clear RX descriptors
1353  * @priv: driver private structure
1354  * @dma_conf: structure to take the dma data
1355  * @queue: RX queue index
1356  * Description: this function is called to clear the RX descriptors
1357  * whether basic or extended descriptors are used.
1358  */
1359 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1360 					struct stmmac_dma_conf *dma_conf,
1361 					u32 queue)
1362 {
1363 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1364 	int i;
1365 
1366 	/* Clear the RX descriptors */
1367 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1368 		if (priv->extend_desc)
1369 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1370 					priv->use_riwt, priv->mode,
1371 					(i == dma_conf->dma_rx_size - 1),
1372 					dma_conf->dma_buf_sz);
1373 		else
1374 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1375 					priv->use_riwt, priv->mode,
1376 					(i == dma_conf->dma_rx_size - 1),
1377 					dma_conf->dma_buf_sz);
1378 }
1379 
1380 /**
1381  * stmmac_clear_tx_descriptors - clear tx descriptors
1382  * @priv: driver private structure
1383  * @dma_conf: structure to take the dma data
1384  * @queue: TX queue index.
1385  * Description: this function is called to clear the TX descriptors
1386  * whether basic or extended descriptors are used.
1387  */
1388 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1389 					struct stmmac_dma_conf *dma_conf,
1390 					u32 queue)
1391 {
1392 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1393 	int i;
1394 
1395 	/* Clear the TX descriptors */
1396 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1397 		int last = (i == (dma_conf->dma_tx_size - 1));
1398 		struct dma_desc *p;
1399 
1400 		if (priv->extend_desc)
1401 			p = &tx_q->dma_etx[i].basic;
1402 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1403 			p = &tx_q->dma_entx[i].basic;
1404 		else
1405 			p = &tx_q->dma_tx[i];
1406 
1407 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1408 	}
1409 }
1410 
1411 /**
1412  * stmmac_clear_descriptors - clear descriptors
1413  * @priv: driver private structure
1414  * @dma_conf: structure to take the dma data
1415  * Description: this function is called to clear the TX and RX descriptors
1416  * whether basic or extended descriptors are used.
1417  */
1418 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1419 				     struct stmmac_dma_conf *dma_conf)
1420 {
1421 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1422 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1423 	u32 queue;
1424 
1425 	/* Clear the RX descriptors */
1426 	for (queue = 0; queue < rx_queue_cnt; queue++)
1427 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1428 
1429 	/* Clear the TX descriptors */
1430 	for (queue = 0; queue < tx_queue_cnt; queue++)
1431 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1432 }
1433 
1434 /**
1435  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1436  * @priv: driver private structure
1437  * @dma_conf: structure to take the dma data
1438  * @p: descriptor pointer
1439  * @i: descriptor index
1440  * @flags: gfp flag
1441  * @queue: RX queue index
1442  * Description: this function is called to allocate a receive buffer, perform
1443  * the DMA mapping and init the descriptor.
1444  */
1445 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1446 				  struct stmmac_dma_conf *dma_conf,
1447 				  struct dma_desc *p,
1448 				  int i, gfp_t flags, u32 queue)
1449 {
1450 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1451 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1452 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1453 
1454 	if (priv->dma_cap.host_dma_width <= 32)
1455 		gfp |= GFP_DMA32;
1456 
1457 	if (!buf->page) {
1458 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1459 		if (!buf->page)
1460 			return -ENOMEM;
1461 		buf->page_offset = stmmac_rx_offset(priv);
1462 	}
1463 
1464 	if (priv->sph && !buf->sec_page) {
1465 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1466 		if (!buf->sec_page)
1467 			return -ENOMEM;
1468 
1469 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1470 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1471 	} else {
1472 		buf->sec_page = NULL;
1473 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1474 	}
1475 
1476 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1477 
1478 	stmmac_set_desc_addr(priv, p, buf->addr);
1479 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1480 		stmmac_init_desc3(priv, p);
1481 
1482 	return 0;
1483 }
1484 
1485 /**
1486  * stmmac_free_rx_buffer - free RX dma buffers
1487  * @priv: private structure
1488  * @rx_q: RX queue
1489  * @i: buffer index.
1490  */
1491 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1492 				  struct stmmac_rx_queue *rx_q,
1493 				  int i)
1494 {
1495 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1496 
1497 	if (buf->page)
1498 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1499 	buf->page = NULL;
1500 
1501 	if (buf->sec_page)
1502 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1503 	buf->sec_page = NULL;
1504 }
1505 
1506 /**
1507  * stmmac_free_tx_buffer - free TX dma buffers
1508  * @priv: private structure
1509  * @dma_conf: structure to take the dma data
1510  * @queue: TX queue index
1511  * @i: buffer index.
1512  */
1513 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1514 				  struct stmmac_dma_conf *dma_conf,
1515 				  u32 queue, int i)
1516 {
1517 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1518 
1519 	if (tx_q->tx_skbuff_dma[i].buf &&
1520 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1521 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1522 			dma_unmap_page(priv->device,
1523 				       tx_q->tx_skbuff_dma[i].buf,
1524 				       tx_q->tx_skbuff_dma[i].len,
1525 				       DMA_TO_DEVICE);
1526 		else
1527 			dma_unmap_single(priv->device,
1528 					 tx_q->tx_skbuff_dma[i].buf,
1529 					 tx_q->tx_skbuff_dma[i].len,
1530 					 DMA_TO_DEVICE);
1531 	}
1532 
1533 	if (tx_q->xdpf[i] &&
1534 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1535 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1536 		xdp_return_frame(tx_q->xdpf[i]);
1537 		tx_q->xdpf[i] = NULL;
1538 	}
1539 
1540 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1541 		tx_q->xsk_frames_done++;
1542 
1543 	if (tx_q->tx_skbuff[i] &&
1544 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1545 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1546 		tx_q->tx_skbuff[i] = NULL;
1547 	}
1548 
1549 	tx_q->tx_skbuff_dma[i].buf = 0;
1550 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1551 }
1552 
1553 /**
1554  * dma_free_rx_skbufs - free RX dma buffers
1555  * @priv: private structure
1556  * @dma_conf: structure to take the dma data
1557  * @queue: RX queue index
1558  */
1559 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1560 			       struct stmmac_dma_conf *dma_conf,
1561 			       u32 queue)
1562 {
1563 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564 	int i;
1565 
1566 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1567 		stmmac_free_rx_buffer(priv, rx_q, i);
1568 }
1569 
1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1571 				   struct stmmac_dma_conf *dma_conf,
1572 				   u32 queue, gfp_t flags)
1573 {
1574 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1575 	int i;
1576 
1577 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1578 		struct dma_desc *p;
1579 		int ret;
1580 
1581 		if (priv->extend_desc)
1582 			p = &((rx_q->dma_erx + i)->basic);
1583 		else
1584 			p = rx_q->dma_rx + i;
1585 
1586 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1587 					     queue);
1588 		if (ret)
1589 			return ret;
1590 
1591 		rx_q->buf_alloc_num++;
1592 	}
1593 
1594 	return 0;
1595 }
1596 
1597 /**
1598  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1599  * @priv: private structure
1600  * @dma_conf: structure to take the dma data
1601  * @queue: RX queue index
1602  */
1603 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1604 				struct stmmac_dma_conf *dma_conf,
1605 				u32 queue)
1606 {
1607 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1608 	int i;
1609 
1610 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1611 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1612 
1613 		if (!buf->xdp)
1614 			continue;
1615 
1616 		xsk_buff_free(buf->xdp);
1617 		buf->xdp = NULL;
1618 	}
1619 }
1620 
1621 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1622 				      struct stmmac_dma_conf *dma_conf,
1623 				      u32 queue)
1624 {
1625 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1626 	int i;
1627 
1628 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1629 	 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1630 	 * use this macro to make sure there are no size violations.
1631 	 */
1632 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1633 
1634 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1635 		struct stmmac_rx_buffer *buf;
1636 		dma_addr_t dma_addr;
1637 		struct dma_desc *p;
1638 
1639 		if (priv->extend_desc)
1640 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1641 		else
1642 			p = rx_q->dma_rx + i;
1643 
1644 		buf = &rx_q->buf_pool[i];
1645 
1646 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1647 		if (!buf->xdp)
1648 			return -ENOMEM;
1649 
1650 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1651 		stmmac_set_desc_addr(priv, p, dma_addr);
1652 		rx_q->buf_alloc_num++;
1653 	}
1654 
1655 	return 0;
1656 }
1657 
1658 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1659 {
1660 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1661 		return NULL;
1662 
1663 	return xsk_get_pool_from_qid(priv->dev, queue);
1664 }
1665 
1666 /**
1667  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1668  * @priv: driver private structure
1669  * @dma_conf: structure to take the dma data
1670  * @queue: RX queue index
1671  * @flags: gfp flag.
1672  * Description: this function initializes the DMA RX descriptors
1673  * and allocates the socket buffers. It supports the chained and ring
1674  * modes.
1675  */
1676 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1677 				    struct stmmac_dma_conf *dma_conf,
1678 				    u32 queue, gfp_t flags)
1679 {
1680 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1681 	int ret;
1682 
1683 	netif_dbg(priv, probe, priv->dev,
1684 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1685 		  (u32)rx_q->dma_rx_phy);
1686 
1687 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1688 
1689 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1690 
1691 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1692 
1693 	if (rx_q->xsk_pool) {
1694 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1695 						   MEM_TYPE_XSK_BUFF_POOL,
1696 						   NULL));
1697 		netdev_info(priv->dev,
1698 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1699 			    rx_q->queue_index);
1700 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1701 	} else {
1702 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1703 						   MEM_TYPE_PAGE_POOL,
1704 						   rx_q->page_pool));
1705 		netdev_info(priv->dev,
1706 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1707 			    rx_q->queue_index);
1708 	}
1709 
1710 	if (rx_q->xsk_pool) {
1711 		/* RX XDP ZC buffer pool may not be populated, e.g.
1712 		 * xdpsock TX-only.
1713 		 */
1714 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1715 	} else {
1716 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1717 		if (ret < 0)
1718 			return -ENOMEM;
1719 	}
1720 
1721 	/* Setup the chained descriptor addresses */
1722 	if (priv->mode == STMMAC_CHAIN_MODE) {
1723 		if (priv->extend_desc)
1724 			stmmac_mode_init(priv, rx_q->dma_erx,
1725 					 rx_q->dma_rx_phy,
1726 					 dma_conf->dma_rx_size, 1);
1727 		else
1728 			stmmac_mode_init(priv, rx_q->dma_rx,
1729 					 rx_q->dma_rx_phy,
1730 					 dma_conf->dma_rx_size, 0);
1731 	}
1732 
1733 	return 0;
1734 }
1735 
1736 static int init_dma_rx_desc_rings(struct net_device *dev,
1737 				  struct stmmac_dma_conf *dma_conf,
1738 				  gfp_t flags)
1739 {
1740 	struct stmmac_priv *priv = netdev_priv(dev);
1741 	u32 rx_count = priv->plat->rx_queues_to_use;
1742 	int queue;
1743 	int ret;
1744 
1745 	/* RX INITIALIZATION */
1746 	netif_dbg(priv, probe, priv->dev,
1747 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1748 
1749 	for (queue = 0; queue < rx_count; queue++) {
1750 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1751 		if (ret)
1752 			goto err_init_rx_buffers;
1753 	}
1754 
1755 	return 0;
1756 
1757 err_init_rx_buffers:
1758 	while (queue >= 0) {
1759 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1760 
1761 		if (rx_q->xsk_pool)
1762 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1763 		else
1764 			dma_free_rx_skbufs(priv, dma_conf, queue);
1765 
1766 		rx_q->buf_alloc_num = 0;
1767 		rx_q->xsk_pool = NULL;
1768 
1769 		queue--;
1770 	}
1771 
1772 	return ret;
1773 }
1774 
1775 /**
1776  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1777  * @priv: driver private structure
1778  * @dma_conf: structure to take the dma data
1779  * @queue: TX queue index
1780  * Description: this function initializes the DMA TX descriptors
1781  * and clears the per-descriptor TX buffer bookkeeping. It supports the
1782  * chained and ring modes.
1783  */
1784 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1785 				    struct stmmac_dma_conf *dma_conf,
1786 				    u32 queue)
1787 {
1788 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1789 	int i;
1790 
1791 	netif_dbg(priv, probe, priv->dev,
1792 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1793 		  (u32)tx_q->dma_tx_phy);
1794 
1795 	/* Setup the chained descriptor addresses */
1796 	if (priv->mode == STMMAC_CHAIN_MODE) {
1797 		if (priv->extend_desc)
1798 			stmmac_mode_init(priv, tx_q->dma_etx,
1799 					 tx_q->dma_tx_phy,
1800 					 dma_conf->dma_tx_size, 1);
1801 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1802 			stmmac_mode_init(priv, tx_q->dma_tx,
1803 					 tx_q->dma_tx_phy,
1804 					 dma_conf->dma_tx_size, 0);
1805 	}
1806 
1807 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1808 
1809 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1810 		struct dma_desc *p;
1811 
1812 		if (priv->extend_desc)
1813 			p = &((tx_q->dma_etx + i)->basic);
1814 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1815 			p = &((tx_q->dma_entx + i)->basic);
1816 		else
1817 			p = tx_q->dma_tx + i;
1818 
1819 		stmmac_clear_desc(priv, p);
1820 
1821 		tx_q->tx_skbuff_dma[i].buf = 0;
1822 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1823 		tx_q->tx_skbuff_dma[i].len = 0;
1824 		tx_q->tx_skbuff_dma[i].last_segment = false;
1825 		tx_q->tx_skbuff[i] = NULL;
1826 	}
1827 
1828 	return 0;
1829 }
1830 
1831 static int init_dma_tx_desc_rings(struct net_device *dev,
1832 				  struct stmmac_dma_conf *dma_conf)
1833 {
1834 	struct stmmac_priv *priv = netdev_priv(dev);
1835 	u32 tx_queue_cnt;
1836 	u32 queue;
1837 
1838 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1839 
1840 	for (queue = 0; queue < tx_queue_cnt; queue++)
1841 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1842 
1843 	return 0;
1844 }
1845 
1846 /**
1847  * init_dma_desc_rings - init the RX/TX descriptor rings
1848  * @dev: net device structure
1849  * @dma_conf: structure to take the dma data
1850  * @flags: gfp flag.
1851  * Description: this function initializes the DMA RX/TX descriptors
1852  * and allocates the socket buffers. It supports the chained and ring
1853  * modes.
1854  */
1855 static int init_dma_desc_rings(struct net_device *dev,
1856 			       struct stmmac_dma_conf *dma_conf,
1857 			       gfp_t flags)
1858 {
1859 	struct stmmac_priv *priv = netdev_priv(dev);
1860 	int ret;
1861 
1862 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1863 	if (ret)
1864 		return ret;
1865 
1866 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1867 
1868 	stmmac_clear_descriptors(priv, dma_conf);
1869 
1870 	if (netif_msg_hw(priv))
1871 		stmmac_display_rings(priv, dma_conf);
1872 
1873 	return ret;
1874 }
1875 
1876 /**
1877  * dma_free_tx_skbufs - free TX dma buffers
1878  * @priv: private structure
1879  * @dma_conf: structure to take the dma data
1880  * @queue: TX queue index
1881  */
1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1883 			       struct stmmac_dma_conf *dma_conf,
1884 			       u32 queue)
1885 {
1886 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1887 	int i;
1888 
1889 	tx_q->xsk_frames_done = 0;
1890 
1891 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1892 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1893 
1894 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1895 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1896 		tx_q->xsk_frames_done = 0;
1897 		tx_q->xsk_pool = NULL;
1898 	}
1899 }
1900 
1901 /**
1902  * stmmac_free_tx_skbufs - free TX skb buffers
1903  * @priv: private structure
1904  */
1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1906 {
1907 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1908 	u32 queue;
1909 
1910 	for (queue = 0; queue < tx_queue_cnt; queue++)
1911 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1912 }
1913 
1914 /**
1915  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1916  * @priv: private structure
1917  * @dma_conf: structure to take the dma data
1918  * @queue: RX queue index
1919  */
1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1921 					 struct stmmac_dma_conf *dma_conf,
1922 					 u32 queue)
1923 {
1924 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1925 
1926 	/* Release the DMA RX socket buffers */
1927 	if (rx_q->xsk_pool)
1928 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1929 	else
1930 		dma_free_rx_skbufs(priv, dma_conf, queue);
1931 
1932 	rx_q->buf_alloc_num = 0;
1933 	rx_q->xsk_pool = NULL;
1934 
1935 	/* Free DMA regions of consistent memory previously allocated */
1936 	if (!priv->extend_desc)
1937 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1938 				  sizeof(struct dma_desc),
1939 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1940 	else
1941 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1942 				  sizeof(struct dma_extended_desc),
1943 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1944 
1945 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1946 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1947 
1948 	kfree(rx_q->buf_pool);
1949 	if (rx_q->page_pool)
1950 		page_pool_destroy(rx_q->page_pool);
1951 }
1952 
1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1954 				       struct stmmac_dma_conf *dma_conf)
1955 {
1956 	u32 rx_count = priv->plat->rx_queues_to_use;
1957 	u32 queue;
1958 
1959 	/* Free RX queue resources */
1960 	for (queue = 0; queue < rx_count; queue++)
1961 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1962 }
1963 
1964 /**
1965  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1966  * @priv: private structure
1967  * @dma_conf: structure to take the dma data
1968  * @queue: TX queue index
1969  */
1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1971 					 struct stmmac_dma_conf *dma_conf,
1972 					 u32 queue)
1973 {
1974 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1975 	size_t size;
1976 	void *addr;
1977 
1978 	/* Release the DMA TX socket buffers */
1979 	dma_free_tx_skbufs(priv, dma_conf, queue);
1980 
1981 	if (priv->extend_desc) {
1982 		size = sizeof(struct dma_extended_desc);
1983 		addr = tx_q->dma_etx;
1984 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1985 		size = sizeof(struct dma_edesc);
1986 		addr = tx_q->dma_entx;
1987 	} else {
1988 		size = sizeof(struct dma_desc);
1989 		addr = tx_q->dma_tx;
1990 	}
1991 
1992 	size *= dma_conf->dma_tx_size;
1993 
1994 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1995 
1996 	kfree(tx_q->tx_skbuff_dma);
1997 	kfree(tx_q->tx_skbuff);
1998 }
1999 
2000 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2001 				       struct stmmac_dma_conf *dma_conf)
2002 {
2003 	u32 tx_count = priv->plat->tx_queues_to_use;
2004 	u32 queue;
2005 
2006 	/* Free TX queue resources */
2007 	for (queue = 0; queue < tx_count; queue++)
2008 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2009 }
2010 
2011 /**
2012  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2013  * @priv: private structure
2014  * @dma_conf: structure to take the dma data
2015  * @queue: RX queue index
2016  * Description: according to which descriptor can be used (extended or basic)
2017  * this function allocates the resources for the RX path of the given queue:
2018  * the page pool used for the RX buffers, the buffer bookkeeping array and
2019  * the descriptor ring in coherent DMA memory.
2020  */
2021 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2022 					 struct stmmac_dma_conf *dma_conf,
2023 					 u32 queue)
2024 {
2025 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2026 	struct stmmac_channel *ch = &priv->channel[queue];
2027 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2028 	struct page_pool_params pp_params = { 0 };
2029 	unsigned int num_pages;
2030 	unsigned int napi_id;
2031 	int ret;
2032 
2033 	rx_q->queue_index = queue;
2034 	rx_q->priv_data = priv;
2035 
2036 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2037 	pp_params.pool_size = dma_conf->dma_rx_size;
2038 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2039 	pp_params.order = ilog2(num_pages);
2040 	pp_params.nid = dev_to_node(priv->device);
2041 	pp_params.dev = priv->device;
2042 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2043 	pp_params.offset = stmmac_rx_offset(priv);
2044 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2045 
2046 	rx_q->page_pool = page_pool_create(&pp_params);
2047 	if (IS_ERR(rx_q->page_pool)) {
2048 		ret = PTR_ERR(rx_q->page_pool);
2049 		rx_q->page_pool = NULL;
2050 		return ret;
2051 	}
2052 
2053 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2054 				 sizeof(*rx_q->buf_pool),
2055 				 GFP_KERNEL);
2056 	if (!rx_q->buf_pool)
2057 		return -ENOMEM;
2058 
2059 	if (priv->extend_desc) {
2060 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2061 						   dma_conf->dma_rx_size *
2062 						   sizeof(struct dma_extended_desc),
2063 						   &rx_q->dma_rx_phy,
2064 						   GFP_KERNEL);
2065 		if (!rx_q->dma_erx)
2066 			return -ENOMEM;
2067 
2068 	} else {
2069 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2070 						  dma_conf->dma_rx_size *
2071 						  sizeof(struct dma_desc),
2072 						  &rx_q->dma_rx_phy,
2073 						  GFP_KERNEL);
2074 		if (!rx_q->dma_rx)
2075 			return -ENOMEM;
2076 	}
2077 
2078 	if (stmmac_xdp_is_enabled(priv) &&
2079 	    test_bit(queue, priv->af_xdp_zc_qps))
2080 		napi_id = ch->rxtx_napi.napi_id;
2081 	else
2082 		napi_id = ch->rx_napi.napi_id;
2083 
2084 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2085 			       rx_q->queue_index,
2086 			       napi_id);
2087 	if (ret) {
2088 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2089 		return -EINVAL;
2090 	}
2091 
2092 	return 0;
2093 }
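
/* Illustrative sketch (not part of the driver): how the page_pool order is
 * derived from the buffer size above, assuming a 4 KiB PAGE_SIZE:
 *
 *	dma_buf_sz = 1536: num_pages = DIV_ROUND_UP(1536, 4096) = 1
 *	                   order     = ilog2(1)                 = 0 (single pages)
 *	dma_buf_sz = 8192: num_pages = DIV_ROUND_UP(8192, 4096) = 2
 *	                   order     = ilog2(2)                 = 1 (8 KiB compound pages)
 *
 * The pool then hands out pages of PAGE_SIZE << order bytes each.
 */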
2094 
2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2096 				       struct stmmac_dma_conf *dma_conf)
2097 {
2098 	u32 rx_count = priv->plat->rx_queues_to_use;
2099 	u32 queue;
2100 	int ret;
2101 
2102 	/* RX queues buffers and DMA */
2103 	for (queue = 0; queue < rx_count; queue++) {
2104 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2105 		if (ret)
2106 			goto err_dma;
2107 	}
2108 
2109 	return 0;
2110 
2111 err_dma:
2112 	free_dma_rx_desc_resources(priv, dma_conf);
2113 
2114 	return ret;
2115 }
2116 
2117 /**
2118  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2119  * @priv: private structure
2120  * @dma_conf: structure to take the dma data
2121  * @queue: TX queue index
2122  * Description: according to which descriptor can be used (extended or basic)
2123  * this function allocates the resources for the TX path of the given queue:
2124  * the descriptor ring in coherent DMA memory and the per-descriptor
2125  * bookkeeping arrays (tx_skbuff and tx_skbuff_dma).
2126  */
2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2128 					 struct stmmac_dma_conf *dma_conf,
2129 					 u32 queue)
2130 {
2131 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2132 	size_t size;
2133 	void *addr;
2134 
2135 	tx_q->queue_index = queue;
2136 	tx_q->priv_data = priv;
2137 
2138 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2139 				      sizeof(*tx_q->tx_skbuff_dma),
2140 				      GFP_KERNEL);
2141 	if (!tx_q->tx_skbuff_dma)
2142 		return -ENOMEM;
2143 
2144 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2145 				  sizeof(struct sk_buff *),
2146 				  GFP_KERNEL);
2147 	if (!tx_q->tx_skbuff)
2148 		return -ENOMEM;
2149 
2150 	if (priv->extend_desc)
2151 		size = sizeof(struct dma_extended_desc);
2152 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2153 		size = sizeof(struct dma_edesc);
2154 	else
2155 		size = sizeof(struct dma_desc);
2156 
2157 	size *= dma_conf->dma_tx_size;
2158 
2159 	addr = dma_alloc_coherent(priv->device, size,
2160 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2161 	if (!addr)
2162 		return -ENOMEM;
2163 
2164 	if (priv->extend_desc)
2165 		tx_q->dma_etx = addr;
2166 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2167 		tx_q->dma_entx = addr;
2168 	else
2169 		tx_q->dma_tx = addr;
2170 
2171 	return 0;
2172 }
2173 
2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2175 				       struct stmmac_dma_conf *dma_conf)
2176 {
2177 	u32 tx_count = priv->plat->tx_queues_to_use;
2178 	u32 queue;
2179 	int ret;
2180 
2181 	/* TX queues buffers and DMA */
2182 	for (queue = 0; queue < tx_count; queue++) {
2183 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2184 		if (ret)
2185 			goto err_dma;
2186 	}
2187 
2188 	return 0;
2189 
2190 err_dma:
2191 	free_dma_tx_desc_resources(priv, dma_conf);
2192 	return ret;
2193 }
2194 
2195 /**
2196  * alloc_dma_desc_resources - alloc TX/RX resources.
2197  * @priv: private structure
2198  * @dma_conf: structure to take the dma data
2199  * Description: according to which descriptor can be used (extended or basic)
2200  * this function allocates the resources for the TX and RX paths. In case of
2201  * reception, for example, it pre-allocates the RX buffers in order to
2202  * allow a zero-copy mechanism.
2203  */
2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2205 				    struct stmmac_dma_conf *dma_conf)
2206 {
2207 	/* RX Allocation */
2208 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2209 
2210 	if (ret)
2211 		return ret;
2212 
2213 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2214 
2215 	return ret;
2216 }
2217 
2218 /**
2219  * free_dma_desc_resources - free dma desc resources
2220  * @priv: private structure
2221  * @dma_conf: structure to take the dma data
2222  */
2223 static void free_dma_desc_resources(struct stmmac_priv *priv,
2224 				    struct stmmac_dma_conf *dma_conf)
2225 {
2226 	/* Release the DMA TX socket buffers */
2227 	free_dma_tx_desc_resources(priv, dma_conf);
2228 
2229 	/* Release the DMA RX socket buffers later
2230 	 * to ensure all pending XDP_TX buffers are returned.
2231 	 */
2232 	free_dma_rx_desc_resources(priv, dma_conf);
2233 }
2234 
2235 /**
2236  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2237  *  @priv: driver private structure
2238  *  Description: It is used for enabling the rx queues in the MAC
2239  */
2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2241 {
2242 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2243 	int queue;
2244 	u8 mode;
2245 
2246 	for (queue = 0; queue < rx_queues_count; queue++) {
2247 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2248 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2249 	}
2250 }
2251 
2252 /**
2253  * stmmac_start_rx_dma - start RX DMA channel
2254  * @priv: driver private structure
2255  * @chan: RX channel index
2256  * Description:
2257  * This starts an RX DMA channel
2258  */
2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2260 {
2261 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2262 	stmmac_start_rx(priv, priv->ioaddr, chan);
2263 }
2264 
2265 /**
2266  * stmmac_start_tx_dma - start TX DMA channel
2267  * @priv: driver private structure
2268  * @chan: TX channel index
2269  * Description:
2270  * This starts a TX DMA channel
2271  */
2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2273 {
2274 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2275 	stmmac_start_tx(priv, priv->ioaddr, chan);
2276 }
2277 
2278 /**
2279  * stmmac_stop_rx_dma - stop RX DMA channel
2280  * @priv: driver private structure
2281  * @chan: RX channel index
2282  * Description:
2283  * This stops an RX DMA channel
2284  */
2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2286 {
2287 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2288 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2289 }
2290 
2291 /**
2292  * stmmac_stop_tx_dma - stop TX DMA channel
2293  * @priv: driver private structure
2294  * @chan: TX channel index
2295  * Description:
2296  * This stops a TX DMA channel
2297  */
2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2299 {
2300 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2301 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2302 }
2303 
2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2305 {
2306 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2309 	u32 chan;
2310 
2311 	for (chan = 0; chan < dma_csr_ch; chan++) {
2312 		struct stmmac_channel *ch = &priv->channel[chan];
2313 		unsigned long flags;
2314 
2315 		spin_lock_irqsave(&ch->lock, flags);
2316 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2317 		spin_unlock_irqrestore(&ch->lock, flags);
2318 	}
2319 }
2320 
2321 /**
2322  * stmmac_start_all_dma - start all RX and TX DMA channels
2323  * @priv: driver private structure
2324  * Description:
2325  * This starts all the RX and TX DMA channels
2326  */
2327 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2328 {
2329 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2330 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2331 	u32 chan = 0;
2332 
2333 	for (chan = 0; chan < rx_channels_count; chan++)
2334 		stmmac_start_rx_dma(priv, chan);
2335 
2336 	for (chan = 0; chan < tx_channels_count; chan++)
2337 		stmmac_start_tx_dma(priv, chan);
2338 }
2339 
2340 /**
2341  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2342  * @priv: driver private structure
2343  * Description:
2344  * This stops the RX and TX DMA channels
2345  */
2346 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2347 {
2348 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2349 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2350 	u32 chan = 0;
2351 
2352 	for (chan = 0; chan < rx_channels_count; chan++)
2353 		stmmac_stop_rx_dma(priv, chan);
2354 
2355 	for (chan = 0; chan < tx_channels_count; chan++)
2356 		stmmac_stop_tx_dma(priv, chan);
2357 }
2358 
2359 /**
2360  *  stmmac_dma_operation_mode - HW DMA operation mode
2361  *  @priv: driver private structure
2362  *  Description: it is used for configuring the DMA operation mode register in
2363  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2364  */
2365 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2366 {
2367 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2368 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2369 	int rxfifosz = priv->plat->rx_fifo_size;
2370 	int txfifosz = priv->plat->tx_fifo_size;
2371 	u32 txmode = 0;
2372 	u32 rxmode = 0;
2373 	u32 chan = 0;
2374 	u8 qmode = 0;
2375 
2376 	if (rxfifosz == 0)
2377 		rxfifosz = priv->dma_cap.rx_fifo_size;
2378 	if (txfifosz == 0)
2379 		txfifosz = priv->dma_cap.tx_fifo_size;
2380 
2381 	/* Adjust for real per queue fifo size */
2382 	rxfifosz /= rx_channels_count;
2383 	txfifosz /= tx_channels_count;
2384 
2385 	if (priv->plat->force_thresh_dma_mode) {
2386 		txmode = tc;
2387 		rxmode = tc;
2388 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2389 		/*
2390 		 * In case of GMAC, SF mode can be enabled
2391 		 * to perform the TX COE in HW. This depends on:
2392 		 * 1) TX COE being actually supported
2393 		 * 2) there being no buggy Jumbo frame support
2394 		 *    that requires not inserting the csum in the TDES.
2395 		 */
2396 		txmode = SF_DMA_MODE;
2397 		rxmode = SF_DMA_MODE;
2398 		priv->xstats.threshold = SF_DMA_MODE;
2399 	} else {
2400 		txmode = tc;
2401 		rxmode = SF_DMA_MODE;
2402 	}
2403 
2404 	/* configure all channels */
2405 	for (chan = 0; chan < rx_channels_count; chan++) {
2406 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2407 		u32 buf_size;
2408 
2409 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2410 
2411 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2412 				rxfifosz, qmode);
2413 
2414 		if (rx_q->xsk_pool) {
2415 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2416 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2417 					      buf_size,
2418 					      chan);
2419 		} else {
2420 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421 					      priv->dma_conf.dma_buf_sz,
2422 					      chan);
2423 		}
2424 	}
2425 
2426 	for (chan = 0; chan < tx_channels_count; chan++) {
2427 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2428 
2429 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2430 				txfifosz, qmode);
2431 	}
2432 }
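
/* Illustrative sketch (not part of the driver): the FIFO split and the mode
 * selection above for a hypothetical platform configuration:
 *
 *	rx_fifo_size = 16384, rx_queues_to_use = 4  ->  rxfifosz = 4096 per queue
 *	tx_fifo_size = 16384, tx_queues_to_use = 4  ->  txfifosz = 4096 per queue
 *
 * Mode selection, in order of precedence:
 *	force_thresh_dma_mode        -> TX/RX threshold mode (tc)
 *	force_sf_dma_mode or tx_coe  -> TX/RX Store-And-Forward (SF_DMA_MODE)
 *	otherwise                    -> TX threshold (tc), RX Store-And-Forward
 */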
2433 
2434 static void stmmac_xsk_request_timestamp(void *_priv)
2435 {
2436 	struct stmmac_metadata_request *meta_req = _priv;
2437 
2438 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2439 	*meta_req->set_ic = true;
2440 }
2441 
2442 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2443 {
2444 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2445 	struct stmmac_priv *priv = tx_compl->priv;
2446 	struct dma_desc *desc = tx_compl->desc;
2447 	bool found = false;
2448 	u64 ns = 0;
2449 
2450 	if (!priv->hwts_tx_en)
2451 		return 0;
2452 
2453 	/* check tx tstamp status */
2454 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2455 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2456 		found = true;
2457 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2458 		found = true;
2459 	}
2460 
2461 	if (found) {
2462 		ns -= priv->plat->cdc_error_adj;
2463 		return ns_to_ktime(ns);
2464 	}
2465 
2466 	return 0;
2467 }
2468 
2469 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2470 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2471 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2472 };
2473 
2474 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2475 {
2476 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2477 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2478 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2479 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2480 	unsigned int entry = tx_q->cur_tx;
2481 	struct dma_desc *tx_desc = NULL;
2482 	struct xdp_desc xdp_desc;
2483 	bool work_done = true;
2484 	u32 tx_set_ic_bit = 0;
2485 
2486 	/* Avoids TX time-out as we are sharing with slow path */
2487 	txq_trans_cond_update(nq);
2488 
2489 	budget = min(budget, stmmac_tx_avail(priv, queue));
2490 
2491 	while (budget-- > 0) {
2492 		struct stmmac_metadata_request meta_req;
2493 		struct xsk_tx_metadata *meta = NULL;
2494 		dma_addr_t dma_addr;
2495 		bool set_ic;
2496 
2497 		/* We share the ring with the slow path, so stop XSK TX desc
2498 		 * submission when the available TX ring space drops below the threshold.
2499 		 */
2500 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2501 		    !netif_carrier_ok(priv->dev)) {
2502 			work_done = false;
2503 			break;
2504 		}
2505 
2506 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2507 			break;
2508 
2509 		if (priv->plat->est && priv->plat->est->enable &&
2510 		    priv->plat->est->max_sdu[queue] &&
2511 		    xdp_desc.len > priv->plat->est->max_sdu[queue]) {
2512 			priv->xstats.max_sdu_txq_drop[queue]++;
2513 			continue;
2514 		}
2515 
2516 		if (likely(priv->extend_desc))
2517 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2518 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2519 			tx_desc = &tx_q->dma_entx[entry].basic;
2520 		else
2521 			tx_desc = tx_q->dma_tx + entry;
2522 
2523 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2524 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2525 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2526 
2527 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2528 
2529 		/* To return the XDP buffer to the XSK pool, we simply call
2530 		 * xsk_tx_completed(), so we don't need to fill up
2531 		 * 'buf' and 'xdpf'.
2532 		 */
2533 		tx_q->tx_skbuff_dma[entry].buf = 0;
2534 		tx_q->xdpf[entry] = NULL;
2535 
2536 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2537 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2538 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2539 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2540 
2541 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2542 
2543 		tx_q->tx_count_frames++;
2544 
2545 		if (!priv->tx_coal_frames[queue])
2546 			set_ic = false;
2547 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2548 			set_ic = true;
2549 		else
2550 			set_ic = false;
2551 
2552 		meta_req.priv = priv;
2553 		meta_req.tx_desc = tx_desc;
2554 		meta_req.set_ic = &set_ic;
2555 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2556 					&meta_req);
2557 		if (set_ic) {
2558 			tx_q->tx_count_frames = 0;
2559 			stmmac_set_tx_ic(priv, tx_desc);
2560 			tx_set_ic_bit++;
2561 		}
2562 
2563 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2564 				       true, priv->mode, true, true,
2565 				       xdp_desc.len);
2566 
2567 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2568 
2569 		xsk_tx_metadata_to_compl(meta,
2570 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2571 
2572 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2573 		entry = tx_q->cur_tx;
2574 	}
2575 	u64_stats_update_begin(&txq_stats->napi_syncp);
2576 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2577 	u64_stats_update_end(&txq_stats->napi_syncp);
2578 
2579 	if (tx_desc) {
2580 		stmmac_flush_tx_descriptors(priv, queue);
2581 		xsk_tx_release(pool);
2582 	}
2583 
2584 	/* Return true only if both of these conditions are met:
2585 	 *  a) TX budget is still available
2586 	 *  b) work_done is true, i.e. the XSK TX desc peek ran empty (no more
2587 	 *     pending XSK TX frames for transmission)
2588 	 */
2589 	return !!budget && work_done;
2590 }
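
/* Illustrative sketch (not part of the driver): the cur_tx advance above
 * relies on STMMAC_GET_ENTRY() wrapping the index at the ring size. For a
 * power-of-two ring this is commonly a masked increment (an assumption
 * here, not necessarily the driver's exact macro definition):
 *
 *	static inline unsigned int ring_next(unsigned int idx, unsigned int size)
 *	{
 *		return (idx + 1) & (size - 1);
 *	}
 *
 * where size must be a power of two, e.g. ring_next(510, 512) == 511 and
 * ring_next(511, 512) == 0.
 */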
2591 
2592 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2593 {
2594 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2595 		tc += 64;
2596 
2597 		if (priv->plat->force_thresh_dma_mode)
2598 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2599 		else
2600 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2601 						      chan);
2602 
2603 		priv->xstats.threshold = tc;
2604 	}
2605 }
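
/* Illustrative sketch (not part of the driver): starting from the default
 * tc = 64, successive bump-up events walk the threshold as
 *
 *	64 -> 128 -> 192 -> 256 -> 320
 *
 * after which the "tc <= 256" guard above stops any further increase. No
 * bumping happens at all when the queue already runs in Store-And-Forward
 * mode (xstats.threshold == SF_DMA_MODE).
 */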
2606 
2607 /**
2608  * stmmac_tx_clean - to manage the transmission completion
2609  * @priv: driver private structure
2610  * @budget: napi budget limiting this function's packet handling
2611  * @queue: TX queue index
2612  * @pending_packets: signal to arm the TX coal timer
2613  * Description: it reclaims the transmit resources after transmission completes.
2614  * If some packets still need to be handled, due to TX coalescing, set
2615  * pending_packets to true to make NAPI arm the TX coal timer.
2616  */
2617 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2618 			   bool *pending_packets)
2619 {
2620 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2621 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2622 	unsigned int bytes_compl = 0, pkts_compl = 0;
2623 	unsigned int entry, xmits = 0, count = 0;
2624 	u32 tx_packets = 0, tx_errors = 0;
2625 
2626 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2627 
2628 	tx_q->xsk_frames_done = 0;
2629 
2630 	entry = tx_q->dirty_tx;
2631 
2632 	/* Try to clean all TX completed frames in one shot */
2633 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2634 		struct xdp_frame *xdpf;
2635 		struct sk_buff *skb;
2636 		struct dma_desc *p;
2637 		int status;
2638 
2639 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2640 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2641 			xdpf = tx_q->xdpf[entry];
2642 			skb = NULL;
2643 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2644 			xdpf = NULL;
2645 			skb = tx_q->tx_skbuff[entry];
2646 		} else {
2647 			xdpf = NULL;
2648 			skb = NULL;
2649 		}
2650 
2651 		if (priv->extend_desc)
2652 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2653 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2654 			p = &tx_q->dma_entx[entry].basic;
2655 		else
2656 			p = tx_q->dma_tx + entry;
2657 
2658 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2659 		/* Check if the descriptor is owned by the DMA */
2660 		if (unlikely(status & tx_dma_own))
2661 			break;
2662 
2663 		count++;
2664 
2665 		/* Make sure descriptor fields are read after reading
2666 		 * the own bit.
2667 		 */
2668 		dma_rmb();
2669 
2670 		/* Just consider the last segment and ... */
2671 		if (likely(!(status & tx_not_ls))) {
2672 			/* ... verify the status error condition */
2673 			if (unlikely(status & tx_err)) {
2674 				tx_errors++;
2675 				if (unlikely(status & tx_err_bump_tc))
2676 					stmmac_bump_dma_threshold(priv, queue);
2677 			} else {
2678 				tx_packets++;
2679 			}
2680 			if (skb) {
2681 				stmmac_get_tx_hwtstamp(priv, p, skb);
2682 			} else if (tx_q->xsk_pool &&
2683 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2684 				struct stmmac_xsk_tx_complete tx_compl = {
2685 					.priv = priv,
2686 					.desc = p,
2687 				};
2688 
2689 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2690 							 &stmmac_xsk_tx_metadata_ops,
2691 							 &tx_compl);
2692 			}
2693 		}
2694 
2695 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2696 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2697 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2698 				dma_unmap_page(priv->device,
2699 					       tx_q->tx_skbuff_dma[entry].buf,
2700 					       tx_q->tx_skbuff_dma[entry].len,
2701 					       DMA_TO_DEVICE);
2702 			else
2703 				dma_unmap_single(priv->device,
2704 						 tx_q->tx_skbuff_dma[entry].buf,
2705 						 tx_q->tx_skbuff_dma[entry].len,
2706 						 DMA_TO_DEVICE);
2707 			tx_q->tx_skbuff_dma[entry].buf = 0;
2708 			tx_q->tx_skbuff_dma[entry].len = 0;
2709 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2710 		}
2711 
2712 		stmmac_clean_desc3(priv, tx_q, p);
2713 
2714 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2715 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2716 
2717 		if (xdpf &&
2718 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2719 			xdp_return_frame_rx_napi(xdpf);
2720 			tx_q->xdpf[entry] = NULL;
2721 		}
2722 
2723 		if (xdpf &&
2724 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2725 			xdp_return_frame(xdpf);
2726 			tx_q->xdpf[entry] = NULL;
2727 		}
2728 
2729 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2730 			tx_q->xsk_frames_done++;
2731 
2732 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2733 			if (likely(skb)) {
2734 				pkts_compl++;
2735 				bytes_compl += skb->len;
2736 				dev_consume_skb_any(skb);
2737 				tx_q->tx_skbuff[entry] = NULL;
2738 			}
2739 		}
2740 
2741 		stmmac_release_tx_desc(priv, p, priv->mode);
2742 
2743 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2744 	}
2745 	tx_q->dirty_tx = entry;
2746 
2747 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2748 				  pkts_compl, bytes_compl);
2749 
2750 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2751 								queue))) &&
2752 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2753 
2754 		netif_dbg(priv, tx_done, priv->dev,
2755 			  "%s: restart transmit\n", __func__);
2756 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2757 	}
2758 
2759 	if (tx_q->xsk_pool) {
2760 		bool work_done;
2761 
2762 		if (tx_q->xsk_frames_done)
2763 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2764 
2765 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2766 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2767 
2768 		/* For XSK TX, we try to send as many as possible.
2769 		 * If XSK work done (XSK TX desc empty and budget still
2770 		 * available), return "budget - 1" to reenable TX IRQ.
2771 		 * Else, return "budget" to make NAPI continue polling.
2772 		 */
2773 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2774 					       STMMAC_XSK_TX_BUDGET_MAX);
2775 		if (work_done)
2776 			xmits = budget - 1;
2777 		else
2778 			xmits = budget;
2779 	}
2780 
2781 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2782 	    priv->eee_sw_timer_en) {
2783 		if (stmmac_enable_eee_mode(priv))
2784 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2785 	}
2786 
2787 	/* We still have pending packets, let's call for a new scheduling */
2788 	if (tx_q->dirty_tx != tx_q->cur_tx)
2789 		*pending_packets = true;
2790 
2791 	u64_stats_update_begin(&txq_stats->napi_syncp);
2792 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2793 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2794 	u64_stats_inc(&txq_stats->napi.tx_clean);
2795 	u64_stats_update_end(&txq_stats->napi_syncp);
2796 
2797 	priv->xstats.tx_errors += tx_errors;
2798 
2799 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2800 
2801 	/* Combine decisions from TX clean and XSK TX */
2802 	return max(count, xmits);
2803 }
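
/* Illustrative sketch (not part of the driver): how the value returned above
 * feeds into the usual NAPI contract. A TX poll routine roughly does:
 *
 *	work = stmmac_tx_clean(priv, budget, queue, &pending);
 *	if (work < budget) {
 *		napi_complete_done(napi, work);
 *		re-enable the TX DMA interrupt;
 *	}
 *
 * which is why the XSK branch reports "budget - 1" when it ran out of work
 * (allowing the interrupt to be re-armed) and "budget" when it did not
 * (keeping NAPI polling).
 */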
2804 
2805 /**
2806  * stmmac_tx_err - to manage the tx error
2807  * @priv: driver private structure
2808  * @chan: channel index
2809  * Description: it cleans the descriptors and restarts the transmission
2810  * in case of transmission errors.
2811  */
2812 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2813 {
2814 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2815 
2816 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2817 
2818 	stmmac_stop_tx_dma(priv, chan);
2819 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2820 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2821 	stmmac_reset_tx_queue(priv, chan);
2822 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2823 			    tx_q->dma_tx_phy, chan);
2824 	stmmac_start_tx_dma(priv, chan);
2825 
2826 	priv->xstats.tx_errors++;
2827 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2828 }
2829 
2830 /**
2831  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2832  *  @priv: driver private structure
2833  *  @txmode: TX operating mode
2834  *  @rxmode: RX operating mode
2835  *  @chan: channel index
2836  *  Description: it is used for configuring the DMA operation mode at
2837  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2838  *  mode.
2839  */
2840 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2841 					  u32 rxmode, u32 chan)
2842 {
2843 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2844 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2845 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2846 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2847 	int rxfifosz = priv->plat->rx_fifo_size;
2848 	int txfifosz = priv->plat->tx_fifo_size;
2849 
2850 	if (rxfifosz == 0)
2851 		rxfifosz = priv->dma_cap.rx_fifo_size;
2852 	if (txfifosz == 0)
2853 		txfifosz = priv->dma_cap.tx_fifo_size;
2854 
2855 	/* Adjust for real per queue fifo size */
2856 	rxfifosz /= rx_channels_count;
2857 	txfifosz /= tx_channels_count;
2858 
2859 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2860 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2861 }
2862 
2863 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2864 {
2865 	int ret;
2866 
2867 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2868 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2869 	if (ret && (ret != -EINVAL)) {
2870 		stmmac_global_err(priv);
2871 		return true;
2872 	}
2873 
2874 	return false;
2875 }
2876 
2877 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2878 {
2879 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2880 						 &priv->xstats, chan, dir);
2881 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2882 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2883 	struct stmmac_channel *ch = &priv->channel[chan];
2884 	struct napi_struct *rx_napi;
2885 	struct napi_struct *tx_napi;
2886 	unsigned long flags;
2887 
2888 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2889 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2890 
2891 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2892 		if (napi_schedule_prep(rx_napi)) {
2893 			spin_lock_irqsave(&ch->lock, flags);
2894 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2895 			spin_unlock_irqrestore(&ch->lock, flags);
2896 			__napi_schedule(rx_napi);
2897 		}
2898 	}
2899 
2900 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2901 		if (napi_schedule_prep(tx_napi)) {
2902 			spin_lock_irqsave(&ch->lock, flags);
2903 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2904 			spin_unlock_irqrestore(&ch->lock, flags);
2905 			__napi_schedule(tx_napi);
2906 		}
2907 	}
2908 
2909 	return status;
2910 }
2911 
2912 /**
2913  * stmmac_dma_interrupt - DMA ISR
2914  * @priv: driver private structure
2915  * Description: this is the DMA ISR. It is called by the main ISR.
2916  * It calls the dwmac dma routine and schedules the poll method in case
2917  * some work can be done.
2918  */
2919 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2920 {
2921 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2922 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2923 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2924 				tx_channel_count : rx_channel_count;
2925 	u32 chan;
2926 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2927 
2928 	/* Make sure we never check beyond our status buffer. */
2929 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2930 		channels_to_check = ARRAY_SIZE(status);
2931 
2932 	for (chan = 0; chan < channels_to_check; chan++)
2933 		status[chan] = stmmac_napi_check(priv, chan,
2934 						 DMA_DIR_RXTX);
2935 
2936 	for (chan = 0; chan < tx_channel_count; chan++) {
2937 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2938 			/* Try to bump up the dma threshold on this failure */
2939 			stmmac_bump_dma_threshold(priv, chan);
2940 		} else if (unlikely(status[chan] == tx_hard_error)) {
2941 			stmmac_tx_err(priv, chan);
2942 		}
2943 	}
2944 }
2945 
2946 /**
2947  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2948  * @priv: driver private structure
2949  * Description: this masks the MMC irq since the counters are managed in SW.
2950  */
2951 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2952 {
2953 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2954 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2955 
2956 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2957 
2958 	if (priv->dma_cap.rmon) {
2959 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2960 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2961 	} else
2962 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2963 }
2964 
2965 /**
2966  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2967  * @priv: driver private structure
2968  * Description:
2969  *  new GMAC chip generations have a new register to indicate the
2970  *  presence of the optional feature/functions.
2971  *  This can also be used to override the values passed through the
2972  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2973  */
2974 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2975 {
2976 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2977 }
2978 
2979 /**
2980  * stmmac_check_ether_addr - check if the MAC addr is valid
2981  * @priv: driver private structure
2982  * Description:
2983  * it verifies that the MAC address is valid; in case it is not, it
2984  * generates a random MAC address.
2985  */
2986 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2987 {
2988 	u8 addr[ETH_ALEN];
2989 
2990 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2991 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2992 		if (is_valid_ether_addr(addr))
2993 			eth_hw_addr_set(priv->dev, addr);
2994 		else
2995 			eth_hw_addr_random(priv->dev);
2996 		dev_info(priv->device, "device MAC address %pM\n",
2997 			 priv->dev->dev_addr);
2998 	}
2999 }
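
/* Illustrative sketch (not part of the driver): eth_hw_addr_random() falls
 * back to a randomly generated, locally administered unicast address, i.e.
 * in the first octet the local-admin bit is set and the multicast bit is
 * cleared:
 *
 *	addr[0] = (addr[0] & 0xfe) | 0x02;
 *
 * so a resulting address might look like 0a:1b:2c:3d:4e:5f. Only the
 * "random, locally administered unicast" property is relied upon above; the
 * exact bit manipulation shown here is an assumption about the helper.
 */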
3000 
3001 /**
3002  * stmmac_init_dma_engine - DMA init.
3003  * @priv: driver private structure
3004  * Description:
3005  * It inits the DMA invoking the specific MAC/GMAC callback.
3006  * Some DMA parameters can be passed from the platform;
3007  * if these are not passed, a default is kept for the MAC or GMAC.
3008  */
3009 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3010 {
3011 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3012 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3013 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3014 	struct stmmac_rx_queue *rx_q;
3015 	struct stmmac_tx_queue *tx_q;
3016 	u32 chan = 0;
3017 	int atds = 0;
3018 	int ret = 0;
3019 
3020 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3021 		dev_err(priv->device, "Invalid DMA configuration\n");
3022 		return -EINVAL;
3023 	}
3024 
3025 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3026 		atds = 1;
3027 
3028 	ret = stmmac_reset(priv, priv->ioaddr);
3029 	if (ret) {
3030 		dev_err(priv->device, "Failed to reset the dma\n");
3031 		return ret;
3032 	}
3033 
3034 	/* DMA Configuration */
3035 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
3036 
3037 	if (priv->plat->axi)
3038 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3039 
3040 	/* DMA CSR Channel configuration */
3041 	for (chan = 0; chan < dma_csr_ch; chan++) {
3042 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3043 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3044 	}
3045 
3046 	/* DMA RX Channel Configuration */
3047 	for (chan = 0; chan < rx_channels_count; chan++) {
3048 		rx_q = &priv->dma_conf.rx_queue[chan];
3049 
3050 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3051 				    rx_q->dma_rx_phy, chan);
3052 
3053 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3054 				     (rx_q->buf_alloc_num *
3055 				      sizeof(struct dma_desc));
3056 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3057 				       rx_q->rx_tail_addr, chan);
3058 	}
3059 
3060 	/* DMA TX Channel Configuration */
3061 	for (chan = 0; chan < tx_channels_count; chan++) {
3062 		tx_q = &priv->dma_conf.tx_queue[chan];
3063 
3064 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3065 				    tx_q->dma_tx_phy, chan);
3066 
3067 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3068 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3069 				       tx_q->tx_tail_addr, chan);
3070 	}
3071 
3072 	return ret;
3073 }
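
/* Illustrative sketch (not part of the driver): the RX tail pointer above is
 * placed right after the last populated descriptor. With e.g. 512
 * pre-allocated buffers:
 *
 *	rx_tail_addr = dma_rx_phy + 512 * sizeof(struct dma_desc)
 *	             = dma_rx_phy + 512 * 16 = dma_rx_phy + 0x2000
 *
 * assuming the basic four-word (16-byte) struct dma_desc layout.
 */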
3074 
3075 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3076 {
3077 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3078 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3079 	struct stmmac_channel *ch;
3080 	struct napi_struct *napi;
3081 
3082 	if (!tx_coal_timer)
3083 		return;
3084 
3085 	ch = &priv->channel[tx_q->queue_index];
3086 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3087 
3088 	/* Arm the timer only if napi is not already scheduled.
3089 	 * If napi is scheduled, try to cancel any pending timer; it will be
3090 	 * armed again on the next scheduled napi run.
3091 	 */
3092 	if (unlikely(!napi_is_scheduled(napi)))
3093 		hrtimer_start(&tx_q->txtimer,
3094 			      STMMAC_COAL_TIMER(tx_coal_timer),
3095 			      HRTIMER_MODE_REL);
3096 	else
3097 		hrtimer_try_to_cancel(&tx_q->txtimer);
3098 }
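
/* Illustrative sketch (not part of the driver): the per-queue tx_coal_timer
 * value is in microseconds, so arming the hrtimer is essentially a usec to
 * ktime_t conversion plus a relative start. Assuming STMMAC_COAL_TIMER() is
 * such a plain conversion:
 *
 *	ktime_t timeout = ns_to_ktime((u64)tx_coal_timer * NSEC_PER_USEC);
 *
 *	hrtimer_start(&tx_q->txtimer, timeout, HRTIMER_MODE_REL);
 *
 * With a value of 1000 us this re-arms the TX cleanup roughly once per
 * millisecond unless napi runs first.
 */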
3099 
3100 /**
3101  * stmmac_tx_timer - mitigation sw timer for tx.
3102  * @t: pointer to the hrtimer embedded in the TX queue
3103  * Description:
3104  * This is the timer handler that schedules the TX napi, which invokes stmmac_tx_clean.
3105  */
3106 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3107 {
3108 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3109 	struct stmmac_priv *priv = tx_q->priv_data;
3110 	struct stmmac_channel *ch;
3111 	struct napi_struct *napi;
3112 
3113 	ch = &priv->channel[tx_q->queue_index];
3114 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3115 
3116 	if (likely(napi_schedule_prep(napi))) {
3117 		unsigned long flags;
3118 
3119 		spin_lock_irqsave(&ch->lock, flags);
3120 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3121 		spin_unlock_irqrestore(&ch->lock, flags);
3122 		__napi_schedule(napi);
3123 	}
3124 
3125 	return HRTIMER_NORESTART;
3126 }
3127 
3128 /**
3129  * stmmac_init_coalesce - init mitigation options.
3130  * @priv: driver private structure
3131  * Description:
3132  * This initializes the coalescing parameters: i.e. timer rate,
3133  * timer handler and the default frame threshold used for setting the
3134  * interrupt-on-completion bit.
3135  */
3136 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3137 {
3138 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3139 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3140 	u32 chan;
3141 
3142 	for (chan = 0; chan < tx_channel_count; chan++) {
3143 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3144 
3145 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3146 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3147 
3148 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3149 		tx_q->txtimer.function = stmmac_tx_timer;
3150 	}
3151 
3152 	for (chan = 0; chan < rx_channel_count; chan++)
3153 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3154 }
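
/* Illustrative sketch (not part of the driver): with the frame-based
 * coalescing defaults set above, the TX paths (see stmmac_xdp_xmit_zc() for
 * one instance) request an interrupt-on-completion (IC bit) only on every
 * Nth queued frame, with N = tx_coal_frames[queue]:
 *
 *	tx_count_frames:  1  2  ...  N-1   N   N+1  ...  2N
 *	set_ic:           0  0  ...   0    1    0   ...   1
 *
 * A tx_coal_frames value of 0 disables frame-based IC requests entirely,
 * leaving interrupt mitigation to the hrtimer armed by stmmac_tx_timer_arm().
 */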
3155 
3156 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3157 {
3158 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3159 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3160 	u32 chan;
3161 
3162 	/* set TX ring length */
3163 	for (chan = 0; chan < tx_channels_count; chan++)
3164 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3165 				       (priv->dma_conf.dma_tx_size - 1), chan);
3166 
3167 	/* set RX ring length */
3168 	for (chan = 0; chan < rx_channels_count; chan++)
3169 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3170 				       (priv->dma_conf.dma_rx_size - 1), chan);
3171 }
3172 
3173 /**
3174  *  stmmac_set_tx_queue_weight - Set TX queue weight
3175  *  @priv: driver private structure
3176  *  Description: It is used for setting the TX queue weights
3177  */
3178 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3179 {
3180 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3181 	u32 weight;
3182 	u32 queue;
3183 
3184 	for (queue = 0; queue < tx_queues_count; queue++) {
3185 		weight = priv->plat->tx_queues_cfg[queue].weight;
3186 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3187 	}
3188 }
3189 
3190 /**
3191  *  stmmac_configure_cbs - Configure CBS in TX queue
3192  *  @priv: driver private structure
3193  *  Description: It is used for configuring CBS in AVB TX queues
3194  */
3195 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3196 {
3197 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3198 	u32 mode_to_use;
3199 	u32 queue;
3200 
3201 	/* queue 0 is reserved for legacy traffic */
3202 	for (queue = 1; queue < tx_queues_count; queue++) {
3203 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3204 		if (mode_to_use == MTL_QUEUE_DCB)
3205 			continue;
3206 
3207 		stmmac_config_cbs(priv, priv->hw,
3208 				priv->plat->tx_queues_cfg[queue].send_slope,
3209 				priv->plat->tx_queues_cfg[queue].idle_slope,
3210 				priv->plat->tx_queues_cfg[queue].high_credit,
3211 				priv->plat->tx_queues_cfg[queue].low_credit,
3212 				queue);
3213 	}
3214 }
3215 
3216 /**
3217  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3218  *  @priv: driver private structure
3219  *  Description: It is used for mapping RX queues to RX dma channels
3220  */
3221 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3222 {
3223 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3224 	u32 queue;
3225 	u32 chan;
3226 
3227 	for (queue = 0; queue < rx_queues_count; queue++) {
3228 		chan = priv->plat->rx_queues_cfg[queue].chan;
3229 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3230 	}
3231 }
3232 
3233 /**
3234  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3235  *  @priv: driver private structure
3236  *  Description: It is used for configuring the RX Queue Priority
3237  */
3238 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3239 {
3240 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3241 	u32 queue;
3242 	u32 prio;
3243 
3244 	for (queue = 0; queue < rx_queues_count; queue++) {
3245 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3246 			continue;
3247 
3248 		prio = priv->plat->rx_queues_cfg[queue].prio;
3249 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3250 	}
3251 }
3252 
3253 /**
3254  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3255  *  @priv: driver private structure
3256  *  Description: It is used for configuring the TX Queue Priority
3257  */
3258 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3259 {
3260 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3261 	u32 queue;
3262 	u32 prio;
3263 
3264 	for (queue = 0; queue < tx_queues_count; queue++) {
3265 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3266 			continue;
3267 
3268 		prio = priv->plat->tx_queues_cfg[queue].prio;
3269 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3270 	}
3271 }
3272 
3273 /**
3274  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3275  *  @priv: driver private structure
3276  *  Description: It is used for configuring the RX queue routing
3277  */
3278 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3279 {
3280 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3281 	u32 queue;
3282 	u8 packet;
3283 
3284 	for (queue = 0; queue < rx_queues_count; queue++) {
3285 		/* no specific packet type routing specified for the queue */
3286 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3287 			continue;
3288 
3289 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3290 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3291 	}
3292 }
3293 
3294 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3295 {
3296 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3297 		priv->rss.enable = false;
3298 		return;
3299 	}
3300 
3301 	if (priv->dev->features & NETIF_F_RXHASH)
3302 		priv->rss.enable = true;
3303 	else
3304 		priv->rss.enable = false;
3305 
3306 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3307 			     priv->plat->rx_queues_to_use);
3308 }
3309 
3310 /**
3311  *  stmmac_mtl_configuration - Configure MTL
3312  *  @priv: driver private structure
3313  *  Description: It is used for configuring MTL
3314  */
3315 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3316 {
3317 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3318 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3319 
3320 	if (tx_queues_count > 1)
3321 		stmmac_set_tx_queue_weight(priv);
3322 
3323 	/* Configure MTL RX algorithms */
3324 	if (rx_queues_count > 1)
3325 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3326 				priv->plat->rx_sched_algorithm);
3327 
3328 	/* Configure MTL TX algorithms */
3329 	if (tx_queues_count > 1)
3330 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3331 				priv->plat->tx_sched_algorithm);
3332 
3333 	/* Configure CBS in AVB TX queues */
3334 	if (tx_queues_count > 1)
3335 		stmmac_configure_cbs(priv);
3336 
3337 	/* Map RX MTL to DMA channels */
3338 	stmmac_rx_queue_dma_chan_map(priv);
3339 
3340 	/* Enable MAC RX Queues */
3341 	stmmac_mac_enable_rx_queues(priv);
3342 
3343 	/* Set RX priorities */
3344 	if (rx_queues_count > 1)
3345 		stmmac_mac_config_rx_queues_prio(priv);
3346 
3347 	/* Set TX priorities */
3348 	if (tx_queues_count > 1)
3349 		stmmac_mac_config_tx_queues_prio(priv);
3350 
3351 	/* Set RX routing */
3352 	if (rx_queues_count > 1)
3353 		stmmac_mac_config_rx_queues_routing(priv);
3354 
3355 	/* Receive Side Scaling */
3356 	if (rx_queues_count > 1)
3357 		stmmac_mac_config_rss(priv);
3358 }
3359 
3360 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3361 {
3362 	if (priv->dma_cap.asp) {
3363 		netdev_info(priv->dev, "Enabling Safety Features\n");
3364 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3365 					  priv->plat->safety_feat_cfg);
3366 	} else {
3367 		netdev_info(priv->dev, "No Safety Features support found\n");
3368 	}
3369 }
3370 
3371 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3372 {
3373 	char *name;
3374 
3375 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3376 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3377 
3378 	name = priv->wq_name;
3379 	sprintf(name, "%s-fpe", priv->dev->name);
3380 
3381 	priv->fpe_wq = create_singlethread_workqueue(name);
3382 	if (!priv->fpe_wq) {
3383 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3384 
3385 		return -ENOMEM;
3386 	}
3387 	netdev_info(priv->dev, "FPE workqueue start");
3388 
3389 	return 0;
3390 }
3391 
3392 /**
3393  * stmmac_hw_setup - setup mac in a usable state.
3394  *  @dev : pointer to the device structure.
3395  *  @ptp_register: register PTP if set
3396  *  Description:
3397  *  this is the main function to set up the HW in a usable state: the
3398  *  dma engine is reset, the core registers are configured (e.g. AXI,
3399  *  checksum features, timers) and the DMA is made ready to start receiving
3400  *  and transmitting.
3401  *  Return value:
3402  *  0 on success and an appropriate negative error code, as defined in
3403  *  errno.h, on failure.
3404  */
3405 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3406 {
3407 	struct stmmac_priv *priv = netdev_priv(dev);
3408 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3409 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3410 	bool sph_en;
3411 	u32 chan;
3412 	int ret;
3413 
3414 	/* DMA initialization and SW reset */
3415 	ret = stmmac_init_dma_engine(priv);
3416 	if (ret < 0) {
3417 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3418 			   __func__);
3419 		return ret;
3420 	}
3421 
3422 	/* Copy the MAC addr into the HW  */
3423 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3424 
3425 	/* PS and related bits will be programmed according to the speed */
3426 	if (priv->hw->pcs) {
3427 		int speed = priv->plat->mac_port_sel_speed;
3428 
3429 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3430 		    (speed == SPEED_1000)) {
3431 			priv->hw->ps = speed;
3432 		} else {
3433 			dev_warn(priv->device, "invalid port speed\n");
3434 			priv->hw->ps = 0;
3435 		}
3436 	}
3437 
3438 	/* Initialize the MAC Core */
3439 	stmmac_core_init(priv, priv->hw, dev);
3440 
3441 	/* Initialize MTL */
3442 	stmmac_mtl_configuration(priv);
3443 
3444 	/* Initialize Safety Features */
3445 	stmmac_safety_feat_configuration(priv);
3446 
3447 	ret = stmmac_rx_ipc(priv, priv->hw);
3448 	if (!ret) {
3449 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3450 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3451 		priv->hw->rx_csum = 0;
3452 	}
3453 
3454 	/* Enable the MAC Rx/Tx */
3455 	stmmac_mac_set(priv, priv->ioaddr, true);
3456 
3457 	/* Set the HW DMA mode and the COE */
3458 	stmmac_dma_operation_mode(priv);
3459 
3460 	stmmac_mmc_setup(priv);
3461 
3462 	if (ptp_register) {
3463 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3464 		if (ret < 0)
3465 			netdev_warn(priv->dev,
3466 				    "failed to enable PTP reference clock: %pe\n",
3467 				    ERR_PTR(ret));
3468 	}
3469 
3470 	ret = stmmac_init_ptp(priv);
3471 	if (ret == -EOPNOTSUPP)
3472 		netdev_info(priv->dev, "PTP not supported by HW\n");
3473 	else if (ret)
3474 		netdev_warn(priv->dev, "PTP init failed\n");
3475 	else if (ptp_register)
3476 		stmmac_ptp_register(priv);
3477 
3478 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3479 
3480 	/* Convert the timer from msec to usec */
3481 	if (!priv->tx_lpi_timer)
3482 		priv->tx_lpi_timer = eee_timer * 1000;
3483 
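	/* Program the per-queue RX interrupt watchdog (RIWT) used for RX
	 * interrupt coalescing, falling back to DEF_DMA_RIWT when unset.
	 */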
3484 	if (priv->use_riwt) {
3485 		u32 queue;
3486 
3487 		for (queue = 0; queue < rx_cnt; queue++) {
3488 			if (!priv->rx_riwt[queue])
3489 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3490 
3491 			stmmac_rx_watchdog(priv, priv->ioaddr,
3492 					   priv->rx_riwt[queue], queue);
3493 		}
3494 	}
3495 
3496 	if (priv->hw->pcs)
3497 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3498 
3499 	/* set TX and RX rings length */
3500 	stmmac_set_rings_length(priv);
3501 
3502 	/* Enable TSO */
3503 	if (priv->tso) {
3504 		for (chan = 0; chan < tx_cnt; chan++) {
3505 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3506 
3507 			/* TSO and TBS cannot co-exist */
3508 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3509 				continue;
3510 
3511 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3512 		}
3513 	}
3514 
3515 	/* Enable Split Header */
3516 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3517 	for (chan = 0; chan < rx_cnt; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);

3521 	/* VLAN Tag Insertion */
3522 	if (priv->dma_cap.vlins)
3523 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3524 
3525 	/* TBS */
3526 	for (chan = 0; chan < tx_cnt; chan++) {
3527 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3528 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3529 
3530 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3531 	}
3532 
3533 	/* Configure real RX and TX queues */
3534 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3535 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3536 
3537 	/* Start the ball rolling... */
3538 	stmmac_start_all_dma(priv);
3539 
3540 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3541 
3542 	if (priv->dma_cap.fpesel) {
3543 		stmmac_fpe_start_wq(priv);
3544 
3545 		if (priv->plat->fpe_cfg->enable)
3546 			stmmac_fpe_handshake(priv, true);
3547 	}
3548 
3549 	return 0;
3550 }
3551 
3552 static void stmmac_hw_teardown(struct net_device *dev)
3553 {
3554 	struct stmmac_priv *priv = netdev_priv(dev);
3555 
3556 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3557 }
3558 
3559 static void stmmac_free_irq(struct net_device *dev,
3560 			    enum request_irq_err irq_err, int irq_idx)
3561 {
3562 	struct stmmac_priv *priv = netdev_priv(dev);
3563 	int j;
3564 
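	/* The cases below rely on fallthrough: starting at the point of the
	 * failed request, the remaining cases release the other IRQ lines
	 * (REQ_IRQ_ERR_ALL releases everything).
	 */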
3565 	switch (irq_err) {
3566 	case REQ_IRQ_ERR_ALL:
3567 		irq_idx = priv->plat->tx_queues_to_use;
3568 		fallthrough;
3569 	case REQ_IRQ_ERR_TX:
3570 		for (j = irq_idx - 1; j >= 0; j--) {
3571 			if (priv->tx_irq[j] > 0) {
3572 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3573 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3574 			}
3575 		}
3576 		irq_idx = priv->plat->rx_queues_to_use;
3577 		fallthrough;
3578 	case REQ_IRQ_ERR_RX:
3579 		for (j = irq_idx - 1; j >= 0; j--) {
3580 			if (priv->rx_irq[j] > 0) {
3581 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3582 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3583 			}
3584 		}
3585 
3586 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3587 			free_irq(priv->sfty_ue_irq, dev);
3588 		fallthrough;
3589 	case REQ_IRQ_ERR_SFTY_UE:
3590 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3591 			free_irq(priv->sfty_ce_irq, dev);
3592 		fallthrough;
3593 	case REQ_IRQ_ERR_SFTY_CE:
3594 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3595 			free_irq(priv->lpi_irq, dev);
3596 		fallthrough;
3597 	case REQ_IRQ_ERR_LPI:
3598 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3599 			free_irq(priv->wol_irq, dev);
3600 		fallthrough;
3601 	case REQ_IRQ_ERR_SFTY:
3602 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3603 			free_irq(priv->sfty_irq, dev);
3604 		fallthrough;
3605 	case REQ_IRQ_ERR_WOL:
3606 		free_irq(dev->irq, dev);
3607 		fallthrough;
3608 	case REQ_IRQ_ERR_MAC:
3609 	case REQ_IRQ_ERR_NO:
		/* If the MAC IRQ request failed, there are no more IRQs to free */
3611 		break;
3612 	}
3613 }
3614 
3615 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3616 {
3617 	struct stmmac_priv *priv = netdev_priv(dev);
3618 	enum request_irq_err irq_err;
3619 	cpumask_t cpu_mask;
3620 	int irq_idx = 0;
3621 	char *int_name;
3622 	int ret;
3623 	int i;
3624 
3625 	/* For common interrupt */
3626 	int_name = priv->int_name_mac;
3627 	sprintf(int_name, "%s:%s", dev->name, "mac");
3628 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3629 			  0, int_name, dev);
3630 	if (unlikely(ret < 0)) {
3631 		netdev_err(priv->dev,
3632 			   "%s: alloc mac MSI %d (error: %d)\n",
3633 			   __func__, dev->irq, ret);
3634 		irq_err = REQ_IRQ_ERR_MAC;
3635 		goto irq_error;
3636 	}
3637 
	/* Request the Wake IRQ in case a separate line
	 * is used for WoL
	 */
3641 	priv->wol_irq_disabled = true;
3642 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3643 		int_name = priv->int_name_wol;
3644 		sprintf(int_name, "%s:%s", dev->name, "wol");
3645 		ret = request_irq(priv->wol_irq,
3646 				  stmmac_mac_interrupt,
3647 				  0, int_name, dev);
3648 		if (unlikely(ret < 0)) {
3649 			netdev_err(priv->dev,
3650 				   "%s: alloc wol MSI %d (error: %d)\n",
3651 				   __func__, priv->wol_irq, ret);
3652 			irq_err = REQ_IRQ_ERR_WOL;
3653 			goto irq_error;
3654 		}
3655 	}
3656 
	/* Request the LPI IRQ in case a separate line
	 * is used for LPI
	 */
3660 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3661 		int_name = priv->int_name_lpi;
3662 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3663 		ret = request_irq(priv->lpi_irq,
3664 				  stmmac_mac_interrupt,
3665 				  0, int_name, dev);
3666 		if (unlikely(ret < 0)) {
3667 			netdev_err(priv->dev,
3668 				   "%s: alloc lpi MSI %d (error: %d)\n",
3669 				   __func__, priv->lpi_irq, ret);
3670 			irq_err = REQ_IRQ_ERR_LPI;
3671 			goto irq_error;
3672 		}
3673 	}
3674 
	/* Request the common Safety Feature Correctable/Uncorrectable
	 * Error line in case a separate line is used
	 */
3678 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3679 		int_name = priv->int_name_sfty;
3680 		sprintf(int_name, "%s:%s", dev->name, "safety");
3681 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3682 				  0, int_name, dev);
3683 		if (unlikely(ret < 0)) {
3684 			netdev_err(priv->dev,
3685 				   "%s: alloc sfty MSI %d (error: %d)\n",
3686 				   __func__, priv->sfty_irq, ret);
3687 			irq_err = REQ_IRQ_ERR_SFTY;
3688 			goto irq_error;
3689 		}
3690 	}
3691 
	/* Request the Safety Feature Correctable Error line in
	 * case a separate line is used
	 */
3695 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3696 		int_name = priv->int_name_sfty_ce;
3697 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3698 		ret = request_irq(priv->sfty_ce_irq,
3699 				  stmmac_safety_interrupt,
3700 				  0, int_name, dev);
3701 		if (unlikely(ret < 0)) {
3702 			netdev_err(priv->dev,
3703 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3704 				   __func__, priv->sfty_ce_irq, ret);
3705 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3706 			goto irq_error;
3707 		}
3708 	}
3709 
	/* Request the Safety Feature Uncorrectable Error line in
	 * case a separate line is used
	 */
3713 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3714 		int_name = priv->int_name_sfty_ue;
3715 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3716 		ret = request_irq(priv->sfty_ue_irq,
3717 				  stmmac_safety_interrupt,
3718 				  0, int_name, dev);
3719 		if (unlikely(ret < 0)) {
3720 			netdev_err(priv->dev,
3721 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3722 				   __func__, priv->sfty_ue_irq, ret);
3723 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3724 			goto irq_error;
3725 		}
3726 	}
3727 
3728 	/* Request Rx MSI irq */
3729 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3730 		if (i >= MTL_MAX_RX_QUEUES)
3731 			break;
3732 		if (priv->rx_irq[i] == 0)
3733 			continue;
3734 
3735 		int_name = priv->int_name_rx_irq[i];
3736 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3737 		ret = request_irq(priv->rx_irq[i],
3738 				  stmmac_msi_intr_rx,
3739 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3740 		if (unlikely(ret < 0)) {
3741 			netdev_err(priv->dev,
3742 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3743 				   __func__, i, priv->rx_irq[i], ret);
3744 			irq_err = REQ_IRQ_ERR_RX;
3745 			irq_idx = i;
3746 			goto irq_error;
3747 		}
3748 		cpumask_clear(&cpu_mask);
3749 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3750 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3751 	}
3752 
3753 	/* Request Tx MSI irq */
3754 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3755 		if (i >= MTL_MAX_TX_QUEUES)
3756 			break;
3757 		if (priv->tx_irq[i] == 0)
3758 			continue;
3759 
3760 		int_name = priv->int_name_tx_irq[i];
3761 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3762 		ret = request_irq(priv->tx_irq[i],
3763 				  stmmac_msi_intr_tx,
3764 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3765 		if (unlikely(ret < 0)) {
3766 			netdev_err(priv->dev,
3767 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3768 				   __func__, i, priv->tx_irq[i], ret);
3769 			irq_err = REQ_IRQ_ERR_TX;
3770 			irq_idx = i;
3771 			goto irq_error;
3772 		}
3773 		cpumask_clear(&cpu_mask);
3774 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3775 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3776 	}
3777 
3778 	return 0;
3779 
3780 irq_error:
3781 	stmmac_free_irq(dev, irq_err, irq_idx);
3782 	return ret;
3783 }
3784 
3785 static int stmmac_request_irq_single(struct net_device *dev)
3786 {
3787 	struct stmmac_priv *priv = netdev_priv(dev);
3788 	enum request_irq_err irq_err;
3789 	int ret;
3790 
3791 	ret = request_irq(dev->irq, stmmac_interrupt,
3792 			  IRQF_SHARED, dev->name, dev);
3793 	if (unlikely(ret < 0)) {
3794 		netdev_err(priv->dev,
3795 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3796 			   __func__, dev->irq, ret);
3797 		irq_err = REQ_IRQ_ERR_MAC;
3798 		goto irq_error;
3799 	}
3800 
	/* Request the Wake IRQ in case a separate line
	 * is used for WoL
	 */
3804 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3805 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3806 				  IRQF_SHARED, dev->name, dev);
3807 		if (unlikely(ret < 0)) {
3808 			netdev_err(priv->dev,
3809 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3810 				   __func__, priv->wol_irq, ret);
3811 			irq_err = REQ_IRQ_ERR_WOL;
3812 			goto irq_error;
3813 		}
3814 	}
3815 
	/* Request the LPI IRQ in case a separate line
	 * is used for LPI
	 */
3817 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3818 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3819 				  IRQF_SHARED, dev->name, dev);
3820 		if (unlikely(ret < 0)) {
3821 			netdev_err(priv->dev,
3822 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3823 				   __func__, priv->lpi_irq, ret);
3824 			irq_err = REQ_IRQ_ERR_LPI;
3825 			goto irq_error;
3826 		}
3827 	}
3828 
	/* Request the common Safety Feature Correctable/Uncorrectable
	 * Error line in case a separate line is used
	 */
3832 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3833 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3834 				  IRQF_SHARED, dev->name, dev);
3835 		if (unlikely(ret < 0)) {
3836 			netdev_err(priv->dev,
3837 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3838 				   __func__, priv->sfty_irq, ret);
3839 			irq_err = REQ_IRQ_ERR_SFTY;
3840 			goto irq_error;
3841 		}
3842 	}
3843 
3844 	return 0;
3845 
3846 irq_error:
3847 	stmmac_free_irq(dev, irq_err, 0);
3848 	return ret;
3849 }
3850 
3851 static int stmmac_request_irq(struct net_device *dev)
3852 {
3853 	struct stmmac_priv *priv = netdev_priv(dev);
3854 	int ret;
3855 
3856 	/* Request the IRQ lines */
3857 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3858 		ret = stmmac_request_irq_multi_msi(dev);
3859 	else
3860 		ret = stmmac_request_irq_single(dev);
3861 
3862 	return ret;
3863 }
3864 
3865 /**
3866  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3867  *  @priv: driver private structure
 *  @mtu: MTU used to size the DMA queues and buffers
 *  Description: Allocate and generate a dma_conf based on the provided MTU.
 *  Allocate the Tx/Rx DMA queues and initialize them.
 *  Return value:
 *  the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3873  */
3874 static struct stmmac_dma_conf *
3875 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3876 {
3877 	struct stmmac_dma_conf *dma_conf;
3878 	int chan, bfsize, ret;
3879 
3880 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3881 	if (!dma_conf) {
3882 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3883 			   __func__);
3884 		return ERR_PTR(-ENOMEM);
3885 	}
3886 
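	/* Pick the DMA buffer size: use 16KiB buffers when the MTU requires
	 * it and the HW supports them, otherwise derive the size from the MTU.
	 */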
3887 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3888 	if (bfsize < 0)
3889 		bfsize = 0;
3890 
3891 	if (bfsize < BUF_SIZE_16KiB)
3892 		bfsize = stmmac_set_bfsize(mtu, 0);
3893 
3894 	dma_conf->dma_buf_sz = bfsize;
	/* Choose the tx/rx size from the one already defined in the
	 * priv struct (if defined).
	 */
3898 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3899 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3900 
3901 	if (!dma_conf->dma_tx_size)
3902 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3903 	if (!dma_conf->dma_rx_size)
3904 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3905 
3906 	/* Earlier check for TBS */
3907 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3908 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3909 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3910 
3911 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3912 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3913 	}
3914 
3915 	ret = alloc_dma_desc_resources(priv, dma_conf);
3916 	if (ret < 0) {
3917 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3918 			   __func__);
3919 		goto alloc_error;
3920 	}
3921 
3922 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3923 	if (ret < 0) {
3924 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3925 			   __func__);
3926 		goto init_error;
3927 	}
3928 
3929 	return dma_conf;
3930 
3931 init_error:
3932 	free_dma_desc_resources(priv, dma_conf);
3933 alloc_error:
3934 	kfree(dma_conf);
3935 	return ERR_PTR(ret);
3936 }
3937 
3938 /**
3939  *  __stmmac_open - open entry point of the driver
3940  *  @dev : pointer to the device structure.
 *  @dma_conf: structure holding the DMA configuration to apply
3942  *  Description:
3943  *  This function is the open entry point of the driver.
3944  *  Return value:
3945  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3946  *  file on failure.
3947  */
3948 static int __stmmac_open(struct net_device *dev,
3949 			 struct stmmac_dma_conf *dma_conf)
3950 {
3951 	struct stmmac_priv *priv = netdev_priv(dev);
3952 	int mode = priv->plat->phy_interface;
3953 	u32 chan;
3954 	int ret;
3955 
3956 	ret = pm_runtime_resume_and_get(priv->device);
3957 	if (ret < 0)
3958 		return ret;
3959 
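	/* Attach a PHY unless the link is entirely handled by a PCS
	 * (TBI/RTBI, XPCS in Clause 73 autoneg, or a Lynx PCS).
	 */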
3960 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3961 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3962 	    (!priv->hw->xpcs ||
3963 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3964 	    !priv->hw->lynx_pcs) {
3965 		ret = stmmac_init_phy(dev);
3966 		if (ret) {
3967 			netdev_err(priv->dev,
3968 				   "%s: Cannot attach to PHY (error: %d)\n",
3969 				   __func__, ret);
3970 			goto init_phy_error;
3971 		}
3972 	}
3973 
3974 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3975 
3976 	buf_sz = dma_conf->dma_buf_sz;
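	/* Carry each TX queue's currently enabled TBS state over to the new
	 * DMA configuration before it replaces the old one.
	 */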
3977 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3978 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3979 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3980 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3981 
3982 	stmmac_reset_queues_param(priv);
3983 
3984 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3985 	    priv->plat->serdes_powerup) {
3986 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3987 		if (ret < 0) {
3988 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3989 				   __func__);
3990 			goto init_error;
3991 		}
3992 	}
3993 
3994 	ret = stmmac_hw_setup(dev, true);
3995 	if (ret < 0) {
3996 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3997 		goto init_error;
3998 	}
3999 
4000 	stmmac_init_coalesce(priv);
4001 
4002 	phylink_start(priv->phylink);
4003 	/* We may have called phylink_speed_down before */
4004 	phylink_speed_up(priv->phylink);
4005 
4006 	ret = stmmac_request_irq(dev);
4007 	if (ret)
4008 		goto irq_error;
4009 
4010 	stmmac_enable_all_queues(priv);
4011 	netif_tx_start_all_queues(priv->dev);
4012 	stmmac_enable_all_dma_irq(priv);
4013 
4014 	return 0;
4015 
4016 irq_error:
4017 	phylink_stop(priv->phylink);
4018 
4019 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4020 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4021 
4022 	stmmac_hw_teardown(dev);
4023 init_error:
4024 	phylink_disconnect_phy(priv->phylink);
4025 init_phy_error:
4026 	pm_runtime_put(priv->device);
4027 	return ret;
4028 }
4029 
4030 static int stmmac_open(struct net_device *dev)
4031 {
4032 	struct stmmac_priv *priv = netdev_priv(dev);
4033 	struct stmmac_dma_conf *dma_conf;
4034 	int ret;
4035 
4036 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4037 	if (IS_ERR(dma_conf))
4038 		return PTR_ERR(dma_conf);
4039 
4040 	ret = __stmmac_open(dev, dma_conf);
4041 	if (ret)
4042 		free_dma_desc_resources(priv, dma_conf);
4043 
4044 	kfree(dma_conf);
4045 	return ret;
4046 }
4047 
4048 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
4049 {
4050 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
4051 
4052 	if (priv->fpe_wq) {
4053 		destroy_workqueue(priv->fpe_wq);
4054 		priv->fpe_wq = NULL;
4055 	}
4056 
4057 	netdev_info(priv->dev, "FPE workqueue stop");
4058 }
4059 
4060 /**
4061  *  stmmac_release - close entry point of the driver
4062  *  @dev : device pointer.
4063  *  Description:
4064  *  This is the stop entry point of the driver.
4065  */
4066 static int stmmac_release(struct net_device *dev)
4067 {
4068 	struct stmmac_priv *priv = netdev_priv(dev);
4069 	u32 chan;
4070 
4071 	if (device_may_wakeup(priv->device))
4072 		phylink_speed_down(priv->phylink, false);
4073 	/* Stop and disconnect the PHY */
4074 	phylink_stop(priv->phylink);
4075 	phylink_disconnect_phy(priv->phylink);
4076 
4077 	stmmac_disable_all_queues(priv);
4078 
4079 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4080 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4081 
4082 	netif_tx_disable(dev);
4083 
4084 	/* Free the IRQ lines */
4085 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4086 
4087 	if (priv->eee_enabled) {
4088 		priv->tx_path_in_lpi_mode = false;
4089 		del_timer_sync(&priv->eee_ctrl_timer);
4090 	}
4091 
4092 	/* Stop TX/RX DMA and clear the descriptors */
4093 	stmmac_stop_all_dma(priv);
4094 
4095 	/* Release and free the Rx/Tx resources */
4096 	free_dma_desc_resources(priv, &priv->dma_conf);
4097 
4098 	/* Disable the MAC Rx/Tx */
4099 	stmmac_mac_set(priv, priv->ioaddr, false);
4100 
	/* Power down the SerDes if present */
4102 	if (priv->plat->serdes_powerdown)
4103 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4104 
4105 	netif_carrier_off(dev);
4106 
4107 	stmmac_release_ptp(priv);
4108 
4109 	pm_runtime_put(priv->device);
4110 
4111 	if (priv->dma_cap.fpesel)
4112 		stmmac_fpe_stop_wq(priv);
4113 
4114 	return 0;
4115 }
4116 
4117 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4118 			       struct stmmac_tx_queue *tx_q)
4119 {
4120 	u16 tag = 0x0, inner_tag = 0x0;
4121 	u32 inner_type = 0x0;
4122 	struct dma_desc *p;
4123 
4124 	if (!priv->dma_cap.vlins)
4125 		return false;
4126 	if (!skb_vlan_tag_present(skb))
4127 		return false;
4128 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4129 		inner_tag = skb_vlan_tag_get(skb);
4130 		inner_type = STMMAC_VLAN_INSERT;
4131 	}
4132 
4133 	tag = skb_vlan_tag_get(skb);
4134 
4135 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4136 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4137 	else
4138 		p = &tx_q->dma_tx[tx_q->cur_tx];
4139 
4140 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4141 		return false;
4142 
4143 	stmmac_set_tx_owner(priv, p);
4144 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4145 	return true;
4146 }
4147 
4148 /**
 *  stmmac_tso_allocator - allocate and fill the TSO descriptors for a buffer
4150  *  @priv: driver private structure
4151  *  @des: buffer start address
4152  *  @total_len: total length to fill in descriptors
4153  *  @last_segment: condition for the last descriptor
4154  *  @queue: TX queue index
4155  *  Description:
 *  This function fills the descriptors and requests new descriptors
 *  according to the buffer length to fill.
4158  */
4159 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4160 				 int total_len, bool last_segment, u32 queue)
4161 {
4162 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4163 	struct dma_desc *desc;
4164 	u32 buff_size;
4165 	int tmp_len;
4166 
4167 	tmp_len = total_len;
4168 
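	/* Fill as many descriptors as needed, each covering at most
	 * TSO_MAX_BUFF_SIZE bytes of the payload.
	 */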
4169 	while (tmp_len > 0) {
4170 		dma_addr_t curr_addr;
4171 
4172 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4173 						priv->dma_conf.dma_tx_size);
4174 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4175 
4176 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4177 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4178 		else
4179 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4180 
4181 		curr_addr = des + (total_len - tmp_len);
4182 		if (priv->dma_cap.addr64 <= 32)
4183 			desc->des0 = cpu_to_le32(curr_addr);
4184 		else
4185 			stmmac_set_desc_addr(priv, desc, curr_addr);
4186 
4187 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4188 			    TSO_MAX_BUFF_SIZE : tmp_len;
4189 
4190 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4191 				0, 1,
4192 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4193 				0, 0);
4194 
4195 		tmp_len -= TSO_MAX_BUFF_SIZE;
4196 	}
4197 }
4198 
4199 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4200 {
4201 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4202 	int desc_size;
4203 
4204 	if (likely(priv->extend_desc))
4205 		desc_size = sizeof(struct dma_extended_desc);
4206 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4207 		desc_size = sizeof(struct dma_edesc);
4208 	else
4209 		desc_size = sizeof(struct dma_desc);
4210 
	/* The own bit must be the last thing written when preparing the
	 * descriptor, and a barrier is then needed to make sure that
	 * everything is coherent before granting the DMA engine.
	 */
4215 	wmb();
4216 
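	/* Advancing the tail pointer tells the DMA engine that new
	 * descriptors are available for transmission.
	 */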
4217 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4218 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4219 }
4220 
4221 /**
4222  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4223  *  @skb : the socket buffer
4224  *  @dev : device pointer
4225  *  Description: this is the transmit function that is called on TSO frames
4226  *  (support available on GMAC4 and newer chips).
 *  The diagram below shows the ring programming in case of TSO frames:
4228  *
4229  *  First Descriptor
4230  *   --------
4231  *   | DES0 |---> buffer1 = L2/L3/L4 header
4232  *   | DES1 |---> TCP Payload (can continue on next descr...)
4233  *   | DES2 |---> buffer 1 and 2 len
4234  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4235  *   --------
4236  *	|
4237  *     ...
4238  *	|
4239  *   --------
4240  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4241  *   | DES1 | --|
4242  *   | DES2 | --> buffer 1 and 2 len
4243  *   | DES3 |
4244  *   --------
4245  *
 * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when the MSS changes.
4247  */
4248 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4249 {
4250 	struct dma_desc *desc, *first, *mss_desc = NULL;
4251 	struct stmmac_priv *priv = netdev_priv(dev);
4252 	int nfrags = skb_shinfo(skb)->nr_frags;
4253 	u32 queue = skb_get_queue_mapping(skb);
4254 	unsigned int first_entry, tx_packets;
4255 	struct stmmac_txq_stats *txq_stats;
4256 	int tmp_pay_len = 0, first_tx;
4257 	struct stmmac_tx_queue *tx_q;
4258 	bool has_vlan, set_ic;
4259 	u8 proto_hdr_len, hdr;
4260 	u32 pay_len, mss;
4261 	dma_addr_t des;
4262 	int i;
4263 
4264 	tx_q = &priv->dma_conf.tx_queue[queue];
4265 	txq_stats = &priv->xstats.txq_stats[queue];
4266 	first_tx = tx_q->cur_tx;
4267 
4268 	/* Compute header lengths */
4269 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4270 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4271 		hdr = sizeof(struct udphdr);
4272 	} else {
4273 		proto_hdr_len = skb_tcp_all_headers(skb);
4274 		hdr = tcp_hdrlen(skb);
4275 	}
4276 
	/* Desc availability based on the threshold should be safe enough */
4278 	if (unlikely(stmmac_tx_avail(priv, queue) <
4279 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4280 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4281 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4282 								queue));
4283 			/* This is a hard error, log it. */
4284 			netdev_err(priv->dev,
4285 				   "%s: Tx Ring full when queue awake\n",
4286 				   __func__);
4287 		}
4288 		return NETDEV_TX_BUSY;
4289 	}
4290 
4291 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4292 
4293 	mss = skb_shinfo(skb)->gso_size;
4294 
4295 	/* set new MSS value if needed */
4296 	if (mss != tx_q->mss) {
4297 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4298 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4299 		else
4300 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4301 
4302 		stmmac_set_mss(priv, mss_desc, mss);
4303 		tx_q->mss = mss;
4304 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4305 						priv->dma_conf.dma_tx_size);
4306 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4307 	}
4308 
4309 	if (netif_msg_tx_queued(priv)) {
4310 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4311 			__func__, hdr, proto_hdr_len, pay_len, mss);
4312 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4313 			skb->data_len);
4314 	}
4315 
4316 	/* Check if VLAN can be inserted by HW */
4317 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4318 
4319 	first_entry = tx_q->cur_tx;
4320 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4321 
4322 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4323 		desc = &tx_q->dma_entx[first_entry].basic;
4324 	else
4325 		desc = &tx_q->dma_tx[first_entry];
4326 	first = desc;
4327 
4328 	if (has_vlan)
4329 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4330 
4331 	/* first descriptor: fill Headers on Buf1 */
4332 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4333 			     DMA_TO_DEVICE);
4334 	if (dma_mapping_error(priv->device, des))
4335 		goto dma_map_err;
4336 
4337 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4338 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4339 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4340 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4341 
4342 	if (priv->dma_cap.addr64 <= 32) {
4343 		first->des0 = cpu_to_le32(des);
4344 
4345 		/* Fill start of payload in buff2 of first descriptor */
4346 		if (pay_len)
4347 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4348 
4349 		/* If needed take extra descriptors to fill the remaining payload */
4350 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4351 	} else {
4352 		stmmac_set_desc_addr(priv, first, des);
4353 		tmp_pay_len = pay_len;
4354 		des += proto_hdr_len;
4355 		pay_len = 0;
4356 	}
4357 
4358 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4359 
4360 	/* Prepare fragments */
4361 	for (i = 0; i < nfrags; i++) {
4362 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4363 
4364 		des = skb_frag_dma_map(priv->device, frag, 0,
4365 				       skb_frag_size(frag),
4366 				       DMA_TO_DEVICE);
4367 		if (dma_mapping_error(priv->device, des))
4368 			goto dma_map_err;
4369 
4370 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4371 				     (i == nfrags - 1), queue);
4372 
4373 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4374 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4375 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4376 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4377 	}
4378 
4379 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4380 
4381 	/* Only the last descriptor gets to point to the skb. */
4382 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4383 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4384 
4385 	/* Manage tx mitigation */
4386 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4387 	tx_q->tx_count_frames += tx_packets;
4388 
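	/* Decide whether this frame should raise a TX completion interrupt:
	 * always when HW timestamping is requested, otherwise according to
	 * the TX frame coalescing threshold.
	 */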
4389 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4390 		set_ic = true;
4391 	else if (!priv->tx_coal_frames[queue])
4392 		set_ic = false;
4393 	else if (tx_packets > priv->tx_coal_frames[queue])
4394 		set_ic = true;
4395 	else if ((tx_q->tx_count_frames %
4396 		  priv->tx_coal_frames[queue]) < tx_packets)
4397 		set_ic = true;
4398 	else
4399 		set_ic = false;
4400 
4401 	if (set_ic) {
4402 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4403 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4404 		else
4405 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4406 
4407 		tx_q->tx_count_frames = 0;
4408 		stmmac_set_tx_ic(priv, desc);
4409 	}
4410 
4411 	/* We've used all descriptors we need for this skb, however,
4412 	 * advance cur_tx so that it references a fresh descriptor.
4413 	 * ndo_start_xmit will fill this descriptor the next time it's
4414 	 * called and stmmac_tx_clean may clean up to this descriptor.
4415 	 */
4416 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4417 
4418 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4419 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4420 			  __func__);
4421 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4422 	}
4423 
4424 	u64_stats_update_begin(&txq_stats->q_syncp);
4425 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4426 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4427 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4428 	if (set_ic)
4429 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4430 	u64_stats_update_end(&txq_stats->q_syncp);
4431 
4432 	if (priv->sarc_type)
4433 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4434 
4435 	skb_tx_timestamp(skb);
4436 
4437 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4438 		     priv->hwts_tx_en)) {
4439 		/* declare that device is doing timestamping */
4440 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4441 		stmmac_enable_tx_timestamp(priv, first);
4442 	}
4443 
4444 	/* Complete the first descriptor before granting the DMA */
4445 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4446 			proto_hdr_len,
4447 			pay_len,
4448 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4449 			hdr / 4, (skb->len - proto_hdr_len));
4450 
4451 	/* If context desc is used to change MSS */
4452 	if (mss_desc) {
4453 		/* Make sure that first descriptor has been completely
4454 		 * written, including its own bit. This is because MSS is
4455 		 * actually before first descriptor, so we need to make
4456 		 * sure that MSS's own bit is the last thing written.
4457 		 */
4458 		dma_wmb();
4459 		stmmac_set_tx_owner(priv, mss_desc);
4460 	}
4461 
4462 	if (netif_msg_pktdata(priv)) {
4463 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4464 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4465 			tx_q->cur_tx, first, nfrags);
4466 		pr_info(">>> frame to be transmitted: ");
4467 		print_pkt(skb->data, skb_headlen(skb));
4468 	}
4469 
4470 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4471 
4472 	stmmac_flush_tx_descriptors(priv, queue);
4473 	stmmac_tx_timer_arm(priv, queue);
4474 
4475 	return NETDEV_TX_OK;
4476 
4477 dma_map_err:
4478 	dev_err(priv->device, "Tx dma map failed\n");
4479 	dev_kfree_skb(skb);
4480 	priv->xstats.tx_dropped++;
4481 	return NETDEV_TX_OK;
4482 }
4483 
4484 /**
4485  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4486  * @skb: socket buffer to check
4487  *
4488  * Check if a packet has an ethertype that will trigger the IP header checks
4489  * and IP/TCP checksum engine of the stmmac core.
4490  *
4491  * Return: true if the ethertype can trigger the checksum engine, false
4492  * otherwise
4493  */
4494 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4495 {
4496 	int depth = 0;
4497 	__be16 proto;
4498 
4499 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4500 				    &depth);
4501 
4502 	return (depth <= ETH_HLEN) &&
4503 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4504 }
4505 
4506 /**
4507  *  stmmac_xmit - Tx entry point of the driver
4508  *  @skb : the socket buffer
4509  *  @dev : device pointer
4510  *  Description : this is the tx entry point of the driver.
4511  *  It programs the chain or the ring and supports oversized frames
 *  and the SG feature.
4513  */
4514 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4515 {
4516 	unsigned int first_entry, tx_packets, enh_desc;
4517 	struct stmmac_priv *priv = netdev_priv(dev);
4518 	unsigned int nopaged_len = skb_headlen(skb);
4519 	int i, csum_insertion = 0, is_jumbo = 0;
4520 	u32 queue = skb_get_queue_mapping(skb);
4521 	int nfrags = skb_shinfo(skb)->nr_frags;
4522 	int gso = skb_shinfo(skb)->gso_type;
4523 	struct stmmac_txq_stats *txq_stats;
4524 	struct dma_edesc *tbs_desc = NULL;
4525 	struct dma_desc *desc, *first;
4526 	struct stmmac_tx_queue *tx_q;
4527 	bool has_vlan, set_ic;
4528 	int entry, first_tx;
4529 	dma_addr_t des;
4530 
4531 	tx_q = &priv->dma_conf.tx_queue[queue];
4532 	txq_stats = &priv->xstats.txq_stats[queue];
4533 	first_tx = tx_q->cur_tx;
4534 
4535 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4536 		stmmac_disable_eee_mode(priv);
4537 
4538 	/* Manage oversized TCP frames for GMAC4 device */
4539 	if (skb_is_gso(skb) && priv->tso) {
4540 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4541 			return stmmac_tso_xmit(skb, dev);
4542 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4543 			return stmmac_tso_xmit(skb, dev);
4544 	}
4545 
4546 	if (priv->plat->est && priv->plat->est->enable &&
4547 	    priv->plat->est->max_sdu[queue] &&
	    skb->len > priv->plat->est->max_sdu[queue]) {
4549 		priv->xstats.max_sdu_txq_drop[queue]++;
4550 		goto max_sdu_err;
4551 	}
4552 
4553 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4554 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4555 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4556 								queue));
4557 			/* This is a hard error, log it. */
4558 			netdev_err(priv->dev,
4559 				   "%s: Tx Ring full when queue awake\n",
4560 				   __func__);
4561 		}
4562 		return NETDEV_TX_BUSY;
4563 	}
4564 
4565 	/* Check if VLAN can be inserted by HW */
4566 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4567 
4568 	entry = tx_q->cur_tx;
4569 	first_entry = entry;
4570 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4571 
4572 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4573 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4574 	 * queues. In that case, checksum offloading for those queues that don't
	 * support tx coe needs to fall back to software checksum calculation.
4576 	 *
4577 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4578 	 * also have to be checksummed in software.
4579 	 */
4580 	if (csum_insertion &&
4581 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4582 	     !stmmac_has_ip_ethertype(skb))) {
4583 		if (unlikely(skb_checksum_help(skb)))
4584 			goto dma_map_err;
4585 		csum_insertion = !csum_insertion;
4586 	}
4587 
4588 	if (likely(priv->extend_desc))
4589 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4590 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4591 		desc = &tx_q->dma_entx[entry].basic;
4592 	else
4593 		desc = tx_q->dma_tx + entry;
4594 
4595 	first = desc;
4596 
4597 	if (has_vlan)
4598 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4599 
4600 	enh_desc = priv->plat->enh_desc;
4601 	/* To program the descriptors according to the size of the frame */
4602 	if (enh_desc)
4603 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4604 
4605 	if (unlikely(is_jumbo)) {
4606 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4607 		if (unlikely(entry < 0) && (entry != -EINVAL))
4608 			goto dma_map_err;
4609 	}
4610 
4611 	for (i = 0; i < nfrags; i++) {
4612 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4613 		int len = skb_frag_size(frag);
4614 		bool last_segment = (i == (nfrags - 1));
4615 
4616 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4617 		WARN_ON(tx_q->tx_skbuff[entry]);
4618 
4619 		if (likely(priv->extend_desc))
4620 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4621 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4622 			desc = &tx_q->dma_entx[entry].basic;
4623 		else
4624 			desc = tx_q->dma_tx + entry;
4625 
4626 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4627 				       DMA_TO_DEVICE);
4628 		if (dma_mapping_error(priv->device, des))
4629 			goto dma_map_err; /* should reuse desc w/o issues */
4630 
4631 		tx_q->tx_skbuff_dma[entry].buf = des;
4632 
4633 		stmmac_set_desc_addr(priv, desc, des);
4634 
4635 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4636 		tx_q->tx_skbuff_dma[entry].len = len;
4637 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4638 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4639 
4640 		/* Prepare the descriptor and set the own bit too */
4641 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4642 				priv->mode, 1, last_segment, skb->len);
4643 	}
4644 
4645 	/* Only the last descriptor gets to point to the skb. */
4646 	tx_q->tx_skbuff[entry] = skb;
4647 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4648 
4649 	/* According to the coalesce parameter the IC bit for the latest
4650 	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
4652 	 * element in case of no SG.
4653 	 */
4654 	tx_packets = (entry + 1) - first_tx;
4655 	tx_q->tx_count_frames += tx_packets;
4656 
4657 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4658 		set_ic = true;
4659 	else if (!priv->tx_coal_frames[queue])
4660 		set_ic = false;
4661 	else if (tx_packets > priv->tx_coal_frames[queue])
4662 		set_ic = true;
4663 	else if ((tx_q->tx_count_frames %
4664 		  priv->tx_coal_frames[queue]) < tx_packets)
4665 		set_ic = true;
4666 	else
4667 		set_ic = false;
4668 
4669 	if (set_ic) {
4670 		if (likely(priv->extend_desc))
4671 			desc = &tx_q->dma_etx[entry].basic;
4672 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4673 			desc = &tx_q->dma_entx[entry].basic;
4674 		else
4675 			desc = &tx_q->dma_tx[entry];
4676 
4677 		tx_q->tx_count_frames = 0;
4678 		stmmac_set_tx_ic(priv, desc);
4679 	}
4680 
4681 	/* We've used all descriptors we need for this skb, however,
4682 	 * advance cur_tx so that it references a fresh descriptor.
4683 	 * ndo_start_xmit will fill this descriptor the next time it's
4684 	 * called and stmmac_tx_clean may clean up to this descriptor.
4685 	 */
4686 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4687 	tx_q->cur_tx = entry;
4688 
4689 	if (netif_msg_pktdata(priv)) {
4690 		netdev_dbg(priv->dev,
4691 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4692 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4693 			   entry, first, nfrags);
4694 
4695 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4696 		print_pkt(skb->data, skb->len);
4697 	}
4698 
4699 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4700 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4701 			  __func__);
4702 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4703 	}
4704 
4705 	u64_stats_update_begin(&txq_stats->q_syncp);
4706 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4707 	if (set_ic)
4708 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4709 	u64_stats_update_end(&txq_stats->q_syncp);
4710 
4711 	if (priv->sarc_type)
4712 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4713 
4714 	skb_tx_timestamp(skb);
4715 
4716 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4717 	 * problems because all the descriptors are actually ready to be
4718 	 * passed to the DMA engine.
4719 	 */
4720 	if (likely(!is_jumbo)) {
4721 		bool last_segment = (nfrags == 0);
4722 
4723 		des = dma_map_single(priv->device, skb->data,
4724 				     nopaged_len, DMA_TO_DEVICE);
4725 		if (dma_mapping_error(priv->device, des))
4726 			goto dma_map_err;
4727 
4728 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4729 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4730 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4731 
4732 		stmmac_set_desc_addr(priv, first, des);
4733 
4734 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4735 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4736 
4737 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4738 			     priv->hwts_tx_en)) {
4739 			/* declare that device is doing timestamping */
4740 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4741 			stmmac_enable_tx_timestamp(priv, first);
4742 		}
4743 
4744 		/* Prepare the first descriptor setting the OWN bit too */
4745 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4746 				csum_insertion, priv->mode, 0, last_segment,
4747 				skb->len);
4748 	}
4749 
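	/* With TBS enabled, program the launch time carried in skb->tstamp
	 * into the enhanced transmit descriptor.
	 */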
4750 	if (tx_q->tbs & STMMAC_TBS_EN) {
4751 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4752 
4753 		tbs_desc = &tx_q->dma_entx[first_entry];
4754 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4755 	}
4756 
4757 	stmmac_set_tx_owner(priv, first);
4758 
4759 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4760 
4761 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4762 
4763 	stmmac_flush_tx_descriptors(priv, queue);
4764 	stmmac_tx_timer_arm(priv, queue);
4765 
4766 	return NETDEV_TX_OK;
4767 
4768 dma_map_err:
4769 	netdev_err(priv->dev, "Tx DMA map failed\n");
4770 max_sdu_err:
4771 	dev_kfree_skb(skb);
4772 	priv->xstats.tx_dropped++;
4773 	return NETDEV_TX_OK;
4774 }
4775 
4776 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4777 {
4778 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4779 	__be16 vlan_proto = veth->h_vlan_proto;
4780 	u16 vlanid;
4781 
4782 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4783 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4784 	    (vlan_proto == htons(ETH_P_8021AD) &&
4785 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4786 		/* pop the vlan tag */
4787 		vlanid = ntohs(veth->h_vlan_TCI);
4788 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4789 		skb_pull(skb, VLAN_HLEN);
4790 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4791 	}
4792 }
4793 
4794 /**
 * stmmac_rx_refill - refill used RX buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this reallocates the RX buffers consumed by the
 * zero-copy reception process.
4800  */
4801 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4802 {
4803 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4804 	int dirty = stmmac_rx_dirty(priv, queue);
4805 	unsigned int entry = rx_q->dirty_rx;
4806 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4807 
4808 	if (priv->dma_cap.host_dma_width <= 32)
4809 		gfp |= GFP_DMA32;
4810 
4811 	while (dirty-- > 0) {
4812 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4813 		struct dma_desc *p;
4814 		bool use_rx_wd;
4815 
4816 		if (priv->extend_desc)
4817 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4818 		else
4819 			p = rx_q->dma_rx + entry;
4820 
4821 		if (!buf->page) {
4822 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4823 			if (!buf->page)
4824 				break;
4825 		}
4826 
4827 		if (priv->sph && !buf->sec_page) {
4828 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4829 			if (!buf->sec_page)
4830 				break;
4831 
4832 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4833 		}
4834 
4835 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4836 
4837 		stmmac_set_desc_addr(priv, p, buf->addr);
4838 		if (priv->sph)
4839 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4840 		else
4841 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4842 		stmmac_refill_desc3(priv, rx_q, p);
4843 
4844 		rx_q->rx_count_frames++;
4845 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4846 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4847 			rx_q->rx_count_frames = 0;
4848 
4849 		use_rx_wd = !priv->rx_coal_frames[queue];
4850 		use_rx_wd |= rx_q->rx_count_frames > 0;
4851 		if (!priv->use_riwt)
4852 			use_rx_wd = false;
4853 
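		/* Ensure all descriptor fields are written before handing
		 * ownership back to the DMA.
		 */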
4854 		dma_wmb();
4855 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4856 
4857 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4858 	}
4859 	rx_q->dirty_rx = entry;
4860 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4861 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4862 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4863 }
4864 
4865 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4866 				       struct dma_desc *p,
4867 				       int status, unsigned int len)
4868 {
4869 	unsigned int plen = 0, hlen = 0;
4870 	int coe = priv->hw->rx_csum;
4871 
4872 	/* Not first descriptor, buffer is always zero */
4873 	if (priv->sph && len)
4874 		return 0;
4875 
4876 	/* First descriptor, get split header length */
4877 	stmmac_get_rx_header_len(priv, p, &hlen);
4878 	if (priv->sph && hlen) {
4879 		priv->xstats.rx_split_hdr_pkt_n++;
4880 		return hlen;
4881 	}
4882 
4883 	/* First descriptor, not last descriptor and not split header */
4884 	if (status & rx_not_ls)
4885 		return priv->dma_conf.dma_buf_sz;
4886 
4887 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4888 
4889 	/* First descriptor and last descriptor and not split header */
4890 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4891 }
4892 
4893 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4894 				       struct dma_desc *p,
4895 				       int status, unsigned int len)
4896 {
4897 	int coe = priv->hw->rx_csum;
4898 	unsigned int plen = 0;
4899 
4900 	/* Not split header, buffer is not available */
4901 	if (!priv->sph)
4902 		return 0;
4903 
4904 	/* Not last descriptor */
4905 	if (status & rx_not_ls)
4906 		return priv->dma_conf.dma_buf_sz;
4907 
4908 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4909 
4910 	/* Last descriptor */
4911 	return plen - len;
4912 }
4913 
4914 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4915 				struct xdp_frame *xdpf, bool dma_map)
4916 {
4917 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4918 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4919 	unsigned int entry = tx_q->cur_tx;
4920 	struct dma_desc *tx_desc;
4921 	dma_addr_t dma_addr;
4922 	bool set_ic;
4923 
4924 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4925 		return STMMAC_XDP_CONSUMED;
4926 
4927 	if (priv->plat->est && priv->plat->est->enable &&
4928 	    priv->plat->est->max_sdu[queue] &&
4929 	    xdpf->len > priv->plat->est->max_sdu[queue]) {
4930 		priv->xstats.max_sdu_txq_drop[queue]++;
4931 		return STMMAC_XDP_CONSUMED;
4932 	}
4933 
4934 	if (likely(priv->extend_desc))
4935 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4936 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4937 		tx_desc = &tx_q->dma_entx[entry].basic;
4938 	else
4939 		tx_desc = tx_q->dma_tx + entry;
4940 
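	/* Redirected frames (ndo_xdp_xmit) need a fresh DMA mapping, while
	 * XDP_TX frames reuse the page_pool mapping and only need a device
	 * sync.
	 */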
4941 	if (dma_map) {
4942 		dma_addr = dma_map_single(priv->device, xdpf->data,
4943 					  xdpf->len, DMA_TO_DEVICE);
4944 		if (dma_mapping_error(priv->device, dma_addr))
4945 			return STMMAC_XDP_CONSUMED;
4946 
4947 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4948 	} else {
4949 		struct page *page = virt_to_page(xdpf->data);
4950 
4951 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4952 			   xdpf->headroom;
4953 		dma_sync_single_for_device(priv->device, dma_addr,
4954 					   xdpf->len, DMA_BIDIRECTIONAL);
4955 
4956 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4957 	}
4958 
4959 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4960 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4961 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4962 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4963 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4964 
4965 	tx_q->xdpf[entry] = xdpf;
4966 
4967 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4968 
4969 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4970 			       true, priv->mode, true, true,
4971 			       xdpf->len);
4972 
4973 	tx_q->tx_count_frames++;
4974 
4975 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4976 		set_ic = true;
4977 	else
4978 		set_ic = false;
4979 
4980 	if (set_ic) {
4981 		tx_q->tx_count_frames = 0;
4982 		stmmac_set_tx_ic(priv, tx_desc);
4983 		u64_stats_update_begin(&txq_stats->q_syncp);
4984 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4985 		u64_stats_update_end(&txq_stats->q_syncp);
4986 	}
4987 
4988 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4989 
4990 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4991 	tx_q->cur_tx = entry;
4992 
4993 	return STMMAC_XDP_TX;
4994 }
4995 
4996 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4997 				   int cpu)
4998 {
4999 	int index = cpu;
5000 
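	/* Map the current CPU onto a TX queue, wrapping around when there
	 * are fewer TX queues than CPUs.
	 */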
5001 	if (unlikely(index < 0))
5002 		index = 0;
5003 
5004 	while (index >= priv->plat->tx_queues_to_use)
5005 		index -= priv->plat->tx_queues_to_use;
5006 
5007 	return index;
5008 }
5009 
5010 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5011 				struct xdp_buff *xdp)
5012 {
5013 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5014 	int cpu = smp_processor_id();
5015 	struct netdev_queue *nq;
5016 	int queue;
5017 	int res;
5018 
5019 	if (unlikely(!xdpf))
5020 		return STMMAC_XDP_CONSUMED;
5021 
5022 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5023 	nq = netdev_get_tx_queue(priv->dev, queue);
5024 
5025 	__netif_tx_lock(nq, cpu);
5026 	/* Avoids TX time-out as we are sharing with slow path */
5027 	txq_trans_cond_update(nq);
5028 
5029 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5030 	if (res == STMMAC_XDP_TX)
5031 		stmmac_flush_tx_descriptors(priv, queue);
5032 
5033 	__netif_tx_unlock(nq);
5034 
5035 	return res;
5036 }
5037 
5038 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5039 				 struct bpf_prog *prog,
5040 				 struct xdp_buff *xdp)
5041 {
5042 	u32 act;
5043 	int res;
5044 
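	/* Run the attached XDP program and translate its verdict into the
	 * driver's own XDP action codes.
	 */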
5045 	act = bpf_prog_run_xdp(prog, xdp);
5046 	switch (act) {
5047 	case XDP_PASS:
5048 		res = STMMAC_XDP_PASS;
5049 		break;
5050 	case XDP_TX:
5051 		res = stmmac_xdp_xmit_back(priv, xdp);
5052 		break;
5053 	case XDP_REDIRECT:
5054 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5055 			res = STMMAC_XDP_CONSUMED;
5056 		else
5057 			res = STMMAC_XDP_REDIRECT;
5058 		break;
5059 	default:
5060 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5061 		fallthrough;
5062 	case XDP_ABORTED:
5063 		trace_xdp_exception(priv->dev, prog, act);
5064 		fallthrough;
5065 	case XDP_DROP:
5066 		res = STMMAC_XDP_CONSUMED;
5067 		break;
5068 	}
5069 
5070 	return res;
5071 }
5072 
5073 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5074 					   struct xdp_buff *xdp)
5075 {
5076 	struct bpf_prog *prog;
5077 	int res;
5078 
5079 	prog = READ_ONCE(priv->xdp_prog);
5080 	if (!prog) {
5081 		res = STMMAC_XDP_PASS;
5082 		goto out;
5083 	}
5084 
5085 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5086 out:
5087 	return ERR_PTR(-res);
5088 }
5089 
5090 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5091 				   int xdp_status)
5092 {
5093 	int cpu = smp_processor_id();
5094 	int queue;
5095 
5096 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5097 
5098 	if (xdp_status & STMMAC_XDP_TX)
5099 		stmmac_tx_timer_arm(priv, queue);
5100 
5101 	if (xdp_status & STMMAC_XDP_REDIRECT)
5102 		xdp_do_flush();
5103 }
5104 
5105 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5106 					       struct xdp_buff *xdp)
5107 {
5108 	unsigned int metasize = xdp->data - xdp->data_meta;
5109 	unsigned int datasize = xdp->data_end - xdp->data;
5110 	struct sk_buff *skb;
5111 
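	/* The XSK buffer stays with the pool, so copy the frame and its
	 * metadata into a newly allocated skb for the network stack.
	 */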
5112 	skb = __napi_alloc_skb(&ch->rxtx_napi,
5113 			       xdp->data_end - xdp->data_hard_start,
5114 			       GFP_ATOMIC | __GFP_NOWARN);
5115 	if (unlikely(!skb))
5116 		return NULL;
5117 
5118 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5119 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5120 	if (metasize)
5121 		skb_metadata_set(skb, metasize);
5122 
5123 	return skb;
5124 }
5125 
5126 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5127 				   struct dma_desc *p, struct dma_desc *np,
5128 				   struct xdp_buff *xdp)
5129 {
5130 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5131 	struct stmmac_channel *ch = &priv->channel[queue];
5132 	unsigned int len = xdp->data_end - xdp->data;
5133 	enum pkt_hash_types hash_type;
5134 	int coe = priv->hw->rx_csum;
5135 	struct sk_buff *skb;
5136 	u32 hash;
5137 
5138 	skb = stmmac_construct_skb_zc(ch, xdp);
5139 	if (!skb) {
5140 		priv->xstats.rx_dropped++;
5141 		return;
5142 	}
5143 
5144 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5145 	if (priv->hw->hw_vlan_en)
5146 		/* MAC level stripping. */
5147 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5148 	else
5149 		/* Driver level stripping. */
5150 		stmmac_rx_vlan(priv->dev, skb);
5151 	skb->protocol = eth_type_trans(skb, priv->dev);
5152 
5153 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5154 		skb_checksum_none_assert(skb);
5155 	else
5156 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5157 
5158 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5159 		skb_set_hash(skb, hash, hash_type);
5160 
5161 	skb_record_rx_queue(skb, queue);
5162 	napi_gro_receive(&ch->rxtx_napi, skb);
5163 
5164 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5165 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5166 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5167 	u64_stats_update_end(&rxq_stats->napi_syncp);
5168 }
5169 
5170 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5171 {
5172 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5173 	unsigned int entry = rx_q->dirty_rx;
5174 	struct dma_desc *rx_desc = NULL;
5175 	bool ret = true;
5176 
5177 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5178 
5179 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5180 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5181 		dma_addr_t dma_addr;
5182 		bool use_rx_wd;
5183 
5184 		if (!buf->xdp) {
5185 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5186 			if (!buf->xdp) {
5187 				ret = false;
5188 				break;
5189 			}
5190 		}
5191 
5192 		if (priv->extend_desc)
5193 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5194 		else
5195 			rx_desc = rx_q->dma_rx + entry;
5196 
5197 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5198 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5199 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5200 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5201 
5202 		rx_q->rx_count_frames++;
5203 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5204 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5205 			rx_q->rx_count_frames = 0;
5206 
5207 		use_rx_wd = !priv->rx_coal_frames[queue];
5208 		use_rx_wd |= rx_q->rx_count_frames > 0;
5209 		if (!priv->use_riwt)
5210 			use_rx_wd = false;
5211 
5212 		dma_wmb();
5213 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5214 
5215 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5216 	}
5217 
5218 	if (rx_desc) {
5219 		rx_q->dirty_rx = entry;
5220 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5221 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5222 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5223 	}
5224 
5225 	return ret;
5226 }
5227 
5228 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5229 {
5230 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5231 	 * to represent incoming packet, whereas cb field in the same structure
5232 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5233 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5234 	 */
5235 	return (struct stmmac_xdp_buff *)xdp;
5236 }
5237 
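/**
 * stmmac_rx_zc - zero-copy (XSK) receive process
 * @priv: driver private structure
 * @limit: NAPI budget
 * @queue: RX queue index.
 * Description: this is the zero-copy counterpart of stmmac_rx(), called from
 * the rxtx NAPI poll when an XSK pool is attached. It runs the XDP program
 * on each completed XSK buffer, handles the PASS/TX/REDIRECT/CONSUMED
 * verdicts and refills the ring from the pool as it drains.
 */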
5238 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5239 {
5240 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5241 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5242 	unsigned int count = 0, error = 0, len = 0;
5243 	int dirty = stmmac_rx_dirty(priv, queue);
5244 	unsigned int next_entry = rx_q->cur_rx;
5245 	u32 rx_errors = 0, rx_dropped = 0;
5246 	unsigned int desc_size;
5247 	struct bpf_prog *prog;
5248 	bool failure = false;
5249 	int xdp_status = 0;
5250 	int status = 0;
5251 
5252 	if (netif_msg_rx_status(priv)) {
5253 		void *rx_head;
5254 
5255 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5256 		if (priv->extend_desc) {
5257 			rx_head = (void *)rx_q->dma_erx;
5258 			desc_size = sizeof(struct dma_extended_desc);
5259 		} else {
5260 			rx_head = (void *)rx_q->dma_rx;
5261 			desc_size = sizeof(struct dma_desc);
5262 		}
5263 
5264 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5265 				    rx_q->dma_rx_phy, desc_size);
5266 	}
5267 	while (count < limit) {
5268 		struct stmmac_rx_buffer *buf;
5269 		struct stmmac_xdp_buff *ctx;
5270 		unsigned int buf1_len = 0;
5271 		struct dma_desc *np, *p;
5272 		int entry;
5273 		int res;
5274 
5275 		if (!count && rx_q->state_saved) {
5276 			error = rx_q->state.error;
5277 			len = rx_q->state.len;
5278 		} else {
5279 			rx_q->state_saved = false;
5280 			error = 0;
5281 			len = 0;
5282 		}
5283 
5284 		if (count >= limit)
5285 			break;
5286 
5287 read_again:
5288 		buf1_len = 0;
5289 		entry = next_entry;
5290 		buf = &rx_q->buf_pool[entry];
5291 
5292 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5293 			failure = failure ||
5294 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5295 			dirty = 0;
5296 		}
5297 
5298 		if (priv->extend_desc)
5299 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5300 		else
5301 			p = rx_q->dma_rx + entry;
5302 
5303 		/* read the status of the incoming frame */
5304 		status = stmmac_rx_status(priv, &priv->xstats, p);
5305 		/* check if the descriptor is still owned by the DMA, otherwise go ahead */
5306 		if (unlikely(status & dma_own))
5307 			break;
5308 
5309 		/* Prefetch the next RX descriptor */
5310 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5311 						priv->dma_conf.dma_rx_size);
5312 		next_entry = rx_q->cur_rx;
5313 
5314 		if (priv->extend_desc)
5315 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5316 		else
5317 			np = rx_q->dma_rx + next_entry;
5318 
5319 		prefetch(np);
5320 
5321 		/* Ensure a valid XSK buffer before proceeding */
5322 		if (!buf->xdp)
5323 			break;
5324 
5325 		if (priv->extend_desc)
5326 			stmmac_rx_extended_status(priv, &priv->xstats,
5327 						  rx_q->dma_erx + entry);
5328 		if (unlikely(status == discard_frame)) {
5329 			xsk_buff_free(buf->xdp);
5330 			buf->xdp = NULL;
5331 			dirty++;
5332 			error = 1;
5333 			if (!priv->hwts_rx_en)
5334 				rx_errors++;
5335 		}
5336 
5337 		if (unlikely(error && (status & rx_not_ls)))
5338 			goto read_again;
5339 		if (unlikely(error)) {
5340 			count++;
5341 			continue;
5342 		}
5343 
5344 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5345 		if (likely(status & rx_not_ls)) {
5346 			xsk_buff_free(buf->xdp);
5347 			buf->xdp = NULL;
5348 			dirty++;
5349 			count++;
5350 			goto read_again;
5351 		}
5352 
5353 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5354 		ctx->priv = priv;
5355 		ctx->desc = p;
5356 		ctx->ndesc = np;
5357 
5358 		/* XDP ZC frames only support primary buffers for now */
5359 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5360 		len += buf1_len;
5361 
5362 		/* ACS is disabled; strip manually. */
5363 		if (likely(!(status & rx_not_ls))) {
5364 			buf1_len -= ETH_FCS_LEN;
5365 			len -= ETH_FCS_LEN;
5366 		}
5367 
5368 		/* RX buffer is good and fits into an XSK pool buffer */
5369 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5370 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5371 
5372 		prog = READ_ONCE(priv->xdp_prog);
5373 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5374 
5375 		switch (res) {
5376 		case STMMAC_XDP_PASS:
5377 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5378 			xsk_buff_free(buf->xdp);
5379 			break;
5380 		case STMMAC_XDP_CONSUMED:
5381 			xsk_buff_free(buf->xdp);
5382 			rx_dropped++;
5383 			break;
5384 		case STMMAC_XDP_TX:
5385 		case STMMAC_XDP_REDIRECT:
5386 			xdp_status |= res;
5387 			break;
5388 		}
5389 
5390 		buf->xdp = NULL;
5391 		dirty++;
5392 		count++;
5393 	}
5394 
5395 	if (status & rx_not_ls) {
5396 		rx_q->state_saved = true;
5397 		rx_q->state.error = error;
5398 		rx_q->state.len = len;
5399 	}
5400 
5401 	stmmac_finalize_xdp_rx(priv, xdp_status);
5402 
5403 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5404 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5405 	u64_stats_update_end(&rxq_stats->napi_syncp);
5406 
5407 	priv->xstats.rx_dropped += rx_dropped;
5408 	priv->xstats.rx_errors += rx_errors;
5409 
5410 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5411 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5412 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5413 		else
5414 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5415 
5416 		return (int)count;
5417 	}
5418 
5419 	return failure ? limit : (int)count;
5420 }
5421 
5422 /**
5423  * stmmac_rx - manage the receive process
5424  * @priv: driver private structure
5425  * @limit: NAPI budget
5426  * @queue: RX queue index.
5427  * Description: this is the function called by the NAPI poll method.
5428  * It gets all the frames inside the ring.
5429  */
5430 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5431 {
5432 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5433 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5434 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5435 	struct stmmac_channel *ch = &priv->channel[queue];
5436 	unsigned int count = 0, error = 0, len = 0;
5437 	int status = 0, coe = priv->hw->rx_csum;
5438 	unsigned int next_entry = rx_q->cur_rx;
5439 	enum dma_data_direction dma_dir;
5440 	unsigned int desc_size;
5441 	struct sk_buff *skb = NULL;
5442 	struct stmmac_xdp_buff ctx;
5443 	int xdp_status = 0;
5444 	int buf_sz;
5445 
5446 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5447 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5448 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5449 
5450 	if (netif_msg_rx_status(priv)) {
5451 		void *rx_head;
5452 
5453 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5454 		if (priv->extend_desc) {
5455 			rx_head = (void *)rx_q->dma_erx;
5456 			desc_size = sizeof(struct dma_extended_desc);
5457 		} else {
5458 			rx_head = (void *)rx_q->dma_rx;
5459 			desc_size = sizeof(struct dma_desc);
5460 		}
5461 
5462 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5463 				    rx_q->dma_rx_phy, desc_size);
5464 	}
5465 	while (count < limit) {
5466 		unsigned int buf1_len = 0, buf2_len = 0;
5467 		enum pkt_hash_types hash_type;
5468 		struct stmmac_rx_buffer *buf;
5469 		struct dma_desc *np, *p;
5470 		int entry;
5471 		u32 hash;
5472 
5473 		if (!count && rx_q->state_saved) {
5474 			skb = rx_q->state.skb;
5475 			error = rx_q->state.error;
5476 			len = rx_q->state.len;
5477 		} else {
5478 			rx_q->state_saved = false;
5479 			skb = NULL;
5480 			error = 0;
5481 			len = 0;
5482 		}
5483 
5484 read_again:
5485 		if (count >= limit)
5486 			break;
5487 
5488 		buf1_len = 0;
5489 		buf2_len = 0;
5490 		entry = next_entry;
5491 		buf = &rx_q->buf_pool[entry];
5492 
5493 		if (priv->extend_desc)
5494 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5495 		else
5496 			p = rx_q->dma_rx + entry;
5497 
5498 		/* read the status of the incoming frame */
5499 		status = stmmac_rx_status(priv, &priv->xstats, p);
5500 		/* check if the descriptor is still owned by the DMA, otherwise go ahead */
5501 		if (unlikely(status & dma_own))
5502 			break;
5503 
5504 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5505 						priv->dma_conf.dma_rx_size);
5506 		next_entry = rx_q->cur_rx;
5507 
5508 		if (priv->extend_desc)
5509 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5510 		else
5511 			np = rx_q->dma_rx + next_entry;
5512 
5513 		prefetch(np);
5514 
5515 		if (priv->extend_desc)
5516 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5517 		if (unlikely(status == discard_frame)) {
5518 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5519 			buf->page = NULL;
5520 			error = 1;
5521 			if (!priv->hwts_rx_en)
5522 				rx_errors++;
5523 		}
5524 
5525 		if (unlikely(error && (status & rx_not_ls)))
5526 			goto read_again;
5527 		if (unlikely(error)) {
5528 			dev_kfree_skb(skb);
5529 			skb = NULL;
5530 			count++;
5531 			continue;
5532 		}
5533 
5534 		/* Buffer is good. Go on. */
5535 
5536 		prefetch(page_address(buf->page) + buf->page_offset);
5537 		if (buf->sec_page)
5538 			prefetch(page_address(buf->sec_page));
5539 
5540 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5541 		len += buf1_len;
5542 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5543 		len += buf2_len;
5544 
5545 		/* ACS is disabled; strip manually. */
5546 		if (likely(!(status & rx_not_ls))) {
5547 			if (buf2_len) {
5548 				buf2_len -= ETH_FCS_LEN;
5549 				len -= ETH_FCS_LEN;
5550 			} else if (buf1_len) {
5551 				buf1_len -= ETH_FCS_LEN;
5552 				len -= ETH_FCS_LEN;
5553 			}
5554 		}
5555 
5556 		if (!skb) {
5557 			unsigned int pre_len, sync_len;
5558 
5559 			dma_sync_single_for_cpu(priv->device, buf->addr,
5560 						buf1_len, dma_dir);
5561 
5562 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5563 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5564 					 buf->page_offset, buf1_len, true);
5565 
5566 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5567 				  buf->page_offset;
5568 
5569 			ctx.priv = priv;
5570 			ctx.desc = p;
5571 			ctx.ndesc = np;
5572 
5573 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5574 			/* Due to xdp_adjust_tail: the DMA sync for_device must
5575 			 * cover the max length the CPU may have touched
5576 			 */
5577 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5578 				   buf->page_offset;
5579 			sync_len = max(sync_len, pre_len);
5580 
5581 			/* For non-XDP_PASS verdicts */
5582 			if (IS_ERR(skb)) {
5583 				unsigned int xdp_res = -PTR_ERR(skb);
5584 
5585 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5586 					page_pool_put_page(rx_q->page_pool,
5587 							   virt_to_head_page(ctx.xdp.data),
5588 							   sync_len, true);
5589 					buf->page = NULL;
5590 					rx_dropped++;
5591 
5592 					/* Clear skb as it was set as an error
5593 					 * status by the XDP program.
5594 					 */
5595 					skb = NULL;
5596 
5597 					if (unlikely((status & rx_not_ls)))
5598 						goto read_again;
5599 
5600 					count++;
5601 					continue;
5602 				} else if (xdp_res & (STMMAC_XDP_TX |
5603 						      STMMAC_XDP_REDIRECT)) {
5604 					xdp_status |= xdp_res;
5605 					buf->page = NULL;
5606 					skb = NULL;
5607 					count++;
5608 					continue;
5609 				}
5610 			}
5611 		}
5612 
5613 		if (!skb) {
5614 			/* XDP program may expand or reduce tail */
5615 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5616 
5617 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5618 			if (!skb) {
5619 				rx_dropped++;
5620 				count++;
5621 				goto drain_data;
5622 			}
5623 
5624 			/* XDP program may adjust header */
5625 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5626 			skb_put(skb, buf1_len);
5627 
5628 			/* Data payload copied into SKB, page ready for recycle */
5629 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5630 			buf->page = NULL;
5631 		} else if (buf1_len) {
5632 			dma_sync_single_for_cpu(priv->device, buf->addr,
5633 						buf1_len, dma_dir);
5634 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5635 					buf->page, buf->page_offset, buf1_len,
5636 					priv->dma_conf.dma_buf_sz);
5637 
5638 			/* Data payload appended into SKB */
5639 			skb_mark_for_recycle(skb);
5640 			buf->page = NULL;
5641 		}
5642 
5643 		if (buf2_len) {
5644 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5645 						buf2_len, dma_dir);
5646 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5647 					buf->sec_page, 0, buf2_len,
5648 					priv->dma_conf.dma_buf_sz);
5649 
5650 			/* Data payload appended into SKB */
5651 			skb_mark_for_recycle(skb);
5652 			buf->sec_page = NULL;
5653 		}
5654 
5655 drain_data:
5656 		if (likely(status & rx_not_ls))
5657 			goto read_again;
5658 		if (!skb)
5659 			continue;
5660 
5661 		/* Got entire packet into SKB. Finish it. */
5662 
5663 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5664 
5665 		if (priv->hw->hw_vlan_en)
5666 			/* MAC level stripping. */
5667 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5668 		else
5669 			/* Driver level stripping. */
5670 			stmmac_rx_vlan(priv->dev, skb);
5671 
5672 		skb->protocol = eth_type_trans(skb, priv->dev);
5673 
5674 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5675 			skb_checksum_none_assert(skb);
5676 		else
5677 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5678 
5679 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5680 			skb_set_hash(skb, hash, hash_type);
5681 
5682 		skb_record_rx_queue(skb, queue);
5683 		napi_gro_receive(&ch->rx_napi, skb);
5684 		skb = NULL;
5685 
5686 		rx_packets++;
5687 		rx_bytes += len;
5688 		count++;
5689 	}
5690 
5691 	if (status & rx_not_ls || skb) {
5692 		rx_q->state_saved = true;
5693 		rx_q->state.skb = skb;
5694 		rx_q->state.error = error;
5695 		rx_q->state.len = len;
5696 	}
5697 
5698 	stmmac_finalize_xdp_rx(priv, xdp_status);
5699 
5700 	stmmac_rx_refill(priv, queue);
5701 
5702 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5703 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5704 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5705 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5706 	u64_stats_update_end(&rxq_stats->napi_syncp);
5707 
5708 	priv->xstats.rx_dropped += rx_dropped;
5709 	priv->xstats.rx_errors += rx_errors;
5710 
5711 	return count;
5712 }
5713 
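/* RX-only NAPI poll: process up to @budget frames and re-enable the RX DMA
 * interrupt for this channel once the pending work fits within the budget.
 */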
5714 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5715 {
5716 	struct stmmac_channel *ch =
5717 		container_of(napi, struct stmmac_channel, rx_napi);
5718 	struct stmmac_priv *priv = ch->priv_data;
5719 	struct stmmac_rxq_stats *rxq_stats;
5720 	u32 chan = ch->index;
5721 	int work_done;
5722 
5723 	rxq_stats = &priv->xstats.rxq_stats[chan];
5724 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5725 	u64_stats_inc(&rxq_stats->napi.poll);
5726 	u64_stats_update_end(&rxq_stats->napi_syncp);
5727 
5728 	work_done = stmmac_rx(priv, budget, chan);
5729 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5730 		unsigned long flags;
5731 
5732 		spin_lock_irqsave(&ch->lock, flags);
5733 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5734 		spin_unlock_irqrestore(&ch->lock, flags);
5735 	}
5736 
5737 	return work_done;
5738 }
5739 
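/* TX-only NAPI poll: clean completed descriptors, re-enable the TX DMA
 * interrupt when under budget and re-arm the TX coalescing timer if packets
 * are still pending.
 */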
5740 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5741 {
5742 	struct stmmac_channel *ch =
5743 		container_of(napi, struct stmmac_channel, tx_napi);
5744 	struct stmmac_priv *priv = ch->priv_data;
5745 	struct stmmac_txq_stats *txq_stats;
5746 	bool pending_packets = false;
5747 	u32 chan = ch->index;
5748 	int work_done;
5749 
5750 	txq_stats = &priv->xstats.txq_stats[chan];
5751 	u64_stats_update_begin(&txq_stats->napi_syncp);
5752 	u64_stats_inc(&txq_stats->napi.poll);
5753 	u64_stats_update_end(&txq_stats->napi_syncp);
5754 
5755 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5756 	work_done = min(work_done, budget);
5757 
5758 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5759 		unsigned long flags;
5760 
5761 		spin_lock_irqsave(&ch->lock, flags);
5762 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5763 		spin_unlock_irqrestore(&ch->lock, flags);
5764 	}
5765 
5766 	/* TX still has packets to handle; check if we need to arm the TX timer */
5767 	if (pending_packets)
5768 		stmmac_tx_timer_arm(priv, chan);
5769 
5770 	return work_done;
5771 }
5772 
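/* Combined RX/TX NAPI poll used when an XSK pool is attached to the channel:
 * clean the TX ring, run the zero-copy RX path and only re-enable the DMA
 * interrupts once both directions are under budget.
 */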
5773 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5774 {
5775 	struct stmmac_channel *ch =
5776 		container_of(napi, struct stmmac_channel, rxtx_napi);
5777 	struct stmmac_priv *priv = ch->priv_data;
5778 	bool tx_pending_packets = false;
5779 	int rx_done, tx_done, rxtx_done;
5780 	struct stmmac_rxq_stats *rxq_stats;
5781 	struct stmmac_txq_stats *txq_stats;
5782 	u32 chan = ch->index;
5783 
5784 	rxq_stats = &priv->xstats.rxq_stats[chan];
5785 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5786 	u64_stats_inc(&rxq_stats->napi.poll);
5787 	u64_stats_update_end(&rxq_stats->napi_syncp);
5788 
5789 	txq_stats = &priv->xstats.txq_stats[chan];
5790 	u64_stats_update_begin(&txq_stats->napi_syncp);
5791 	u64_stats_inc(&txq_stats->napi.poll);
5792 	u64_stats_update_end(&txq_stats->napi_syncp);
5793 
5794 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5795 	tx_done = min(tx_done, budget);
5796 
5797 	rx_done = stmmac_rx_zc(priv, budget, chan);
5798 
5799 	rxtx_done = max(tx_done, rx_done);
5800 
5801 	/* If either TX or RX work is not complete, return budget
5802 	 * and keep polling
5803 	 */
5804 	if (rxtx_done >= budget)
5805 		return budget;
5806 
5807 	/* all work done, exit the polling mode */
5808 	if (napi_complete_done(napi, rxtx_done)) {
5809 		unsigned long flags;
5810 
5811 		spin_lock_irqsave(&ch->lock, flags);
5812 		/* Both RX and TX work are complete,
5813 		 * so enable both RX & TX IRQs.
5814 		 */
5815 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5816 		spin_unlock_irqrestore(&ch->lock, flags);
5817 	}
5818 
5819 	/* TX still has packets to handle; check if we need to arm the TX timer */
5820 	if (tx_pending_packets)
5821 		stmmac_tx_timer_arm(priv, chan);
5822 
5823 	return min(rxtx_done, budget - 1);
5824 }
5825 
5826 /**
5827  *  stmmac_tx_timeout
5828  *  @dev : Pointer to net device structure
5829  *  @txqueue: the index of the hanging transmit queue
5830  *  Description: this function is called when a packet transmission fails to
5831  *   complete within a reasonable time. The driver will mark the error in the
5832  *   netdev structure and arrange for the device to be reset to a sane state
5833  *   in order to transmit a new packet.
5834  */
5835 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5836 {
5837 	struct stmmac_priv *priv = netdev_priv(dev);
5838 
5839 	stmmac_global_err(priv);
5840 }
5841 
5842 /**
5843  *  stmmac_set_rx_mode - entry point for multicast addressing
5844  *  @dev : pointer to the device structure
5845  *  Description:
5846  *  This function is a driver entry point which gets called by the kernel
5847  *  whenever multicast addresses must be enabled/disabled.
5848  *  Return value:
5849  *  void.
5850  */
5851 static void stmmac_set_rx_mode(struct net_device *dev)
5852 {
5853 	struct stmmac_priv *priv = netdev_priv(dev);
5854 
5855 	stmmac_set_filter(priv, priv->hw, dev);
5856 }
5857 
5858 /**
5859  *  stmmac_change_mtu - entry point to change MTU size for the device.
5860  *  @dev : device pointer.
5861  *  @new_mtu : the new MTU size for the device.
5862  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5863  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5864  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5865  *  Return value:
5866  *  0 on success and a negative errno value on failure.
5868  */
5869 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5870 {
5871 	struct stmmac_priv *priv = netdev_priv(dev);
5872 	int txfifosz = priv->plat->tx_fifo_size;
5873 	struct stmmac_dma_conf *dma_conf;
5874 	const int mtu = new_mtu;
5875 	int ret;
5876 
5877 	if (txfifosz == 0)
5878 		txfifosz = priv->dma_cap.tx_fifo_size;
5879 
5880 	txfifosz /= priv->plat->tx_queues_to_use;
5881 
5882 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5883 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5884 		return -EINVAL;
5885 	}
5886 
5887 	new_mtu = STMMAC_ALIGN(new_mtu);
5888 
5889 	/* Reject if the FIFO is too small or the MTU is too large */
5890 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5891 		return -EINVAL;
5892 
5893 	if (netif_running(dev)) {
5894 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5895 		/* Try to allocate the new DMA conf with the new mtu */
5896 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5897 		if (IS_ERR(dma_conf)) {
5898 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5899 				   mtu);
5900 			return PTR_ERR(dma_conf);
5901 		}
5902 
5903 		stmmac_release(dev);
5904 
5905 		ret = __stmmac_open(dev, dma_conf);
5906 		if (ret) {
5907 			free_dma_desc_resources(priv, dma_conf);
5908 			kfree(dma_conf);
5909 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5910 			return ret;
5911 		}
5912 
5913 		kfree(dma_conf);
5914 
5915 		stmmac_set_rx_mode(dev);
5916 	}
5917 
5918 	dev->mtu = mtu;
5919 	netdev_update_features(dev);
5920 
5921 	return 0;
5922 }
5923 
5924 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5925 					     netdev_features_t features)
5926 {
5927 	struct stmmac_priv *priv = netdev_priv(dev);
5928 
5929 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5930 		features &= ~NETIF_F_RXCSUM;
5931 
5932 	if (!priv->plat->tx_coe)
5933 		features &= ~NETIF_F_CSUM_MASK;
5934 
5935 	/* Some GMAC devices have buggy Jumbo frame support that
5936 	 * requires Tx COE to be disabled for oversized frames
5937 	 * (due to limited buffer sizes). In this case we disable
5938 	 * the TX csum insertion in the TDES and do not use SF.
5939 	 */
5940 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5941 		features &= ~NETIF_F_CSUM_MASK;
5942 
5943 	/* Disable tso if asked by ethtool */
5944 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5945 		if (features & NETIF_F_TSO)
5946 			priv->tso = true;
5947 		else
5948 			priv->tso = false;
5949 	}
5950 
5951 	return features;
5952 }
5953 
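/* Apply the requested netdev features: update RX checksum offload, Split
 * Header state on all RX channels and MAC-level VLAN stripping.
 */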
5954 static int stmmac_set_features(struct net_device *netdev,
5955 			       netdev_features_t features)
5956 {
5957 	struct stmmac_priv *priv = netdev_priv(netdev);
5958 
5959 	/* Keep the COE Type in case checksum offload is supported */
5960 	if (features & NETIF_F_RXCSUM)
5961 		priv->hw->rx_csum = priv->plat->rx_coe;
5962 	else
5963 		priv->hw->rx_csum = 0;
5964 	/* No check needed because rx_coe has already been set and it will be
5965 	 * fixed up in case of an issue.
5966 	 */
5967 	stmmac_rx_ipc(priv, priv->hw);
5968 
5969 	if (priv->sph_cap) {
5970 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5971 		u32 chan;
5972 
5973 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5974 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5975 	}
5976 
5977 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5978 		priv->hw->hw_vlan_en = true;
5979 	else
5980 		priv->hw->hw_vlan_en = false;
5981 
5982 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5983 
5984 	return 0;
5985 }
5986 
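/* Frame Preemption (FPE) handshake state machine: track verify/response
 * mPackets sent by the local and link-partner sides and schedule the FPE
 * workqueue task to complete the handshake when needed.
 */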
5987 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5988 {
5989 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5990 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5991 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5992 	bool *hs_enable = &fpe_cfg->hs_enable;
5993 
5994 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5995 		return;
5996 
5997 	/* If LP has sent verify mPacket, LP is FPE capable */
5998 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5999 		if (*lp_state < FPE_STATE_CAPABLE)
6000 			*lp_state = FPE_STATE_CAPABLE;
6001 
6002 		/* If the user has requested FPE enable, respond quickly */
6003 		if (*hs_enable)
6004 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6005 						fpe_cfg,
6006 						MPACKET_RESPONSE);
6007 	}
6008 
6009 	/* If Local has sent verify mPacket, Local is FPE capable */
6010 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
6011 		if (*lo_state < FPE_STATE_CAPABLE)
6012 			*lo_state = FPE_STATE_CAPABLE;
6013 	}
6014 
6015 	/* If LP has sent response mPacket, LP is entering FPE ON */
6016 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
6017 		*lp_state = FPE_STATE_ENTERING_ON;
6018 
6019 	/* If Local has sent response mPacket, Local is entering FPE ON */
6020 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
6021 		*lo_state = FPE_STATE_ENTERING_ON;
6022 
6023 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
6024 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
6025 	    priv->fpe_wq) {
6026 		queue_work(priv->fpe_wq, &priv->fpe_task);
6027 	}
6028 }
6029 
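/* Handle the MAC-level (non-DMA) interrupt sources shared by all IRQ
 * configurations: wake-up events, EST and FPE status, LPI state changes,
 * per-queue MTL status, PCS link state and timestamp interrupts.
 */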
6030 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6031 {
6032 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6033 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6034 	u32 queues_count;
6035 	u32 queue;
6036 	bool xmac;
6037 
6038 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6039 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6040 
6041 	if (priv->irq_wake)
6042 		pm_wakeup_event(priv->device, 0);
6043 
6044 	if (priv->dma_cap.estsel)
6045 		stmmac_est_irq_status(priv, priv, priv->dev,
6046 				      &priv->xstats, tx_cnt);
6047 
6048 	if (priv->dma_cap.fpesel) {
6049 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
6050 						   priv->dev);
6051 
6052 		stmmac_fpe_event_status(priv, status);
6053 	}
6054 
6055 	/* To handle the GMAC's own interrupts */
6056 	if ((priv->plat->has_gmac) || xmac) {
6057 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6058 
6059 		if (unlikely(status)) {
6060 			/* For LPI we need to save the tx status */
6061 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6062 				priv->tx_path_in_lpi_mode = true;
6063 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6064 				priv->tx_path_in_lpi_mode = false;
6065 		}
6066 
6067 		for (queue = 0; queue < queues_count; queue++)
6068 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6069 
6070 		/* PCS link status */
6071 		if (priv->hw->pcs &&
6072 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6073 			if (priv->xstats.pcs_link)
6074 				netif_carrier_on(priv->dev);
6075 			else
6076 				netif_carrier_off(priv->dev);
6077 		}
6078 
6079 		stmmac_timestamp_interrupt(priv, priv);
6080 	}
6081 }
6082 
6083 /**
6084  *  stmmac_interrupt - main ISR
6085  *  @irq: interrupt number.
6086  *  @dev_id: to pass the net device pointer.
6087  *  Description: this is the main driver interrupt service routine.
6088  *  It can call:
6089  *  o DMA service routine (to manage incoming frame reception and transmission
6090  *    status)
6091  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6092  *    interrupts.
6093  */
6094 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6095 {
6096 	struct net_device *dev = (struct net_device *)dev_id;
6097 	struct stmmac_priv *priv = netdev_priv(dev);
6098 
6099 	/* Check if adapter is up */
6100 	if (test_bit(STMMAC_DOWN, &priv->state))
6101 		return IRQ_HANDLED;
6102 
6103 	/* Check ASP error if it isn't delivered via an individual IRQ */
6104 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6105 		return IRQ_HANDLED;
6106 
6107 	/* To handle Common interrupts */
6108 	stmmac_common_interrupt(priv);
6109 
6110 	/* To handle DMA interrupts */
6111 	stmmac_dma_interrupt(priv);
6112 
6113 	return IRQ_HANDLED;
6114 }
6115 
6116 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6117 {
6118 	struct net_device *dev = (struct net_device *)dev_id;
6119 	struct stmmac_priv *priv = netdev_priv(dev);
6120 
6121 	/* Check if adapter is up */
6122 	if (test_bit(STMMAC_DOWN, &priv->state))
6123 		return IRQ_HANDLED;
6124 
6125 	/* To handle Common interrupts */
6126 	stmmac_common_interrupt(priv);
6127 
6128 	return IRQ_HANDLED;
6129 }
6130 
6131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6132 {
6133 	struct net_device *dev = (struct net_device *)dev_id;
6134 	struct stmmac_priv *priv = netdev_priv(dev);
6135 
6136 	/* Check if adapter is up */
6137 	if (test_bit(STMMAC_DOWN, &priv->state))
6138 		return IRQ_HANDLED;
6139 
6140 	/* Check if a fatal error happened */
6141 	stmmac_safety_feat_interrupt(priv);
6142 
6143 	return IRQ_HANDLED;
6144 }
6145 
6146 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6147 {
6148 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6149 	struct stmmac_dma_conf *dma_conf;
6150 	int chan = tx_q->queue_index;
6151 	struct stmmac_priv *priv;
6152 	int status;
6153 
6154 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6155 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6156 
6157 	/* Check if adapter is up */
6158 	if (test_bit(STMMAC_DOWN, &priv->state))
6159 		return IRQ_HANDLED;
6160 
6161 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6162 
6163 	if (unlikely(status & tx_hard_error_bump_tc)) {
6164 		/* Try to bump up the dma threshold on this failure */
6165 		stmmac_bump_dma_threshold(priv, chan);
6166 	} else if (unlikely(status == tx_hard_error)) {
6167 		stmmac_tx_err(priv, chan);
6168 	}
6169 
6170 	return IRQ_HANDLED;
6171 }
6172 
6173 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6174 {
6175 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6176 	struct stmmac_dma_conf *dma_conf;
6177 	int chan = rx_q->queue_index;
6178 	struct stmmac_priv *priv;
6179 
6180 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6181 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6182 
6183 	/* Check if adapter is up */
6184 	if (test_bit(STMMAC_DOWN, &priv->state))
6185 		return IRQ_HANDLED;
6186 
6187 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6188 
6189 	return IRQ_HANDLED;
6190 }
6191 
6192 /**
6193  *  stmmac_ioctl - Entry point for the Ioctl
6194  *  @dev: Device pointer.
6195  *  @rq: An IOCTL-specific structure that can contain a pointer to
6196  *  a proprietary structure used to pass information to the driver.
6197  *  @cmd: IOCTL command
6198  *  Description:
6199  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6200  */
6201 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6202 {
6203 	struct stmmac_priv *priv = netdev_priv(dev);
6204 	int ret = -EOPNOTSUPP;
6205 
6206 	if (!netif_running(dev))
6207 		return -EINVAL;
6208 
6209 	switch (cmd) {
6210 	case SIOCGMIIPHY:
6211 	case SIOCGMIIREG:
6212 	case SIOCSMIIREG:
6213 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6214 		break;
6215 	case SIOCSHWTSTAMP:
6216 		ret = stmmac_hwtstamp_set(dev, rq);
6217 		break;
6218 	case SIOCGHWTSTAMP:
6219 		ret = stmmac_hwtstamp_get(dev, rq);
6220 		break;
6221 	default:
6222 		break;
6223 	}
6224 
6225 	return ret;
6226 }
6227 
6228 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6229 				    void *cb_priv)
6230 {
6231 	struct stmmac_priv *priv = cb_priv;
6232 	int ret = -EOPNOTSUPP;
6233 
6234 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6235 		return ret;
6236 
6237 	__stmmac_disable_all_queues(priv);
6238 
6239 	switch (type) {
6240 	case TC_SETUP_CLSU32:
6241 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6242 		break;
6243 	case TC_SETUP_CLSFLOWER:
6244 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6245 		break;
6246 	default:
6247 		break;
6248 	}
6249 
6250 	stmmac_enable_all_queues(priv);
6251 	return ret;
6252 }
6253 
6254 static LIST_HEAD(stmmac_block_cb_list);
6255 
6256 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6257 			   void *type_data)
6258 {
6259 	struct stmmac_priv *priv = netdev_priv(ndev);
6260 
6261 	switch (type) {
6262 	case TC_QUERY_CAPS:
6263 		return stmmac_tc_query_caps(priv, priv, type_data);
6264 	case TC_SETUP_BLOCK:
6265 		return flow_block_cb_setup_simple(type_data,
6266 						  &stmmac_block_cb_list,
6267 						  stmmac_setup_tc_block_cb,
6268 						  priv, priv, true);
6269 	case TC_SETUP_QDISC_CBS:
6270 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6271 	case TC_SETUP_QDISC_TAPRIO:
6272 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6273 	case TC_SETUP_QDISC_ETF:
6274 		return stmmac_tc_setup_etf(priv, priv, type_data);
6275 	default:
6276 		return -EOPNOTSUPP;
6277 	}
6278 }
6279 
6280 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6281 			       struct net_device *sb_dev)
6282 {
6283 	int gso = skb_shinfo(skb)->gso_type;
6284 
6285 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6286 		/*
6287 		 * There is no way to determine the number of TSO/USO
6288 		 * capable Queues. Let's always use Queue 0
6289 		 * because if TSO/USO is supported then at least this
6290 		 * one will be capable.
6291 		 */
6292 		return 0;
6293 	}
6294 
6295 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6296 }
6297 
6298 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6299 {
6300 	struct stmmac_priv *priv = netdev_priv(ndev);
6301 	int ret = 0;
6302 
6303 	ret = pm_runtime_resume_and_get(priv->device);
6304 	if (ret < 0)
6305 		return ret;
6306 
6307 	ret = eth_mac_addr(ndev, addr);
6308 	if (ret)
6309 		goto set_mac_error;
6310 
6311 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6312 
6313 set_mac_error:
6314 	pm_runtime_put(priv->device);
6315 
6316 	return ret;
6317 }
6318 
6319 #ifdef CONFIG_DEBUG_FS
6320 static struct dentry *stmmac_fs_dir;
6321 
6322 static void sysfs_display_ring(void *head, int size, int extend_desc,
6323 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6324 {
6325 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6326 	struct dma_desc *p = (struct dma_desc *)head;
6327 	unsigned int desc_size;
6328 	dma_addr_t dma_addr;
6329 	int i;
6330 
6331 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6332 	for (i = 0; i < size; i++) {
6333 		dma_addr = dma_phy_addr + i * desc_size;
6334 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6335 				i, &dma_addr,
6336 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6337 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6338 		if (extend_desc)
6339 			p = &(++ep)->basic;
6340 		else
6341 			p++;
6342 	}
6343 }
6344 
6345 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6346 {
6347 	struct net_device *dev = seq->private;
6348 	struct stmmac_priv *priv = netdev_priv(dev);
6349 	u32 rx_count = priv->plat->rx_queues_to_use;
6350 	u32 tx_count = priv->plat->tx_queues_to_use;
6351 	u32 queue;
6352 
6353 	if ((dev->flags & IFF_UP) == 0)
6354 		return 0;
6355 
6356 	for (queue = 0; queue < rx_count; queue++) {
6357 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6358 
6359 		seq_printf(seq, "RX Queue %d:\n", queue);
6360 
6361 		if (priv->extend_desc) {
6362 			seq_printf(seq, "Extended descriptor ring:\n");
6363 			sysfs_display_ring((void *)rx_q->dma_erx,
6364 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6365 		} else {
6366 			seq_printf(seq, "Descriptor ring:\n");
6367 			sysfs_display_ring((void *)rx_q->dma_rx,
6368 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6369 		}
6370 	}
6371 
6372 	for (queue = 0; queue < tx_count; queue++) {
6373 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6374 
6375 		seq_printf(seq, "TX Queue %d:\n", queue);
6376 
6377 		if (priv->extend_desc) {
6378 			seq_printf(seq, "Extended descriptor ring:\n");
6379 			sysfs_display_ring((void *)tx_q->dma_etx,
6380 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6381 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6382 			seq_printf(seq, "Descriptor ring:\n");
6383 			sysfs_display_ring((void *)tx_q->dma_tx,
6384 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6385 		}
6386 	}
6387 
6388 	return 0;
6389 }
6390 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6391 
6392 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6393 {
6394 	static const char * const dwxgmac_timestamp_source[] = {
6395 		"None",
6396 		"Internal",
6397 		"External",
6398 		"Both",
6399 	};
6400 	static const char * const dwxgmac_safety_feature_desc[] = {
6401 		"No",
6402 		"All Safety Features with ECC and Parity",
6403 		"All Safety Features without ECC or Parity",
6404 		"All Safety Features with Parity Only",
6405 		"ECC Only",
6406 		"UNDEFINED",
6407 		"UNDEFINED",
6408 		"UNDEFINED",
6409 	};
6410 	struct net_device *dev = seq->private;
6411 	struct stmmac_priv *priv = netdev_priv(dev);
6412 
6413 	if (!priv->hw_cap_support) {
6414 		seq_printf(seq, "DMA HW features not supported\n");
6415 		return 0;
6416 	}
6417 
6418 	seq_printf(seq, "==============================\n");
6419 	seq_printf(seq, "\tDMA HW features\n");
6420 	seq_printf(seq, "==============================\n");
6421 
6422 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6423 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6424 	seq_printf(seq, "\t1000 Mbps: %s\n",
6425 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6426 	seq_printf(seq, "\tHalf duplex: %s\n",
6427 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6428 	if (priv->plat->has_xgmac) {
6429 		seq_printf(seq,
6430 			   "\tNumber of Additional MAC address registers: %d\n",
6431 			   priv->dma_cap.multi_addr);
6432 	} else {
6433 		seq_printf(seq, "\tHash Filter: %s\n",
6434 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6435 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6436 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6437 	}
6438 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6439 		   (priv->dma_cap.pcs) ? "Y" : "N");
6440 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6441 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6442 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6443 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6444 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6445 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6446 	seq_printf(seq, "\tRMON module: %s\n",
6447 		   (priv->dma_cap.rmon) ? "Y" : "N");
6448 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6449 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6450 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6451 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6452 	if (priv->plat->has_xgmac)
6453 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6454 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6455 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6456 		   (priv->dma_cap.eee) ? "Y" : "N");
6457 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6458 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6459 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6460 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6461 	    priv->plat->has_xgmac) {
6462 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6463 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6464 	} else {
6465 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6466 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6467 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6468 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6469 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6470 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6471 	}
6472 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6473 		   priv->dma_cap.number_rx_channel);
6474 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6475 		   priv->dma_cap.number_tx_channel);
6476 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6477 		   priv->dma_cap.number_rx_queues);
6478 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6479 		   priv->dma_cap.number_tx_queues);
6480 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6481 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6482 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6483 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6484 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6485 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6486 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6487 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6488 		   priv->dma_cap.pps_out_num);
6489 	seq_printf(seq, "\tSafety Features: %s\n",
6490 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6491 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6492 		   priv->dma_cap.frpsel ? "Y" : "N");
6493 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6494 		   priv->dma_cap.host_dma_width);
6495 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6496 		   priv->dma_cap.rssen ? "Y" : "N");
6497 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6498 		   priv->dma_cap.vlhash ? "Y" : "N");
6499 	seq_printf(seq, "\tSplit Header: %s\n",
6500 		   priv->dma_cap.sphen ? "Y" : "N");
6501 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6502 		   priv->dma_cap.vlins ? "Y" : "N");
6503 	seq_printf(seq, "\tDouble VLAN: %s\n",
6504 		   priv->dma_cap.dvlan ? "Y" : "N");
6505 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6506 		   priv->dma_cap.l3l4fnum);
6507 	seq_printf(seq, "\tARP Offloading: %s\n",
6508 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6509 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6510 		   priv->dma_cap.estsel ? "Y" : "N");
6511 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6512 		   priv->dma_cap.fpesel ? "Y" : "N");
6513 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6514 		   priv->dma_cap.tbssel ? "Y" : "N");
6515 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6516 		   priv->dma_cap.tbs_ch_num);
6517 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6518 		   priv->dma_cap.sgfsel ? "Y" : "N");
6519 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6520 		   BIT(priv->dma_cap.ttsfd) >> 1);
6521 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6522 		   priv->dma_cap.numtc);
6523 	seq_printf(seq, "\tDCB Feature: %s\n",
6524 		   priv->dma_cap.dcben ? "Y" : "N");
6525 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6526 		   priv->dma_cap.advthword ? "Y" : "N");
6527 	seq_printf(seq, "\tPTP Offload: %s\n",
6528 		   priv->dma_cap.ptoen ? "Y" : "N");
6529 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6530 		   priv->dma_cap.osten ? "Y" : "N");
6531 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6532 		   priv->dma_cap.pfcen ? "Y" : "N");
6533 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6534 		   BIT(priv->dma_cap.frpes) << 6);
6535 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6536 		   BIT(priv->dma_cap.frpbs) << 6);
6537 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6538 		   priv->dma_cap.frppipe_num);
6539 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6540 		   priv->dma_cap.nrvf_num ?
6541 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6542 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6543 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6544 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6545 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6546 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6547 		   priv->dma_cap.cbtisel ? "Y" : "N");
6548 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6549 		   priv->dma_cap.aux_snapshot_n);
6550 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6551 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6552 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6553 		   priv->dma_cap.edma ? "Y" : "N");
6554 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6555 		   priv->dma_cap.ediffc ? "Y" : "N");
6556 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6557 		   priv->dma_cap.vxn ? "Y" : "N");
6558 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6559 		   priv->dma_cap.dbgmem ? "Y" : "N");
6560 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6561 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6562 	return 0;
6563 }
6564 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6565 
6566 /* Use network device events to rename debugfs file entries.
6567  */
6568 static int stmmac_device_event(struct notifier_block *unused,
6569 			       unsigned long event, void *ptr)
6570 {
6571 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6572 	struct stmmac_priv *priv = netdev_priv(dev);
6573 
6574 	if (dev->netdev_ops != &stmmac_netdev_ops)
6575 		goto done;
6576 
6577 	switch (event) {
6578 	case NETDEV_CHANGENAME:
6579 		if (priv->dbgfs_dir)
6580 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6581 							 priv->dbgfs_dir,
6582 							 stmmac_fs_dir,
6583 							 dev->name);
6584 		break;
6585 	}
6586 done:
6587 	return NOTIFY_DONE;
6588 }
6589 
6590 static struct notifier_block stmmac_notifier = {
6591 	.notifier_call = stmmac_device_event,
6592 };
6593 
6594 static void stmmac_init_fs(struct net_device *dev)
6595 {
6596 	struct stmmac_priv *priv = netdev_priv(dev);
6597 
6598 	rtnl_lock();
6599 
6600 	/* Create per netdev entries */
6601 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6602 
6603 	/* Entry to report DMA RX/TX rings */
6604 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6605 			    &stmmac_rings_status_fops);
6606 
6607 	/* Entry to report the DMA HW features */
6608 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6609 			    &stmmac_dma_cap_fops);
6610 
6611 	rtnl_unlock();
6612 }
6613 
6614 static void stmmac_exit_fs(struct net_device *dev)
6615 {
6616 	struct stmmac_priv *priv = netdev_priv(dev);
6617 
6618 	debugfs_remove_recursive(priv->dbgfs_dir);
6619 }
6620 #endif /* CONFIG_DEBUG_FS */
6621 
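/* Bit-by-bit CRC-32 (reflected polynomial 0xedb88320) over the 12 VID bits
 * of a little-endian VLAN tag; the caller uses the top bits of the inverted,
 * bit-reversed result to pick the bit to set in the VLAN hash filter.
 */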
6622 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6623 {
6624 	unsigned char *data = (unsigned char *)&vid_le;
6625 	unsigned char data_byte = 0;
6626 	u32 crc = ~0x0;
6627 	u32 temp = 0;
6628 	int i, bits;
6629 
6630 	bits = get_bitmask_order(VLAN_VID_MASK);
6631 	for (i = 0; i < bits; i++) {
6632 		if ((i % 8) == 0)
6633 			data_byte = data[i / 8];
6634 
6635 		temp = ((crc & 1) ^ data_byte) & 1;
6636 		crc >>= 1;
6637 		data_byte >>= 1;
6638 
6639 		if (temp)
6640 			crc ^= 0xedb88320;
6641 	}
6642 
6643 	return crc;
6644 }
6645 
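/* Recompute the VLAN hash filter from the bitmap of active VLANs. If the
 * hardware has no VLAN hash support, fall back to perfect matching of a
 * single VID (VID 0 always passes the filter).
 */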
6646 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6647 {
6648 	u32 crc, hash = 0;
6649 	__le16 pmatch = 0;
6650 	int count = 0;
6651 	u16 vid = 0;
6652 
6653 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6654 		__le16 vid_le = cpu_to_le16(vid);
6655 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6656 		hash |= (1 << crc);
6657 		count++;
6658 	}
6659 
6660 	if (!priv->dma_cap.vlhash) {
6661 		if (count > 2) /* VID = 0 always passes filter */
6662 			return -EOPNOTSUPP;
6663 
6664 		pmatch = cpu_to_le16(vid);
6665 		hash = 0;
6666 	}
6667 
6668 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6669 }
6670 
6671 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6672 {
6673 	struct stmmac_priv *priv = netdev_priv(ndev);
6674 	bool is_double = false;
6675 	int ret;
6676 
6677 	ret = pm_runtime_resume_and_get(priv->device);
6678 	if (ret < 0)
6679 		return ret;
6680 
6681 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6682 		is_double = true;
6683 
6684 	set_bit(vid, priv->active_vlans);
6685 	ret = stmmac_vlan_update(priv, is_double);
6686 	if (ret) {
6687 		clear_bit(vid, priv->active_vlans);
6688 		goto err_pm_put;
6689 	}
6690 
6691 	if (priv->hw->num_vlan) {
6692 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6693 		if (ret)
6694 			goto err_pm_put;
6695 	}
6696 err_pm_put:
6697 	pm_runtime_put(priv->device);
6698 
6699 	return ret;
6700 }
6701 
6702 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6703 {
6704 	struct stmmac_priv *priv = netdev_priv(ndev);
6705 	bool is_double = false;
6706 	int ret;
6707 
6708 	ret = pm_runtime_resume_and_get(priv->device);
6709 	if (ret < 0)
6710 		return ret;
6711 
6712 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6713 		is_double = true;
6714 
6715 	clear_bit(vid, priv->active_vlans);
6716 
6717 	if (priv->hw->num_vlan) {
6718 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6719 		if (ret)
6720 			goto del_vlan_error;
6721 	}
6722 
6723 	ret = stmmac_vlan_update(priv, is_double);
6724 
6725 del_vlan_error:
6726 	pm_runtime_put(priv->device);
6727 
6728 	return ret;
6729 }
6730 
6731 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6732 {
6733 	struct stmmac_priv *priv = netdev_priv(dev);
6734 
6735 	switch (bpf->command) {
6736 	case XDP_SETUP_PROG:
6737 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6738 	case XDP_SETUP_XSK_POOL:
6739 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6740 					     bpf->xsk.queue_id);
6741 	default:
6742 		return -EOPNOTSUPP;
6743 	}
6744 }
6745 
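/* XDP frame transmit handler (.ndo_xdp_xmit): send a batch of XDP frames on
 * the per-CPU XDP TX queue while holding the netdev TX lock, since the queue
 * is shared with the regular transmit path.
 */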
6746 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6747 			   struct xdp_frame **frames, u32 flags)
6748 {
6749 	struct stmmac_priv *priv = netdev_priv(dev);
6750 	int cpu = smp_processor_id();
6751 	struct netdev_queue *nq;
6752 	int i, nxmit = 0;
6753 	int queue;
6754 
6755 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6756 		return -ENETDOWN;
6757 
6758 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6759 		return -EINVAL;
6760 
6761 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6762 	nq = netdev_get_tx_queue(priv->dev, queue);
6763 
6764 	__netif_tx_lock(nq, cpu);
6765 	/* Avoids TX time-out as we are sharing with slow path */
6766 	txq_trans_cond_update(nq);
6767 
6768 	for (i = 0; i < num_frames; i++) {
6769 		int res;
6770 
6771 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6772 		if (res == STMMAC_XDP_CONSUMED)
6773 			break;
6774 
6775 		nxmit++;
6776 	}
6777 
6778 	if (flags & XDP_XMIT_FLUSH) {
6779 		stmmac_flush_tx_descriptors(priv, queue);
6780 		stmmac_tx_timer_arm(priv, queue);
6781 	}
6782 
6783 	__netif_tx_unlock(nq);
6784 
6785 	return nxmit;
6786 }
6787 
6788 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6789 {
6790 	struct stmmac_channel *ch = &priv->channel[queue];
6791 	unsigned long flags;
6792 
6793 	spin_lock_irqsave(&ch->lock, flags);
6794 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6795 	spin_unlock_irqrestore(&ch->lock, flags);
6796 
6797 	stmmac_stop_rx_dma(priv, queue);
6798 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6799 }
6800 
6801 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6802 {
6803 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6804 	struct stmmac_channel *ch = &priv->channel[queue];
6805 	unsigned long flags;
6806 	u32 buf_size;
6807 	int ret;
6808 
6809 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6810 	if (ret) {
6811 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6812 		return;
6813 	}
6814 
6815 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6816 	if (ret) {
6817 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6818 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6819 		return;
6820 	}
6821 
6822 	stmmac_reset_rx_queue(priv, queue);
6823 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6824 
6825 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6826 			    rx_q->dma_rx_phy, rx_q->queue_index);
6827 
6828 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6829 			     sizeof(struct dma_desc));
6830 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6831 			       rx_q->rx_tail_addr, rx_q->queue_index);
6832 
6833 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6834 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6835 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6836 				      buf_size,
6837 				      rx_q->queue_index);
6838 	} else {
6839 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6840 				      priv->dma_conf.dma_buf_sz,
6841 				      rx_q->queue_index);
6842 	}
6843 
6844 	stmmac_start_rx_dma(priv, queue);
6845 
6846 	spin_lock_irqsave(&ch->lock, flags);
6847 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6848 	spin_unlock_irqrestore(&ch->lock, flags);
6849 }
6850 
6851 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6852 {
6853 	struct stmmac_channel *ch = &priv->channel[queue];
6854 	unsigned long flags;
6855 
6856 	spin_lock_irqsave(&ch->lock, flags);
6857 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6858 	spin_unlock_irqrestore(&ch->lock, flags);
6859 
6860 	stmmac_stop_tx_dma(priv, queue);
6861 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6862 }
6863 
6864 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6865 {
6866 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6867 	struct stmmac_channel *ch = &priv->channel[queue];
6868 	unsigned long flags;
6869 	int ret;
6870 
6871 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6872 	if (ret) {
6873 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6874 		return;
6875 	}
6876 
6877 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6878 	if (ret) {
6879 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6880 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6881 		return;
6882 	}
6883 
6884 	stmmac_reset_tx_queue(priv, queue);
6885 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6886 
6887 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6888 			    tx_q->dma_tx_phy, tx_q->queue_index);
6889 
6890 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6891 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6892 
6893 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6894 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6895 			       tx_q->tx_tail_addr, tx_q->queue_index);
6896 
6897 	stmmac_start_tx_dma(priv, queue);
6898 
6899 	spin_lock_irqsave(&ch->lock, flags);
6900 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6901 	spin_unlock_irqrestore(&ch->lock, flags);
6902 }
6903 
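/* Tear down the data path (NAPI, IRQs, DMA channels and descriptor
 * resources) so that the XDP/XSK configuration can be changed; counterpart
 * of stmmac_xdp_open().
 */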
6904 void stmmac_xdp_release(struct net_device *dev)
6905 {
6906 	struct stmmac_priv *priv = netdev_priv(dev);
6907 	u32 chan;
6908 
6909 	/* Ensure tx function is not running */
6910 	netif_tx_disable(dev);
6911 
6912 	/* Disable NAPI process */
6913 	stmmac_disable_all_queues(priv);
6914 
6915 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6916 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6917 
6918 	/* Free the IRQ lines */
6919 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6920 
6921 	/* Stop TX/RX DMA channels */
6922 	stmmac_stop_all_dma(priv);
6923 
6924 	/* Release and free the Rx/Tx resources */
6925 	free_dma_desc_resources(priv, &priv->dma_conf);
6926 
6927 	/* Disable the MAC Rx/Tx */
6928 	stmmac_mac_set(priv, priv->ioaddr, false);
6929 
6930 	/* set trans_start so we don't get spurious
6931 	 * watchdogs during reset
6932 	 */
6933 	netif_trans_update(dev);
6934 	netif_carrier_off(dev);
6935 }
6936 
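/* Rebuild the data path after an XDP/XSK configuration change: reallocate
 * and reinitialize the descriptor rings, reprogram the RX/TX DMA channels
 * (honouring any attached XSK pools), then restart DMA, IRQs and NAPI.
 */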
6937 int stmmac_xdp_open(struct net_device *dev)
6938 {
6939 	struct stmmac_priv *priv = netdev_priv(dev);
6940 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6941 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6942 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6943 	struct stmmac_rx_queue *rx_q;
6944 	struct stmmac_tx_queue *tx_q;
6945 	u32 buf_size;
6946 	bool sph_en;
6947 	u32 chan;
6948 	int ret;
6949 
6950 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6951 	if (ret < 0) {
6952 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6953 			   __func__);
6954 		goto dma_desc_error;
6955 	}
6956 
6957 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6958 	if (ret < 0) {
6959 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6960 			   __func__);
6961 		goto init_error;
6962 	}
6963 
6964 	stmmac_reset_queues_param(priv);
6965 
6966 	/* DMA CSR Channel configuration */
6967 	for (chan = 0; chan < dma_csr_ch; chan++) {
6968 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6969 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6970 	}
6971 
6972 	/* Adjust Split header */
6973 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6974 
6975 	/* DMA RX Channel Configuration */
6976 	for (chan = 0; chan < rx_cnt; chan++) {
6977 		rx_q = &priv->dma_conf.rx_queue[chan];
6978 
6979 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6980 				    rx_q->dma_rx_phy, chan);
6981 
6982 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6983 				     (rx_q->buf_alloc_num *
6984 				      sizeof(struct dma_desc));
6985 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6986 				       rx_q->rx_tail_addr, chan);
6987 
6988 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6989 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6990 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6991 					      buf_size,
6992 					      rx_q->queue_index);
6993 		} else {
6994 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6995 					      priv->dma_conf.dma_buf_sz,
6996 					      rx_q->queue_index);
6997 		}
6998 
6999 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
7000 	}
7001 
7002 	/* DMA TX Channel Configuration */
7003 	for (chan = 0; chan < tx_cnt; chan++) {
7004 		tx_q = &priv->dma_conf.tx_queue[chan];
7005 
7006 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7007 				    tx_q->dma_tx_phy, chan);
7008 
7009 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7010 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7011 				       tx_q->tx_tail_addr, chan);
7012 
7013 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7014 		tx_q->txtimer.function = stmmac_tx_timer;
7015 	}
7016 
7017 	/* Enable the MAC Rx/Tx */
7018 	stmmac_mac_set(priv, priv->ioaddr, true);
7019 
7020 	/* Start Rx & Tx DMA Channels */
7021 	stmmac_start_all_dma(priv);
7022 
7023 	ret = stmmac_request_irq(dev);
7024 	if (ret)
7025 		goto irq_error;
7026 
7027 	/* Enable NAPI process */
7028 	stmmac_enable_all_queues(priv);
7029 	netif_carrier_on(dev);
7030 	netif_tx_start_all_queues(dev);
7031 	stmmac_enable_all_dma_irq(priv);
7032 
7033 	return 0;
7034 
7035 irq_error:
7036 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7037 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7038 
7039 	stmmac_hw_teardown(dev);
7040 init_error:
7041 	free_dma_desc_resources(priv, &priv->dma_conf);
7042 dma_desc_error:
7043 	return ret;
7044 }
7045 
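/**
 * stmmac_xsk_wakeup - .ndo_xsk_wakeup callback
 * @dev: network device pointer
 * @queue: queue index
 * @flags: wakeup flags
 * Description: check that the interface is up, XDP is enabled and an
 * XSK pool is bound to the queue, then schedule the rx/tx NAPI of the
 * corresponding channel if it is not already running.
 * Return: 0 on success, otherwise a negative errno.
 */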
7046 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7047 {
7048 	struct stmmac_priv *priv = netdev_priv(dev);
7049 	struct stmmac_rx_queue *rx_q;
7050 	struct stmmac_tx_queue *tx_q;
7051 	struct stmmac_channel *ch;
7052 
7053 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7054 	    !netif_carrier_ok(priv->dev))
7055 		return -ENETDOWN;
7056 
7057 	if (!stmmac_xdp_is_enabled(priv))
7058 		return -EINVAL;
7059 
7060 	if (queue >= priv->plat->rx_queues_to_use ||
7061 	    queue >= priv->plat->tx_queues_to_use)
7062 		return -EINVAL;
7063 
7064 	rx_q = &priv->dma_conf.rx_queue[queue];
7065 	tx_q = &priv->dma_conf.tx_queue[queue];
7066 	ch = &priv->channel[queue];
7067 
7068 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7069 		return -EINVAL;
7070 
7071 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7072 		/* EQoS does not have a per-DMA channel SW interrupt,
7073 		 * so we schedule the rx/tx NAPI straight away.
7074 		 */
7075 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7076 			__napi_schedule(&ch->rxtx_napi);
7077 	}
7078 
7079 	return 0;
7080 }
7081 
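/* .ndo_get_stats64 callback: aggregate the per-queue TX/RX packet and
 * byte counters under their u64_stats sync points and report the error
 * counters collected in priv->xstats.
 */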
7082 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7083 {
7084 	struct stmmac_priv *priv = netdev_priv(dev);
7085 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7086 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7087 	unsigned int start;
7088 	int q;
7089 
7090 	for (q = 0; q < tx_cnt; q++) {
7091 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7092 		u64 tx_packets;
7093 		u64 tx_bytes;
7094 
7095 		do {
7096 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7097 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7098 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7099 		do {
7100 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7101 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7102 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7103 
7104 		stats->tx_packets += tx_packets;
7105 		stats->tx_bytes += tx_bytes;
7106 	}
7107 
7108 	for (q = 0; q < rx_cnt; q++) {
7109 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7110 		u64 rx_packets;
7111 		u64 rx_bytes;
7112 
7113 		do {
7114 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7115 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7116 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7117 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7118 
7119 		stats->rx_packets += rx_packets;
7120 		stats->rx_bytes += rx_bytes;
7121 	}
7122 
7123 	stats->rx_dropped = priv->xstats.rx_dropped;
7124 	stats->rx_errors = priv->xstats.rx_errors;
7125 	stats->tx_dropped = priv->xstats.tx_dropped;
7126 	stats->tx_errors = priv->xstats.tx_errors;
7127 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7128 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7129 	stats->rx_length_errors = priv->xstats.rx_length;
7130 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7131 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7132 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7133 }
7134 
7135 static const struct net_device_ops stmmac_netdev_ops = {
7136 	.ndo_open = stmmac_open,
7137 	.ndo_start_xmit = stmmac_xmit,
7138 	.ndo_stop = stmmac_release,
7139 	.ndo_change_mtu = stmmac_change_mtu,
7140 	.ndo_fix_features = stmmac_fix_features,
7141 	.ndo_set_features = stmmac_set_features,
7142 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7143 	.ndo_tx_timeout = stmmac_tx_timeout,
7144 	.ndo_eth_ioctl = stmmac_ioctl,
7145 	.ndo_get_stats64 = stmmac_get_stats64,
7146 	.ndo_setup_tc = stmmac_setup_tc,
7147 	.ndo_select_queue = stmmac_select_queue,
7148 	.ndo_set_mac_address = stmmac_set_mac_address,
7149 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7150 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7151 	.ndo_bpf = stmmac_bpf,
7152 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7153 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7154 };
7155 
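/* Handle a deferred adapter reset: if STMMAC_RESET_REQUESTED is set and
 * the interface is not already down, close and re-open the device under
 * the rtnl lock.
 */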
7156 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7157 {
7158 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7159 		return;
7160 	if (test_bit(STMMAC_DOWN, &priv->state))
7161 		return;
7162 
7163 	netdev_err(priv->dev, "Reset adapter.\n");
7164 
7165 	rtnl_lock();
7166 	netif_trans_update(priv->dev);
7167 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7168 		usleep_range(1000, 2000);
7169 
7170 	set_bit(STMMAC_DOWN, &priv->state);
7171 	dev_close(priv->dev);
7172 	dev_open(priv->dev, NULL);
7173 	clear_bit(STMMAC_DOWN, &priv->state);
7174 	clear_bit(STMMAC_RESETING, &priv->state);
7175 	rtnl_unlock();
7176 }
7177 
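/* Service workqueue handler: run the reset subtask and clear the
 * STMMAC_SERVICE_SCHED flag so the task can be scheduled again.
 */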
7178 static void stmmac_service_task(struct work_struct *work)
7179 {
7180 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7181 			service_task);
7182 
7183 	stmmac_reset_subtask(priv);
7184 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7185 }
7186 
7187 /**
7188  *  stmmac_hw_init - Init the MAC device
7189  *  @priv: driver private structure
7190  *  Description: this function is to configure the MAC device according to
7191  *  some platform parameters or the HW capability register. It prepares the
7192  *  driver to use either ring or chain modes and to setup either enhanced or
7193  *  normal descriptors.
7194  */
7195 static int stmmac_hw_init(struct stmmac_priv *priv)
7196 {
7197 	int ret;
7198 
7199 	/* dwmac-sun8i only works in chain mode */
7200 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7201 		chain_mode = 1;
7202 	priv->chain_mode = chain_mode;
7203 
7204 	/* Initialize HW Interface */
7205 	ret = stmmac_hwif_init(priv);
7206 	if (ret)
7207 		return ret;
7208 
7209 	/* Get the HW capability (on GMAC cores newer than 3.50a) */
7210 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7211 	if (priv->hw_cap_support) {
7212 		dev_info(priv->device, "DMA HW capability register supported\n");
7213 
7214 		/* Some GMAC/DMA configuration fields that are passed through
7215 		 * the platform data (e.g. enh_desc, tx_coe) can be overridden
7216 		 * with the values from the HW capability register, if
7217 		 * supported.
7218 		 */
7219 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7220 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7221 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7222 		priv->hw->pmt = priv->plat->pmt;
7223 		if (priv->dma_cap.hash_tb_sz) {
7224 			priv->hw->multicast_filter_bins =
7225 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7226 			priv->hw->mcast_bits_log2 =
7227 					ilog2(priv->hw->multicast_filter_bins);
7228 		}
7229 
7230 		/* TXCOE doesn't work in thresh DMA mode */
7231 		if (priv->plat->force_thresh_dma_mode)
7232 			priv->plat->tx_coe = 0;
7233 		else
7234 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7235 
7236 		/* In case of GMAC4 rx_coe is from HW cap register. */
7237 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7238 
7239 		if (priv->dma_cap.rx_coe_type2)
7240 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7241 		else if (priv->dma_cap.rx_coe_type1)
7242 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7243 
7244 	} else {
7245 		dev_info(priv->device, "No HW DMA feature register supported\n");
7246 	}
7247 
7248 	if (priv->plat->rx_coe) {
7249 		priv->hw->rx_csum = priv->plat->rx_coe;
7250 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7251 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7252 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7253 	}
7254 	if (priv->plat->tx_coe)
7255 		dev_info(priv->device, "TX Checksum insertion supported\n");
7256 
7257 	if (priv->plat->pmt) {
7258 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7259 		device_set_wakeup_capable(priv->device, 1);
7260 	}
7261 
7262 	if (priv->dma_cap.tsoen)
7263 		dev_info(priv->device, "TSO supported\n");
7264 
7265 	priv->hw->vlan_fail_q_en =
7266 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7267 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7268 
7269 	/* Run HW quirks, if any */
7270 	if (priv->hwif_quirks) {
7271 		ret = priv->hwif_quirks(priv);
7272 		if (ret)
7273 			return ret;
7274 	}
7275 
7276 	/* Rx Watchdog is available in the COREs newer than 3.40.
7277 	 * In some cases, for example on buggy HW, this feature
7278 	 * has to be disabled; this can be done by setting the
7279 	 * riwt_off field in the platform data.
7280 	 */
7281 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7282 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7283 		priv->use_riwt = 1;
7284 		dev_info(priv->device,
7285 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7286 	}
7287 
7288 	return 0;
7289 }
7290 
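/* Register the per-channel NAPI contexts (RX, TX and combined RX/TX)
 * for every DMA channel in use.
 */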
7291 static void stmmac_napi_add(struct net_device *dev)
7292 {
7293 	struct stmmac_priv *priv = netdev_priv(dev);
7294 	u32 queue, maxq;
7295 
7296 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7297 
7298 	for (queue = 0; queue < maxq; queue++) {
7299 		struct stmmac_channel *ch = &priv->channel[queue];
7300 
7301 		ch->priv_data = priv;
7302 		ch->index = queue;
7303 		spin_lock_init(&ch->lock);
7304 
7305 		if (queue < priv->plat->rx_queues_to_use) {
7306 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7307 		}
7308 		if (queue < priv->plat->tx_queues_to_use) {
7309 			netif_napi_add_tx(dev, &ch->tx_napi,
7310 					  stmmac_napi_poll_tx);
7311 		}
7312 		if (queue < priv->plat->rx_queues_to_use &&
7313 		    queue < priv->plat->tx_queues_to_use) {
7314 			netif_napi_add(dev, &ch->rxtx_napi,
7315 				       stmmac_napi_poll_rxtx);
7316 		}
7317 	}
7318 }
7319 
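/* Remove the per-channel NAPI contexts registered by stmmac_napi_add(). */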
7320 static void stmmac_napi_del(struct net_device *dev)
7321 {
7322 	struct stmmac_priv *priv = netdev_priv(dev);
7323 	u32 queue, maxq;
7324 
7325 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7326 
7327 	for (queue = 0; queue < maxq; queue++) {
7328 		struct stmmac_channel *ch = &priv->channel[queue];
7329 
7330 		if (queue < priv->plat->rx_queues_to_use)
7331 			netif_napi_del(&ch->rx_napi);
7332 		if (queue < priv->plat->tx_queues_to_use)
7333 			netif_napi_del(&ch->tx_napi);
7334 		if (queue < priv->plat->rx_queues_to_use &&
7335 		    queue < priv->plat->tx_queues_to_use) {
7336 			netif_napi_del(&ch->rxtx_napi);
7337 		}
7338 	}
7339 }
7340 
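/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: network device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: close the device if it is running, re-register the NAPI
 * contexts for the new queue counts, refresh the default RSS table and
 * re-open the device.
 * Return: 0 on success, otherwise a negative errno.
 */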
7341 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7342 {
7343 	struct stmmac_priv *priv = netdev_priv(dev);
7344 	int ret = 0, i;
7345 
7346 	if (netif_running(dev))
7347 		stmmac_release(dev);
7348 
7349 	stmmac_napi_del(dev);
7350 
7351 	priv->plat->rx_queues_to_use = rx_cnt;
7352 	priv->plat->tx_queues_to_use = tx_cnt;
7353 	if (!netif_is_rxfh_configured(dev))
7354 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7355 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7356 									rx_cnt);
7357 
7358 	stmmac_set_half_duplex(priv);
7359 	stmmac_napi_add(dev);
7360 
7361 	if (netif_running(dev))
7362 		ret = stmmac_open(dev);
7363 
7364 	return ret;
7365 }
7366 
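/**
 * stmmac_reinit_ringparam - change the DMA ring sizes
 * @dev: network device pointer
 * @rx_size: new RX descriptor ring size
 * @tx_size: new TX descriptor ring size
 * Description: close the device if it is running, update the descriptor
 * ring sizes and re-open the device so the new sizes take effect.
 * Return: 0 on success, otherwise a negative errno.
 */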
7367 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7368 {
7369 	struct stmmac_priv *priv = netdev_priv(dev);
7370 	int ret = 0;
7371 
7372 	if (netif_running(dev))
7373 		stmmac_release(dev);
7374 
7375 	priv->dma_conf.dma_rx_size = rx_size;
7376 	priv->dma_conf.dma_tx_size = tx_size;
7377 
7378 	if (netif_running(dev))
7379 		ret = stmmac_open(dev);
7380 
7381 	return ret;
7382 }
7383 
7384 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
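/* FPE link-partner handshake worker: keep sending verify mPackets until
 * both the local and the link-partner state machines report
 * FPE_STATE_ENTERING_ON, then configure FPE; give up after 20 retries
 * or when the handshake is switched off.
 */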
7385 static void stmmac_fpe_lp_task(struct work_struct *work)
7386 {
7387 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7388 						fpe_task);
7389 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7390 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7391 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7392 	bool *hs_enable = &fpe_cfg->hs_enable;
7393 	bool *enable = &fpe_cfg->enable;
7394 	int retries = 20;
7395 
7396 	while (retries-- > 0) {
7397 		/* Bail out immediately if FPE handshake is OFF */
7398 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7399 			break;
7400 
7401 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7402 		    *lp_state == FPE_STATE_ENTERING_ON) {
7403 			stmmac_fpe_configure(priv, priv->ioaddr,
7404 					     fpe_cfg,
7405 					     priv->plat->tx_queues_to_use,
7406 					     priv->plat->rx_queues_to_use,
7407 					     *enable);
7408 
7409 			netdev_info(priv->dev, "configured FPE\n");
7410 
7411 			*lo_state = FPE_STATE_ON;
7412 			*lp_state = FPE_STATE_ON;
7413 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7414 			break;
7415 		}
7416 
7417 		if ((*lo_state == FPE_STATE_CAPABLE ||
7418 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7419 		     *lp_state != FPE_STATE_ON) {
7420 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7421 				    *lo_state, *lp_state);
7422 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7423 						fpe_cfg,
7424 						MPACKET_VERIFY);
7425 		}
7426 		/* Sleep then retry */
7427 		msleep(500);
7428 	}
7429 
7430 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7431 }
7432 
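/**
 * stmmac_fpe_handshake - start or stop the FPE verification handshake
 * @priv: driver private structure
 * @enable: true to start the handshake, false to stop it
 * Description: when enabling, send a verify mPacket to the link partner;
 * when disabling, reset both the local and the link-partner FPE state
 * machines to FPE_STATE_OFF.
 */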
7433 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7434 {
7435 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7436 		if (enable) {
7437 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7438 						priv->plat->fpe_cfg,
7439 						MPACKET_VERIFY);
7440 		} else {
7441 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7442 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7443 		}
7444 
7445 		priv->plat->fpe_cfg->hs_enable = enable;
7446 	}
7447 }
7448 
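/* .xmo_rx_timestamp XDP metadata callback: extract the RX hardware
 * timestamp from the descriptor (the next/context descriptor on GMAC4
 * and XGMAC), compensate the CDC error and return it as a ktime value.
 */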
7449 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7450 {
7451 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7452 	struct dma_desc *desc_contains_ts = ctx->desc;
7453 	struct stmmac_priv *priv = ctx->priv;
7454 	struct dma_desc *ndesc = ctx->ndesc;
7455 	struct dma_desc *desc = ctx->desc;
7456 	u64 ns = 0;
7457 
7458 	if (!priv->hwts_rx_en)
7459 		return -ENODATA;
7460 
7461 	/* For GMAC4, the valid timestamp is held in the next (context) descriptor. */
7462 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7463 		desc_contains_ts = ndesc;
7464 
7465 	/* Check if timestamp is available */
7466 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7467 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7468 		ns -= priv->plat->cdc_error_adj;
7469 		*timestamp = ns_to_ktime(ns);
7470 		return 0;
7471 	}
7472 
7473 	return -ENODATA;
7474 }
7475 
7476 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7477 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7478 };
7479 
7480 /**
7481  * stmmac_dvr_probe
7482  * @device: device pointer
7483  * @plat_dat: platform data pointer
7484  * @res: stmmac resource pointer
7485  * Description: this is the main probe function: it allocates the network
7486  * device via alloc_etherdev, sets up the private structure and registers
7487  * the interface.
7488  * Return: 0 on success, otherwise a negative errno.
7489  */
7490 int stmmac_dvr_probe(struct device *device,
7491 		     struct plat_stmmacenet_data *plat_dat,
7492 		     struct stmmac_resources *res)
7493 {
7494 	struct net_device *ndev = NULL;
7495 	struct stmmac_priv *priv;
7496 	u32 rxq;
7497 	int i, ret = 0;
7498 
7499 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7500 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7501 	if (!ndev)
7502 		return -ENOMEM;
7503 
7504 	SET_NETDEV_DEV(ndev, device);
7505 
7506 	priv = netdev_priv(ndev);
7507 	priv->device = device;
7508 	priv->dev = ndev;
7509 
7510 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7511 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7512 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7513 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7514 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7515 	}
7516 
7517 	priv->xstats.pcpu_stats =
7518 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7519 	if (!priv->xstats.pcpu_stats)
7520 		return -ENOMEM;
7521 
7522 	stmmac_set_ethtool_ops(ndev);
7523 	priv->pause = pause;
7524 	priv->plat = plat_dat;
7525 	priv->ioaddr = res->addr;
7526 	priv->dev->base_addr = (unsigned long)res->addr;
7527 	priv->plat->dma_cfg->multi_msi_en =
7528 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7529 
7530 	priv->dev->irq = res->irq;
7531 	priv->wol_irq = res->wol_irq;
7532 	priv->lpi_irq = res->lpi_irq;
7533 	priv->sfty_irq = res->sfty_irq;
7534 	priv->sfty_ce_irq = res->sfty_ce_irq;
7535 	priv->sfty_ue_irq = res->sfty_ue_irq;
7536 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7537 		priv->rx_irq[i] = res->rx_irq[i];
7538 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7539 		priv->tx_irq[i] = res->tx_irq[i];
7540 
7541 	if (!is_zero_ether_addr(res->mac))
7542 		eth_hw_addr_set(priv->dev, res->mac);
7543 
7544 	dev_set_drvdata(device, priv->dev);
7545 
7546 	/* Verify driver arguments */
7547 	stmmac_verify_args();
7548 
7549 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7550 	if (!priv->af_xdp_zc_qps)
7551 		return -ENOMEM;
7552 
7553 	/* Allocate workqueue */
7554 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7555 	if (!priv->wq) {
7556 		dev_err(priv->device, "failed to create workqueue\n");
7557 		ret = -ENOMEM;
7558 		goto error_wq_init;
7559 	}
7560 
7561 	INIT_WORK(&priv->service_task, stmmac_service_task);
7562 
7563 	/* Initialize Link Partner FPE workqueue */
7564 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7565 
7566 	/* Override with kernel parameters if supplied XXX CRS XXX
7567 	 * this needs to have multiple instances
7568 	 */
7569 	if ((phyaddr >= 0) && (phyaddr <= 31))
7570 		priv->plat->phy_addr = phyaddr;
7571 
7572 	if (priv->plat->stmmac_rst) {
7573 		ret = reset_control_assert(priv->plat->stmmac_rst);
7574 		reset_control_deassert(priv->plat->stmmac_rst);
7575 		/* Some reset controllers provide only a reset callback instead
7576 		 * of the assert + deassert callback pair.
7577 		 */
7578 		if (ret == -ENOTSUPP)
7579 			reset_control_reset(priv->plat->stmmac_rst);
7580 	}
7581 
7582 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7583 	if (ret == -ENOTSUPP)
7584 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7585 			ERR_PTR(ret));
7586 
7587 	/* Wait a bit for the reset to take effect */
7588 	udelay(10);
7589 
7590 	/* Init MAC and get the capabilities */
7591 	ret = stmmac_hw_init(priv);
7592 	if (ret)
7593 		goto error_hw_init;
7594 
7595 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7596 	 */
7597 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7598 		priv->plat->dma_cfg->dche = false;
7599 
7600 	stmmac_check_ether_addr(priv);
7601 
7602 	ndev->netdev_ops = &stmmac_netdev_ops;
7603 
7604 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7605 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7606 
7607 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7608 			    NETIF_F_RXCSUM;
7609 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7610 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7611 
7612 	ret = stmmac_tc_init(priv, priv);
7613 	if (!ret)
7614 		ndev->hw_features |= NETIF_F_HW_TC;
7616 
7617 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7618 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7619 		if (priv->plat->has_gmac4)
7620 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7621 		priv->tso = true;
7622 		dev_info(priv->device, "TSO feature enabled\n");
7623 	}
7624 
7625 	if (priv->dma_cap.sphen &&
7626 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7627 		ndev->hw_features |= NETIF_F_GRO;
7628 		priv->sph_cap = true;
7629 		priv->sph = priv->sph_cap;
7630 		dev_info(priv->device, "SPH feature enabled\n");
7631 	}
7632 
7633 	/* Ideally our host DMA address width is the same as for the
7634 	 * device. However, it may differ and then we have to use our
7635 	 * host DMA width for allocation and the device DMA width for
7636 	 * register handling.
7637 	 */
7638 	if (priv->plat->host_dma_width)
7639 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7640 	else
7641 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7642 
7643 	if (priv->dma_cap.host_dma_width) {
7644 		ret = dma_set_mask_and_coherent(device,
7645 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7646 		if (!ret) {
7647 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7648 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7649 
7650 			/*
7651 			 * If more than 32 bits can be addressed, make sure to
7652 			 * enable enhanced addressing mode.
7653 			 */
7654 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7655 				priv->plat->dma_cfg->eame = true;
7656 		} else {
7657 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7658 			if (ret) {
7659 				dev_err(priv->device, "Failed to set DMA Mask\n");
7660 				goto error_hw_init;
7661 			}
7662 
7663 			priv->dma_cap.host_dma_width = 32;
7664 		}
7665 	}
7666 
7667 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7668 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7669 #ifdef STMMAC_VLAN_TAG_USED
7670 	/* Both mac100 and gmac support receive VLAN tag detection */
7671 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7672 	ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7673 	priv->hw->hw_vlan_en = true;
7674 
7675 	if (priv->dma_cap.vlhash) {
7676 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7677 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7678 	}
7679 	if (priv->dma_cap.vlins) {
7680 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7681 		if (priv->dma_cap.dvlan)
7682 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7683 	}
7684 #endif
7685 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7686 
7687 	priv->xstats.threshold = tc;
7688 
7689 	/* Initialize RSS */
7690 	rxq = priv->plat->rx_queues_to_use;
7691 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7692 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7693 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7694 
7695 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7696 		ndev->features |= NETIF_F_RXHASH;
7697 
7698 	ndev->vlan_features |= ndev->features;
7699 	/* TSO doesn't work on VLANs yet */
7700 	ndev->vlan_features &= ~NETIF_F_TSO;
7701 
7702 	/* MTU range: 46 - hw-specific max */
7703 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7704 	if (priv->plat->has_xgmac)
7705 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7706 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7707 		ndev->max_mtu = JUMBO_LEN;
7708 	else
7709 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7710 	/* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
7711 	 * ndev->max_mtu or smaller than ndev->min_mtu, which is an invalid range.
7712 	 */
7713 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7714 	    (priv->plat->maxmtu >= ndev->min_mtu))
7715 		ndev->max_mtu = priv->plat->maxmtu;
7716 	else if (priv->plat->maxmtu < ndev->min_mtu)
7717 		dev_warn(priv->device,
7718 			 "%s: warning: maxmtu having invalid value (%d)\n",
7719 			 __func__, priv->plat->maxmtu);
7720 
7721 	if (flow_ctrl)
7722 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7723 
7724 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7725 
7726 	/* Setup channels NAPI */
7727 	stmmac_napi_add(ndev);
7728 
7729 	mutex_init(&priv->lock);
7730 
7731 	/* If a specific clk_csr value is passed from the platform,
7732 	 * the CSR Clock Range selection is fixed and cannot be
7733 	 * changed at run-time. Otherwise, the driver tries to set
7734 	 * the MDC clock dynamically according to the actual CSR
7735 	 * input clock.
7736 	 */
7737 	if (priv->plat->clk_csr >= 0)
7738 		priv->clk_csr = priv->plat->clk_csr;
7739 	else
7740 		stmmac_clk_csr_set(priv);
7741 
7742 	stmmac_check_pcs_mode(priv);
7743 
7744 	pm_runtime_get_noresume(device);
7745 	pm_runtime_set_active(device);
7746 	if (!pm_runtime_enabled(device))
7747 		pm_runtime_enable(device);
7748 
7749 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7750 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7751 		/* MDIO bus Registration */
7752 		ret = stmmac_mdio_register(ndev);
7753 		if (ret < 0) {
7754 			dev_err_probe(priv->device, ret,
7755 				      "%s: MDIO bus (id: %d) registration failed\n",
7756 				      __func__, priv->plat->bus_id);
7757 			goto error_mdio_register;
7758 		}
7759 	}
7760 
7761 	if (priv->plat->speed_mode_2500)
7762 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7763 
7764 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7765 		ret = stmmac_xpcs_setup(priv->mii);
7766 		if (ret)
7767 			goto error_xpcs_setup;
7768 	}
7769 
7770 	ret = stmmac_phy_setup(priv);
7771 	if (ret) {
7772 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7773 		goto error_phy_setup;
7774 	}
7775 
7776 	ret = register_netdev(ndev);
7777 	if (ret) {
7778 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7779 			__func__, ret);
7780 		goto error_netdev_register;
7781 	}
7782 
7783 #ifdef CONFIG_DEBUG_FS
7784 	stmmac_init_fs(ndev);
7785 #endif
7786 
7787 	if (priv->plat->dump_debug_regs)
7788 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7789 
7790 	/* Let pm_runtime_put() disable the clocks.
7791 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7792 	 */
7793 	pm_runtime_put(device);
7794 
7795 	return ret;
7796 
7797 error_netdev_register:
7798 	phylink_destroy(priv->phylink);
7799 error_xpcs_setup:
7800 error_phy_setup:
7801 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7802 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7803 		stmmac_mdio_unregister(ndev);
7804 error_mdio_register:
7805 	stmmac_napi_del(ndev);
7806 error_hw_init:
7807 	destroy_workqueue(priv->wq);
7808 error_wq_init:
7809 	bitmap_free(priv->af_xdp_zc_qps);
7810 
7811 	return ret;
7812 }
7813 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7814 
7815 /**
7816  * stmmac_dvr_remove
7817  * @dev: device pointer
7818  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7819  * changes the link status and releases the DMA descriptor rings.
7820  */
7821 void stmmac_dvr_remove(struct device *dev)
7822 {
7823 	struct net_device *ndev = dev_get_drvdata(dev);
7824 	struct stmmac_priv *priv = netdev_priv(ndev);
7825 
7826 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7827 
7828 	pm_runtime_get_sync(dev);
7829 
7830 	stmmac_stop_all_dma(priv);
7831 	stmmac_mac_set(priv, priv->ioaddr, false);
7832 	netif_carrier_off(ndev);
7833 	unregister_netdev(ndev);
7834 
7835 #ifdef CONFIG_DEBUG_FS
7836 	stmmac_exit_fs(ndev);
7837 #endif
7838 	phylink_destroy(priv->phylink);
7839 	if (priv->plat->stmmac_rst)
7840 		reset_control_assert(priv->plat->stmmac_rst);
7841 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7842 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7843 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7844 		stmmac_mdio_unregister(ndev);
7845 	destroy_workqueue(priv->wq);
7846 	mutex_destroy(&priv->lock);
7847 	bitmap_free(priv->af_xdp_zc_qps);
7848 
7849 	pm_runtime_disable(dev);
7850 	pm_runtime_put_noidle(dev);
7851 }
7852 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7853 
7854 /**
7855  * stmmac_suspend - suspend callback
7856  * @dev: device pointer
7857  * Description: this is the function to suspend the device and it is called
7858  * by the platform driver to stop the network queue, release the resources,
7859  * program the PMT register (for WoL), clean and release driver resources.
7860  */
7861 int stmmac_suspend(struct device *dev)
7862 {
7863 	struct net_device *ndev = dev_get_drvdata(dev);
7864 	struct stmmac_priv *priv = netdev_priv(ndev);
7865 	u32 chan;
7866 
7867 	if (!ndev || !netif_running(ndev))
7868 		return 0;
7869 
7870 	mutex_lock(&priv->lock);
7871 
7872 	netif_device_detach(ndev);
7873 
7874 	stmmac_disable_all_queues(priv);
7875 
7876 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7877 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7878 
7879 	if (priv->eee_enabled) {
7880 		priv->tx_path_in_lpi_mode = false;
7881 		del_timer_sync(&priv->eee_ctrl_timer);
7882 	}
7883 
7884 	/* Stop TX/RX DMA */
7885 	stmmac_stop_all_dma(priv);
7886 
7887 	if (priv->plat->serdes_powerdown)
7888 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7889 
7890 	/* Enable Power down mode by programming the PMT regs */
7891 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7892 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7893 		priv->irq_wake = 1;
7894 	} else {
7895 		stmmac_mac_set(priv, priv->ioaddr, false);
7896 		pinctrl_pm_select_sleep_state(priv->device);
7897 	}
7898 
7899 	mutex_unlock(&priv->lock);
7900 
7901 	rtnl_lock();
7902 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7903 		phylink_suspend(priv->phylink, true);
7904 	} else {
7905 		if (device_may_wakeup(priv->device))
7906 			phylink_speed_down(priv->phylink, false);
7907 		phylink_suspend(priv->phylink, false);
7908 	}
7909 	rtnl_unlock();
7910 
7911 	if (priv->dma_cap.fpesel) {
7912 		/* Disable FPE */
7913 		stmmac_fpe_configure(priv, priv->ioaddr,
7914 				     priv->plat->fpe_cfg,
7915 				     priv->plat->tx_queues_to_use,
7916 				     priv->plat->rx_queues_to_use, false);
7917 
7918 		stmmac_fpe_handshake(priv, false);
7919 		stmmac_fpe_stop_wq(priv);
7920 	}
7921 
7922 	priv->speed = SPEED_UNKNOWN;
7923 	return 0;
7924 }
7925 EXPORT_SYMBOL_GPL(stmmac_suspend);
7926 
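/* Reset the RX queue software indices (cur_rx/dirty_rx). */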
7927 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7928 {
7929 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7930 
7931 	rx_q->cur_rx = 0;
7932 	rx_q->dirty_rx = 0;
7933 }
7934 
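/* Reset the TX queue software indices and the BQL state of the
 * corresponding netdev TX queue.
 */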
7935 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7936 {
7937 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7938 
7939 	tx_q->cur_tx = 0;
7940 	tx_q->dirty_tx = 0;
7941 	tx_q->mss = 0;
7942 
7943 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7944 }
7945 
7946 /**
7947  * stmmac_reset_queues_param - reset queue parameters
7948  * @priv: device pointer
7949  */
7950 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7951 {
7952 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7953 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7954 	u32 queue;
7955 
7956 	for (queue = 0; queue < rx_cnt; queue++)
7957 		stmmac_reset_rx_queue(priv, queue);
7958 
7959 	for (queue = 0; queue < tx_cnt; queue++)
7960 		stmmac_reset_tx_queue(priv, queue);
7961 }
7962 
7963 /**
7964  * stmmac_resume - resume callback
7965  * @dev: device pointer
7966  * Description: on resume, this function is invoked to set up the DMA and
7967  * the core in a usable state.
7968  */
7969 int stmmac_resume(struct device *dev)
7970 {
7971 	struct net_device *ndev = dev_get_drvdata(dev);
7972 	struct stmmac_priv *priv = netdev_priv(ndev);
7973 	int ret;
7974 
7975 	if (!netif_running(ndev))
7976 		return 0;
7977 
7978 	/* The Power Down bit in the PM register is cleared
7979 	 * automatically as soon as a magic packet or a Wake-up frame
7980 	 * is received. Nevertheless, it is better to clear this bit
7981 	 * manually because it can cause problems when resuming
7982 	 * from other devices (e.g. a serial console).
7983 	 */
7984 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7985 		mutex_lock(&priv->lock);
7986 		stmmac_pmt(priv, priv->hw, 0);
7987 		mutex_unlock(&priv->lock);
7988 		priv->irq_wake = 0;
7989 	} else {
7990 		pinctrl_pm_select_default_state(priv->device);
7991 		/* reset the phy so that it's ready */
7992 		if (priv->mii)
7993 			stmmac_mdio_reset(priv->mii);
7994 	}
7995 
7996 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7997 	    priv->plat->serdes_powerup) {
7998 		ret = priv->plat->serdes_powerup(ndev,
7999 						 priv->plat->bsp_priv);
8000 
8001 		if (ret < 0)
8002 			return ret;
8003 	}
8004 
8005 	rtnl_lock();
8006 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
8007 		phylink_resume(priv->phylink);
8008 	} else {
8009 		phylink_resume(priv->phylink);
8010 		if (device_may_wakeup(priv->device))
8011 			phylink_speed_up(priv->phylink);
8012 	}
8013 	rtnl_unlock();
8014 
8015 	rtnl_lock();
8016 	mutex_lock(&priv->lock);
8017 
8018 	stmmac_reset_queues_param(priv);
8019 
8020 	stmmac_free_tx_skbufs(priv);
8021 	stmmac_clear_descriptors(priv, &priv->dma_conf);
8022 
8023 	stmmac_hw_setup(ndev, false);
8024 	stmmac_init_coalesce(priv);
8025 	stmmac_set_rx_mode(ndev);
8026 
8027 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8028 
8029 	stmmac_enable_all_queues(priv);
8030 	stmmac_enable_all_dma_irq(priv);
8031 
8032 	mutex_unlock(&priv->lock);
8033 	rtnl_unlock();
8034 
8035 	netif_device_attach(ndev);
8036 
8037 	return 0;
8038 }
8039 EXPORT_SYMBOL_GPL(stmmac_resume);
8040 
8041 #ifndef MODULE
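/* Parse the "stmmaceth=" kernel command line options for the built-in
 * (non-modular) driver.
 */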
8042 static int __init stmmac_cmdline_opt(char *str)
8043 {
8044 	char *opt;
8045 
8046 	if (!str || !*str)
8047 		return 1;
8048 	while ((opt = strsep(&str, ",")) != NULL) {
8049 		if (!strncmp(opt, "debug:", 6)) {
8050 			if (kstrtoint(opt + 6, 0, &debug))
8051 				goto err;
8052 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8053 			if (kstrtoint(opt + 8, 0, &phyaddr))
8054 				goto err;
8055 		} else if (!strncmp(opt, "buf_sz:", 7)) {
8056 			if (kstrtoint(opt + 7, 0, &buf_sz))
8057 				goto err;
8058 		} else if (!strncmp(opt, "tc:", 3)) {
8059 			if (kstrtoint(opt + 3, 0, &tc))
8060 				goto err;
8061 		} else if (!strncmp(opt, "watchdog:", 9)) {
8062 			if (kstrtoint(opt + 9, 0, &watchdog))
8063 				goto err;
8064 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8065 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8066 				goto err;
8067 		} else if (!strncmp(opt, "pause:", 6)) {
8068 			if (kstrtoint(opt + 6, 0, &pause))
8069 				goto err;
8070 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8071 			if (kstrtoint(opt + 10, 0, &eee_timer))
8072 				goto err;
8073 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8074 			if (kstrtoint(opt + 11, 0, &chain_mode))
8075 				goto err;
8076 		}
8077 	}
8078 	return 1;
8079 
8080 err:
8081 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
8082 	return 1;
8083 }
8084 
8085 __setup("stmmaceth=", stmmac_cmdline_opt);
8086 #endif /* MODULE */
8087 
8088 static int __init stmmac_init(void)
8089 {
8090 #ifdef CONFIG_DEBUG_FS
8091 	/* Create debugfs main directory if it doesn't exist yet */
8092 	if (!stmmac_fs_dir)
8093 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8094 	register_netdevice_notifier(&stmmac_notifier);
8095 #endif
8096 
8097 	return 0;
8098 }
8099 
8100 static void __exit stmmac_exit(void)
8101 {
8102 #ifdef CONFIG_DEBUG_FS
8103 	unregister_netdevice_notifier(&stmmac_notifier);
8104 	debugfs_remove_recursive(stmmac_fs_dir);
8105 #endif
8106 }
8107 
8108 module_init(stmmac_init)
8109 module_exit(stmmac_exit)
8110 
8111 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8112 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8113 MODULE_LICENSE("GPL");
8114