xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 2ee738e90e80850582cbe10f34c6447965c1d87b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54 
55 /* As long as the interface is active, we keep the timestamping counter enabled
56  * with fine resolution and binary rollover. This avoids non-monotonic behavior
57  * (clock jumps) when changing timestamping settings at runtime.
58  */
59 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 				 PTP_TCR_TSCTRLSSR)
61 
62 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
64 
65 /* Module parameters */
66 #define TX_TIMEO	5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70 
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74 
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78 
79 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
110 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
111 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
112 
113 #define STMMAC_DEFAULT_LPI_TIMER	1000
114 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
115 module_param(eee_timer, uint, 0644);
116 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
117 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
118 
119 /* By default the driver will use ring mode to manage the TX and RX descriptors,
120  * but allows the user to force chain mode instead of ring mode.
121  */
122 static int chain_mode;
123 module_param(chain_mode, int, 0444);
124 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
125 
126 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
127 /* For MSI interrupts handling */
128 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
129 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
131 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
132 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
133 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
135 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
137 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
138 					  u32 rxmode, u32 chan);
139 
140 #ifdef CONFIG_DEBUG_FS
141 static const struct net_device_ops stmmac_netdev_ops;
142 static void stmmac_init_fs(struct net_device *dev);
143 static void stmmac_exit_fs(struct net_device *dev);
144 #endif
145 
146 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
147 
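/**
 * stmmac_bus_clks_config - enable or disable the bus clocks
 * @priv: driver private structure
 * @enabled: true to enable the clocks, false to disable them
 * Description: gate the stmmac and pclk bus clocks (plus any platform
 * specific clocks via plat->clks_config) on or off, unwinding the already
 * enabled clocks if one of the enable steps fails.
 */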
148 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
149 {
150 	int ret = 0;
151 
152 	if (enabled) {
153 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
154 		if (ret)
155 			return ret;
156 		ret = clk_prepare_enable(priv->plat->pclk);
157 		if (ret) {
158 			clk_disable_unprepare(priv->plat->stmmac_clk);
159 			return ret;
160 		}
161 		if (priv->plat->clks_config) {
162 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
163 			if (ret) {
164 				clk_disable_unprepare(priv->plat->stmmac_clk);
165 				clk_disable_unprepare(priv->plat->pclk);
166 				return ret;
167 			}
168 		}
169 	} else {
170 		clk_disable_unprepare(priv->plat->stmmac_clk);
171 		clk_disable_unprepare(priv->plat->pclk);
172 		if (priv->plat->clks_config)
173 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
174 	}
175 
176 	return ret;
177 }
178 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
179 
180 /**
181  * stmmac_verify_args - verify the driver parameters.
182  * Description: it checks the driver parameters and sets a default in case of
183  * errors.
184  */
185 static void stmmac_verify_args(void)
186 {
187 	if (unlikely(watchdog < 0))
188 		watchdog = TX_TIMEO;
189 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
190 		buf_sz = DEFAULT_BUFSIZE;
191 	if (unlikely(flow_ctrl > 1))
192 		flow_ctrl = FLOW_AUTO;
193 	else if (likely(flow_ctrl < 0))
194 		flow_ctrl = FLOW_OFF;
195 	if (unlikely((pause < 0) || (pause > 0xffff)))
196 		pause = PAUSE_TIME;
197 }
198 
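/* Disable NAPI on every channel. Queues running AF_XDP zero-copy use the
 * combined rxtx_napi instance instead of the separate rx/tx ones.
 */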
199 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
200 {
201 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
202 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
203 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
204 	u32 queue;
205 
206 	for (queue = 0; queue < maxq; queue++) {
207 		struct stmmac_channel *ch = &priv->channel[queue];
208 
209 		if (stmmac_xdp_is_enabled(priv) &&
210 		    test_bit(queue, priv->af_xdp_zc_qps)) {
211 			napi_disable(&ch->rxtx_napi);
212 			continue;
213 		}
214 
215 		if (queue < rx_queues_cnt)
216 			napi_disable(&ch->rx_napi);
217 		if (queue < tx_queues_cnt)
218 			napi_disable(&ch->tx_napi);
219 	}
220 }
221 
222 /**
223  * stmmac_disable_all_queues - Disable all queues
224  * @priv: driver private structure
225  */
226 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
227 {
228 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
229 	struct stmmac_rx_queue *rx_q;
230 	u32 queue;
231 
232 	/* synchronize_rcu() needed for pending XDP buffers to drain */
233 	for (queue = 0; queue < rx_queues_cnt; queue++) {
234 		rx_q = &priv->dma_conf.rx_queue[queue];
235 		if (rx_q->xsk_pool) {
236 			synchronize_rcu();
237 			break;
238 		}
239 	}
240 
241 	__stmmac_disable_all_queues(priv);
242 }
243 
244 /**
245  * stmmac_enable_all_queues - Enable all queues
246  * @priv: driver private structure
247  */
248 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
249 {
250 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
251 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
252 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
253 	u32 queue;
254 
255 	for (queue = 0; queue < maxq; queue++) {
256 		struct stmmac_channel *ch = &priv->channel[queue];
257 
258 		if (stmmac_xdp_is_enabled(priv) &&
259 		    test_bit(queue, priv->af_xdp_zc_qps)) {
260 			napi_enable(&ch->rxtx_napi);
261 			continue;
262 		}
263 
264 		if (queue < rx_queues_cnt)
265 			napi_enable(&ch->rx_napi);
266 		if (queue < tx_queues_cnt)
267 			napi_enable(&ch->tx_napi);
268 	}
269 }
270 
271 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
272 {
273 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
274 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
275 		queue_work(priv->wq, &priv->service_task);
276 }
277 
278 static void stmmac_global_err(struct stmmac_priv *priv)
279 {
280 	netif_carrier_off(priv->dev);
281 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
282 	stmmac_service_event_schedule(priv);
283 }
284 
285 /**
286  * stmmac_clk_csr_set - dynamically set the MDC clock
287  * @priv: driver private structure
288  * Description: this is to dynamically set the MDC clock according to the csr
289  * clock input.
290  * Note:
291  *	If a specific clk_csr value is passed from the platform
292  *	this means that the CSR Clock Range selection cannot be
293  *	changed at run-time and it is fixed (as reported in the driver
294  *	documentation). Otherwise, the driver will try to set the MDC
295  *	clock dynamically according to the actual clock input.
296  */
297 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
298 {
299 	unsigned long clk_rate;
300 
301 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
302 
303 	/* Platform provided default clk_csr would be assumed valid
304 	 * for all other cases except for the below mentioned ones.
305 	 * For values higher than the IEEE 802.3 specified frequency
306 	 * we cannot estimate the proper divider because the frequency of
307 	 * clk_csr_i is not known. So we do not change the default
308 	 * divider.
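	 * As a purely illustrative example, a 75 MHz clk_csr_i falls in the
	 * 60-100 MHz range below and would select STMMAC_CSR_60_100M.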
309 	 */
310 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
311 		if (clk_rate < CSR_F_35M)
312 			priv->clk_csr = STMMAC_CSR_20_35M;
313 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
314 			priv->clk_csr = STMMAC_CSR_35_60M;
315 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
316 			priv->clk_csr = STMMAC_CSR_60_100M;
317 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
318 			priv->clk_csr = STMMAC_CSR_100_150M;
319 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
320 			priv->clk_csr = STMMAC_CSR_150_250M;
321 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
322 			priv->clk_csr = STMMAC_CSR_250_300M;
323 		else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
324 			priv->clk_csr = STMMAC_CSR_300_500M;
325 		else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
326 			priv->clk_csr = STMMAC_CSR_500_800M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
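/* Return the number of free descriptors in the TX ring, i.e. the distance
 * from cur_tx back to dirty_tx modulo the ring size, minus the one slot
 * kept unused to distinguish a full ring from an empty one.
 */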
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get the number of dirty RX descriptors
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv)
394 {
395 	stmmac_set_eee_lpi_timer(priv, priv->hw, 0);
396 }
397 
398 static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv)
399 {
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer);
401 }
402 
403 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
404 {
405 	u32 tx_cnt = priv->plat->tx_queues_to_use;
406 	u32 queue;
407 
408 	/* check if all TX queues have the work finished */
409 	for (queue = 0; queue < tx_cnt; queue++) {
410 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
411 
412 		if (tx_q->dirty_tx != tx_q->cur_tx)
413 			return true; /* still unfinished work */
414 	}
415 
416 	return false;
417 }
418 
419 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
420 {
421 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
422 }
423 
424 /**
425  * stmmac_try_to_start_sw_lpi - check and enter LPI mode
426  * @priv: driver private structure
427  * Description: this function checks whether the TX path is idle and, if so,
428  * enters LPI mode when EEE is enabled.
429  */
430 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
431 {
432 	if (stmmac_eee_tx_busy(priv)) {
433 		stmmac_restart_sw_lpi_timer(priv);
434 		return;
435 	}
436 
437 	/* Check and enter in LPI mode */
438 	if (!priv->tx_path_in_lpi_mode)
439 		stmmac_set_eee_mode(priv, priv->hw,
440 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
441 }
442 
443 /**
444  * stmmac_stop_sw_lpi - stop transmitting LPI
445  * @priv: driver private structure
446  * Description: When using software-controlled LPI, stop transmitting LPI state.
447  */
448 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
449 {
450 	stmmac_reset_eee_mode(priv, priv->hw);
451 	del_timer_sync(&priv->eee_ctrl_timer);
452 	priv->tx_path_in_lpi_mode = false;
453 }
454 
455 /**
456  * stmmac_eee_ctrl_timer - EEE TX SW timer.
457  * @t:  timer_list struct containing private info
458  * Description:
459  *  if there is no data transfer and we are not in the LPI state,
460  *  then the MAC transmitter can be moved to the LPI state.
461  */
462 static void stmmac_eee_ctrl_timer(struct timer_list *t)
463 {
464 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
465 
466 	stmmac_try_to_start_sw_lpi(priv);
467 }
468 
469 /**
470  * stmmac_eee_init - init EEE
471  * @priv: driver private structure
472  * @active: indicates whether EEE should be enabled.
473  * Description:
474  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
475  *  can also manage EEE, this function enables the LPI state and starts the
476  *  related timer.
477  */
478 static void stmmac_eee_init(struct stmmac_priv *priv, bool active)
479 {
480 	priv->eee_active = active;
481 
482 	/* Check if MAC core supports the EEE feature. */
483 	if (!priv->dma_cap.eee) {
484 		priv->eee_enabled = false;
485 		return;
486 	}
487 
488 	mutex_lock(&priv->lock);
489 
490 	/* Check if it needs to be deactivated */
491 	if (!priv->eee_active) {
492 		if (priv->eee_enabled) {
493 			netdev_dbg(priv->dev, "disable EEE\n");
494 			priv->eee_sw_timer_en = false;
495 			stmmac_disable_hw_lpi_timer(priv);
496 			del_timer_sync(&priv->eee_ctrl_timer);
497 			stmmac_set_eee_timer(priv, priv->hw, 0,
498 					     STMMAC_DEFAULT_TWT_LS);
499 			if (priv->hw->xpcs)
500 				xpcs_config_eee(priv->hw->xpcs,
501 						priv->plat->mult_fact_100ns,
502 						false);
503 		}
504 		priv->eee_enabled = false;
505 		mutex_unlock(&priv->lock);
506 		return;
507 	}
508 
509 	if (priv->eee_active && !priv->eee_enabled) {
510 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
511 				     STMMAC_DEFAULT_TWT_LS);
512 		if (priv->hw->xpcs)
513 			xpcs_config_eee(priv->hw->xpcs,
514 					priv->plat->mult_fact_100ns,
515 					true);
516 	}
517 
518 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
519 		/* Use hardware LPI mode */
520 		del_timer_sync(&priv->eee_ctrl_timer);
521 		priv->tx_path_in_lpi_mode = false;
522 		priv->eee_sw_timer_en = false;
523 		stmmac_enable_hw_lpi_timer(priv);
524 	} else {
525 		/* Use software LPI mode */
526 		priv->eee_sw_timer_en = true;
527 		stmmac_disable_hw_lpi_timer(priv);
528 		stmmac_restart_sw_lpi_timer(priv);
529 	}
530 
531 	priv->eee_enabled = true;
532 
533 	mutex_unlock(&priv->lock);
534 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
535 }
536 
537 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
538  * @priv: driver private structure
539  * @p : descriptor pointer
540  * @skb : the socket buffer
541  * Description :
542  * This function will read the timestamp from the descriptor, pass it to the
543  * stack and also perform some sanity checks.
544  */
545 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
546 				   struct dma_desc *p, struct sk_buff *skb)
547 {
548 	struct skb_shared_hwtstamps shhwtstamp;
549 	bool found = false;
550 	u64 ns = 0;
551 
552 	if (!priv->hwts_tx_en)
553 		return;
554 
555 	/* exit if skb doesn't support hw tstamp */
556 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
557 		return;
558 
559 	/* check tx tstamp status */
560 	if (stmmac_get_tx_timestamp_status(priv, p)) {
561 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
562 		found = true;
563 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
564 		found = true;
565 	}
566 
567 	if (found) {
568 		ns -= priv->plat->cdc_error_adj;
569 
570 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
571 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
572 
573 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
574 		/* pass tstamp to stack */
575 		skb_tstamp_tx(skb, &shhwtstamp);
576 	}
577 }
578 
579 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
580  * @priv: driver private structure
581  * @p : descriptor pointer
582  * @np : next descriptor pointer
583  * @skb : the socket buffer
584  * Description :
585  * This function will read received packet's timestamp from the descriptor
586  * and pass it to the stack. It also performs some sanity checks.
587  */
588 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
589 				   struct dma_desc *np, struct sk_buff *skb)
590 {
591 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
592 	struct dma_desc *desc = p;
593 	u64 ns = 0;
594 
595 	if (!priv->hwts_rx_en)
596 		return;
597 	/* For GMAC4, the valid timestamp is from CTX next desc. */
598 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
599 		desc = np;
600 
601 	/* Check if timestamp is available */
602 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
603 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
604 
605 		ns -= priv->plat->cdc_error_adj;
606 
607 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
608 		shhwtstamp = skb_hwtstamps(skb);
609 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
610 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
611 	} else {
612 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
613 	}
614 }
615 
616 /**
617  *  stmmac_hwtstamp_set - control hardware timestamping.
618  *  @dev: device pointer.
619  *  @ifr: An IOCTL specific structure, that can contain a pointer to
620  *  a proprietary structure used to pass information to the driver.
621  *  Description:
622  *  This function configures the MAC to enable/disable both outgoing (TX)
623  *  and incoming (RX) packet timestamping based on user input.
624  *  Return Value:
625  *  0 on success and an appropriate -ve integer on failure.
626  */
627 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
628 {
629 	struct stmmac_priv *priv = netdev_priv(dev);
630 	struct hwtstamp_config config;
631 	u32 ptp_v2 = 0;
632 	u32 tstamp_all = 0;
633 	u32 ptp_over_ipv4_udp = 0;
634 	u32 ptp_over_ipv6_udp = 0;
635 	u32 ptp_over_ethernet = 0;
636 	u32 snap_type_sel = 0;
637 	u32 ts_master_en = 0;
638 	u32 ts_event_en = 0;
639 
640 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
641 		netdev_alert(priv->dev, "No support for HW time stamping\n");
642 		priv->hwts_tx_en = 0;
643 		priv->hwts_rx_en = 0;
644 
645 		return -EOPNOTSUPP;
646 	}
647 
648 	if (copy_from_user(&config, ifr->ifr_data,
649 			   sizeof(config)))
650 		return -EFAULT;
651 
652 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
653 		   __func__, config.flags, config.tx_type, config.rx_filter);
654 
655 	if (config.tx_type != HWTSTAMP_TX_OFF &&
656 	    config.tx_type != HWTSTAMP_TX_ON)
657 		return -ERANGE;
658 
659 	if (priv->adv_ts) {
660 		switch (config.rx_filter) {
661 		case HWTSTAMP_FILTER_NONE:
662 			/* do not timestamp any incoming packet */
663 			config.rx_filter = HWTSTAMP_FILTER_NONE;
664 			break;
665 
666 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
667 			/* PTP v1, UDP, any kind of event packet */
668 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
669 			/* 'xmac' hardware can support Sync, Pdelay_Req and
670 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
671 			 * This leaves Delay_Req timestamps out.
672 			 * Enable all events *and* general purpose message
673 			 * timestamping
674 			 */
675 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
676 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
677 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
678 			break;
679 
680 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
681 			/* PTP v1, UDP, Sync packet */
682 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
683 			/* take time stamp for SYNC messages only */
684 			ts_event_en = PTP_TCR_TSEVNTENA;
685 
686 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
687 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
688 			break;
689 
690 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
691 			/* PTP v1, UDP, Delay_req packet */
692 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
693 			/* take time stamp for Delay_Req messages only */
694 			ts_master_en = PTP_TCR_TSMSTRENA;
695 			ts_event_en = PTP_TCR_TSEVNTENA;
696 
697 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
698 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
699 			break;
700 
701 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
702 			/* PTP v2, UDP, any kind of event packet */
703 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
704 			ptp_v2 = PTP_TCR_TSVER2ENA;
705 			/* take time stamp for all event messages */
706 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
707 
708 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
709 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
710 			break;
711 
712 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
713 			/* PTP v2, UDP, Sync packet */
714 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
715 			ptp_v2 = PTP_TCR_TSVER2ENA;
716 			/* take time stamp for SYNC messages only */
717 			ts_event_en = PTP_TCR_TSEVNTENA;
718 
719 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
720 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
721 			break;
722 
723 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
724 			/* PTP v2, UDP, Delay_req packet */
725 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
726 			ptp_v2 = PTP_TCR_TSVER2ENA;
727 			/* take time stamp for Delay_Req messages only */
728 			ts_master_en = PTP_TCR_TSMSTRENA;
729 			ts_event_en = PTP_TCR_TSEVNTENA;
730 
731 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
732 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
733 			break;
734 
735 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
736 			/* PTP v2/802.1AS, any layer, any kind of event packet */
737 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
738 			ptp_v2 = PTP_TCR_TSVER2ENA;
739 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
740 			if (priv->synopsys_id < DWMAC_CORE_4_10)
741 				ts_event_en = PTP_TCR_TSEVNTENA;
742 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
743 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
744 			ptp_over_ethernet = PTP_TCR_TSIPENA;
745 			break;
746 
747 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
748 			/* PTP v2/802.1AS, any layer, Sync packet */
749 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
750 			ptp_v2 = PTP_TCR_TSVER2ENA;
751 			/* take time stamp for SYNC messages only */
752 			ts_event_en = PTP_TCR_TSEVNTENA;
753 
754 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
755 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
756 			ptp_over_ethernet = PTP_TCR_TSIPENA;
757 			break;
758 
759 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
760 			/* PTP v2/802.1AS, any layer, Delay_req packet */
761 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
762 			ptp_v2 = PTP_TCR_TSVER2ENA;
763 			/* take time stamp for Delay_Req messages only */
764 			ts_master_en = PTP_TCR_TSMSTRENA;
765 			ts_event_en = PTP_TCR_TSEVNTENA;
766 
767 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
768 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
769 			ptp_over_ethernet = PTP_TCR_TSIPENA;
770 			break;
771 
772 		case HWTSTAMP_FILTER_NTP_ALL:
773 		case HWTSTAMP_FILTER_ALL:
774 			/* time stamp any incoming packet */
775 			config.rx_filter = HWTSTAMP_FILTER_ALL;
776 			tstamp_all = PTP_TCR_TSENALL;
777 			break;
778 
779 		default:
780 			return -ERANGE;
781 		}
782 	} else {
783 		switch (config.rx_filter) {
784 		case HWTSTAMP_FILTER_NONE:
785 			config.rx_filter = HWTSTAMP_FILTER_NONE;
786 			break;
787 		default:
788 			/* PTP v1, UDP, any kind of event packet */
789 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
790 			break;
791 		}
792 	}
793 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
794 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
795 
796 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
797 
798 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
799 		priv->systime_flags |= tstamp_all | ptp_v2 |
800 				       ptp_over_ethernet | ptp_over_ipv6_udp |
801 				       ptp_over_ipv4_udp | ts_event_en |
802 				       ts_master_en | snap_type_sel;
803 	}
804 
805 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
806 
807 	memcpy(&priv->tstamp_config, &config, sizeof(config));
808 
809 	return copy_to_user(ifr->ifr_data, &config,
810 			    sizeof(config)) ? -EFAULT : 0;
811 }
812 
813 /**
814  *  stmmac_hwtstamp_get - read hardware timestamping.
815  *  @dev: device pointer.
816  *  @ifr: An IOCTL specific structure, that can contain a pointer to
817  *  a proprietary structure used to pass information to the driver.
818  *  Description:
819  *  This function obtains the current hardware timestamping settings
820  *  as requested.
821  */
822 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
823 {
824 	struct stmmac_priv *priv = netdev_priv(dev);
825 	struct hwtstamp_config *config = &priv->tstamp_config;
826 
827 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
828 		return -EOPNOTSUPP;
829 
830 	return copy_to_user(ifr->ifr_data, config,
831 			    sizeof(*config)) ? -EFAULT : 0;
832 }
833 
834 /**
835  * stmmac_init_tstamp_counter - init hardware timestamping counter
836  * @priv: driver private structure
837  * @systime_flags: timestamping flags
838  * Description:
839  * Initialize hardware counter for packet timestamping.
840  * This is valid as long as the interface is open and not suspended.
841  * It will be rerun after resuming from suspend, in which case the timestamping
842  * flags updated by stmmac_hwtstamp_set() also need to be restored.
843  */
844 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
845 {
846 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
847 	struct timespec64 now;
848 	u32 sec_inc = 0;
849 	u64 temp = 0;
850 
851 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
852 		return -EOPNOTSUPP;
853 
854 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
855 	priv->systime_flags = systime_flags;
856 
857 	/* program Sub Second Increment reg */
858 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
859 					   priv->plat->clk_ptp_rate,
860 					   xmac, &sec_inc);
861 	temp = div_u64(1000000000ULL, sec_inc);
862 
863 	/* Store sub second increment for later use */
864 	priv->sub_second_inc = sec_inc;
865 
866 	/* calculate the default addend value:
867 	 * the formula is:
868 	 * addend = ((1e9ns / sec_inc) * 2^32) / clk_ptp_rate
869 	 * where 1e9ns / sec_inc is the required increment frequency
870 	 */
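	/* Purely illustrative example (hypothetical numbers): with
	 * sec_inc = 20ns and clk_ptp_rate = 100MHz, the addend becomes
	 * 2^32 * (1e9 / 20) / 1e8 = 2^31, i.e. the 32-bit accumulator wraps
	 * (advancing the counter by sec_inc) every other clk_ptp cycle.
	 */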
871 	temp = (u64)(temp << 32);
872 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
873 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
874 
875 	/* initialize system time */
876 	ktime_get_real_ts64(&now);
877 
878 	/* lower 32 bits of tv_sec are safe until y2106 */
879 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
880 
881 	return 0;
882 }
883 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
884 
885 /**
886  * stmmac_init_ptp - init PTP
887  * @priv: driver private structure
888  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
889  * This is done by looking at the HW cap. register.
890  * This function also registers the ptp driver.
891  */
892 static int stmmac_init_ptp(struct stmmac_priv *priv)
893 {
894 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
895 	int ret;
896 
897 	if (priv->plat->ptp_clk_freq_config)
898 		priv->plat->ptp_clk_freq_config(priv);
899 
900 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
901 	if (ret)
902 		return ret;
903 
904 	priv->adv_ts = 0;
905 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
906 	if (xmac && priv->dma_cap.atime_stamp)
907 		priv->adv_ts = 1;
908 	/* Dwmac 3.x core with extend_desc can support adv_ts */
909 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
910 		priv->adv_ts = 1;
911 
912 	if (priv->dma_cap.time_stamp)
913 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
914 
915 	if (priv->adv_ts)
916 		netdev_info(priv->dev,
917 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
918 
919 	priv->hwts_tx_en = 0;
920 	priv->hwts_rx_en = 0;
921 
922 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
923 		stmmac_hwtstamp_correct_latency(priv, priv);
924 
925 	return 0;
926 }
927 
928 static void stmmac_release_ptp(struct stmmac_priv *priv)
929 {
930 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
931 	stmmac_ptp_unregister(priv);
932 }
933 
934 /**
935  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
936  *  @priv: driver private structure
937  *  @duplex: duplex passed to the next function
938  *  Description: It is used for configuring the flow control in all queues
939  */
940 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
941 {
942 	u32 tx_cnt = priv->plat->tx_queues_to_use;
943 
944 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
945 			priv->pause, tx_cnt);
946 }
947 
948 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
949 					 phy_interface_t interface)
950 {
951 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
952 
953 	/* Refresh the MAC-specific capabilities */
954 	stmmac_mac_update_caps(priv);
955 
956 	config->mac_capabilities = priv->hw->link.caps;
957 
958 	if (priv->plat->max_speed)
959 		phylink_limit_mac_speed(config, priv->plat->max_speed);
960 
961 	return config->mac_capabilities;
962 }
963 
964 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
965 						 phy_interface_t interface)
966 {
967 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
968 	struct phylink_pcs *pcs;
969 
970 	if (priv->plat->select_pcs) {
971 		pcs = priv->plat->select_pcs(priv, interface);
972 		if (!IS_ERR(pcs))
973 			return pcs;
974 	}
975 
976 	return NULL;
977 }
978 
979 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
980 			      const struct phylink_link_state *state)
981 {
982 	/* Nothing to do, xpcs_config() handles everything */
983 }
984 
985 static void stmmac_mac_link_down(struct phylink_config *config,
986 				 unsigned int mode, phy_interface_t interface)
987 {
988 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
989 
990 	stmmac_mac_set(priv, priv->ioaddr, false);
991 	stmmac_eee_init(priv, false);
992 	stmmac_set_eee_pls(priv, priv->hw, false);
993 
994 	if (stmmac_fpe_supported(priv))
995 		stmmac_fpe_link_state_handle(priv, false);
996 }
997 
998 static void stmmac_mac_link_up(struct phylink_config *config,
999 			       struct phy_device *phy,
1000 			       unsigned int mode, phy_interface_t interface,
1001 			       int speed, int duplex,
1002 			       bool tx_pause, bool rx_pause)
1003 {
1004 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1005 	u32 old_ctrl, ctrl;
1006 
1007 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1008 	    priv->plat->serdes_powerup)
1009 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1010 
1011 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1012 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1013 
1014 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1015 		switch (speed) {
1016 		case SPEED_10000:
1017 			ctrl |= priv->hw->link.xgmii.speed10000;
1018 			break;
1019 		case SPEED_5000:
1020 			ctrl |= priv->hw->link.xgmii.speed5000;
1021 			break;
1022 		case SPEED_2500:
1023 			ctrl |= priv->hw->link.xgmii.speed2500;
1024 			break;
1025 		default:
1026 			return;
1027 		}
1028 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1029 		switch (speed) {
1030 		case SPEED_100000:
1031 			ctrl |= priv->hw->link.xlgmii.speed100000;
1032 			break;
1033 		case SPEED_50000:
1034 			ctrl |= priv->hw->link.xlgmii.speed50000;
1035 			break;
1036 		case SPEED_40000:
1037 			ctrl |= priv->hw->link.xlgmii.speed40000;
1038 			break;
1039 		case SPEED_25000:
1040 			ctrl |= priv->hw->link.xlgmii.speed25000;
1041 			break;
1042 		case SPEED_10000:
1043 			ctrl |= priv->hw->link.xgmii.speed10000;
1044 			break;
1045 		case SPEED_2500:
1046 			ctrl |= priv->hw->link.speed2500;
1047 			break;
1048 		case SPEED_1000:
1049 			ctrl |= priv->hw->link.speed1000;
1050 			break;
1051 		default:
1052 			return;
1053 		}
1054 	} else {
1055 		switch (speed) {
1056 		case SPEED_2500:
1057 			ctrl |= priv->hw->link.speed2500;
1058 			break;
1059 		case SPEED_1000:
1060 			ctrl |= priv->hw->link.speed1000;
1061 			break;
1062 		case SPEED_100:
1063 			ctrl |= priv->hw->link.speed100;
1064 			break;
1065 		case SPEED_10:
1066 			ctrl |= priv->hw->link.speed10;
1067 			break;
1068 		default:
1069 			return;
1070 		}
1071 	}
1072 
1073 	priv->speed = speed;
1074 
1075 	if (priv->plat->fix_mac_speed)
1076 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1077 
1078 	if (!duplex)
1079 		ctrl &= ~priv->hw->link.duplex;
1080 	else
1081 		ctrl |= priv->hw->link.duplex;
1082 
1083 	/* Flow Control operation */
1084 	if (rx_pause && tx_pause)
1085 		priv->flow_ctrl = FLOW_AUTO;
1086 	else if (rx_pause && !tx_pause)
1087 		priv->flow_ctrl = FLOW_RX;
1088 	else if (!rx_pause && tx_pause)
1089 		priv->flow_ctrl = FLOW_TX;
1090 	else
1091 		priv->flow_ctrl = FLOW_OFF;
1092 
1093 	stmmac_mac_flow_ctrl(priv, duplex);
1094 
1095 	if (ctrl != old_ctrl)
1096 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1097 
1098 	stmmac_mac_set(priv, priv->ioaddr, true);
1099 	if (phy && priv->dma_cap.eee) {
1100 		phy_eee_rx_clock_stop(phy, !(priv->plat->flags &
1101 					     STMMAC_FLAG_RX_CLK_RUNS_IN_LPI));
1102 		priv->tx_lpi_timer = phy->eee_cfg.tx_lpi_timer;
1103 		stmmac_eee_init(priv, phy->enable_tx_lpi);
1104 		stmmac_set_eee_pls(priv, priv->hw, true);
1105 	}
1106 
1107 	if (stmmac_fpe_supported(priv))
1108 		stmmac_fpe_link_state_handle(priv, true);
1109 
1110 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1111 		stmmac_hwtstamp_correct_latency(priv, priv);
1112 }
1113 
1114 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1115 	.mac_get_caps = stmmac_mac_get_caps,
1116 	.mac_select_pcs = stmmac_mac_select_pcs,
1117 	.mac_config = stmmac_mac_config,
1118 	.mac_link_down = stmmac_mac_link_down,
1119 	.mac_link_up = stmmac_mac_link_up,
1120 };
1121 
1122 /**
1123  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1124  * @priv: driver private structure
1125  * Description: this is to verify if the HW supports the PCS.
1126  * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1127  * configured for the TBI, RTBI, or SGMII PHY interface.
1128  */
1129 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1130 {
1131 	int interface = priv->plat->mac_interface;
1132 
1133 	if (priv->dma_cap.pcs) {
1134 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1135 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1136 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1137 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1138 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1139 			priv->hw->pcs = STMMAC_PCS_RGMII;
1140 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1141 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1142 			priv->hw->pcs = STMMAC_PCS_SGMII;
1143 		}
1144 	}
1145 }
1146 
1147 /**
1148  * stmmac_init_phy - PHY initialization
1149  * @dev: net device structure
1150  * Description: it initializes the driver's PHY state, and attaches the PHY
1151  * to the mac driver.
1152  *  Return value:
1153  *  0 on success
1154  */
1155 static int stmmac_init_phy(struct net_device *dev)
1156 {
1157 	struct stmmac_priv *priv = netdev_priv(dev);
1158 	struct fwnode_handle *phy_fwnode;
1159 	struct fwnode_handle *fwnode;
1160 	int ret;
1161 
1162 	if (!phylink_expects_phy(priv->phylink))
1163 		return 0;
1164 
1165 	fwnode = priv->plat->port_node;
1166 	if (!fwnode)
1167 		fwnode = dev_fwnode(priv->device);
1168 
1169 	if (fwnode)
1170 		phy_fwnode = fwnode_get_phy_node(fwnode);
1171 	else
1172 		phy_fwnode = NULL;
1173 
1174 	/* Some DT bindings do not set up the PHY handle. Let's try to
1175 	 * manually parse it
1176 	 */
1177 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1178 		int addr = priv->plat->phy_addr;
1179 		struct phy_device *phydev;
1180 
1181 		if (addr < 0) {
1182 			netdev_err(priv->dev, "no phy found\n");
1183 			return -ENODEV;
1184 		}
1185 
1186 		phydev = mdiobus_get_phy(priv->mii, addr);
1187 		if (!phydev) {
1188 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1189 			return -ENODEV;
1190 		}
1191 
1192 		if (priv->dma_cap.eee)
1193 			phy_support_eee(phydev);
1194 
1195 		ret = phylink_connect_phy(priv->phylink, phydev);
1196 	} else {
1197 		fwnode_handle_put(phy_fwnode);
1198 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1199 	}
1200 
1201 	if (ret == 0) {
1202 		struct ethtool_keee eee;
1203 
1204 		/* Configure phylib's copy of the LPI timer */
1205 		if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1206 			eee.tx_lpi_timer = priv->tx_lpi_timer;
1207 			phylink_ethtool_set_eee(priv->phylink, &eee);
1208 		}
1209 	}
1210 
1211 	if (!priv->plat->pmt) {
1212 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1213 
1214 		phylink_ethtool_get_wol(priv->phylink, &wol);
1215 		device_set_wakeup_capable(priv->device, !!wol.supported);
1216 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1217 	}
1218 
1219 	return ret;
1220 }
1221 
1222 static int stmmac_phy_setup(struct stmmac_priv *priv)
1223 {
1224 	struct stmmac_mdio_bus_data *mdio_bus_data;
1225 	int mode = priv->plat->phy_interface;
1226 	struct fwnode_handle *fwnode;
1227 	struct phylink_pcs *pcs;
1228 	struct phylink *phylink;
1229 
1230 	priv->phylink_config.dev = &priv->dev->dev;
1231 	priv->phylink_config.type = PHYLINK_NETDEV;
1232 	priv->phylink_config.mac_managed_pm = true;
1233 
1234 	/* Stmmac always requires an RX clock for hardware initialization */
1235 	priv->phylink_config.mac_requires_rxc = true;
1236 
1237 	mdio_bus_data = priv->plat->mdio_bus_data;
1238 	if (mdio_bus_data)
1239 		priv->phylink_config.default_an_inband =
1240 			mdio_bus_data->default_an_inband;
1241 
1242 	/* Set the platform/firmware specified interface mode. Note, phylink
1243 	 * deals with the PHY interface mode, not the MAC interface mode.
1244 	 */
1245 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1246 
1247 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1248 	if (priv->hw->xpcs)
1249 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1250 	else
1251 		pcs = priv->hw->phylink_pcs;
1252 
1253 	if (pcs)
1254 		phy_interface_or(priv->phylink_config.supported_interfaces,
1255 				 priv->phylink_config.supported_interfaces,
1256 				 pcs->supported_interfaces);
1257 
1258 	fwnode = priv->plat->port_node;
1259 	if (!fwnode)
1260 		fwnode = dev_fwnode(priv->device);
1261 
1262 	phylink = phylink_create(&priv->phylink_config, fwnode,
1263 				 mode, &stmmac_phylink_mac_ops);
1264 	if (IS_ERR(phylink))
1265 		return PTR_ERR(phylink);
1266 
1267 	priv->phylink = phylink;
1268 	return 0;
1269 }
1270 
1271 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1272 				    struct stmmac_dma_conf *dma_conf)
1273 {
1274 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1275 	unsigned int desc_size;
1276 	void *head_rx;
1277 	u32 queue;
1278 
1279 	/* Display RX rings */
1280 	for (queue = 0; queue < rx_cnt; queue++) {
1281 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1282 
1283 		pr_info("\tRX Queue %u rings\n", queue);
1284 
1285 		if (priv->extend_desc) {
1286 			head_rx = (void *)rx_q->dma_erx;
1287 			desc_size = sizeof(struct dma_extended_desc);
1288 		} else {
1289 			head_rx = (void *)rx_q->dma_rx;
1290 			desc_size = sizeof(struct dma_desc);
1291 		}
1292 
1293 		/* Display RX ring */
1294 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1295 				    rx_q->dma_rx_phy, desc_size);
1296 	}
1297 }
1298 
1299 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1300 				    struct stmmac_dma_conf *dma_conf)
1301 {
1302 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1303 	unsigned int desc_size;
1304 	void *head_tx;
1305 	u32 queue;
1306 
1307 	/* Display TX rings */
1308 	for (queue = 0; queue < tx_cnt; queue++) {
1309 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1310 
1311 		pr_info("\tTX Queue %d rings\n", queue);
1312 
1313 		if (priv->extend_desc) {
1314 			head_tx = (void *)tx_q->dma_etx;
1315 			desc_size = sizeof(struct dma_extended_desc);
1316 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1317 			head_tx = (void *)tx_q->dma_entx;
1318 			desc_size = sizeof(struct dma_edesc);
1319 		} else {
1320 			head_tx = (void *)tx_q->dma_tx;
1321 			desc_size = sizeof(struct dma_desc);
1322 		}
1323 
1324 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1325 				    tx_q->dma_tx_phy, desc_size);
1326 	}
1327 }
1328 
1329 static void stmmac_display_rings(struct stmmac_priv *priv,
1330 				 struct stmmac_dma_conf *dma_conf)
1331 {
1332 	/* Display RX ring */
1333 	stmmac_display_rx_rings(priv, dma_conf);
1334 
1335 	/* Display TX ring */
1336 	stmmac_display_tx_rings(priv, dma_conf);
1337 }
1338 
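/* Headroom reserved in front of each RX buffer: XDP programs need
 * XDP_PACKET_HEADROOM to prepend data, otherwise the usual NET_SKB_PAD
 * is sufficient.
 */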
1339 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1340 {
1341 	if (stmmac_xdp_is_enabled(priv))
1342 		return XDP_PACKET_HEADROOM;
1343 
1344 	return NET_SKB_PAD;
1345 }
1346 
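/* Pick a DMA buffer size large enough for the given MTU, rounding up to
 * the next supported bucket (2K/4K/8K/16K) and falling back to
 * DEFAULT_BUFSIZE for standard sized frames.
 */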
1347 static int stmmac_set_bfsize(int mtu, int bufsize)
1348 {
1349 	int ret = bufsize;
1350 
1351 	if (mtu >= BUF_SIZE_8KiB)
1352 		ret = BUF_SIZE_16KiB;
1353 	else if (mtu >= BUF_SIZE_4KiB)
1354 		ret = BUF_SIZE_8KiB;
1355 	else if (mtu >= BUF_SIZE_2KiB)
1356 		ret = BUF_SIZE_4KiB;
1357 	else if (mtu > DEFAULT_BUFSIZE)
1358 		ret = BUF_SIZE_2KiB;
1359 	else
1360 		ret = DEFAULT_BUFSIZE;
1361 
1362 	return ret;
1363 }
1364 
1365 /**
1366  * stmmac_clear_rx_descriptors - clear RX descriptors
1367  * @priv: driver private structure
1368  * @dma_conf: structure to take the dma data
1369  * @queue: RX queue index
1370  * Description: this function is called to clear the RX descriptors
1371  * whether basic or extended descriptors are in use.
1372  */
1373 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1374 					struct stmmac_dma_conf *dma_conf,
1375 					u32 queue)
1376 {
1377 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1378 	int i;
1379 
1380 	/* Clear the RX descriptors */
1381 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1382 		if (priv->extend_desc)
1383 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1384 					priv->use_riwt, priv->mode,
1385 					(i == dma_conf->dma_rx_size - 1),
1386 					dma_conf->dma_buf_sz);
1387 		else
1388 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1389 					priv->use_riwt, priv->mode,
1390 					(i == dma_conf->dma_rx_size - 1),
1391 					dma_conf->dma_buf_sz);
1392 }
1393 
1394 /**
1395  * stmmac_clear_tx_descriptors - clear tx descriptors
1396  * @priv: driver private structure
1397  * @dma_conf: structure to take the dma data
1398  * @queue: TX queue index.
1399  * Description: this function is called to clear the TX descriptors
1400  * whether basic or extended descriptors are in use.
1401  */
1402 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1403 					struct stmmac_dma_conf *dma_conf,
1404 					u32 queue)
1405 {
1406 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1407 	int i;
1408 
1409 	/* Clear the TX descriptors */
1410 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1411 		int last = (i == (dma_conf->dma_tx_size - 1));
1412 		struct dma_desc *p;
1413 
1414 		if (priv->extend_desc)
1415 			p = &tx_q->dma_etx[i].basic;
1416 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1417 			p = &tx_q->dma_entx[i].basic;
1418 		else
1419 			p = &tx_q->dma_tx[i];
1420 
1421 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1422 	}
1423 }
1424 
1425 /**
1426  * stmmac_clear_descriptors - clear descriptors
1427  * @priv: driver private structure
1428  * @dma_conf: structure to take the dma data
1429  * Description: this function is called to clear the TX and RX descriptors
1430  * whether basic or extended descriptors are in use.
1431  */
1432 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1433 				     struct stmmac_dma_conf *dma_conf)
1434 {
1435 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1436 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1437 	u32 queue;
1438 
1439 	/* Clear the RX descriptors */
1440 	for (queue = 0; queue < rx_queue_cnt; queue++)
1441 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1442 
1443 	/* Clear the TX descriptors */
1444 	for (queue = 0; queue < tx_queue_cnt; queue++)
1445 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1446 }
1447 
1448 /**
1449  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1450  * @priv: driver private structure
1451  * @dma_conf: structure to take the dma data
1452  * @p: descriptor pointer
1453  * @i: descriptor index
1454  * @flags: gfp flag
1455  * @queue: RX queue index
1456  * Description: this function is called to allocate a receive buffer, perform
1457  * the DMA mapping and init the descriptor.
1458  */
1459 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1460 				  struct stmmac_dma_conf *dma_conf,
1461 				  struct dma_desc *p,
1462 				  int i, gfp_t flags, u32 queue)
1463 {
1464 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1465 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1466 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1467 
1468 	if (priv->dma_cap.host_dma_width <= 32)
1469 		gfp |= GFP_DMA32;
1470 
1471 	if (!buf->page) {
1472 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1473 		if (!buf->page)
1474 			return -ENOMEM;
1475 		buf->page_offset = stmmac_rx_offset(priv);
1476 	}
1477 
1478 	if (priv->sph && !buf->sec_page) {
1479 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1480 		if (!buf->sec_page)
1481 			return -ENOMEM;
1482 
1483 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1484 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1485 	} else {
1486 		buf->sec_page = NULL;
1487 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1488 	}
1489 
1490 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1491 
1492 	stmmac_set_desc_addr(priv, p, buf->addr);
1493 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1494 		stmmac_init_desc3(priv, p);
1495 
1496 	return 0;
1497 }
1498 
1499 /**
1500  * stmmac_free_rx_buffer - free an RX DMA buffer
1501  * @priv: private structure
1502  * @rx_q: RX queue
1503  * @i: buffer index.
1504  */
1505 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1506 				  struct stmmac_rx_queue *rx_q,
1507 				  int i)
1508 {
1509 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1510 
1511 	if (buf->page)
1512 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1513 	buf->page = NULL;
1514 
1515 	if (buf->sec_page)
1516 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1517 	buf->sec_page = NULL;
1518 }
1519 
1520 /**
1521  * stmmac_free_tx_buffer - free a TX DMA buffer
1522  * @priv: private structure
1523  * @dma_conf: structure to take the dma data
1524  * @queue: TX queue index
1525  * @i: buffer index.
1526  */
1527 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1528 				  struct stmmac_dma_conf *dma_conf,
1529 				  u32 queue, int i)
1530 {
1531 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1532 
1533 	if (tx_q->tx_skbuff_dma[i].buf &&
1534 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1535 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1536 			dma_unmap_page(priv->device,
1537 				       tx_q->tx_skbuff_dma[i].buf,
1538 				       tx_q->tx_skbuff_dma[i].len,
1539 				       DMA_TO_DEVICE);
1540 		else
1541 			dma_unmap_single(priv->device,
1542 					 tx_q->tx_skbuff_dma[i].buf,
1543 					 tx_q->tx_skbuff_dma[i].len,
1544 					 DMA_TO_DEVICE);
1545 	}
1546 
1547 	if (tx_q->xdpf[i] &&
1548 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1549 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1550 		xdp_return_frame(tx_q->xdpf[i]);
1551 		tx_q->xdpf[i] = NULL;
1552 	}
1553 
1554 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1555 		tx_q->xsk_frames_done++;
1556 
1557 	if (tx_q->tx_skbuff[i] &&
1558 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1559 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1560 		tx_q->tx_skbuff[i] = NULL;
1561 	}
1562 
1563 	tx_q->tx_skbuff_dma[i].buf = 0;
1564 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1565 }
1566 
1567 /**
1568  * dma_free_rx_skbufs - free RX dma buffers
1569  * @priv: private structure
1570  * @dma_conf: structure to take the dma data
1571  * @queue: RX queue index
1572  */
1573 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1574 			       struct stmmac_dma_conf *dma_conf,
1575 			       u32 queue)
1576 {
1577 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1578 	int i;
1579 
1580 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1581 		stmmac_free_rx_buffer(priv, rx_q, i);
1582 }
1583 
1584 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1585 				   struct stmmac_dma_conf *dma_conf,
1586 				   u32 queue, gfp_t flags)
1587 {
1588 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1589 	int i;
1590 
1591 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1592 		struct dma_desc *p;
1593 		int ret;
1594 
1595 		if (priv->extend_desc)
1596 			p = &((rx_q->dma_erx + i)->basic);
1597 		else
1598 			p = rx_q->dma_rx + i;
1599 
1600 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1601 					     queue);
1602 		if (ret)
1603 			return ret;
1604 
1605 		rx_q->buf_alloc_num++;
1606 	}
1607 
1608 	return 0;
1609 }
1610 
1611 /**
1612  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1613  * @priv: private structure
1614  * @dma_conf: structure to take the dma data
1615  * @queue: RX queue index
1616  */
1617 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1618 				struct stmmac_dma_conf *dma_conf,
1619 				u32 queue)
1620 {
1621 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1622 	int i;
1623 
1624 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1625 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1626 
1627 		if (!buf->xdp)
1628 			continue;
1629 
1630 		xsk_buff_free(buf->xdp);
1631 		buf->xdp = NULL;
1632 	}
1633 }
1634 
1635 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1636 				      struct stmmac_dma_conf *dma_conf,
1637 				      u32 queue)
1638 {
1639 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1640 	int i;
1641 
1642 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1643 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1644 	 * use this macro to make sure there are no size violations.
1645 	 */
1646 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1647 
1648 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1649 		struct stmmac_rx_buffer *buf;
1650 		dma_addr_t dma_addr;
1651 		struct dma_desc *p;
1652 
1653 		if (priv->extend_desc)
1654 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1655 		else
1656 			p = rx_q->dma_rx + i;
1657 
1658 		buf = &rx_q->buf_pool[i];
1659 
1660 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1661 		if (!buf->xdp)
1662 			return -ENOMEM;
1663 
1664 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1665 		stmmac_set_desc_addr(priv, p, dma_addr);
1666 		rx_q->buf_alloc_num++;
1667 	}
1668 
1669 	return 0;
1670 }
1671 
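/* Return the XSK buffer pool bound to @queue, or NULL when XDP or the
 * AF_XDP zero-copy path is not enabled for that queue.
 */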
1672 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1673 {
1674 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1675 		return NULL;
1676 
1677 	return xsk_get_pool_from_qid(priv->dev, queue);
1678 }
1679 
1680 /**
1681  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1682  * @priv: driver private structure
1683  * @dma_conf: structure to take the dma data
1684  * @queue: RX queue index
1685  * @flags: gfp flag.
1686  * Description: this function initializes the DMA RX descriptors
1687  * and allocates the socket buffers. It supports the chained and ring
1688  * modes.
1689  */
1690 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1691 				    struct stmmac_dma_conf *dma_conf,
1692 				    u32 queue, gfp_t flags)
1693 {
1694 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1695 	int ret;
1696 
1697 	netif_dbg(priv, probe, priv->dev,
1698 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1699 		  (u32)rx_q->dma_rx_phy);
1700 
1701 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1702 
1703 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1704 
1705 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1706 
1707 	if (rx_q->xsk_pool) {
1708 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1709 						   MEM_TYPE_XSK_BUFF_POOL,
1710 						   NULL));
1711 		netdev_info(priv->dev,
1712 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1713 			    rx_q->queue_index);
1714 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1715 	} else {
1716 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1717 						   MEM_TYPE_PAGE_POOL,
1718 						   rx_q->page_pool));
1719 		netdev_info(priv->dev,
1720 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1721 			    rx_q->queue_index);
1722 	}
1723 
1724 	if (rx_q->xsk_pool) {
1725 		/* RX XDP ZC buffer pool may not be populated, e.g.
1726 		 * xdpsock TX-only.
1727 		 */
1728 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1729 	} else {
1730 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1731 		if (ret < 0)
1732 			return -ENOMEM;
1733 	}
1734 
1735 	/* Setup the chained descriptor addresses */
1736 	if (priv->mode == STMMAC_CHAIN_MODE) {
1737 		if (priv->extend_desc)
1738 			stmmac_mode_init(priv, rx_q->dma_erx,
1739 					 rx_q->dma_rx_phy,
1740 					 dma_conf->dma_rx_size, 1);
1741 		else
1742 			stmmac_mode_init(priv, rx_q->dma_rx,
1743 					 rx_q->dma_rx_phy,
1744 					 dma_conf->dma_rx_size, 0);
1745 	}
1746 
1747 	return 0;
1748 }
1749 
1750 static int init_dma_rx_desc_rings(struct net_device *dev,
1751 				  struct stmmac_dma_conf *dma_conf,
1752 				  gfp_t flags)
1753 {
1754 	struct stmmac_priv *priv = netdev_priv(dev);
1755 	u32 rx_count = priv->plat->rx_queues_to_use;
1756 	int queue;
1757 	int ret;
1758 
1759 	/* RX INITIALIZATION */
1760 	netif_dbg(priv, probe, priv->dev,
1761 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1762 
1763 	for (queue = 0; queue < rx_count; queue++) {
1764 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1765 		if (ret)
1766 			goto err_init_rx_buffers;
1767 	}
1768 
1769 	return 0;
1770 
1771 err_init_rx_buffers:
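	/* Unwind: release the buffers of the queue that failed above and of
	 * all previously initialized queues.
	 */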
1772 	while (queue >= 0) {
1773 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1774 
1775 		if (rx_q->xsk_pool)
1776 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1777 		else
1778 			dma_free_rx_skbufs(priv, dma_conf, queue);
1779 
1780 		rx_q->buf_alloc_num = 0;
1781 		rx_q->xsk_pool = NULL;
1782 
1783 		queue--;
1784 	}
1785 
1786 	return ret;
1787 }
1788 
1789 /**
1790  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1791  * @priv: driver private structure
1792  * @dma_conf: structure to take the dma data
1793  * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors and
 * clears the per-entry TX buffer bookkeeping. It supports the chained
 * and ring modes.
1797  */
1798 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1799 				    struct stmmac_dma_conf *dma_conf,
1800 				    u32 queue)
1801 {
1802 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1803 	int i;
1804 
1805 	netif_dbg(priv, probe, priv->dev,
1806 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1807 		  (u32)tx_q->dma_tx_phy);
1808 
1809 	/* Setup the chained descriptor addresses */
1810 	if (priv->mode == STMMAC_CHAIN_MODE) {
1811 		if (priv->extend_desc)
1812 			stmmac_mode_init(priv, tx_q->dma_etx,
1813 					 tx_q->dma_tx_phy,
1814 					 dma_conf->dma_tx_size, 1);
1815 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1816 			stmmac_mode_init(priv, tx_q->dma_tx,
1817 					 tx_q->dma_tx_phy,
1818 					 dma_conf->dma_tx_size, 0);
1819 	}
1820 
1821 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1822 
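	/* Clear every descriptor and reset the per-entry bookkeeping so the
	 * queue starts from a known empty state.
	 */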
1823 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1824 		struct dma_desc *p;
1825 
1826 		if (priv->extend_desc)
1827 			p = &((tx_q->dma_etx + i)->basic);
1828 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1829 			p = &((tx_q->dma_entx + i)->basic);
1830 		else
1831 			p = tx_q->dma_tx + i;
1832 
1833 		stmmac_clear_desc(priv, p);
1834 
1835 		tx_q->tx_skbuff_dma[i].buf = 0;
1836 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1837 		tx_q->tx_skbuff_dma[i].len = 0;
1838 		tx_q->tx_skbuff_dma[i].last_segment = false;
1839 		tx_q->tx_skbuff[i] = NULL;
1840 	}
1841 
1842 	return 0;
1843 }
1844 
1845 static int init_dma_tx_desc_rings(struct net_device *dev,
1846 				  struct stmmac_dma_conf *dma_conf)
1847 {
1848 	struct stmmac_priv *priv = netdev_priv(dev);
1849 	u32 tx_queue_cnt;
1850 	u32 queue;
1851 
1852 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1853 
1854 	for (queue = 0; queue < tx_queue_cnt; queue++)
1855 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1856 
1857 	return 0;
1858 }
1859 
1860 /**
1861  * init_dma_desc_rings - init the RX/TX descriptor rings
1862  * @dev: net device structure
1863  * @dma_conf: structure to take the dma data
1864  * @flags: gfp flag.
1865  * Description: this function initializes the DMA RX/TX descriptors
1866  * and allocates the socket buffers. It supports the chained and ring
1867  * modes.
1868  */
1869 static int init_dma_desc_rings(struct net_device *dev,
1870 			       struct stmmac_dma_conf *dma_conf,
1871 			       gfp_t flags)
1872 {
1873 	struct stmmac_priv *priv = netdev_priv(dev);
1874 	int ret;
1875 
1876 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1877 	if (ret)
1878 		return ret;
1879 
1880 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1881 
1882 	stmmac_clear_descriptors(priv, dma_conf);
1883 
1884 	if (netif_msg_hw(priv))
1885 		stmmac_display_rings(priv, dma_conf);
1886 
1887 	return ret;
1888 }
1889 
1890 /**
1891  * dma_free_tx_skbufs - free TX dma buffers
1892  * @priv: private structure
1893  * @dma_conf: structure to take the dma data
1894  * @queue: TX queue index
1895  */
1896 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1897 			       struct stmmac_dma_conf *dma_conf,
1898 			       u32 queue)
1899 {
1900 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1901 	int i;
1902 
1903 	tx_q->xsk_frames_done = 0;
1904 
1905 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1906 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1907 
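	/* If any XSK TX frames were reclaimed above, report them back to the
	 * pool so user space sees them as completed.
	 */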
1908 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1909 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1910 		tx_q->xsk_frames_done = 0;
1911 		tx_q->xsk_pool = NULL;
1912 	}
1913 }
1914 
1915 /**
1916  * stmmac_free_tx_skbufs - free TX skb buffers
1917  * @priv: private structure
1918  */
1919 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1920 {
1921 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1922 	u32 queue;
1923 
1924 	for (queue = 0; queue < tx_queue_cnt; queue++)
1925 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1926 }
1927 
1928 /**
1929  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1930  * @priv: private structure
1931  * @dma_conf: structure to take the dma data
1932  * @queue: RX queue index
1933  */
1934 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1935 					 struct stmmac_dma_conf *dma_conf,
1936 					 u32 queue)
1937 {
1938 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1939 
1940 	/* Release the DMA RX socket buffers */
1941 	if (rx_q->xsk_pool)
1942 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1943 	else
1944 		dma_free_rx_skbufs(priv, dma_conf, queue);
1945 
1946 	rx_q->buf_alloc_num = 0;
1947 	rx_q->xsk_pool = NULL;
1948 
1949 	/* Free DMA regions of consistent memory previously allocated */
1950 	if (!priv->extend_desc)
1951 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1952 				  sizeof(struct dma_desc),
1953 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1954 	else
1955 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1956 				  sizeof(struct dma_extended_desc),
1957 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1958 
1959 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1960 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1961 
1962 	kfree(rx_q->buf_pool);
1963 	if (rx_q->page_pool)
1964 		page_pool_destroy(rx_q->page_pool);
1965 }
1966 
1967 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1968 				       struct stmmac_dma_conf *dma_conf)
1969 {
1970 	u32 rx_count = priv->plat->rx_queues_to_use;
1971 	u32 queue;
1972 
1973 	/* Free RX queue resources */
1974 	for (queue = 0; queue < rx_count; queue++)
1975 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1976 }
1977 
1978 /**
1979  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1980  * @priv: private structure
1981  * @dma_conf: structure to take the dma data
1982  * @queue: TX queue index
1983  */
1984 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1985 					 struct stmmac_dma_conf *dma_conf,
1986 					 u32 queue)
1987 {
1988 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1989 	size_t size;
1990 	void *addr;
1991 
1992 	/* Release the DMA TX socket buffers */
1993 	dma_free_tx_skbufs(priv, dma_conf, queue);
1994 
1995 	if (priv->extend_desc) {
1996 		size = sizeof(struct dma_extended_desc);
1997 		addr = tx_q->dma_etx;
1998 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1999 		size = sizeof(struct dma_edesc);
2000 		addr = tx_q->dma_entx;
2001 	} else {
2002 		size = sizeof(struct dma_desc);
2003 		addr = tx_q->dma_tx;
2004 	}
2005 
2006 	size *= dma_conf->dma_tx_size;
2007 
2008 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2009 
2010 	kfree(tx_q->tx_skbuff_dma);
2011 	kfree(tx_q->tx_skbuff);
2012 }
2013 
2014 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2015 				       struct stmmac_dma_conf *dma_conf)
2016 {
2017 	u32 tx_count = priv->plat->tx_queues_to_use;
2018 	u32 queue;
2019 
2020 	/* Free TX queue resources */
2021 	for (queue = 0; queue < tx_count; queue++)
2022 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2023 }
2024 
2025 /**
2026  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2027  * @priv: private structure
2028  * @dma_conf: structure to take the dma data
2029  * @queue: RX queue index
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the RX resources for the given queue: the page
 * pool used to pre-allocate the RX buffers, the buffer bookkeeping array,
 * the coherent descriptor memory and the xdp_rxq info.
2034  */
2035 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2036 					 struct stmmac_dma_conf *dma_conf,
2037 					 u32 queue)
2038 {
2039 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2040 	struct stmmac_channel *ch = &priv->channel[queue];
2041 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2042 	struct page_pool_params pp_params = { 0 };
2043 	unsigned int dma_buf_sz_pad, num_pages;
2044 	unsigned int napi_id;
2045 	int ret;
2046 
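	/* Size each RX buffer to hold the headroom returned by
	 * stmmac_rx_offset(), the DMA buffer itself and the trailing
	 * skb_shared_info needed to build skbs/XDP frames.
	 */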
2047 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2048 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2049 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2050 
2051 	rx_q->queue_index = queue;
2052 	rx_q->priv_data = priv;
2053 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2054 
2055 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2056 	pp_params.pool_size = dma_conf->dma_rx_size;
2057 	pp_params.order = order_base_2(num_pages);
2058 	pp_params.nid = dev_to_node(priv->device);
2059 	pp_params.dev = priv->device;
2060 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2061 	pp_params.offset = stmmac_rx_offset(priv);
2062 	pp_params.max_len = dma_conf->dma_buf_sz;
2063 
2064 	rx_q->page_pool = page_pool_create(&pp_params);
2065 	if (IS_ERR(rx_q->page_pool)) {
2066 		ret = PTR_ERR(rx_q->page_pool);
2067 		rx_q->page_pool = NULL;
2068 		return ret;
2069 	}
2070 
2071 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2072 				 sizeof(*rx_q->buf_pool),
2073 				 GFP_KERNEL);
2074 	if (!rx_q->buf_pool)
2075 		return -ENOMEM;
2076 
2077 	if (priv->extend_desc) {
2078 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2079 						   dma_conf->dma_rx_size *
2080 						   sizeof(struct dma_extended_desc),
2081 						   &rx_q->dma_rx_phy,
2082 						   GFP_KERNEL);
2083 		if (!rx_q->dma_erx)
2084 			return -ENOMEM;
2085 
2086 	} else {
2087 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2088 						  dma_conf->dma_rx_size *
2089 						  sizeof(struct dma_desc),
2090 						  &rx_q->dma_rx_phy,
2091 						  GFP_KERNEL);
2092 		if (!rx_q->dma_rx)
2093 			return -ENOMEM;
2094 	}
2095 
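	/* Register the xdp_rxq against the NAPI instance that will service
	 * this queue: the combined rx/tx NAPI in AF_XDP zero-copy mode, the
	 * plain RX NAPI otherwise.
	 */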
2096 	if (stmmac_xdp_is_enabled(priv) &&
2097 	    test_bit(queue, priv->af_xdp_zc_qps))
2098 		napi_id = ch->rxtx_napi.napi_id;
2099 	else
2100 		napi_id = ch->rx_napi.napi_id;
2101 
2102 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2103 			       rx_q->queue_index,
2104 			       napi_id);
2105 	if (ret) {
2106 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2107 		return -EINVAL;
2108 	}
2109 
2110 	return 0;
2111 }
2112 
2113 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2114 				       struct stmmac_dma_conf *dma_conf)
2115 {
2116 	u32 rx_count = priv->plat->rx_queues_to_use;
2117 	u32 queue;
2118 	int ret;
2119 
2120 	/* RX queues buffers and DMA */
2121 	for (queue = 0; queue < rx_count; queue++) {
2122 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2123 		if (ret)
2124 			goto err_dma;
2125 	}
2126 
2127 	return 0;
2128 
2129 err_dma:
2130 	free_dma_rx_desc_resources(priv, dma_conf);
2131 
2132 	return ret;
2133 }
2134 
2135 /**
2136  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2137  * @priv: private structure
2138  * @dma_conf: structure to take the dma data
2139  * @queue: TX queue index
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the TX resources for the given queue: the buffer
 * bookkeeping arrays and the coherent descriptor memory.
2144  */
2145 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2146 					 struct stmmac_dma_conf *dma_conf,
2147 					 u32 queue)
2148 {
2149 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2150 	size_t size;
2151 	void *addr;
2152 
2153 	tx_q->queue_index = queue;
2154 	tx_q->priv_data = priv;
2155 
2156 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2157 				      sizeof(*tx_q->tx_skbuff_dma),
2158 				      GFP_KERNEL);
2159 	if (!tx_q->tx_skbuff_dma)
2160 		return -ENOMEM;
2161 
2162 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2163 				  sizeof(struct sk_buff *),
2164 				  GFP_KERNEL);
2165 	if (!tx_q->tx_skbuff)
2166 		return -ENOMEM;
2167 
2168 	if (priv->extend_desc)
2169 		size = sizeof(struct dma_extended_desc);
2170 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2171 		size = sizeof(struct dma_edesc);
2172 	else
2173 		size = sizeof(struct dma_desc);
2174 
2175 	size *= dma_conf->dma_tx_size;
2176 
2177 	addr = dma_alloc_coherent(priv->device, size,
2178 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2179 	if (!addr)
2180 		return -ENOMEM;
2181 
2182 	if (priv->extend_desc)
2183 		tx_q->dma_etx = addr;
2184 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2185 		tx_q->dma_entx = addr;
2186 	else
2187 		tx_q->dma_tx = addr;
2188 
2189 	return 0;
2190 }
2191 
2192 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2193 				       struct stmmac_dma_conf *dma_conf)
2194 {
2195 	u32 tx_count = priv->plat->tx_queues_to_use;
2196 	u32 queue;
2197 	int ret;
2198 
2199 	/* TX queues buffers and DMA */
2200 	for (queue = 0; queue < tx_count; queue++) {
2201 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2202 		if (ret)
2203 			goto err_dma;
2204 	}
2205 
2206 	return 0;
2207 
2208 err_dma:
2209 	free_dma_tx_desc_resources(priv, dma_conf);
2210 	return ret;
2211 }
2212 
2213 /**
2214  * alloc_dma_desc_resources - alloc TX/RX resources.
2215  * @priv: private structure
2216  * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX buffers in order to
 * allow the zero-copy mechanism.
2221  */
2222 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2223 				    struct stmmac_dma_conf *dma_conf)
2224 {
2225 	/* RX Allocation */
2226 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2227 
2228 	if (ret)
2229 		return ret;
2230 
2231 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2232 
2233 	return ret;
2234 }
2235 
2236 /**
2237  * free_dma_desc_resources - free dma desc resources
2238  * @priv: private structure
2239  * @dma_conf: structure to take the dma data
2240  */
2241 static void free_dma_desc_resources(struct stmmac_priv *priv,
2242 				    struct stmmac_dma_conf *dma_conf)
2243 {
2244 	/* Release the DMA TX socket buffers */
2245 	free_dma_tx_desc_resources(priv, dma_conf);
2246 
2247 	/* Release the DMA RX socket buffers later
2248 	 * to ensure all pending XDP_TX buffers are returned.
2249 	 */
2250 	free_dma_rx_desc_resources(priv, dma_conf);
2251 }
2252 
2253 /**
2254  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2255  *  @priv: driver private structure
2256  *  Description: It is used for enabling the rx queues in the MAC
2257  */
2258 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2259 {
2260 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2261 	int queue;
2262 	u8 mode;
2263 
2264 	for (queue = 0; queue < rx_queues_count; queue++) {
2265 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2266 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2267 	}
2268 }
2269 
2270 /**
2271  * stmmac_start_rx_dma - start RX DMA channel
2272  * @priv: driver private structure
2273  * @chan: RX channel index
2274  * Description:
 * This starts an RX DMA channel
2276  */
2277 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2278 {
2279 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2280 	stmmac_start_rx(priv, priv->ioaddr, chan);
2281 }
2282 
2283 /**
2284  * stmmac_start_tx_dma - start TX DMA channel
2285  * @priv: driver private structure
2286  * @chan: TX channel index
2287  * Description:
2288  * This starts a TX DMA channel
2289  */
2290 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2291 {
2292 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2293 	stmmac_start_tx(priv, priv->ioaddr, chan);
2294 }
2295 
2296 /**
2297  * stmmac_stop_rx_dma - stop RX DMA channel
2298  * @priv: driver private structure
2299  * @chan: RX channel index
2300  * Description:
 * This stops an RX DMA channel
2302  */
2303 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2304 {
2305 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2306 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2307 }
2308 
2309 /**
2310  * stmmac_stop_tx_dma - stop TX DMA channel
2311  * @priv: driver private structure
2312  * @chan: TX channel index
2313  * Description:
2314  * This stops a TX DMA channel
2315  */
2316 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2317 {
2318 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2319 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2320 }
2321 
2322 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2323 {
2324 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2325 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2326 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2327 	u32 chan;
2328 
2329 	for (chan = 0; chan < dma_csr_ch; chan++) {
2330 		struct stmmac_channel *ch = &priv->channel[chan];
2331 		unsigned long flags;
2332 
2333 		spin_lock_irqsave(&ch->lock, flags);
2334 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2335 		spin_unlock_irqrestore(&ch->lock, flags);
2336 	}
2337 }
2338 
2339 /**
2340  * stmmac_start_all_dma - start all RX and TX DMA channels
2341  * @priv: driver private structure
2342  * Description:
2343  * This starts all the RX and TX DMA channels
2344  */
2345 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2346 {
2347 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2348 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2349 	u32 chan = 0;
2350 
2351 	for (chan = 0; chan < rx_channels_count; chan++)
2352 		stmmac_start_rx_dma(priv, chan);
2353 
2354 	for (chan = 0; chan < tx_channels_count; chan++)
2355 		stmmac_start_tx_dma(priv, chan);
2356 }
2357 
2358 /**
2359  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2360  * @priv: driver private structure
2361  * Description:
2362  * This stops the RX and TX DMA channels
2363  */
2364 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2365 {
2366 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2367 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2368 	u32 chan = 0;
2369 
2370 	for (chan = 0; chan < rx_channels_count; chan++)
2371 		stmmac_stop_rx_dma(priv, chan);
2372 
2373 	for (chan = 0; chan < tx_channels_count; chan++)
2374 		stmmac_stop_tx_dma(priv, chan);
2375 }
2376 
2377 /**
2378  *  stmmac_dma_operation_mode - HW DMA operation mode
2379  *  @priv: driver private structure
2380  *  Description: it is used for configuring the DMA operation mode register in
2381  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2382  */
2383 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2384 {
2385 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2386 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2387 	int rxfifosz = priv->plat->rx_fifo_size;
2388 	int txfifosz = priv->plat->tx_fifo_size;
2389 	u32 txmode = 0;
2390 	u32 rxmode = 0;
2391 	u32 chan = 0;
2392 	u8 qmode = 0;
2393 
2394 	if (rxfifosz == 0)
2395 		rxfifosz = priv->dma_cap.rx_fifo_size;
2396 	if (txfifosz == 0)
2397 		txfifosz = priv->dma_cap.tx_fifo_size;
2398 
2399 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2400 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2401 		rxfifosz /= rx_channels_count;
2402 		txfifosz /= tx_channels_count;
2403 	}
2404 
2405 	if (priv->plat->force_thresh_dma_mode) {
2406 		txmode = tc;
2407 		rxmode = tc;
2408 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2409 		/*
		 * In case of GMAC, Store-and-Forward (SF) mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE actually being supported;
		 * 2) there being no buggy Jumbo frame support that requires
		 *    not inserting the csum in the TDES.
2415 		 */
2416 		txmode = SF_DMA_MODE;
2417 		rxmode = SF_DMA_MODE;
2418 		priv->xstats.threshold = SF_DMA_MODE;
2419 	} else {
2420 		txmode = tc;
2421 		rxmode = SF_DMA_MODE;
2422 	}
2423 
2424 	/* configure all channels */
2425 	for (chan = 0; chan < rx_channels_count; chan++) {
2426 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2427 		u32 buf_size;
2428 
2429 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2430 
2431 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2432 				rxfifosz, qmode);
2433 
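		/* Program the DMA buffer size from the XSK pool frame size
		 * when zero-copy is in use, from the default driver buffer
		 * size otherwise.
		 */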
2434 		if (rx_q->xsk_pool) {
2435 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2436 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2437 					      buf_size,
2438 					      chan);
2439 		} else {
2440 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2441 					      priv->dma_conf.dma_buf_sz,
2442 					      chan);
2443 		}
2444 	}
2445 
2446 	for (chan = 0; chan < tx_channels_count; chan++) {
2447 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2448 
2449 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2450 				txfifosz, qmode);
2451 	}
2452 }
2453 
2454 static void stmmac_xsk_request_timestamp(void *_priv)
2455 {
2456 	struct stmmac_metadata_request *meta_req = _priv;
2457 
2458 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2459 	*meta_req->set_ic = true;
2460 }
2461 
2462 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2463 {
2464 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2465 	struct stmmac_priv *priv = tx_compl->priv;
2466 	struct dma_desc *desc = tx_compl->desc;
2467 	bool found = false;
2468 	u64 ns = 0;
2469 
2470 	if (!priv->hwts_tx_en)
2471 		return 0;
2472 
2473 	/* check tx tstamp status */
2474 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2475 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2476 		found = true;
2477 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2478 		found = true;
2479 	}
2480 
2481 	if (found) {
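		/* Compensate for the clock domain crossing (CDC) error that
		 * the MAC introduces when capturing the timestamp.
		 */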
2482 		ns -= priv->plat->cdc_error_adj;
2483 		return ns_to_ktime(ns);
2484 	}
2485 
2486 	return 0;
2487 }
2488 
2489 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2490 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2491 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2492 };
2493 
2494 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2495 {
2496 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2497 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2498 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2499 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2500 	unsigned int entry = tx_q->cur_tx;
2501 	struct dma_desc *tx_desc = NULL;
2502 	struct xdp_desc xdp_desc;
2503 	bool work_done = true;
2504 	u32 tx_set_ic_bit = 0;
2505 
2506 	/* Avoids TX time-out as we are sharing with slow path */
2507 	txq_trans_cond_update(nq);
2508 
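	/* Never submit more frames than there are free TX descriptors. */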
2509 	budget = min(budget, stmmac_tx_avail(priv, queue));
2510 
2511 	while (budget-- > 0) {
2512 		struct stmmac_metadata_request meta_req;
2513 		struct xsk_tx_metadata *meta = NULL;
2514 		dma_addr_t dma_addr;
2515 		bool set_ic;
2516 
		/* We are sharing with the slow path, so stop XSK TX desc
		 * submission when the available TX ring space drops below
		 * the threshold.
2519 		 */
2520 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2521 		    !netif_carrier_ok(priv->dev)) {
2522 			work_done = false;
2523 			break;
2524 		}
2525 
2526 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2527 			break;
2528 
2529 		if (priv->est && priv->est->enable &&
2530 		    priv->est->max_sdu[queue] &&
2531 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2532 			priv->xstats.max_sdu_txq_drop[queue]++;
2533 			continue;
2534 		}
2535 
2536 		if (likely(priv->extend_desc))
2537 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2538 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2539 			tx_desc = &tx_q->dma_entx[entry].basic;
2540 		else
2541 			tx_desc = tx_q->dma_tx + entry;
2542 
2543 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2544 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2545 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2546 
2547 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2548 
		/* To return the XDP buffer to the XSK pool, we simply call
2550 		 * xsk_tx_completed(), so we don't need to fill up
2551 		 * 'buf' and 'xdpf'.
2552 		 */
2553 		tx_q->tx_skbuff_dma[entry].buf = 0;
2554 		tx_q->xdpf[entry] = NULL;
2555 
2556 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2557 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2558 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2559 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2560 
2561 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2562 
2563 		tx_q->tx_count_frames++;
2564 
2565 		if (!priv->tx_coal_frames[queue])
2566 			set_ic = false;
2567 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2568 			set_ic = true;
2569 		else
2570 			set_ic = false;
2571 
2572 		meta_req.priv = priv;
2573 		meta_req.tx_desc = tx_desc;
2574 		meta_req.set_ic = &set_ic;
2575 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2576 					&meta_req);
2577 		if (set_ic) {
2578 			tx_q->tx_count_frames = 0;
2579 			stmmac_set_tx_ic(priv, tx_desc);
2580 			tx_set_ic_bit++;
2581 		}
2582 
2583 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2584 				       true, priv->mode, true, true,
2585 				       xdp_desc.len);
2586 
2587 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2588 
2589 		xsk_tx_metadata_to_compl(meta,
2590 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2591 
2592 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2593 		entry = tx_q->cur_tx;
2594 	}
2595 	u64_stats_update_begin(&txq_stats->napi_syncp);
2596 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2597 	u64_stats_update_end(&txq_stats->napi_syncp);
2598 
2599 	if (tx_desc) {
2600 		stmmac_flush_tx_descriptors(priv, queue);
2601 		xsk_tx_release(pool);
2602 	}
2603 
	/* Return true if both of the following conditions are met:
	 *  a) TX budget is still available;
	 *  b) work_done == true, i.e. the XSK TX desc peek found no more
	 *     pending XSK TX for transmission.
2608 	 */
2609 	return !!budget && work_done;
2610 }
2611 
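/* On TX errors that allow it, progressively bump the DMA threshold in steps
 * of 64 while threshold mode (i.e. not Store-and-Forward) is in use.
 */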
2612 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2613 {
2614 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2615 		tc += 64;
2616 
2617 		if (priv->plat->force_thresh_dma_mode)
2618 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2619 		else
2620 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2621 						      chan);
2622 
2623 		priv->xstats.threshold = tc;
2624 	}
2625 }
2626 
2627 /**
2628  * stmmac_tx_clean - to manage the transmission completion
2629  * @priv: driver private structure
 * @budget: napi budget limiting this function's packet handling
 * @queue: TX queue index
 * @pending_packets: signal to arm the TX coal timer
 * Description: it reclaims the transmit resources after transmission completes.
 * If some packets still need to be handled, due to TX coalescing, set
 * pending_packets to true to make NAPI arm the TX coal timer.
2636  */
2637 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2638 			   bool *pending_packets)
2639 {
2640 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2641 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2642 	unsigned int bytes_compl = 0, pkts_compl = 0;
2643 	unsigned int entry, xmits = 0, count = 0;
2644 	u32 tx_packets = 0, tx_errors = 0;
2645 
2646 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2647 
2648 	tx_q->xsk_frames_done = 0;
2649 
2650 	entry = tx_q->dirty_tx;
2651 
	/* Try to clean all TX complete frames in one shot */
2653 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2654 		struct xdp_frame *xdpf;
2655 		struct sk_buff *skb;
2656 		struct dma_desc *p;
2657 		int status;
2658 
2659 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2660 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2661 			xdpf = tx_q->xdpf[entry];
2662 			skb = NULL;
2663 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2664 			xdpf = NULL;
2665 			skb = tx_q->tx_skbuff[entry];
2666 		} else {
2667 			xdpf = NULL;
2668 			skb = NULL;
2669 		}
2670 
2671 		if (priv->extend_desc)
2672 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2673 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2674 			p = &tx_q->dma_entx[entry].basic;
2675 		else
2676 			p = tx_q->dma_tx + entry;
2677 
2678 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2679 		/* Check if the descriptor is owned by the DMA */
2680 		if (unlikely(status & tx_dma_own))
2681 			break;
2682 
2683 		count++;
2684 
2685 		/* Make sure descriptor fields are read after reading
2686 		 * the own bit.
2687 		 */
2688 		dma_rmb();
2689 
		/* Just consider the last segment and ... */
2691 		if (likely(!(status & tx_not_ls))) {
2692 			/* ... verify the status error condition */
2693 			if (unlikely(status & tx_err)) {
2694 				tx_errors++;
2695 				if (unlikely(status & tx_err_bump_tc))
2696 					stmmac_bump_dma_threshold(priv, queue);
2697 			} else {
2698 				tx_packets++;
2699 			}
2700 			if (skb) {
2701 				stmmac_get_tx_hwtstamp(priv, p, skb);
2702 			} else if (tx_q->xsk_pool &&
2703 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2704 				struct stmmac_xsk_tx_complete tx_compl = {
2705 					.priv = priv,
2706 					.desc = p,
2707 				};
2708 
2709 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2710 							 &stmmac_xsk_tx_metadata_ops,
2711 							 &tx_compl);
2712 			}
2713 		}
2714 
2715 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2716 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2717 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2718 				dma_unmap_page(priv->device,
2719 					       tx_q->tx_skbuff_dma[entry].buf,
2720 					       tx_q->tx_skbuff_dma[entry].len,
2721 					       DMA_TO_DEVICE);
2722 			else
2723 				dma_unmap_single(priv->device,
2724 						 tx_q->tx_skbuff_dma[entry].buf,
2725 						 tx_q->tx_skbuff_dma[entry].len,
2726 						 DMA_TO_DEVICE);
2727 			tx_q->tx_skbuff_dma[entry].buf = 0;
2728 			tx_q->tx_skbuff_dma[entry].len = 0;
2729 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2730 		}
2731 
2732 		stmmac_clean_desc3(priv, tx_q, p);
2733 
2734 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2735 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2736 
2737 		if (xdpf &&
2738 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2739 			xdp_return_frame_rx_napi(xdpf);
2740 			tx_q->xdpf[entry] = NULL;
2741 		}
2742 
2743 		if (xdpf &&
2744 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2745 			xdp_return_frame(xdpf);
2746 			tx_q->xdpf[entry] = NULL;
2747 		}
2748 
2749 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2750 			tx_q->xsk_frames_done++;
2751 
2752 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2753 			if (likely(skb)) {
2754 				pkts_compl++;
2755 				bytes_compl += skb->len;
2756 				dev_consume_skb_any(skb);
2757 				tx_q->tx_skbuff[entry] = NULL;
2758 			}
2759 		}
2760 
2761 		stmmac_release_tx_desc(priv, p, priv->mode);
2762 
2763 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2764 	}
2765 	tx_q->dirty_tx = entry;
2766 
2767 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2768 				  pkts_compl, bytes_compl);
2769 
2770 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2771 								queue))) &&
2772 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2773 
2774 		netif_dbg(priv, tx_done, priv->dev,
2775 			  "%s: restart transmit\n", __func__);
2776 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2777 	}
2778 
2779 	if (tx_q->xsk_pool) {
2780 		bool work_done;
2781 
2782 		if (tx_q->xsk_frames_done)
2783 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2784 
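		/* Tell user space it must kick the TX path (e.g. via sendto())
		 * when it queues more descriptors.
		 */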
2785 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2786 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2787 
2788 		/* For XSK TX, we try to send as many as possible.
2789 		 * If XSK work done (XSK TX desc empty and budget still
2790 		 * available), return "budget - 1" to reenable TX IRQ.
2791 		 * Else, return "budget" to make NAPI continue polling.
2792 		 */
2793 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2794 					       STMMAC_XSK_TX_BUDGET_MAX);
2795 		if (work_done)
2796 			xmits = budget - 1;
2797 		else
2798 			xmits = budget;
2799 	}
2800 
2801 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2802 		stmmac_restart_sw_lpi_timer(priv);
2803 
	/* If some packets are still pending, signal NAPI to arm the TX coal timer */
2805 	if (tx_q->dirty_tx != tx_q->cur_tx)
2806 		*pending_packets = true;
2807 
2808 	u64_stats_update_begin(&txq_stats->napi_syncp);
2809 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2810 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2811 	u64_stats_inc(&txq_stats->napi.tx_clean);
2812 	u64_stats_update_end(&txq_stats->napi_syncp);
2813 
2814 	priv->xstats.tx_errors += tx_errors;
2815 
2816 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2817 
2818 	/* Combine decisions from TX clean and XSK TX */
2819 	return max(count, xmits);
2820 }
2821 
2822 /**
2823  * stmmac_tx_err - to manage the tx error
2824  * @priv: driver private structure
2825  * @chan: channel index
2826  * Description: it cleans the descriptors and restarts the transmission
2827  * in case of transmission errors.
2828  */
2829 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2830 {
2831 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2832 
2833 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2834 
2835 	stmmac_stop_tx_dma(priv, chan);
2836 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2837 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2838 	stmmac_reset_tx_queue(priv, chan);
2839 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2840 			    tx_q->dma_tx_phy, chan);
2841 	stmmac_start_tx_dma(priv, chan);
2842 
2843 	priv->xstats.tx_errors++;
2844 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2845 }
2846 
2847 /**
2848  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2849  *  @priv: driver private structure
2850  *  @txmode: TX operating mode
2851  *  @rxmode: RX operating mode
2852  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2855  *  mode.
2856  */
2857 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2858 					  u32 rxmode, u32 chan)
2859 {
2860 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2861 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2862 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2863 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2864 	int rxfifosz = priv->plat->rx_fifo_size;
2865 	int txfifosz = priv->plat->tx_fifo_size;
2866 
2867 	if (rxfifosz == 0)
2868 		rxfifosz = priv->dma_cap.rx_fifo_size;
2869 	if (txfifosz == 0)
2870 		txfifosz = priv->dma_cap.tx_fifo_size;
2871 
2872 	/* Adjust for real per queue fifo size */
2873 	rxfifosz /= rx_channels_count;
2874 	txfifosz /= tx_channels_count;
2875 
2876 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2877 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2878 }
2879 
2880 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2881 {
2882 	int ret;
2883 
2884 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2885 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2886 	if (ret && (ret != -EINVAL)) {
2887 		stmmac_global_err(priv);
2888 		return true;
2889 	}
2890 
2891 	return false;
2892 }
2893 
2894 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2895 {
2896 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2897 						 &priv->xstats, chan, dir);
2898 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2899 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2900 	struct stmmac_channel *ch = &priv->channel[chan];
2901 	struct napi_struct *rx_napi;
2902 	struct napi_struct *tx_napi;
2903 	unsigned long flags;
2904 
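	/* In AF_XDP zero-copy mode both directions are serviced by the
	 * combined rx/tx NAPI instance.
	 */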
2905 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2906 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2907 
2908 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2909 		if (napi_schedule_prep(rx_napi)) {
2910 			spin_lock_irqsave(&ch->lock, flags);
2911 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2912 			spin_unlock_irqrestore(&ch->lock, flags);
2913 			__napi_schedule(rx_napi);
2914 		}
2915 	}
2916 
2917 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2918 		if (napi_schedule_prep(tx_napi)) {
2919 			spin_lock_irqsave(&ch->lock, flags);
2920 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2921 			spin_unlock_irqrestore(&ch->lock, flags);
2922 			__napi_schedule(tx_napi);
2923 		}
2924 	}
2925 
2926 	return status;
2927 }
2928 
2929 /**
2930  * stmmac_dma_interrupt - DMA ISR
2931  * @priv: driver private structure
2932  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
2935  */
2936 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2937 {
2938 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2939 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2940 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2941 				tx_channel_count : rx_channel_count;
2942 	u32 chan;
2943 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2944 
2945 	/* Make sure we never check beyond our status buffer. */
2946 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2947 		channels_to_check = ARRAY_SIZE(status);
2948 
2949 	for (chan = 0; chan < channels_to_check; chan++)
2950 		status[chan] = stmmac_napi_check(priv, chan,
2951 						 DMA_DIR_RXTX);
2952 
2953 	for (chan = 0; chan < tx_channel_count; chan++) {
2954 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2955 			/* Try to bump up the dma threshold on this failure */
2956 			stmmac_bump_dma_threshold(priv, chan);
2957 		} else if (unlikely(status[chan] == tx_hard_error)) {
2958 			stmmac_tx_err(priv, chan);
2959 		}
2960 	}
2961 }
2962 
2963 /**
 * stmmac_mmc_setup: setup the MAC Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2967  */
2968 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2969 {
2970 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2971 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2972 
2973 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2974 
2975 	if (priv->dma_cap.rmon) {
2976 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2977 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2978 	} else
2979 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2980 }
2981 
2982 /**
2983  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2984  * @priv: driver private structure
2985  * Description:
2986  *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional features/functions.
 *  This can also be used to override the values passed through the
 *  platform, which is necessary for old MAC10/100 and GMAC chips.
2990  */
2991 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2992 {
2993 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2994 }
2995 
2996 /**
2997  * stmmac_check_ether_addr - check if the MAC addr is valid
2998  * @priv: driver private structure
2999  * Description:
 * it verifies whether the MAC address is valid; in case it is not, it
 * generates a random MAC address
3002  */
3003 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3004 {
3005 	u8 addr[ETH_ALEN];
3006 
3007 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3008 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3009 		if (is_valid_ether_addr(addr))
3010 			eth_hw_addr_set(priv->dev, addr);
3011 		else
3012 			eth_hw_addr_random(priv->dev);
3013 		dev_info(priv->device, "device MAC address %pM\n",
3014 			 priv->dev->dev_addr);
3015 	}
3016 }
3017 
3018 /**
3019  * stmmac_init_dma_engine - DMA init.
3020  * @priv: driver private structure
3021  * Description:
 * It inits the DMA by invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is kept for the MAC or GMAC.
3025  */
3026 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3027 {
3028 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3029 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3030 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3031 	struct stmmac_rx_queue *rx_q;
3032 	struct stmmac_tx_queue *tx_q;
3033 	u32 chan = 0;
3034 	int ret = 0;
3035 
3036 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3037 		dev_err(priv->device, "Invalid DMA configuration\n");
3038 		return -EINVAL;
3039 	}
3040 
3041 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3042 		priv->plat->dma_cfg->atds = 1;
3043 
3044 	ret = stmmac_reset(priv, priv->ioaddr);
3045 	if (ret) {
3046 		dev_err(priv->device, "Failed to reset the dma\n");
3047 		return ret;
3048 	}
3049 
3050 	/* DMA Configuration */
3051 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3052 
3053 	if (priv->plat->axi)
3054 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3055 
3056 	/* DMA CSR Channel configuration */
3057 	for (chan = 0; chan < dma_csr_ch; chan++) {
3058 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3059 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3060 	}
3061 
3062 	/* DMA RX Channel Configuration */
3063 	for (chan = 0; chan < rx_channels_count; chan++) {
3064 		rx_q = &priv->dma_conf.rx_queue[chan];
3065 
3066 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3067 				    rx_q->dma_rx_phy, chan);
3068 
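		/* Advance the RX tail pointer past the descriptors that
		 * already own a buffer so the DMA can use them right away.
		 */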
3069 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3070 				     (rx_q->buf_alloc_num *
3071 				      sizeof(struct dma_desc));
3072 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3073 				       rx_q->rx_tail_addr, chan);
3074 	}
3075 
3076 	/* DMA TX Channel Configuration */
3077 	for (chan = 0; chan < tx_channels_count; chan++) {
3078 		tx_q = &priv->dma_conf.tx_queue[chan];
3079 
3080 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3081 				    tx_q->dma_tx_phy, chan);
3082 
3083 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3084 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3085 				       tx_q->tx_tail_addr, chan);
3086 	}
3087 
3088 	return ret;
3089 }
3090 
3091 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3092 {
3093 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3094 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3095 	struct stmmac_channel *ch;
3096 	struct napi_struct *napi;
3097 
3098 	if (!tx_coal_timer)
3099 		return;
3100 
3101 	ch = &priv->channel[tx_q->queue_index];
3102 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3103 
	/* Arm the timer only if napi is not already scheduled.
	 * If napi is scheduled, try to cancel any pending timer; it will be
	 * armed again on the next scheduled napi run.
3107 	 */
3108 	if (unlikely(!napi_is_scheduled(napi)))
3109 		hrtimer_start(&tx_q->txtimer,
3110 			      STMMAC_COAL_TIMER(tx_coal_timer),
3111 			      HRTIMER_MODE_REL);
3112 	else
3113 		hrtimer_try_to_cancel(&tx_q->txtimer);
3114 }
3115 
3116 /**
3117  * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: pointer to the expired hrtimer (embedded in the TX queue)
 * Description:
 * This is the timer handler; it schedules the TX NAPI, which in turn
 * invokes stmmac_tx_clean.
3121  */
3122 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3123 {
3124 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3125 	struct stmmac_priv *priv = tx_q->priv_data;
3126 	struct stmmac_channel *ch;
3127 	struct napi_struct *napi;
3128 
3129 	ch = &priv->channel[tx_q->queue_index];
3130 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3131 
3132 	if (likely(napi_schedule_prep(napi))) {
3133 		unsigned long flags;
3134 
3135 		spin_lock_irqsave(&ch->lock, flags);
3136 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3137 		spin_unlock_irqrestore(&ch->lock, flags);
3138 		__napi_schedule(napi);
3139 	}
3140 
3141 	return HRTIMER_NORESTART;
3142 }
3143 
3144 /**
3145  * stmmac_init_coalesce - init mitigation options.
3146  * @priv: driver private structure
3147  * Description:
 * This inits the coalesce parameters, i.e. the timer rate, the timer
 * handler and the default frame threshold used for setting the
 * interrupt-on-completion bit.
3151  */
3152 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3153 {
3154 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3155 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3156 	u32 chan;
3157 
3158 	for (chan = 0; chan < tx_channel_count; chan++) {
3159 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3160 
3161 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3162 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3163 
3164 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3165 		tx_q->txtimer.function = stmmac_tx_timer;
3166 	}
3167 
3168 	for (chan = 0; chan < rx_channel_count; chan++)
3169 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3170 }
3171 
3172 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3173 {
3174 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3175 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3176 	u32 chan;
3177 
3178 	/* set TX ring length */
3179 	for (chan = 0; chan < tx_channels_count; chan++)
3180 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3181 				       (priv->dma_conf.dma_tx_size - 1), chan);
3182 
3183 	/* set RX ring length */
3184 	for (chan = 0; chan < rx_channels_count; chan++)
3185 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3186 				       (priv->dma_conf.dma_rx_size - 1), chan);
3187 }
3188 
3189 /**
3190  *  stmmac_set_tx_queue_weight - Set TX queue weight
3191  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
3193  */
3194 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3195 {
3196 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3197 	u32 weight;
3198 	u32 queue;
3199 
3200 	for (queue = 0; queue < tx_queues_count; queue++) {
3201 		weight = priv->plat->tx_queues_cfg[queue].weight;
3202 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3203 	}
3204 }
3205 
3206 /**
3207  *  stmmac_configure_cbs - Configure CBS in TX queue
3208  *  @priv: driver private structure
3209  *  Description: It is used for configuring CBS in AVB TX queues
3210  */
3211 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3212 {
3213 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3214 	u32 mode_to_use;
3215 	u32 queue;
3216 
3217 	/* queue 0 is reserved for legacy traffic */
3218 	for (queue = 1; queue < tx_queues_count; queue++) {
3219 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3220 		if (mode_to_use == MTL_QUEUE_DCB)
3221 			continue;
3222 
3223 		stmmac_config_cbs(priv, priv->hw,
3224 				priv->plat->tx_queues_cfg[queue].send_slope,
3225 				priv->plat->tx_queues_cfg[queue].idle_slope,
3226 				priv->plat->tx_queues_cfg[queue].high_credit,
3227 				priv->plat->tx_queues_cfg[queue].low_credit,
3228 				queue);
3229 	}
3230 }
3231 
3232 /**
3233  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3234  *  @priv: driver private structure
3235  *  Description: It is used for mapping RX queues to RX dma channels
3236  */
3237 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3238 {
3239 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3240 	u32 queue;
3241 	u32 chan;
3242 
3243 	for (queue = 0; queue < rx_queues_count; queue++) {
3244 		chan = priv->plat->rx_queues_cfg[queue].chan;
3245 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3246 	}
3247 }
3248 
3249 /**
3250  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3251  *  @priv: driver private structure
3252  *  Description: It is used for configuring the RX Queue Priority
3253  */
3254 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3255 {
3256 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3257 	u32 queue;
3258 	u32 prio;
3259 
3260 	for (queue = 0; queue < rx_queues_count; queue++) {
3261 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3262 			continue;
3263 
3264 		prio = priv->plat->rx_queues_cfg[queue].prio;
3265 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3266 	}
3267 }
3268 
3269 /**
3270  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3271  *  @priv: driver private structure
3272  *  Description: It is used for configuring the TX Queue Priority
3273  */
3274 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3275 {
3276 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3277 	u32 queue;
3278 	u32 prio;
3279 
3280 	for (queue = 0; queue < tx_queues_count; queue++) {
3281 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3282 			continue;
3283 
3284 		prio = priv->plat->tx_queues_cfg[queue].prio;
3285 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3286 	}
3287 }
3288 
3289 /**
3290  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3291  *  @priv: driver private structure
3292  *  Description: It is used for configuring the RX queue routing
3293  */
3294 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3295 {
3296 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3297 	u32 queue;
3298 	u8 packet;
3299 
3300 	for (queue = 0; queue < rx_queues_count; queue++) {
3301 		/* no specific packet type routing specified for the queue */
3302 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3303 			continue;
3304 
3305 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3306 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3307 	}
3308 }
3309 
3310 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3311 {
3312 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3313 		priv->rss.enable = false;
3314 		return;
3315 	}
3316 
3317 	if (priv->dev->features & NETIF_F_RXHASH)
3318 		priv->rss.enable = true;
3319 	else
3320 		priv->rss.enable = false;
3321 
3322 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3323 			     priv->plat->rx_queues_to_use);
3324 }
3325 
3326 /**
3327  *  stmmac_mtl_configuration - Configure MTL
3328  *  @priv: driver private structure
 *  Description: It is used for configuring the MTL
3330  */
3331 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3332 {
3333 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3334 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3335 
3336 	if (tx_queues_count > 1)
3337 		stmmac_set_tx_queue_weight(priv);
3338 
3339 	/* Configure MTL RX algorithms */
3340 	if (rx_queues_count > 1)
3341 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3342 				priv->plat->rx_sched_algorithm);
3343 
3344 	/* Configure MTL TX algorithms */
3345 	if (tx_queues_count > 1)
3346 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3347 				priv->plat->tx_sched_algorithm);
3348 
3349 	/* Configure CBS in AVB TX queues */
3350 	if (tx_queues_count > 1)
3351 		stmmac_configure_cbs(priv);
3352 
3353 	/* Map RX MTL to DMA channels */
3354 	stmmac_rx_queue_dma_chan_map(priv);
3355 
3356 	/* Enable MAC RX Queues */
3357 	stmmac_mac_enable_rx_queues(priv);
3358 
3359 	/* Set RX priorities */
3360 	if (rx_queues_count > 1)
3361 		stmmac_mac_config_rx_queues_prio(priv);
3362 
3363 	/* Set TX priorities */
3364 	if (tx_queues_count > 1)
3365 		stmmac_mac_config_tx_queues_prio(priv);
3366 
3367 	/* Set RX routing */
3368 	if (rx_queues_count > 1)
3369 		stmmac_mac_config_rx_queues_routing(priv);
3370 
3371 	/* Receive Side Scaling */
3372 	if (rx_queues_count > 1)
3373 		stmmac_mac_config_rss(priv);
3374 }
3375 
3376 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3377 {
3378 	if (priv->dma_cap.asp) {
3379 		netdev_info(priv->dev, "Enabling Safety Features\n");
3380 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3381 					  priv->plat->safety_feat_cfg);
3382 	} else {
3383 		netdev_info(priv->dev, "No Safety Features support found\n");
3384 	}
3385 }
3386 
3387 /**
3388  * stmmac_hw_setup - setup mac in a usable state.
3389  *  @dev : pointer to the device structure.
3390  *  @ptp_register: register PTP if set
3391  *  Description:
 *  this is the main function to set up the HW in a usable state: the
 *  DMA engine is reset and the core registers are configured (e.g. AXI,
 *  checksum features, timers), after which the DMA is ready to start
 *  receiving and transmitting.
 *  Return value:
 *  0 on success and an appropriate negative error code, as defined in
 *  errno.h, on failure.
3399  */
3400 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3401 {
3402 	struct stmmac_priv *priv = netdev_priv(dev);
3403 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3404 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3405 	bool sph_en;
3406 	u32 chan;
3407 	int ret;
3408 
3409 	/* Make sure RX clock is enabled */
3410 	if (priv->hw->phylink_pcs)
3411 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3412 
3413 	/* DMA initialization and SW reset */
3414 	ret = stmmac_init_dma_engine(priv);
3415 	if (ret < 0) {
3416 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3417 			   __func__);
3418 		return ret;
3419 	}
3420 
3421 	/* Copy the MAC addr into the HW  */
3422 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3423 
3424 	/* PS and related bits will be programmed according to the speed */
3425 	if (priv->hw->pcs) {
3426 		int speed = priv->plat->mac_port_sel_speed;
3427 
3428 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3429 		    (speed == SPEED_1000)) {
3430 			priv->hw->ps = speed;
3431 		} else {
3432 			dev_warn(priv->device, "invalid port speed\n");
3433 			priv->hw->ps = 0;
3434 		}
3435 	}
3436 
3437 	/* Initialize the MAC Core */
3438 	stmmac_core_init(priv, priv->hw, dev);
3439 
	/* Initialize MTL */
3441 	stmmac_mtl_configuration(priv);
3442 
3443 	/* Initialize Safety Features */
3444 	stmmac_safety_feat_configuration(priv);
3445 
3446 	ret = stmmac_rx_ipc(priv, priv->hw);
3447 	if (!ret) {
3448 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3449 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3450 		priv->hw->rx_csum = 0;
3451 	}
3452 
3453 	/* Enable the MAC Rx/Tx */
3454 	stmmac_mac_set(priv, priv->ioaddr, true);
3455 
3456 	/* Set the HW DMA mode and the COE */
3457 	stmmac_dma_operation_mode(priv);
3458 
3459 	stmmac_mmc_setup(priv);
3460 
3461 	if (ptp_register) {
3462 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3463 		if (ret < 0)
3464 			netdev_warn(priv->dev,
3465 				    "failed to enable PTP reference clock: %pe\n",
3466 				    ERR_PTR(ret));
3467 	}
3468 
3469 	ret = stmmac_init_ptp(priv);
3470 	if (ret == -EOPNOTSUPP)
3471 		netdev_info(priv->dev, "PTP not supported by HW\n");
3472 	else if (ret)
3473 		netdev_warn(priv->dev, "PTP init failed\n");
3474 	else if (ptp_register)
3475 		stmmac_ptp_register(priv);
3476 
3477 	if (priv->use_riwt) {
3478 		u32 queue;
3479 
3480 		for (queue = 0; queue < rx_cnt; queue++) {
3481 			if (!priv->rx_riwt[queue])
3482 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3483 
3484 			stmmac_rx_watchdog(priv, priv->ioaddr,
3485 					   priv->rx_riwt[queue], queue);
3486 		}
3487 	}
3488 
3489 	if (priv->hw->pcs)
3490 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3491 
3492 	/* set TX and RX rings length */
3493 	stmmac_set_rings_length(priv);
3494 
3495 	/* Enable TSO */
3496 	if (priv->tso) {
3497 		for (chan = 0; chan < tx_cnt; chan++) {
3498 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3499 
3500 			/* TSO and TBS cannot co-exist */
3501 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3502 				continue;
3503 
3504 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3505 		}
3506 	}
3507 
3508 	/* Enable Split Header */
3509 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3510 	for (chan = 0; chan < rx_cnt; chan++)
3511 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3512 
3514 	/* VLAN Tag Insertion */
3515 	if (priv->dma_cap.vlins)
3516 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3517 
3518 	/* TBS */
3519 	for (chan = 0; chan < tx_cnt; chan++) {
3520 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3521 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3522 
3523 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3524 	}
3525 
3526 	/* Configure real RX and TX queues */
3527 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3528 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3529 
3530 	/* Start the ball rolling... */
3531 	stmmac_start_all_dma(priv);
3532 
3533 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3534 
3535 	return 0;
3536 }
3537 
3538 static void stmmac_hw_teardown(struct net_device *dev)
3539 {
3540 	struct stmmac_priv *priv = netdev_priv(dev);
3541 
3542 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3543 }
3544 
3545 static void stmmac_free_irq(struct net_device *dev,
3546 			    enum request_irq_err irq_err, int irq_idx)
3547 {
3548 	struct stmmac_priv *priv = netdev_priv(dev);
3549 	int j;
3550 
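	/* The cases below intentionally fall through: starting from the point
	 * where IRQ allocation failed (or from REQ_IRQ_ERR_ALL on a normal
	 * teardown), every IRQ that was successfully requested earlier is
	 * released again.
	 */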
3551 	switch (irq_err) {
3552 	case REQ_IRQ_ERR_ALL:
3553 		irq_idx = priv->plat->tx_queues_to_use;
3554 		fallthrough;
3555 	case REQ_IRQ_ERR_TX:
3556 		for (j = irq_idx - 1; j >= 0; j--) {
3557 			if (priv->tx_irq[j] > 0) {
3558 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3559 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3560 			}
3561 		}
3562 		irq_idx = priv->plat->rx_queues_to_use;
3563 		fallthrough;
3564 	case REQ_IRQ_ERR_RX:
3565 		for (j = irq_idx - 1; j >= 0; j--) {
3566 			if (priv->rx_irq[j] > 0) {
3567 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3568 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3569 			}
3570 		}
3571 
3572 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3573 			free_irq(priv->sfty_ue_irq, dev);
3574 		fallthrough;
3575 	case REQ_IRQ_ERR_SFTY_UE:
3576 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3577 			free_irq(priv->sfty_ce_irq, dev);
3578 		fallthrough;
3579 	case REQ_IRQ_ERR_SFTY_CE:
3580 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3581 			free_irq(priv->lpi_irq, dev);
3582 		fallthrough;
3583 	case REQ_IRQ_ERR_LPI:
3584 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3585 			free_irq(priv->wol_irq, dev);
3586 		fallthrough;
3587 	case REQ_IRQ_ERR_SFTY:
3588 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3589 			free_irq(priv->sfty_irq, dev);
3590 		fallthrough;
3591 	case REQ_IRQ_ERR_WOL:
3592 		free_irq(dev->irq, dev);
3593 		fallthrough;
3594 	case REQ_IRQ_ERR_MAC:
3595 	case REQ_IRQ_ERR_NO:
3596 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3597 		break;
3598 	}
3599 }
3600 
3601 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3602 {
3603 	struct stmmac_priv *priv = netdev_priv(dev);
3604 	enum request_irq_err irq_err;
3605 	cpumask_t cpu_mask;
3606 	int irq_idx = 0;
3607 	char *int_name;
3608 	int ret;
3609 	int i;
3610 
3611 	/* For common interrupt */
3612 	int_name = priv->int_name_mac;
3613 	sprintf(int_name, "%s:%s", dev->name, "mac");
3614 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3615 			  0, int_name, dev);
3616 	if (unlikely(ret < 0)) {
3617 		netdev_err(priv->dev,
3618 			   "%s: alloc mac MSI %d (error: %d)\n",
3619 			   __func__, dev->irq, ret);
3620 		irq_err = REQ_IRQ_ERR_MAC;
3621 		goto irq_error;
3622 	}
3623 
3624 	/* Request the Wake IRQ in case another line
3625 	 * is used for WoL
3626 	 */
3627 	priv->wol_irq_disabled = true;
3628 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3629 		int_name = priv->int_name_wol;
3630 		sprintf(int_name, "%s:%s", dev->name, "wol");
3631 		ret = request_irq(priv->wol_irq,
3632 				  stmmac_mac_interrupt,
3633 				  0, int_name, dev);
3634 		if (unlikely(ret < 0)) {
3635 			netdev_err(priv->dev,
3636 				   "%s: alloc wol MSI %d (error: %d)\n",
3637 				   __func__, priv->wol_irq, ret);
3638 			irq_err = REQ_IRQ_ERR_WOL;
3639 			goto irq_error;
3640 		}
3641 	}
3642 
3643 	/* Request the LPI IRQ in case another line
3644 	 * is used for LPI
3645 	 */
3646 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3647 		int_name = priv->int_name_lpi;
3648 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3649 		ret = request_irq(priv->lpi_irq,
3650 				  stmmac_mac_interrupt,
3651 				  0, int_name, dev);
3652 		if (unlikely(ret < 0)) {
3653 			netdev_err(priv->dev,
3654 				   "%s: alloc lpi MSI %d (error: %d)\n",
3655 				   __func__, priv->lpi_irq, ret);
3656 			irq_err = REQ_IRQ_ERR_LPI;
3657 			goto irq_error;
3658 		}
3659 	}
3660 
3661 	/* Request the common Safety Feature Correctable/Uncorrectable
3662 	 * Error line in case another line is used
3663 	 */
3664 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3665 		int_name = priv->int_name_sfty;
3666 		sprintf(int_name, "%s:%s", dev->name, "safety");
3667 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3668 				  0, int_name, dev);
3669 		if (unlikely(ret < 0)) {
3670 			netdev_err(priv->dev,
3671 				   "%s: alloc sfty MSI %d (error: %d)\n",
3672 				   __func__, priv->sfty_irq, ret);
3673 			irq_err = REQ_IRQ_ERR_SFTY;
3674 			goto irq_error;
3675 		}
3676 	}
3677 
3678 	/* Request the Safety Feature Correctable Error line in
3679 	 * case another line is used
3680 	 */
3681 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3682 		int_name = priv->int_name_sfty_ce;
3683 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3684 		ret = request_irq(priv->sfty_ce_irq,
3685 				  stmmac_safety_interrupt,
3686 				  0, int_name, dev);
3687 		if (unlikely(ret < 0)) {
3688 			netdev_err(priv->dev,
3689 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3690 				   __func__, priv->sfty_ce_irq, ret);
3691 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3692 			goto irq_error;
3693 		}
3694 	}
3695 
3696 	/* Request the Safety Feature Uncorrectable Error line in
3697 	 * case another line is used
3698 	 */
3699 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3700 		int_name = priv->int_name_sfty_ue;
3701 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3702 		ret = request_irq(priv->sfty_ue_irq,
3703 				  stmmac_safety_interrupt,
3704 				  0, int_name, dev);
3705 		if (unlikely(ret < 0)) {
3706 			netdev_err(priv->dev,
3707 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3708 				   __func__, priv->sfty_ue_irq, ret);
3709 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3710 			goto irq_error;
3711 		}
3712 	}
3713 
3714 	/* Request Rx MSI irq */
3715 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3716 		if (i >= MTL_MAX_RX_QUEUES)
3717 			break;
3718 		if (priv->rx_irq[i] == 0)
3719 			continue;
3720 
3721 		int_name = priv->int_name_rx_irq[i];
3722 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3723 		ret = request_irq(priv->rx_irq[i],
3724 				  stmmac_msi_intr_rx,
3725 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3726 		if (unlikely(ret < 0)) {
3727 			netdev_err(priv->dev,
3728 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3729 				   __func__, i, priv->rx_irq[i], ret);
3730 			irq_err = REQ_IRQ_ERR_RX;
3731 			irq_idx = i;
3732 			goto irq_error;
3733 		}
3734 		cpumask_clear(&cpu_mask);
3735 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3736 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3737 	}
3738 
3739 	/* Request Tx MSI irq */
3740 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3741 		if (i >= MTL_MAX_TX_QUEUES)
3742 			break;
3743 		if (priv->tx_irq[i] == 0)
3744 			continue;
3745 
3746 		int_name = priv->int_name_tx_irq[i];
3747 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3748 		ret = request_irq(priv->tx_irq[i],
3749 				  stmmac_msi_intr_tx,
3750 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3751 		if (unlikely(ret < 0)) {
3752 			netdev_err(priv->dev,
3753 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3754 				   __func__, i, priv->tx_irq[i], ret);
3755 			irq_err = REQ_IRQ_ERR_TX;
3756 			irq_idx = i;
3757 			goto irq_error;
3758 		}
3759 		cpumask_clear(&cpu_mask);
3760 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3761 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3762 	}
3763 
3764 	return 0;
3765 
3766 irq_error:
3767 	stmmac_free_irq(dev, irq_err, irq_idx);
3768 	return ret;
3769 }
3770 
3771 static int stmmac_request_irq_single(struct net_device *dev)
3772 {
3773 	struct stmmac_priv *priv = netdev_priv(dev);
3774 	enum request_irq_err irq_err;
3775 	int ret;
3776 
3777 	ret = request_irq(dev->irq, stmmac_interrupt,
3778 			  IRQF_SHARED, dev->name, dev);
3779 	if (unlikely(ret < 0)) {
3780 		netdev_err(priv->dev,
3781 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3782 			   __func__, dev->irq, ret);
3783 		irq_err = REQ_IRQ_ERR_MAC;
3784 		goto irq_error;
3785 	}
3786 
3787 	/* Request the Wake IRQ in case another line
3788 	 * is used for WoL
3789 	 */
3790 	priv->wol_irq_disabled = true;
3791 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3792 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3793 				  IRQF_SHARED, dev->name, dev);
3794 		if (unlikely(ret < 0)) {
3795 			netdev_err(priv->dev,
3796 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3797 				   __func__, priv->wol_irq, ret);
3798 			irq_err = REQ_IRQ_ERR_WOL;
3799 			goto irq_error;
3800 		}
3801 	}
3802 
3803 	/* Request the IRQ lines */
3804 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3805 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3806 				  IRQF_SHARED, dev->name, dev);
3807 		if (unlikely(ret < 0)) {
3808 			netdev_err(priv->dev,
3809 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3810 				   __func__, priv->lpi_irq, ret);
3811 			irq_err = REQ_IRQ_ERR_LPI;
3812 			goto irq_error;
3813 		}
3814 	}
3815 
3816 	/* Request the common Safety Feature Correctable/Uncorrectable
3817 	 * Error line in case another line is used
3818 	 */
3819 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3820 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3821 				  IRQF_SHARED, dev->name, dev);
3822 		if (unlikely(ret < 0)) {
3823 			netdev_err(priv->dev,
3824 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3825 				   __func__, priv->sfty_irq, ret);
3826 			irq_err = REQ_IRQ_ERR_SFTY;
3827 			goto irq_error;
3828 		}
3829 	}
3830 
3831 	return 0;
3832 
3833 irq_error:
3834 	stmmac_free_irq(dev, irq_err, 0);
3835 	return ret;
3836 }
3837 
3838 static int stmmac_request_irq(struct net_device *dev)
3839 {
3840 	struct stmmac_priv *priv = netdev_priv(dev);
3841 	int ret;
3842 
3843 	/* Request the IRQ lines */
3844 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3845 		ret = stmmac_request_irq_multi_msi(dev);
3846 	else
3847 		ret = stmmac_request_irq_single(dev);
3848 
3849 	return ret;
3850 }
3851 
3852 /**
3853  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3854  *  @priv: driver private structure
3855  *  @mtu: MTU to setup the dma queue and buf with
3856  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3857  *  Allocate the Tx/Rx DMA queues and initialize them.
3858  *  Return value:
3859  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3860  */
3861 static struct stmmac_dma_conf *
3862 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3863 {
3864 	struct stmmac_dma_conf *dma_conf;
3865 	int chan, bfsize, ret;
3866 
3867 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3868 	if (!dma_conf) {
3869 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3870 			   __func__);
3871 		return ERR_PTR(-ENOMEM);
3872 	}
3873 
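	/* Use a 16 KiB buffer when the MTU needs it and the ring mode supports
	 * it; otherwise derive the buffer size from the MTU.
	 */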
3874 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3875 	if (bfsize < 0)
3876 		bfsize = 0;
3877 
3878 	if (bfsize < BUF_SIZE_16KiB)
3879 		bfsize = stmmac_set_bfsize(mtu, 0);
3880 
3881 	dma_conf->dma_buf_sz = bfsize;
3882 	/* Choose the tx/rx ring sizes from the ones already defined in
3883 	 * the priv struct, if any.
3884 	 */
3885 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3886 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3887 
3888 	if (!dma_conf->dma_tx_size)
3889 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3890 	if (!dma_conf->dma_rx_size)
3891 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3892 
3893 	/* Check TBS early, before the TX descriptors are allocated */
3894 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3895 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3896 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3897 
3898 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3899 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3900 	}
3901 
3902 	ret = alloc_dma_desc_resources(priv, dma_conf);
3903 	if (ret < 0) {
3904 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3905 			   __func__);
3906 		goto alloc_error;
3907 	}
3908 
3909 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3910 	if (ret < 0) {
3911 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3912 			   __func__);
3913 		goto init_error;
3914 	}
3915 
3916 	return dma_conf;
3917 
3918 init_error:
3919 	free_dma_desc_resources(priv, dma_conf);
3920 alloc_error:
3921 	kfree(dma_conf);
3922 	return ERR_PTR(ret);
3923 }
3924 
3925 /**
3926  *  __stmmac_open - open entry point of the driver
3927  *  @dev : pointer to the device structure.
3928  *  @dma_conf : structure holding the DMA configuration to use
3929  *  Description:
3930  *  This function is the open entry point of the driver.
3931  *  Return value:
3932  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3933  *  file on failure.
3934  */
3935 static int __stmmac_open(struct net_device *dev,
3936 			 struct stmmac_dma_conf *dma_conf)
3937 {
3938 	struct stmmac_priv *priv = netdev_priv(dev);
3939 	int mode = priv->plat->phy_interface;
3940 	u32 chan;
3941 	int ret;
3942 
3943 	/* Initialise the tx lpi timer, converting from msec to usec */
3944 	if (!priv->tx_lpi_timer)
3945 		priv->tx_lpi_timer = eee_timer * 1000;
3946 
3947 	ret = pm_runtime_resume_and_get(priv->device);
3948 	if (ret < 0)
3949 		return ret;
3950 
3951 	if ((!priv->hw->xpcs ||
3952 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3953 		ret = stmmac_init_phy(dev);
3954 		if (ret) {
3955 			netdev_err(priv->dev,
3956 				   "%s: Cannot attach to PHY (error: %d)\n",
3957 				   __func__, ret);
3958 			goto init_phy_error;
3959 		}
3960 	}
3961 
3962 	buf_sz = dma_conf->dma_buf_sz;
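	/* Carry over the per-queue TBS enable state so that queues which
	 * already had TBS enabled keep it in the new DMA configuration.
	 */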
3963 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3964 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3965 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3966 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3967 
3968 	stmmac_reset_queues_param(priv);
3969 
3970 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3971 	    priv->plat->serdes_powerup) {
3972 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3973 		if (ret < 0) {
3974 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3975 				   __func__);
3976 			goto init_error;
3977 		}
3978 	}
3979 
3980 	ret = stmmac_hw_setup(dev, true);
3981 	if (ret < 0) {
3982 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3983 		goto init_error;
3984 	}
3985 
3986 	stmmac_init_coalesce(priv);
3987 
3988 	phylink_start(priv->phylink);
3989 	/* We may have called phylink_speed_down before */
3990 	phylink_speed_up(priv->phylink);
3991 
3992 	ret = stmmac_request_irq(dev);
3993 	if (ret)
3994 		goto irq_error;
3995 
3996 	stmmac_enable_all_queues(priv);
3997 	netif_tx_start_all_queues(priv->dev);
3998 	stmmac_enable_all_dma_irq(priv);
3999 
4000 	return 0;
4001 
4002 irq_error:
4003 	phylink_stop(priv->phylink);
4004 
4005 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4006 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4007 
4008 	stmmac_hw_teardown(dev);
4009 init_error:
4010 	phylink_disconnect_phy(priv->phylink);
4011 init_phy_error:
4012 	pm_runtime_put(priv->device);
4013 	return ret;
4014 }
4015 
4016 static int stmmac_open(struct net_device *dev)
4017 {
4018 	struct stmmac_priv *priv = netdev_priv(dev);
4019 	struct stmmac_dma_conf *dma_conf;
4020 	int ret;
4021 
4022 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4023 	if (IS_ERR(dma_conf))
4024 		return PTR_ERR(dma_conf);
4025 
4026 	ret = __stmmac_open(dev, dma_conf);
4027 	if (ret)
4028 		free_dma_desc_resources(priv, dma_conf);
4029 
4030 	kfree(dma_conf);
4031 	return ret;
4032 }
4033 
4034 /**
4035  *  stmmac_release - close entry point of the driver
4036  *  @dev : device pointer.
4037  *  Description:
4038  *  This is the stop entry point of the driver.
4039  */
4040 static int stmmac_release(struct net_device *dev)
4041 {
4042 	struct stmmac_priv *priv = netdev_priv(dev);
4043 	u32 chan;
4044 
4045 	if (device_may_wakeup(priv->device))
4046 		phylink_speed_down(priv->phylink, false);
4047 	/* Stop and disconnect the PHY */
4048 	phylink_stop(priv->phylink);
4049 	phylink_disconnect_phy(priv->phylink);
4050 
4051 	stmmac_disable_all_queues(priv);
4052 
4053 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4054 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4055 
4056 	netif_tx_disable(dev);
4057 
4058 	/* Free the IRQ lines */
4059 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4060 
4061 	/* Stop TX/RX DMA and clear the descriptors */
4062 	stmmac_stop_all_dma(priv);
4063 
4064 	/* Release and free the Rx/Tx resources */
4065 	free_dma_desc_resources(priv, &priv->dma_conf);
4066 
4067 	/* Disable the MAC Rx/Tx */
4068 	stmmac_mac_set(priv, priv->ioaddr, false);
4069 
4070 	/* Powerdown Serdes if there is */
4071 	if (priv->plat->serdes_powerdown)
4072 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4073 
4074 	stmmac_release_ptp(priv);
4075 
4076 	if (stmmac_fpe_supported(priv))
4077 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4078 
4079 	pm_runtime_put(priv->device);
4080 
4081 	return 0;
4082 }
4083 
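/* Insert the VLAN tag(s) through a TX descriptor when the HW supports VLAN
 * insertion. Returns true when a descriptor has been consumed for the tag,
 * false when HW insertion is not used for this skb.
 */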
4084 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4085 			       struct stmmac_tx_queue *tx_q)
4086 {
4087 	u16 tag = 0x0, inner_tag = 0x0;
4088 	u32 inner_type = 0x0;
4089 	struct dma_desc *p;
4090 
4091 	if (!priv->dma_cap.vlins)
4092 		return false;
4093 	if (!skb_vlan_tag_present(skb))
4094 		return false;
4095 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4096 		inner_tag = skb_vlan_tag_get(skb);
4097 		inner_type = STMMAC_VLAN_INSERT;
4098 	}
4099 
4100 	tag = skb_vlan_tag_get(skb);
4101 
4102 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4103 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4104 	else
4105 		p = &tx_q->dma_tx[tx_q->cur_tx];
4106 
4107 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4108 		return false;
4109 
4110 	stmmac_set_tx_owner(priv, p);
4111 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4112 	return true;
4113 }
4114 
4115 /**
4116  *  stmmac_tso_allocator - allocate TSO TX descriptors for a buffer
4117  *  @priv: driver private structure
4118  *  @des: buffer start address
4119  *  @total_len: total length to fill in descriptors
4120  *  @last_segment: condition for the last descriptor
4121  *  @queue: TX queue index
4122  *  Description:
4123  *  This function fills descriptors and requests new descriptors according
4124  *  to the buffer length to fill
4125  */
4126 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4127 				 int total_len, bool last_segment, u32 queue)
4128 {
4129 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4130 	struct dma_desc *desc;
4131 	u32 buff_size;
4132 	int tmp_len;
4133 
4134 	tmp_len = total_len;
4135 
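	/* Each iteration consumes one TX descriptor; buffers larger than
	 * TSO_MAX_BUFF_SIZE are split across several descriptors.
	 */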
4136 	while (tmp_len > 0) {
4137 		dma_addr_t curr_addr;
4138 
4139 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4140 						priv->dma_conf.dma_tx_size);
4141 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4142 
4143 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4144 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4145 		else
4146 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4147 
4148 		curr_addr = des + (total_len - tmp_len);
4149 		stmmac_set_desc_addr(priv, desc, curr_addr);
4150 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4151 			    TSO_MAX_BUFF_SIZE : tmp_len;
4152 
4153 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4154 				0, 1,
4155 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4156 				0, 0);
4157 
4158 		tmp_len -= TSO_MAX_BUFF_SIZE;
4159 	}
4160 }
4161 
4162 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4163 {
4164 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4165 	int desc_size;
4166 
4167 	if (likely(priv->extend_desc))
4168 		desc_size = sizeof(struct dma_extended_desc);
4169 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4170 		desc_size = sizeof(struct dma_edesc);
4171 	else
4172 		desc_size = sizeof(struct dma_desc);
4173 
4174 	/* The own bit must be the latest setting done when preparing the
4175 	 * descriptor, and then a barrier is needed to make sure that
4176 	 * everything is coherent before granting control to the DMA engine.
4177 	 */
4178 	wmb();
4179 
4180 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4181 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4182 }
4183 
4184 /**
4185  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4186  *  @skb : the socket buffer
4187  *  @dev : device pointer
4188  *  Description: this is the transmit function that is called on TSO frames
4189  *  (support available on GMAC4 and newer chips).
4190  *  Diagram below show the ring programming in case of TSO frames:
4191  *
4192  *  First Descriptor
4193  *   --------
4194  *   | DES0 |---> buffer1 = L2/L3/L4 header
4195  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4196  *   |      |     width is 32-bit, but we never use it.
4197  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4198  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4199  *   |      |     or 48-bit, and we always use it.
4200  *   | DES2 |---> buffer1 len
4201  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4202  *   --------
4203  *   --------
4204  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4205  *   | DES1 |---> same as the First Descriptor
4206  *   | DES2 |---> buffer1 len
4207  *   | DES3 |
4208  *   --------
4209  *	|
4210  *     ...
4211  *	|
4212  *   --------
4213  *   | DES0 |---> buffer1 = Split TCP Payload
4214  *   | DES1 |---> same as the First Descriptor
4215  *   | DES2 |---> buffer1 len
4216  *   | DES3 |
4217  *   --------
4218  *
4219  * The MSS is fixed while TSO is enabled, so there is no need to program the TDES3 ctx field.
4220  */
4221 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4222 {
4223 	struct dma_desc *desc, *first, *mss_desc = NULL;
4224 	struct stmmac_priv *priv = netdev_priv(dev);
4225 	unsigned int first_entry, tx_packets;
4226 	struct stmmac_txq_stats *txq_stats;
4227 	struct stmmac_tx_queue *tx_q;
4228 	u32 pay_len, mss, queue;
4229 	int i, first_tx, nfrags;
4230 	u8 proto_hdr_len, hdr;
4231 	dma_addr_t des;
4232 	bool set_ic;
4233 
4234 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4235 	 *
4236 	 * Never insert the VLAN tag by HW, since segments split by the
4237 	 * TSO engine would be un-tagged by mistake.
4238 	 */
4239 	if (skb_vlan_tag_present(skb)) {
4240 		skb = __vlan_hwaccel_push_inside(skb);
4241 		if (unlikely(!skb)) {
4242 			priv->xstats.tx_dropped++;
4243 			return NETDEV_TX_OK;
4244 		}
4245 	}
4246 
4247 	nfrags = skb_shinfo(skb)->nr_frags;
4248 	queue = skb_get_queue_mapping(skb);
4249 
4250 	tx_q = &priv->dma_conf.tx_queue[queue];
4251 	txq_stats = &priv->xstats.txq_stats[queue];
4252 	first_tx = tx_q->cur_tx;
4253 
4254 	/* Compute header lengths */
4255 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4256 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4257 		hdr = sizeof(struct udphdr);
4258 	} else {
4259 		proto_hdr_len = skb_tcp_all_headers(skb);
4260 		hdr = tcp_hdrlen(skb);
4261 	}
4262 
4263 	/* Desc availability based on the threshold should be safe enough */
4264 	if (unlikely(stmmac_tx_avail(priv, queue) <
4265 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4266 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4267 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4268 								queue));
4269 			/* This is a hard error, log it. */
4270 			netdev_err(priv->dev,
4271 				   "%s: Tx Ring full when queue awake\n",
4272 				   __func__);
4273 		}
4274 		return NETDEV_TX_BUSY;
4275 	}
4276 
4277 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4278 
4279 	mss = skb_shinfo(skb)->gso_size;
4280 
4281 	/* set new MSS value if needed */
4282 	if (mss != tx_q->mss) {
4283 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4284 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4285 		else
4286 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4287 
4288 		stmmac_set_mss(priv, mss_desc, mss);
4289 		tx_q->mss = mss;
4290 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4291 						priv->dma_conf.dma_tx_size);
4292 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4293 	}
4294 
4295 	if (netif_msg_tx_queued(priv)) {
4296 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4297 			__func__, hdr, proto_hdr_len, pay_len, mss);
4298 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4299 			skb->data_len);
4300 	}
4301 
4302 	first_entry = tx_q->cur_tx;
4303 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4304 
4305 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4306 		desc = &tx_q->dma_entx[first_entry].basic;
4307 	else
4308 		desc = &tx_q->dma_tx[first_entry];
4309 	first = desc;
4310 
4311 	/* first descriptor: fill Headers on Buf1 */
4312 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4313 			     DMA_TO_DEVICE);
4314 	if (dma_mapping_error(priv->device, des))
4315 		goto dma_map_err;
4316 
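	/* The first descriptor carries only the L2/L3/L4 headers; the rest of
	 * the linear payload is spread over additional descriptors by
	 * stmmac_tso_allocator().
	 */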
4317 	stmmac_set_desc_addr(priv, first, des);
4318 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4319 			     (nfrags == 0), queue);
4320 
4321 	/* In case two or more DMA transmit descriptors are allocated for this
4322 	 * non-paged SKB data, the DMA buffer address should be saved to
4323 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4324 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4325 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4326 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4327 	 * sooner or later.
4328 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4329 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4330 	 * this DMA buffer right after the DMA engine completely finishes the
4331 	 * full buffer transmission.
4332 	 */
4333 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4334 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4335 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4336 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4337 
4338 	/* Prepare fragments */
4339 	for (i = 0; i < nfrags; i++) {
4340 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4341 
4342 		des = skb_frag_dma_map(priv->device, frag, 0,
4343 				       skb_frag_size(frag),
4344 				       DMA_TO_DEVICE);
4345 		if (dma_mapping_error(priv->device, des))
4346 			goto dma_map_err;
4347 
4348 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4349 				     (i == nfrags - 1), queue);
4350 
4351 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4352 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4353 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4354 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4355 	}
4356 
4357 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4358 
4359 	/* Only the last descriptor gets to point to the skb. */
4360 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4361 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4362 
4363 	/* Manage tx mitigation */
4364 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4365 	tx_q->tx_count_frames += tx_packets;
4366 
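	/* Request a TX completion interrupt when HW timestamping is in use or
	 * when the frame coalescing threshold has been crossed; otherwise rely
	 * on the TX coalesce timer.
	 */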
4367 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4368 		set_ic = true;
4369 	else if (!priv->tx_coal_frames[queue])
4370 		set_ic = false;
4371 	else if (tx_packets > priv->tx_coal_frames[queue])
4372 		set_ic = true;
4373 	else if ((tx_q->tx_count_frames %
4374 		  priv->tx_coal_frames[queue]) < tx_packets)
4375 		set_ic = true;
4376 	else
4377 		set_ic = false;
4378 
4379 	if (set_ic) {
4380 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4381 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4382 		else
4383 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4384 
4385 		tx_q->tx_count_frames = 0;
4386 		stmmac_set_tx_ic(priv, desc);
4387 	}
4388 
4389 	/* We've used all descriptors we need for this skb, however,
4390 	 * advance cur_tx so that it references a fresh descriptor.
4391 	 * ndo_start_xmit will fill this descriptor the next time it's
4392 	 * called and stmmac_tx_clean may clean up to this descriptor.
4393 	 */
4394 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4395 
4396 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4397 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4398 			  __func__);
4399 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4400 	}
4401 
4402 	u64_stats_update_begin(&txq_stats->q_syncp);
4403 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4404 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4405 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4406 	if (set_ic)
4407 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4408 	u64_stats_update_end(&txq_stats->q_syncp);
4409 
4410 	if (priv->sarc_type)
4411 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4412 
4413 	skb_tx_timestamp(skb);
4414 
4415 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4416 		     priv->hwts_tx_en)) {
4417 		/* declare that device is doing timestamping */
4418 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4419 		stmmac_enable_tx_timestamp(priv, first);
4420 	}
4421 
4422 	/* Complete the first descriptor before granting the DMA */
4423 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4424 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4425 				   hdr / 4, (skb->len - proto_hdr_len));
4426 
4427 	/* If context desc is used to change MSS */
4428 	if (mss_desc) {
4429 		/* Make sure that first descriptor has been completely
4430 		 * written, including its own bit. This is because MSS is
4431 		 * actually before first descriptor, so we need to make
4432 		 * sure that MSS's own bit is the last thing written.
4433 		 */
4434 		dma_wmb();
4435 		stmmac_set_tx_owner(priv, mss_desc);
4436 	}
4437 
4438 	if (netif_msg_pktdata(priv)) {
4439 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4440 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4441 			tx_q->cur_tx, first, nfrags);
4442 		pr_info(">>> frame to be transmitted: ");
4443 		print_pkt(skb->data, skb_headlen(skb));
4444 	}
4445 
4446 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4447 
4448 	stmmac_flush_tx_descriptors(priv, queue);
4449 	stmmac_tx_timer_arm(priv, queue);
4450 
4451 	return NETDEV_TX_OK;
4452 
4453 dma_map_err:
4454 	dev_err(priv->device, "Tx dma map failed\n");
4455 	dev_kfree_skb(skb);
4456 	priv->xstats.tx_dropped++;
4457 	return NETDEV_TX_OK;
4458 }
4459 
4460 /**
4461  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4462  * @skb: socket buffer to check
4463  *
4464  * Check if a packet has an ethertype that will trigger the IP header checks
4465  * and IP/TCP checksum engine of the stmmac core.
4466  *
4467  * Return: true if the ethertype can trigger the checksum engine, false
4468  * otherwise
4469  */
4470 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4471 {
4472 	int depth = 0;
4473 	__be16 proto;
4474 
4475 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4476 				    &depth);
4477 
4478 	return (depth <= ETH_HLEN) &&
4479 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4480 }
4481 
4482 /**
4483  *  stmmac_xmit - Tx entry point of the driver
4484  *  @skb : the socket buffer
4485  *  @dev : device pointer
4486  *  Description : this is the tx entry point of the driver.
4487  *  It programs the chain or the ring and supports oversized frames
4488  *  and SG feature.
4489  */
4490 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4491 {
4492 	unsigned int first_entry, tx_packets, enh_desc;
4493 	struct stmmac_priv *priv = netdev_priv(dev);
4494 	unsigned int nopaged_len = skb_headlen(skb);
4495 	int i, csum_insertion = 0, is_jumbo = 0;
4496 	u32 queue = skb_get_queue_mapping(skb);
4497 	int nfrags = skb_shinfo(skb)->nr_frags;
4498 	int gso = skb_shinfo(skb)->gso_type;
4499 	struct stmmac_txq_stats *txq_stats;
4500 	struct dma_edesc *tbs_desc = NULL;
4501 	struct dma_desc *desc, *first;
4502 	struct stmmac_tx_queue *tx_q;
4503 	bool has_vlan, set_ic;
4504 	int entry, first_tx;
4505 	dma_addr_t des;
4506 
4507 	tx_q = &priv->dma_conf.tx_queue[queue];
4508 	txq_stats = &priv->xstats.txq_stats[queue];
4509 	first_tx = tx_q->cur_tx;
4510 
4511 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4512 		stmmac_stop_sw_lpi(priv);
4513 
4514 	/* Manage oversized TCP frames for GMAC4 device */
4515 	if (skb_is_gso(skb) && priv->tso) {
4516 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4517 			return stmmac_tso_xmit(skb, dev);
4518 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4519 			return stmmac_tso_xmit(skb, dev);
4520 	}
4521 
4522 	if (priv->est && priv->est->enable &&
4523 	    priv->est->max_sdu[queue] &&
4524 	    skb->len > priv->est->max_sdu[queue]) {
4525 		priv->xstats.max_sdu_txq_drop[queue]++;
4526 		goto max_sdu_err;
4527 	}
4528 
4529 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4530 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4531 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4532 								queue));
4533 			/* This is a hard error, log it. */
4534 			netdev_err(priv->dev,
4535 				   "%s: Tx Ring full when queue awake\n",
4536 				   __func__);
4537 		}
4538 		return NETDEV_TX_BUSY;
4539 	}
4540 
4541 	/* Check if VLAN can be inserted by HW */
4542 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4543 
4544 	entry = tx_q->cur_tx;
4545 	first_entry = entry;
4546 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4547 
4548 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4549 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4550 	 * queues. In that case, checksum offloading for those queues that don't
4551 	 * support tx coe needs to fall back to software checksum calculation.
4552 	 *
4553 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4554 	 * also have to be checksummed in software.
4555 	 */
4556 	if (csum_insertion &&
4557 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4558 	     !stmmac_has_ip_ethertype(skb))) {
4559 		if (unlikely(skb_checksum_help(skb)))
4560 			goto dma_map_err;
4561 		csum_insertion = !csum_insertion;
4562 	}
4563 
4564 	if (likely(priv->extend_desc))
4565 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4566 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4567 		desc = &tx_q->dma_entx[entry].basic;
4568 	else
4569 		desc = tx_q->dma_tx + entry;
4570 
4571 	first = desc;
4572 
4573 	if (has_vlan)
4574 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4575 
4576 	enh_desc = priv->plat->enh_desc;
4577 	/* To program the descriptors according to the size of the frame */
4578 	if (enh_desc)
4579 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4580 
4581 	if (unlikely(is_jumbo)) {
4582 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4583 		if (unlikely(entry < 0) && (entry != -EINVAL))
4584 			goto dma_map_err;
4585 	}
4586 
4587 	for (i = 0; i < nfrags; i++) {
4588 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4589 		int len = skb_frag_size(frag);
4590 		bool last_segment = (i == (nfrags - 1));
4591 
4592 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4593 		WARN_ON(tx_q->tx_skbuff[entry]);
4594 
4595 		if (likely(priv->extend_desc))
4596 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4597 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4598 			desc = &tx_q->dma_entx[entry].basic;
4599 		else
4600 			desc = tx_q->dma_tx + entry;
4601 
4602 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4603 				       DMA_TO_DEVICE);
4604 		if (dma_mapping_error(priv->device, des))
4605 			goto dma_map_err; /* should reuse desc w/o issues */
4606 
4607 		tx_q->tx_skbuff_dma[entry].buf = des;
4608 
4609 		stmmac_set_desc_addr(priv, desc, des);
4610 
4611 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4612 		tx_q->tx_skbuff_dma[entry].len = len;
4613 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4614 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4615 
4616 		/* Prepare the descriptor and set the own bit too */
4617 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4618 				priv->mode, 1, last_segment, skb->len);
4619 	}
4620 
4621 	/* Only the last descriptor gets to point to the skb. */
4622 	tx_q->tx_skbuff[entry] = skb;
4623 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4624 
4625 	/* According to the coalesce parameter the IC bit for the latest
4626 	 * segment is reset and the timer re-started to clean the tx status.
4627 	 * This approach takes care of the fragments: desc is the first
4628 	 * element in case of no SG.
4629 	 */
4630 	tx_packets = (entry + 1) - first_tx;
4631 	tx_q->tx_count_frames += tx_packets;
4632 
4633 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4634 		set_ic = true;
4635 	else if (!priv->tx_coal_frames[queue])
4636 		set_ic = false;
4637 	else if (tx_packets > priv->tx_coal_frames[queue])
4638 		set_ic = true;
4639 	else if ((tx_q->tx_count_frames %
4640 		  priv->tx_coal_frames[queue]) < tx_packets)
4641 		set_ic = true;
4642 	else
4643 		set_ic = false;
4644 
4645 	if (set_ic) {
4646 		if (likely(priv->extend_desc))
4647 			desc = &tx_q->dma_etx[entry].basic;
4648 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4649 			desc = &tx_q->dma_entx[entry].basic;
4650 		else
4651 			desc = &tx_q->dma_tx[entry];
4652 
4653 		tx_q->tx_count_frames = 0;
4654 		stmmac_set_tx_ic(priv, desc);
4655 	}
4656 
4657 	/* We've used all descriptors we need for this skb, however,
4658 	 * advance cur_tx so that it references a fresh descriptor.
4659 	 * ndo_start_xmit will fill this descriptor the next time it's
4660 	 * called and stmmac_tx_clean may clean up to this descriptor.
4661 	 */
4662 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4663 	tx_q->cur_tx = entry;
4664 
4665 	if (netif_msg_pktdata(priv)) {
4666 		netdev_dbg(priv->dev,
4667 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4668 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4669 			   entry, first, nfrags);
4670 
4671 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4672 		print_pkt(skb->data, skb->len);
4673 	}
4674 
4675 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4676 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4677 			  __func__);
4678 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4679 	}
4680 
4681 	u64_stats_update_begin(&txq_stats->q_syncp);
4682 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4683 	if (set_ic)
4684 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4685 	u64_stats_update_end(&txq_stats->q_syncp);
4686 
4687 	if (priv->sarc_type)
4688 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4689 
4690 	skb_tx_timestamp(skb);
4691 
4692 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4693 	 * problems because all the descriptors are actually ready to be
4694 	 * passed to the DMA engine.
4695 	 */
4696 	if (likely(!is_jumbo)) {
4697 		bool last_segment = (nfrags == 0);
4698 
4699 		des = dma_map_single(priv->device, skb->data,
4700 				     nopaged_len, DMA_TO_DEVICE);
4701 		if (dma_mapping_error(priv->device, des))
4702 			goto dma_map_err;
4703 
4704 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4705 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4706 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4707 
4708 		stmmac_set_desc_addr(priv, first, des);
4709 
4710 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4711 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4712 
4713 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4714 			     priv->hwts_tx_en)) {
4715 			/* declare that device is doing timestamping */
4716 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4717 			stmmac_enable_tx_timestamp(priv, first);
4718 		}
4719 
4720 		/* Prepare the first descriptor setting the OWN bit too */
4721 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4722 				csum_insertion, priv->mode, 0, last_segment,
4723 				skb->len);
4724 	}
4725 
4726 	if (tx_q->tbs & STMMAC_TBS_EN) {
4727 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4728 
4729 		tbs_desc = &tx_q->dma_entx[first_entry];
4730 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4731 	}
4732 
4733 	stmmac_set_tx_owner(priv, first);
4734 
4735 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4736 
4737 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4738 
4739 	stmmac_flush_tx_descriptors(priv, queue);
4740 	stmmac_tx_timer_arm(priv, queue);
4741 
4742 	return NETDEV_TX_OK;
4743 
4744 dma_map_err:
4745 	netdev_err(priv->dev, "Tx DMA map failed\n");
4746 max_sdu_err:
4747 	dev_kfree_skb(skb);
4748 	priv->xstats.tx_dropped++;
4749 	return NETDEV_TX_OK;
4750 }
4751 
4752 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4753 {
4754 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4755 	__be16 vlan_proto = veth->h_vlan_proto;
4756 	u16 vlanid;
4757 
4758 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4759 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4760 	    (vlan_proto == htons(ETH_P_8021AD) &&
4761 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4762 		/* pop the vlan tag */
4763 		vlanid = ntohs(veth->h_vlan_TCI);
4764 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4765 		skb_pull(skb, VLAN_HLEN);
4766 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4767 	}
4768 }
4769 
4770 /**
4771  * stmmac_rx_refill - refill the used RX buffers
4772  * @priv: driver private structure
4773  * @queue: RX queue index
4774  * Description : this is to reallocate the RX buffers for the reception
4775  * process that is based on zero-copy.
4776  */
4777 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4778 {
4779 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4780 	int dirty = stmmac_rx_dirty(priv, queue);
4781 	unsigned int entry = rx_q->dirty_rx;
4782 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4783 
4784 	if (priv->dma_cap.host_dma_width <= 32)
4785 		gfp |= GFP_DMA32;
4786 
4787 	while (dirty-- > 0) {
4788 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4789 		struct dma_desc *p;
4790 		bool use_rx_wd;
4791 
4792 		if (priv->extend_desc)
4793 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4794 		else
4795 			p = rx_q->dma_rx + entry;
4796 
4797 		if (!buf->page) {
4798 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4799 			if (!buf->page)
4800 				break;
4801 		}
4802 
4803 		if (priv->sph && !buf->sec_page) {
4804 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4805 			if (!buf->sec_page)
4806 				break;
4807 
4808 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4809 		}
4810 
4811 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4812 
4813 		stmmac_set_desc_addr(priv, p, buf->addr);
4814 		if (priv->sph)
4815 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4816 		else
4817 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4818 		stmmac_refill_desc3(priv, rx_q, p);
4819 
4820 		rx_q->rx_count_frames++;
4821 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4822 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4823 			rx_q->rx_count_frames = 0;
4824 
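		/* When RIWT coalescing is in use, rely on the RX watchdog
		 * instead of requesting an interrupt for every descriptor.
		 */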
4825 		use_rx_wd = !priv->rx_coal_frames[queue];
4826 		use_rx_wd |= rx_q->rx_count_frames > 0;
4827 		if (!priv->use_riwt)
4828 			use_rx_wd = false;
4829 
4830 		dma_wmb();
4831 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4832 
4833 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4834 	}
4835 	rx_q->dirty_rx = entry;
4836 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4837 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4838 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4839 }
4840 
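/* Return the number of bytes carried by the first buffer of a RX descriptor,
 * taking split header and last-descriptor status into account.
 */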
4841 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4842 				       struct dma_desc *p,
4843 				       int status, unsigned int len)
4844 {
4845 	unsigned int plen = 0, hlen = 0;
4846 	int coe = priv->hw->rx_csum;
4847 
4848 	/* Not first descriptor, buffer is always zero */
4849 	if (priv->sph && len)
4850 		return 0;
4851 
4852 	/* First descriptor, get split header length */
4853 	stmmac_get_rx_header_len(priv, p, &hlen);
4854 	if (priv->sph && hlen) {
4855 		priv->xstats.rx_split_hdr_pkt_n++;
4856 		return hlen;
4857 	}
4858 
4859 	/* First descriptor, not last descriptor and not split header */
4860 	if (status & rx_not_ls)
4861 		return priv->dma_conf.dma_buf_sz;
4862 
4863 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4864 
4865 	/* First descriptor and last descriptor and not split header */
4866 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4867 }
4868 
4869 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
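/* Return the number of bytes carried by the second (payload) buffer of a RX
 * descriptor when split header is in use.
 */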
4870 				       struct dma_desc *p,
4871 				       int status, unsigned int len)
4872 {
4873 	int coe = priv->hw->rx_csum;
4874 	unsigned int plen = 0;
4875 
4876 	/* Not split header, buffer is not available */
4877 	if (!priv->sph)
4878 		return 0;
4879 
4880 	/* Not last descriptor */
4881 	if (status & rx_not_ls)
4882 		return priv->dma_conf.dma_buf_sz;
4883 
4884 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4885 
4886 	/* Last descriptor */
4887 	return plen - len;
4888 }
4889 
4890 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
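/* Transmit one XDP frame on @queue. When @dma_map is true (ndo_xdp_xmit path)
 * the frame data is DMA-mapped here; otherwise (XDP_TX path) the buffer comes
 * from the RX page pool and is only synced for device.
 */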
4891 				struct xdp_frame *xdpf, bool dma_map)
4892 {
4893 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4894 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4895 	unsigned int entry = tx_q->cur_tx;
4896 	struct dma_desc *tx_desc;
4897 	dma_addr_t dma_addr;
4898 	bool set_ic;
4899 
4900 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4901 		return STMMAC_XDP_CONSUMED;
4902 
4903 	if (priv->est && priv->est->enable &&
4904 	    priv->est->max_sdu[queue] &&
4905 	    xdpf->len > priv->est->max_sdu[queue]) {
4906 		priv->xstats.max_sdu_txq_drop[queue]++;
4907 		return STMMAC_XDP_CONSUMED;
4908 	}
4909 
4910 	if (likely(priv->extend_desc))
4911 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4912 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4913 		tx_desc = &tx_q->dma_entx[entry].basic;
4914 	else
4915 		tx_desc = tx_q->dma_tx + entry;
4916 
4917 	if (dma_map) {
4918 		dma_addr = dma_map_single(priv->device, xdpf->data,
4919 					  xdpf->len, DMA_TO_DEVICE);
4920 		if (dma_mapping_error(priv->device, dma_addr))
4921 			return STMMAC_XDP_CONSUMED;
4922 
4923 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4924 	} else {
4925 		struct page *page = virt_to_page(xdpf->data);
4926 
4927 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4928 			   xdpf->headroom;
4929 		dma_sync_single_for_device(priv->device, dma_addr,
4930 					   xdpf->len, DMA_BIDIRECTIONAL);
4931 
4932 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4933 	}
4934 
4935 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4936 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4937 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4938 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4939 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4940 
4941 	tx_q->xdpf[entry] = xdpf;
4942 
4943 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4944 
4945 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4946 			       true, priv->mode, true, true,
4947 			       xdpf->len);
4948 
4949 	tx_q->tx_count_frames++;
4950 
4951 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4952 		set_ic = true;
4953 	else
4954 		set_ic = false;
4955 
4956 	if (set_ic) {
4957 		tx_q->tx_count_frames = 0;
4958 		stmmac_set_tx_ic(priv, tx_desc);
4959 		u64_stats_update_begin(&txq_stats->q_syncp);
4960 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4961 		u64_stats_update_end(&txq_stats->q_syncp);
4962 	}
4963 
4964 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4965 
4966 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4967 	tx_q->cur_tx = entry;
4968 
4969 	return STMMAC_XDP_TX;
4970 }
4971 
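/* Pick the XDP TX queue for the current CPU: effectively cpu modulo the
 * number of TX queues in use.
 */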
4972 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4973 				   int cpu)
4974 {
4975 	int index = cpu;
4976 
4977 	if (unlikely(index < 0))
4978 		index = 0;
4979 
4980 	while (index >= priv->plat->tx_queues_to_use)
4981 		index -= priv->plat->tx_queues_to_use;
4982 
4983 	return index;
4984 }
4985 
4986 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4987 				struct xdp_buff *xdp)
4988 {
4989 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4990 	int cpu = smp_processor_id();
4991 	struct netdev_queue *nq;
4992 	int queue;
4993 	int res;
4994 
4995 	if (unlikely(!xdpf))
4996 		return STMMAC_XDP_CONSUMED;
4997 
4998 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4999 	nq = netdev_get_tx_queue(priv->dev, queue);
5000 
5001 	__netif_tx_lock(nq, cpu);
5002 	/* Avoids TX time-out as we are sharing with slow path */
5003 	txq_trans_cond_update(nq);
5004 
5005 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5006 	if (res == STMMAC_XDP_TX)
5007 		stmmac_flush_tx_descriptors(priv, queue);
5008 
5009 	__netif_tx_unlock(nq);
5010 
5011 	return res;
5012 }
5013 
5014 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5015 				 struct bpf_prog *prog,
5016 				 struct xdp_buff *xdp)
5017 {
5018 	u32 act;
5019 	int res;
5020 
5021 	act = bpf_prog_run_xdp(prog, xdp);
5022 	switch (act) {
5023 	case XDP_PASS:
5024 		res = STMMAC_XDP_PASS;
5025 		break;
5026 	case XDP_TX:
5027 		res = stmmac_xdp_xmit_back(priv, xdp);
5028 		break;
5029 	case XDP_REDIRECT:
5030 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5031 			res = STMMAC_XDP_CONSUMED;
5032 		else
5033 			res = STMMAC_XDP_REDIRECT;
5034 		break;
5035 	default:
5036 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5037 		fallthrough;
5038 	case XDP_ABORTED:
5039 		trace_xdp_exception(priv->dev, prog, act);
5040 		fallthrough;
5041 	case XDP_DROP:
5042 		res = STMMAC_XDP_CONSUMED;
5043 		break;
5044 	}
5045 
5046 	return res;
5047 }
5048 
5049 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5050 					   struct xdp_buff *xdp)
5051 {
5052 	struct bpf_prog *prog;
5053 	int res;
5054 
5055 	prog = READ_ONCE(priv->xdp_prog);
5056 	if (!prog) {
5057 		res = STMMAC_XDP_PASS;
5058 		goto out;
5059 	}
5060 
5061 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5062 out:
5063 	return ERR_PTR(-res);
5064 }
5065 
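/* Finalize XDP work once per NAPI poll: arm the TX coalesce timer for XDP_TX
 * frames and flush redirect maps for XDP_REDIRECT.
 */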
5066 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5067 				   int xdp_status)
5068 {
5069 	int cpu = smp_processor_id();
5070 	int queue;
5071 
5072 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5073 
5074 	if (xdp_status & STMMAC_XDP_TX)
5075 		stmmac_tx_timer_arm(priv, queue);
5076 
5077 	if (xdp_status & STMMAC_XDP_REDIRECT)
5078 		xdp_do_flush();
5079 }
5080 
5081 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
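/* Copy the frame data and XDP metadata out of the XSK zero-copy buffer into a
 * freshly allocated skb.
 */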
5082 					       struct xdp_buff *xdp)
5083 {
5084 	unsigned int metasize = xdp->data - xdp->data_meta;
5085 	unsigned int datasize = xdp->data_end - xdp->data;
5086 	struct sk_buff *skb;
5087 
5088 	skb = napi_alloc_skb(&ch->rxtx_napi,
5089 			     xdp->data_end - xdp->data_hard_start);
5090 	if (unlikely(!skb))
5091 		return NULL;
5092 
5093 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5094 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5095 	if (metasize)
5096 		skb_metadata_set(skb, metasize);
5097 
5098 	return skb;
5099 }
5100 
5101 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5102 				   struct dma_desc *p, struct dma_desc *np,
5103 				   struct xdp_buff *xdp)
5104 {
5105 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5106 	struct stmmac_channel *ch = &priv->channel[queue];
5107 	unsigned int len = xdp->data_end - xdp->data;
5108 	enum pkt_hash_types hash_type;
5109 	int coe = priv->hw->rx_csum;
5110 	struct sk_buff *skb;
5111 	u32 hash;
5112 
5113 	skb = stmmac_construct_skb_zc(ch, xdp);
5114 	if (!skb) {
5115 		priv->xstats.rx_dropped++;
5116 		return;
5117 	}
5118 
5119 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5120 	if (priv->hw->hw_vlan_en)
5121 		/* MAC level stripping. */
5122 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5123 	else
5124 		/* Driver level stripping. */
5125 		stmmac_rx_vlan(priv->dev, skb);
5126 	skb->protocol = eth_type_trans(skb, priv->dev);
5127 
5128 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5129 		skb_checksum_none_assert(skb);
5130 	else
5131 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5132 
5133 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5134 		skb_set_hash(skb, hash, hash_type);
5135 
5136 	skb_record_rx_queue(skb, queue);
5137 	napi_gro_receive(&ch->rxtx_napi, skb);
5138 
5139 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5140 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5141 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5142 	u64_stats_update_end(&rxq_stats->napi_syncp);
5143 }
5144 
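/* Re-arm up to @budget RX descriptors with buffers taken from the XSK pool.
 * Returns false if the pool ran out of buffers before the budget was used up.
 */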
5145 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5146 {
5147 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5148 	unsigned int entry = rx_q->dirty_rx;
5149 	struct dma_desc *rx_desc = NULL;
5150 	bool ret = true;
5151 
5152 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5153 
5154 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5155 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5156 		dma_addr_t dma_addr;
5157 		bool use_rx_wd;
5158 
5159 		if (!buf->xdp) {
5160 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5161 			if (!buf->xdp) {
5162 				ret = false;
5163 				break;
5164 			}
5165 		}
5166 
5167 		if (priv->extend_desc)
5168 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5169 		else
5170 			rx_desc = rx_q->dma_rx + entry;
5171 
5172 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5173 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5174 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5175 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5176 
5177 		rx_q->rx_count_frames++;
5178 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5179 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5180 			rx_q->rx_count_frames = 0;
5181 
5182 		use_rx_wd = !priv->rx_coal_frames[queue];
5183 		use_rx_wd |= rx_q->rx_count_frames > 0;
5184 		if (!priv->use_riwt)
5185 			use_rx_wd = false;
5186 
5187 		dma_wmb();
5188 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5189 
5190 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5191 	}
5192 
5193 	if (rx_desc) {
5194 		rx_q->dirty_rx = entry;
5195 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5196 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5197 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5198 	}
5199 
5200 	return ret;
5201 }
5202 
5203 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5204 {
5205 	/* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
5206 	 * represents the incoming packet, whereas the cb field in the same
5207 	 * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
5208 	 * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
5209 	 */
5210 	return (struct stmmac_xdp_buff *)xdp;
5211 }
5212 
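/* Zero-copy (XSK) RX path: walk the RX ring, run the XDP program on each
 * completed frame and either drop, transmit, redirect or convert it to an
 * skb, refilling the ring from the XSK pool as buffers are consumed.
 */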
5213 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5214 {
5215 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5216 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5217 	unsigned int count = 0, error = 0, len = 0;
5218 	int dirty = stmmac_rx_dirty(priv, queue);
5219 	unsigned int next_entry = rx_q->cur_rx;
5220 	u32 rx_errors = 0, rx_dropped = 0;
5221 	unsigned int desc_size;
5222 	struct bpf_prog *prog;
5223 	bool failure = false;
5224 	int xdp_status = 0;
5225 	int status = 0;
5226 
5227 	if (netif_msg_rx_status(priv)) {
5228 		void *rx_head;
5229 
5230 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5231 		if (priv->extend_desc) {
5232 			rx_head = (void *)rx_q->dma_erx;
5233 			desc_size = sizeof(struct dma_extended_desc);
5234 		} else {
5235 			rx_head = (void *)rx_q->dma_rx;
5236 			desc_size = sizeof(struct dma_desc);
5237 		}
5238 
5239 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5240 				    rx_q->dma_rx_phy, desc_size);
5241 	}
5242 	while (count < limit) {
5243 		struct stmmac_rx_buffer *buf;
5244 		struct stmmac_xdp_buff *ctx;
5245 		unsigned int buf1_len = 0;
5246 		struct dma_desc *np, *p;
5247 		int entry;
5248 		int res;
5249 
5250 		if (!count && rx_q->state_saved) {
5251 			error = rx_q->state.error;
5252 			len = rx_q->state.len;
5253 		} else {
5254 			rx_q->state_saved = false;
5255 			error = 0;
5256 			len = 0;
5257 		}
5258 
5259 		if (count >= limit)
5260 			break;
5261 
5262 read_again:
5263 		buf1_len = 0;
5264 		entry = next_entry;
5265 		buf = &rx_q->buf_pool[entry];
5266 
5267 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5268 			failure = failure ||
5269 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5270 			dirty = 0;
5271 		}
5272 
5273 		if (priv->extend_desc)
5274 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5275 		else
5276 			p = rx_q->dma_rx + entry;
5277 
5278 		/* read the status of the incoming frame */
5279 		status = stmmac_rx_status(priv, &priv->xstats, p);
5280 		/* stop if still owned by the DMA, otherwise go ahead */
5281 		if (unlikely(status & dma_own))
5282 			break;
5283 
5284 		/* Prefetch the next RX descriptor */
5285 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5286 						priv->dma_conf.dma_rx_size);
5287 		next_entry = rx_q->cur_rx;
5288 
5289 		if (priv->extend_desc)
5290 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5291 		else
5292 			np = rx_q->dma_rx + next_entry;
5293 
5294 		prefetch(np);
5295 
5296 		/* Ensure a valid XSK buffer before proceeding */
5297 		if (!buf->xdp)
5298 			break;
5299 
5300 		if (priv->extend_desc)
5301 			stmmac_rx_extended_status(priv, &priv->xstats,
5302 						  rx_q->dma_erx + entry);
5303 		if (unlikely(status == discard_frame)) {
5304 			xsk_buff_free(buf->xdp);
5305 			buf->xdp = NULL;
5306 			dirty++;
5307 			error = 1;
5308 			if (!priv->hwts_rx_en)
5309 				rx_errors++;
5310 		}
5311 
5312 		if (unlikely(error && (status & rx_not_ls)))
5313 			goto read_again;
5314 		if (unlikely(error)) {
5315 			count++;
5316 			continue;
5317 		}
5318 
5319 		/* The XSK pool expects RX frames mapped 1:1 to XSK buffers */
5320 		if (likely(status & rx_not_ls)) {
5321 			xsk_buff_free(buf->xdp);
5322 			buf->xdp = NULL;
5323 			dirty++;
5324 			count++;
5325 			goto read_again;
5326 		}
5327 
5328 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5329 		ctx->priv = priv;
5330 		ctx->desc = p;
5331 		ctx->ndesc = np;
5332 
5333 		/* XDP ZC frames only support primary buffers for now */
5334 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5335 		len += buf1_len;
5336 
5337 		/* ACS is disabled; strip manually. */
5338 		if (likely(!(status & rx_not_ls))) {
5339 			buf1_len -= ETH_FCS_LEN;
5340 			len -= ETH_FCS_LEN;
5341 		}
5342 
5343 		/* RX buffer is good and fits into an XSK pool buffer */
5344 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5345 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5346 
5347 		prog = READ_ONCE(priv->xdp_prog);
5348 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5349 
5350 		switch (res) {
5351 		case STMMAC_XDP_PASS:
5352 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5353 			xsk_buff_free(buf->xdp);
5354 			break;
5355 		case STMMAC_XDP_CONSUMED:
5356 			xsk_buff_free(buf->xdp);
5357 			rx_dropped++;
5358 			break;
5359 		case STMMAC_XDP_TX:
5360 		case STMMAC_XDP_REDIRECT:
5361 			xdp_status |= res;
5362 			break;
5363 		}
5364 
5365 		buf->xdp = NULL;
5366 		dirty++;
5367 		count++;
5368 	}
5369 
5370 	if (status & rx_not_ls) {
5371 		rx_q->state_saved = true;
5372 		rx_q->state.error = error;
5373 		rx_q->state.len = len;
5374 	}
5375 
5376 	stmmac_finalize_xdp_rx(priv, xdp_status);
5377 
5378 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5379 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5380 	u64_stats_update_end(&rxq_stats->napi_syncp);
5381 
5382 	priv->xstats.rx_dropped += rx_dropped;
5383 	priv->xstats.rx_errors += rx_errors;
5384 
5385 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5386 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5387 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5388 		else
5389 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5390 
5391 		return (int)count;
5392 	}
5393 
5394 	return failure ? limit : (int)count;
5395 }
5396 
5397 /**
5398  * stmmac_rx - manage the receive process
5399  * @priv: driver private structure
5400  * @limit: napi budget
5401  * @queue: RX queue index.
5402  * Description: this is the function called by the napi poll method.
5403  * It gets all the frames inside the ring.
5404  */
5405 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5406 {
5407 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5408 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5409 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5410 	struct stmmac_channel *ch = &priv->channel[queue];
5411 	unsigned int count = 0, error = 0, len = 0;
5412 	int status = 0, coe = priv->hw->rx_csum;
5413 	unsigned int next_entry = rx_q->cur_rx;
5414 	enum dma_data_direction dma_dir;
5415 	unsigned int desc_size;
5416 	struct sk_buff *skb = NULL;
5417 	struct stmmac_xdp_buff ctx;
5418 	int xdp_status = 0;
5419 	int buf_sz;
5420 
5421 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5422 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5423 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5424 
5425 	if (netif_msg_rx_status(priv)) {
5426 		void *rx_head;
5427 
5428 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5429 		if (priv->extend_desc) {
5430 			rx_head = (void *)rx_q->dma_erx;
5431 			desc_size = sizeof(struct dma_extended_desc);
5432 		} else {
5433 			rx_head = (void *)rx_q->dma_rx;
5434 			desc_size = sizeof(struct dma_desc);
5435 		}
5436 
5437 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5438 				    rx_q->dma_rx_phy, desc_size);
5439 	}
5440 	while (count < limit) {
5441 		unsigned int buf1_len = 0, buf2_len = 0;
5442 		enum pkt_hash_types hash_type;
5443 		struct stmmac_rx_buffer *buf;
5444 		struct dma_desc *np, *p;
5445 		int entry;
5446 		u32 hash;
5447 
5448 		if (!count && rx_q->state_saved) {
5449 			skb = rx_q->state.skb;
5450 			error = rx_q->state.error;
5451 			len = rx_q->state.len;
5452 		} else {
5453 			rx_q->state_saved = false;
5454 			skb = NULL;
5455 			error = 0;
5456 			len = 0;
5457 		}
5458 
5459 read_again:
5460 		if (count >= limit)
5461 			break;
5462 
5463 		buf1_len = 0;
5464 		buf2_len = 0;
5465 		entry = next_entry;
5466 		buf = &rx_q->buf_pool[entry];
5467 
5468 		if (priv->extend_desc)
5469 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5470 		else
5471 			p = rx_q->dma_rx + entry;
5472 
5473 		/* read the status of the incoming frame */
5474 		status = stmmac_rx_status(priv, &priv->xstats, p);
5475 		/* stop if still owned by the DMA, otherwise go ahead */
5476 		if (unlikely(status & dma_own))
5477 			break;
5478 
5479 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5480 						priv->dma_conf.dma_rx_size);
5481 		next_entry = rx_q->cur_rx;
5482 
5483 		if (priv->extend_desc)
5484 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5485 		else
5486 			np = rx_q->dma_rx + next_entry;
5487 
5488 		prefetch(np);
5489 
5490 		if (priv->extend_desc)
5491 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5492 		if (unlikely(status == discard_frame)) {
5493 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5494 			buf->page = NULL;
5495 			error = 1;
5496 			if (!priv->hwts_rx_en)
5497 				rx_errors++;
5498 		}
5499 
5500 		if (unlikely(error && (status & rx_not_ls)))
5501 			goto read_again;
5502 		if (unlikely(error)) {
5503 			dev_kfree_skb(skb);
5504 			skb = NULL;
5505 			count++;
5506 			continue;
5507 		}
5508 
5509 		/* Buffer is good. Go on. */
5510 
5511 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5512 		len += buf1_len;
5513 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5514 		len += buf2_len;
5515 
5516 		/* ACS is disabled; strip manually. */
5517 		if (likely(!(status & rx_not_ls))) {
5518 			if (buf2_len) {
5519 				buf2_len -= ETH_FCS_LEN;
5520 				len -= ETH_FCS_LEN;
5521 			} else if (buf1_len) {
5522 				buf1_len -= ETH_FCS_LEN;
5523 				len -= ETH_FCS_LEN;
5524 			}
5525 		}
5526 
5527 		if (!skb) {
5528 			unsigned int pre_len, sync_len;
5529 
5530 			dma_sync_single_for_cpu(priv->device, buf->addr,
5531 						buf1_len, dma_dir);
5532 			net_prefetch(page_address(buf->page) +
5533 				     buf->page_offset);
5534 
5535 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5536 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5537 					 buf->page_offset, buf1_len, true);
5538 
5539 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5540 				  buf->page_offset;
5541 
5542 			ctx.priv = priv;
5543 			ctx.desc = p;
5544 			ctx.ndesc = np;
5545 
5546 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5547 			/* Because of xdp_adjust_tail: the DMA sync for_device
5548 			 * must cover the max length the CPU touched
5549 			 */
5550 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5551 				   buf->page_offset;
5552 			sync_len = max(sync_len, pre_len);
5553 
5554 			/* For any verdict other than XDP_PASS */
5555 			if (IS_ERR(skb)) {
5556 				unsigned int xdp_res = -PTR_ERR(skb);
5557 
5558 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5559 					page_pool_put_page(rx_q->page_pool,
5560 							   virt_to_head_page(ctx.xdp.data),
5561 							   sync_len, true);
5562 					buf->page = NULL;
5563 					rx_dropped++;
5564 
5565 					/* Clear skb: it holds the XDP
5566 					 * verdict as an error pointer.
5567 					 */
5568 					skb = NULL;
5569 
5570 					if (unlikely((status & rx_not_ls)))
5571 						goto read_again;
5572 
5573 					count++;
5574 					continue;
5575 				} else if (xdp_res & (STMMAC_XDP_TX |
5576 						      STMMAC_XDP_REDIRECT)) {
5577 					xdp_status |= xdp_res;
5578 					buf->page = NULL;
5579 					skb = NULL;
5580 					count++;
5581 					continue;
5582 				}
5583 			}
5584 		}
5585 
5586 		if (!skb) {
5587 			unsigned int head_pad_len;
5588 
5589 			/* XDP program may expand or reduce tail */
5590 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5591 
5592 			skb = napi_build_skb(page_address(buf->page),
5593 					     rx_q->napi_skb_frag_size);
5594 			if (!skb) {
5595 				page_pool_recycle_direct(rx_q->page_pool,
5596 							 buf->page);
5597 				rx_dropped++;
5598 				count++;
5599 				goto drain_data;
5600 			}
5601 
5602 			/* XDP program may adjust header */
5603 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5604 			skb_reserve(skb, head_pad_len);
5605 			skb_put(skb, buf1_len);
5606 			skb_mark_for_recycle(skb);
5607 			buf->page = NULL;
5608 		} else if (buf1_len) {
5609 			dma_sync_single_for_cpu(priv->device, buf->addr,
5610 						buf1_len, dma_dir);
5611 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5612 					buf->page, buf->page_offset, buf1_len,
5613 					priv->dma_conf.dma_buf_sz);
5614 
5615 			/* Data payload appended into SKB */
5616 			skb_mark_for_recycle(skb);
5617 			buf->page = NULL;
5618 		}
5619 
5620 		if (buf2_len) {
5621 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5622 						buf2_len, dma_dir);
5623 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5624 					buf->sec_page, 0, buf2_len,
5625 					priv->dma_conf.dma_buf_sz);
5626 
5627 			/* Data payload appended into SKB */
5628 			skb_mark_for_recycle(skb);
5629 			buf->sec_page = NULL;
5630 		}
5631 
5632 drain_data:
5633 		if (likely(status & rx_not_ls))
5634 			goto read_again;
5635 		if (!skb)
5636 			continue;
5637 
5638 		/* Got entire packet into SKB. Finish it. */
5639 
5640 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5641 
5642 		if (priv->hw->hw_vlan_en)
5643 			/* MAC level stripping. */
5644 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5645 		else
5646 			/* Driver level stripping. */
5647 			stmmac_rx_vlan(priv->dev, skb);
5648 
5649 		skb->protocol = eth_type_trans(skb, priv->dev);
5650 
5651 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5652 			skb_checksum_none_assert(skb);
5653 		else
5654 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5655 
5656 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5657 			skb_set_hash(skb, hash, hash_type);
5658 
5659 		skb_record_rx_queue(skb, queue);
5660 		napi_gro_receive(&ch->rx_napi, skb);
5661 		skb = NULL;
5662 
5663 		rx_packets++;
5664 		rx_bytes += len;
5665 		count++;
5666 	}
5667 
5668 	if (status & rx_not_ls || skb) {
5669 		rx_q->state_saved = true;
5670 		rx_q->state.skb = skb;
5671 		rx_q->state.error = error;
5672 		rx_q->state.len = len;
5673 	}
5674 
5675 	stmmac_finalize_xdp_rx(priv, xdp_status);
5676 
5677 	stmmac_rx_refill(priv, queue);
5678 
5679 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5680 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5681 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5682 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5683 	u64_stats_update_end(&rxq_stats->napi_syncp);
5684 
5685 	priv->xstats.rx_dropped += rx_dropped;
5686 	priv->xstats.rx_errors += rx_errors;
5687 
5688 	return count;
5689 }
5690 
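/* NAPI poll handler for the RX-only path: process up to @budget received
 * frames and re-enable the RX DMA interrupt once all work is done.
 */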
5691 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5692 {
5693 	struct stmmac_channel *ch =
5694 		container_of(napi, struct stmmac_channel, rx_napi);
5695 	struct stmmac_priv *priv = ch->priv_data;
5696 	struct stmmac_rxq_stats *rxq_stats;
5697 	u32 chan = ch->index;
5698 	int work_done;
5699 
5700 	rxq_stats = &priv->xstats.rxq_stats[chan];
5701 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5702 	u64_stats_inc(&rxq_stats->napi.poll);
5703 	u64_stats_update_end(&rxq_stats->napi_syncp);
5704 
5705 	work_done = stmmac_rx(priv, budget, chan);
5706 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5707 		unsigned long flags;
5708 
5709 		spin_lock_irqsave(&ch->lock, flags);
5710 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5711 		spin_unlock_irqrestore(&ch->lock, flags);
5712 	}
5713 
5714 	return work_done;
5715 }
5716 
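/* NAPI poll handler for the TX-only path: clean completed TX descriptors,
 * re-enable the TX DMA interrupt when done and re-arm the TX timer if
 * packets are still pending.
 */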
5717 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5718 {
5719 	struct stmmac_channel *ch =
5720 		container_of(napi, struct stmmac_channel, tx_napi);
5721 	struct stmmac_priv *priv = ch->priv_data;
5722 	struct stmmac_txq_stats *txq_stats;
5723 	bool pending_packets = false;
5724 	u32 chan = ch->index;
5725 	int work_done;
5726 
5727 	txq_stats = &priv->xstats.txq_stats[chan];
5728 	u64_stats_update_begin(&txq_stats->napi_syncp);
5729 	u64_stats_inc(&txq_stats->napi.poll);
5730 	u64_stats_update_end(&txq_stats->napi_syncp);
5731 
5732 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5733 	work_done = min(work_done, budget);
5734 
5735 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5736 		unsigned long flags;
5737 
5738 		spin_lock_irqsave(&ch->lock, flags);
5739 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5740 		spin_unlock_irqrestore(&ch->lock, flags);
5741 	}
5742 
5743 	/* TX still has packets to handle; check if we need to arm the tx timer */
5744 	if (pending_packets)
5745 		stmmac_tx_timer_arm(priv, chan);
5746 
5747 	return work_done;
5748 }
5749 
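/* Combined RX/TX NAPI poll handler used by the zero-copy (XSK) path: clean
 * the TX ring, run stmmac_rx_zc() and re-enable both DMA interrupts only
 * once neither direction has work left.
 */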
5750 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5751 {
5752 	struct stmmac_channel *ch =
5753 		container_of(napi, struct stmmac_channel, rxtx_napi);
5754 	struct stmmac_priv *priv = ch->priv_data;
5755 	bool tx_pending_packets = false;
5756 	int rx_done, tx_done, rxtx_done;
5757 	struct stmmac_rxq_stats *rxq_stats;
5758 	struct stmmac_txq_stats *txq_stats;
5759 	u32 chan = ch->index;
5760 
5761 	rxq_stats = &priv->xstats.rxq_stats[chan];
5762 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5763 	u64_stats_inc(&rxq_stats->napi.poll);
5764 	u64_stats_update_end(&rxq_stats->napi_syncp);
5765 
5766 	txq_stats = &priv->xstats.txq_stats[chan];
5767 	u64_stats_update_begin(&txq_stats->napi_syncp);
5768 	u64_stats_inc(&txq_stats->napi.poll);
5769 	u64_stats_update_end(&txq_stats->napi_syncp);
5770 
5771 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5772 	tx_done = min(tx_done, budget);
5773 
5774 	rx_done = stmmac_rx_zc(priv, budget, chan);
5775 
5776 	rxtx_done = max(tx_done, rx_done);
5777 
5778 	/* If either TX or RX work is not complete, return budget
5779 	 * and keep polling
5780 	 */
5781 	if (rxtx_done >= budget)
5782 		return budget;
5783 
5784 	/* all work done, exit the polling mode */
5785 	if (napi_complete_done(napi, rxtx_done)) {
5786 		unsigned long flags;
5787 
5788 		spin_lock_irqsave(&ch->lock, flags);
5789 		/* Both RX and TX work are complete,
5790 		 * so enable both RX & TX IRQs.
5791 		 */
5792 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5793 		spin_unlock_irqrestore(&ch->lock, flags);
5794 	}
5795 
5796 	/* TX still has packets to handle; check if we need to arm the tx timer */
5797 	if (tx_pending_packets)
5798 		stmmac_tx_timer_arm(priv, chan);
5799 
5800 	return min(rxtx_done, budget - 1);
5801 }
5802 
5803 /**
5804  *  stmmac_tx_timeout
5805  *  @dev : Pointer to net device structure
5806  *  @txqueue: the index of the hanging transmit queue
5807  *  Description: this function is called when a packet transmission fails to
5808  *   complete within a reasonable time. The driver will mark the error in the
5809  *   netdev structure and arrange for the device to be reset to a sane state
5810  *   in order to transmit a new packet.
5811  */
5812 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5813 {
5814 	struct stmmac_priv *priv = netdev_priv(dev);
5815 
5816 	stmmac_global_err(priv);
5817 }
5818 
5819 /**
5820  *  stmmac_set_rx_mode - entry point for multicast addressing
5821  *  @dev : pointer to the device structure
5822  *  Description:
5823  *  This function is a driver entry point which gets called by the kernel
5824  *  whenever multicast addresses must be enabled/disabled.
5825  *  Return value:
5826  *  void.
5827  */
5828 static void stmmac_set_rx_mode(struct net_device *dev)
5829 {
5830 	struct stmmac_priv *priv = netdev_priv(dev);
5831 
5832 	stmmac_set_filter(priv, priv->hw, dev);
5833 }
5834 
5835 /**
5836  *  stmmac_change_mtu - entry point to change MTU size for the device.
5837  *  @dev : device pointer.
5838  *  @new_mtu : the new MTU size for the device.
5839  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5840  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5841  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5842  *  Return value:
5843  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5844  *  file on failure.
5845  */
5846 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5847 {
5848 	struct stmmac_priv *priv = netdev_priv(dev);
5849 	int txfifosz = priv->plat->tx_fifo_size;
5850 	struct stmmac_dma_conf *dma_conf;
5851 	const int mtu = new_mtu;
5852 	int ret;
5853 
5854 	if (txfifosz == 0)
5855 		txfifosz = priv->dma_cap.tx_fifo_size;
5856 
5857 	txfifosz /= priv->plat->tx_queues_to_use;
5858 
5859 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5860 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5861 		return -EINVAL;
5862 	}
5863 
5864 	new_mtu = STMMAC_ALIGN(new_mtu);
5865 
5866 	/* If true, the FIFO is too small or the MTU is too large */
5867 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5868 		return -EINVAL;
5869 
5870 	if (netif_running(dev)) {
5871 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5872 		/* Try to allocate the new DMA conf with the new mtu */
5873 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5874 		if (IS_ERR(dma_conf)) {
5875 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5876 				   mtu);
5877 			return PTR_ERR(dma_conf);
5878 		}
5879 
5880 		stmmac_release(dev);
5881 
5882 		ret = __stmmac_open(dev, dma_conf);
5883 		if (ret) {
5884 			free_dma_desc_resources(priv, dma_conf);
5885 			kfree(dma_conf);
5886 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5887 			return ret;
5888 		}
5889 
5890 		kfree(dma_conf);
5891 
5892 		stmmac_set_rx_mode(dev);
5893 	}
5894 
5895 	WRITE_ONCE(dev->mtu, mtu);
5896 	netdev_update_features(dev);
5897 
5898 	return 0;
5899 }
5900 
5901 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5902 					     netdev_features_t features)
5903 {
5904 	struct stmmac_priv *priv = netdev_priv(dev);
5905 
5906 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5907 		features &= ~NETIF_F_RXCSUM;
5908 
5909 	if (!priv->plat->tx_coe)
5910 		features &= ~NETIF_F_CSUM_MASK;
5911 
5912 	/* Some GMAC devices have buggy Jumbo frame support that
5913 	 * requires the Tx COE to be disabled for oversized frames
5914 	 * (due to limited buffer sizes). In this case we disable
5915 	 * the TX csum insertion in the TDES and do not use SF.
5916 	 */
5917 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5918 		features &= ~NETIF_F_CSUM_MASK;
5919 
5920 	/* Enable or disable TSO as requested via ethtool */
5921 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5922 		if (features & NETIF_F_TSO)
5923 			priv->tso = true;
5924 		else
5925 			priv->tso = false;
5926 	}
5927 
5928 	return features;
5929 }
5930 
5931 static int stmmac_set_features(struct net_device *netdev,
5932 			       netdev_features_t features)
5933 {
5934 	struct stmmac_priv *priv = netdev_priv(netdev);
5935 
5936 	/* Keep the COE type if checksum offload is supported */
5937 	if (features & NETIF_F_RXCSUM)
5938 		priv->hw->rx_csum = priv->plat->rx_coe;
5939 	else
5940 		priv->hw->rx_csum = 0;
5941 	/* No check needed because rx_coe has already been set and will be
5942 	 * fixed up if there is an issue.
5943 	 */
5944 	stmmac_rx_ipc(priv, priv->hw);
5945 
5946 	if (priv->sph_cap) {
5947 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5948 		u32 chan;
5949 
5950 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5951 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5952 	}
5953 
5954 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5955 		priv->hw->hw_vlan_en = true;
5956 	else
5957 		priv->hw->hw_vlan_en = false;
5958 
5959 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5960 
5961 	return 0;
5962 }
5963 
5964 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5965 {
5966 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5967 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5968 	u32 queues_count;
5969 	u32 queue;
5970 	bool xmac;
5971 
5972 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5973 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5974 
5975 	if (priv->irq_wake)
5976 		pm_wakeup_event(priv->device, 0);
5977 
5978 	if (priv->dma_cap.estsel)
5979 		stmmac_est_irq_status(priv, priv, priv->dev,
5980 				      &priv->xstats, tx_cnt);
5981 
5982 	if (stmmac_fpe_supported(priv))
5983 		stmmac_fpe_irq_status(priv);
5984 
5985 	/* To handle the GMAC's own interrupts */
5986 	if ((priv->plat->has_gmac) || xmac) {
5987 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5988 
5989 		if (unlikely(status)) {
5990 			/* For LPI we need to save the tx status */
5991 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5992 				priv->tx_path_in_lpi_mode = true;
5993 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5994 				priv->tx_path_in_lpi_mode = false;
5995 		}
5996 
5997 		for (queue = 0; queue < queues_count; queue++)
5998 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
5999 
6000 		/* PCS link status */
6001 		if (priv->hw->pcs &&
6002 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6003 			if (priv->xstats.pcs_link)
6004 				netif_carrier_on(priv->dev);
6005 			else
6006 				netif_carrier_off(priv->dev);
6007 		}
6008 
6009 		stmmac_timestamp_interrupt(priv, priv);
6010 	}
6011 }
6012 
6013 /**
6014  *  stmmac_interrupt - main ISR
6015  *  @irq: interrupt number.
6016  *  @dev_id: to pass the net device pointer.
6017  *  Description: this is the main driver interrupt service routine.
6018  *  It can call:
6019  *  o DMA service routine (to manage incoming frame reception and transmission
6020  *    status)
6021  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6022  *    interrupts.
6023  */
6024 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6025 {
6026 	struct net_device *dev = (struct net_device *)dev_id;
6027 	struct stmmac_priv *priv = netdev_priv(dev);
6028 
6029 	/* Check if adapter is up */
6030 	if (test_bit(STMMAC_DOWN, &priv->state))
6031 		return IRQ_HANDLED;
6032 
6033 	/* Check ASP error if it isn't delivered via an individual IRQ */
6034 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6035 		return IRQ_HANDLED;
6036 
6037 	/* To handle Common interrupts */
6038 	stmmac_common_interrupt(priv);
6039 
6040 	/* To handle DMA interrupts */
6041 	stmmac_dma_interrupt(priv);
6042 
6043 	return IRQ_HANDLED;
6044 }
6045 
6046 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6047 {
6048 	struct net_device *dev = (struct net_device *)dev_id;
6049 	struct stmmac_priv *priv = netdev_priv(dev);
6050 
6051 	/* Check if adapter is up */
6052 	if (test_bit(STMMAC_DOWN, &priv->state))
6053 		return IRQ_HANDLED;
6054 
6055 	/* To handle Common interrupts */
6056 	stmmac_common_interrupt(priv);
6057 
6058 	return IRQ_HANDLED;
6059 }
6060 
6061 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6062 {
6063 	struct net_device *dev = (struct net_device *)dev_id;
6064 	struct stmmac_priv *priv = netdev_priv(dev);
6065 
6066 	/* Check if adapter is up */
6067 	if (test_bit(STMMAC_DOWN, &priv->state))
6068 		return IRQ_HANDLED;
6069 
6070 	/* Check if a fatal error happened */
6071 	stmmac_safety_feat_interrupt(priv);
6072 
6073 	return IRQ_HANDLED;
6074 }
6075 
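/* Per-queue TX interrupt handler (MSI vector): check the channel's DMA
 * status and recover from hard TX errors by bumping the DMA threshold or
 * restarting the TX path.
 */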
6076 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6077 {
6078 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6079 	struct stmmac_dma_conf *dma_conf;
6080 	int chan = tx_q->queue_index;
6081 	struct stmmac_priv *priv;
6082 	int status;
6083 
6084 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6085 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6086 
6087 	/* Check if adapter is up */
6088 	if (test_bit(STMMAC_DOWN, &priv->state))
6089 		return IRQ_HANDLED;
6090 
6091 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6092 
6093 	if (unlikely(status & tx_hard_error_bump_tc)) {
6094 		/* Try to bump up the dma threshold on this failure */
6095 		stmmac_bump_dma_threshold(priv, chan);
6096 	} else if (unlikely(status == tx_hard_error)) {
6097 		stmmac_tx_err(priv, chan);
6098 	}
6099 
6100 	return IRQ_HANDLED;
6101 }
6102 
6103 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6104 {
6105 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6106 	struct stmmac_dma_conf *dma_conf;
6107 	int chan = rx_q->queue_index;
6108 	struct stmmac_priv *priv;
6109 
6110 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6111 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6112 
6113 	/* Check if adapter is up */
6114 	if (test_bit(STMMAC_DOWN, &priv->state))
6115 		return IRQ_HANDLED;
6116 
6117 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6118 
6119 	return IRQ_HANDLED;
6120 }
6121 
6122 /**
6123  *  stmmac_ioctl - Entry point for the Ioctl
6124  *  @dev: Device pointer.
6125  *  @rq: An IOCTL-specific structure that can contain a pointer to
6126  *  a proprietary structure used to pass information to the driver.
6127  *  @cmd: IOCTL command
6128  *  Description:
6129  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6130  */
6131 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6132 {
6133 	struct stmmac_priv *priv = netdev_priv(dev);
6134 	int ret = -EOPNOTSUPP;
6135 
6136 	if (!netif_running(dev))
6137 		return -EINVAL;
6138 
6139 	switch (cmd) {
6140 	case SIOCGMIIPHY:
6141 	case SIOCGMIIREG:
6142 	case SIOCSMIIREG:
6143 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6144 		break;
6145 	case SIOCSHWTSTAMP:
6146 		ret = stmmac_hwtstamp_set(dev, rq);
6147 		break;
6148 	case SIOCGHWTSTAMP:
6149 		ret = stmmac_hwtstamp_get(dev, rq);
6150 		break;
6151 	default:
6152 		break;
6153 	}
6154 
6155 	return ret;
6156 }
6157 
6158 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6159 				    void *cb_priv)
6160 {
6161 	struct stmmac_priv *priv = cb_priv;
6162 	int ret = -EOPNOTSUPP;
6163 
6164 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6165 		return ret;
6166 
6167 	__stmmac_disable_all_queues(priv);
6168 
6169 	switch (type) {
6170 	case TC_SETUP_CLSU32:
6171 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6172 		break;
6173 	case TC_SETUP_CLSFLOWER:
6174 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6175 		break;
6176 	default:
6177 		break;
6178 	}
6179 
6180 	stmmac_enable_all_queues(priv);
6181 	return ret;
6182 }
6183 
6184 static LIST_HEAD(stmmac_block_cb_list);
6185 
6186 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6187 			   void *type_data)
6188 {
6189 	struct stmmac_priv *priv = netdev_priv(ndev);
6190 
6191 	switch (type) {
6192 	case TC_QUERY_CAPS:
6193 		return stmmac_tc_query_caps(priv, priv, type_data);
6194 	case TC_SETUP_QDISC_MQPRIO:
6195 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6196 	case TC_SETUP_BLOCK:
6197 		return flow_block_cb_setup_simple(type_data,
6198 						  &stmmac_block_cb_list,
6199 						  stmmac_setup_tc_block_cb,
6200 						  priv, priv, true);
6201 	case TC_SETUP_QDISC_CBS:
6202 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6203 	case TC_SETUP_QDISC_TAPRIO:
6204 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6205 	case TC_SETUP_QDISC_ETF:
6206 		return stmmac_tc_setup_etf(priv, priv, type_data);
6207 	default:
6208 		return -EOPNOTSUPP;
6209 	}
6210 }
6211 
6212 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6213 			       struct net_device *sb_dev)
6214 {
6215 	int gso = skb_shinfo(skb)->gso_type;
6216 
6217 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6218 		/*
6219 		 * There is no way to determine the number of TSO/USO
6220 		 * capable queues. Always use queue 0,
6221 		 * because if TSO/USO is supported then at least this
6222 		 * one will be capable.
6223 		 */
6224 		return 0;
6225 	}
6226 
6227 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6228 }
6229 
6230 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6231 {
6232 	struct stmmac_priv *priv = netdev_priv(ndev);
6233 	int ret = 0;
6234 
6235 	ret = pm_runtime_resume_and_get(priv->device);
6236 	if (ret < 0)
6237 		return ret;
6238 
6239 	ret = eth_mac_addr(ndev, addr);
6240 	if (ret)
6241 		goto set_mac_error;
6242 
6243 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6244 
6245 set_mac_error:
6246 	pm_runtime_put(priv->device);
6247 
6248 	return ret;
6249 }
6250 
6251 #ifdef CONFIG_DEBUG_FS
6252 static struct dentry *stmmac_fs_dir;
6253 
6254 static void sysfs_display_ring(void *head, int size, int extend_desc,
6255 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6256 {
6257 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6258 	struct dma_desc *p = (struct dma_desc *)head;
6259 	unsigned int desc_size;
6260 	dma_addr_t dma_addr;
6261 	int i;
6262 
6263 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6264 	for (i = 0; i < size; i++) {
6265 		dma_addr = dma_phy_addr + i * desc_size;
6266 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6267 				i, &dma_addr,
6268 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6269 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6270 		if (extend_desc)
6271 			p = &(++ep)->basic;
6272 		else
6273 			p++;
6274 	}
6275 }
6276 
6277 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6278 {
6279 	struct net_device *dev = seq->private;
6280 	struct stmmac_priv *priv = netdev_priv(dev);
6281 	u32 rx_count = priv->plat->rx_queues_to_use;
6282 	u32 tx_count = priv->plat->tx_queues_to_use;
6283 	u32 queue;
6284 
6285 	if ((dev->flags & IFF_UP) == 0)
6286 		return 0;
6287 
6288 	for (queue = 0; queue < rx_count; queue++) {
6289 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6290 
6291 		seq_printf(seq, "RX Queue %d:\n", queue);
6292 
6293 		if (priv->extend_desc) {
6294 			seq_printf(seq, "Extended descriptor ring:\n");
6295 			sysfs_display_ring((void *)rx_q->dma_erx,
6296 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6297 		} else {
6298 			seq_printf(seq, "Descriptor ring:\n");
6299 			sysfs_display_ring((void *)rx_q->dma_rx,
6300 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6301 		}
6302 	}
6303 
6304 	for (queue = 0; queue < tx_count; queue++) {
6305 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6306 
6307 		seq_printf(seq, "TX Queue %d:\n", queue);
6308 
6309 		if (priv->extend_desc) {
6310 			seq_printf(seq, "Extended descriptor ring:\n");
6311 			sysfs_display_ring((void *)tx_q->dma_etx,
6312 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6313 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6314 			seq_printf(seq, "Descriptor ring:\n");
6315 			sysfs_display_ring((void *)tx_q->dma_tx,
6316 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6317 		}
6318 	}
6319 
6320 	return 0;
6321 }
6322 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6323 
6324 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6325 {
6326 	static const char * const dwxgmac_timestamp_source[] = {
6327 		"None",
6328 		"Internal",
6329 		"External",
6330 		"Both",
6331 	};
6332 	static const char * const dwxgmac_safety_feature_desc[] = {
6333 		"No",
6334 		"All Safety Features with ECC and Parity",
6335 		"All Safety Features without ECC or Parity",
6336 		"All Safety Features with Parity Only",
6337 		"ECC Only",
6338 		"UNDEFINED",
6339 		"UNDEFINED",
6340 		"UNDEFINED",
6341 	};
6342 	struct net_device *dev = seq->private;
6343 	struct stmmac_priv *priv = netdev_priv(dev);
6344 
6345 	if (!priv->hw_cap_support) {
6346 		seq_printf(seq, "DMA HW features not supported\n");
6347 		return 0;
6348 	}
6349 
6350 	seq_printf(seq, "==============================\n");
6351 	seq_printf(seq, "\tDMA HW features\n");
6352 	seq_printf(seq, "==============================\n");
6353 
6354 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6355 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6356 	seq_printf(seq, "\t1000 Mbps: %s\n",
6357 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6358 	seq_printf(seq, "\tHalf duplex: %s\n",
6359 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6360 	if (priv->plat->has_xgmac) {
6361 		seq_printf(seq,
6362 			   "\tNumber of Additional MAC address registers: %d\n",
6363 			   priv->dma_cap.multi_addr);
6364 	} else {
6365 		seq_printf(seq, "\tHash Filter: %s\n",
6366 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6367 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6368 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6369 	}
6370 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6371 		   (priv->dma_cap.pcs) ? "Y" : "N");
6372 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6373 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6374 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6375 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6376 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6377 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6378 	seq_printf(seq, "\tRMON module: %s\n",
6379 		   (priv->dma_cap.rmon) ? "Y" : "N");
6380 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6381 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6382 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6383 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6384 	if (priv->plat->has_xgmac)
6385 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6386 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6387 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6388 		   (priv->dma_cap.eee) ? "Y" : "N");
6389 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6390 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6391 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6392 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6393 	    priv->plat->has_xgmac) {
6394 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6395 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6396 	} else {
6397 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6398 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6399 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6400 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6401 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6402 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6403 	}
6404 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6405 		   priv->dma_cap.number_rx_channel);
6406 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6407 		   priv->dma_cap.number_tx_channel);
6408 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6409 		   priv->dma_cap.number_rx_queues);
6410 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6411 		   priv->dma_cap.number_tx_queues);
6412 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6413 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6414 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6415 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6416 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6417 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6418 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6419 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6420 		   priv->dma_cap.pps_out_num);
6421 	seq_printf(seq, "\tSafety Features: %s\n",
6422 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6423 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6424 		   priv->dma_cap.frpsel ? "Y" : "N");
6425 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6426 		   priv->dma_cap.host_dma_width);
6427 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6428 		   priv->dma_cap.rssen ? "Y" : "N");
6429 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6430 		   priv->dma_cap.vlhash ? "Y" : "N");
6431 	seq_printf(seq, "\tSplit Header: %s\n",
6432 		   priv->dma_cap.sphen ? "Y" : "N");
6433 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6434 		   priv->dma_cap.vlins ? "Y" : "N");
6435 	seq_printf(seq, "\tDouble VLAN: %s\n",
6436 		   priv->dma_cap.dvlan ? "Y" : "N");
6437 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6438 		   priv->dma_cap.l3l4fnum);
6439 	seq_printf(seq, "\tARP Offloading: %s\n",
6440 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6441 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6442 		   priv->dma_cap.estsel ? "Y" : "N");
6443 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6444 		   priv->dma_cap.fpesel ? "Y" : "N");
6445 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6446 		   priv->dma_cap.tbssel ? "Y" : "N");
6447 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6448 		   priv->dma_cap.tbs_ch_num);
6449 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6450 		   priv->dma_cap.sgfsel ? "Y" : "N");
6451 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6452 		   BIT(priv->dma_cap.ttsfd) >> 1);
6453 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6454 		   priv->dma_cap.numtc);
6455 	seq_printf(seq, "\tDCB Feature: %s\n",
6456 		   priv->dma_cap.dcben ? "Y" : "N");
6457 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6458 		   priv->dma_cap.advthword ? "Y" : "N");
6459 	seq_printf(seq, "\tPTP Offload: %s\n",
6460 		   priv->dma_cap.ptoen ? "Y" : "N");
6461 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6462 		   priv->dma_cap.osten ? "Y" : "N");
6463 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6464 		   priv->dma_cap.pfcen ? "Y" : "N");
6465 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6466 		   BIT(priv->dma_cap.frpes) << 6);
6467 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6468 		   BIT(priv->dma_cap.frpbs) << 6);
6469 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6470 		   priv->dma_cap.frppipe_num);
6471 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6472 		   priv->dma_cap.nrvf_num ?
6473 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6474 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6475 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6476 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6477 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6478 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6479 		   priv->dma_cap.cbtisel ? "Y" : "N");
6480 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6481 		   priv->dma_cap.aux_snapshot_n);
6482 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6483 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6484 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6485 		   priv->dma_cap.edma ? "Y" : "N");
6486 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6487 		   priv->dma_cap.ediffc ? "Y" : "N");
6488 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6489 		   priv->dma_cap.vxn ? "Y" : "N");
6490 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6491 		   priv->dma_cap.dbgmem ? "Y" : "N");
6492 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6493 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6494 	return 0;
6495 }
6496 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6497 
6498 /* Use network device events to rename debugfs file entries.
6499  */
6500 static int stmmac_device_event(struct notifier_block *unused,
6501 			       unsigned long event, void *ptr)
6502 {
6503 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6504 	struct stmmac_priv *priv = netdev_priv(dev);
6505 
6506 	if (dev->netdev_ops != &stmmac_netdev_ops)
6507 		goto done;
6508 
6509 	switch (event) {
6510 	case NETDEV_CHANGENAME:
6511 		if (priv->dbgfs_dir)
6512 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6513 							 priv->dbgfs_dir,
6514 							 stmmac_fs_dir,
6515 							 dev->name);
6516 		break;
6517 	}
6518 done:
6519 	return NOTIFY_DONE;
6520 }
6521 
6522 static struct notifier_block stmmac_notifier = {
6523 	.notifier_call = stmmac_device_event,
6524 };
6525 
6526 static void stmmac_init_fs(struct net_device *dev)
6527 {
6528 	struct stmmac_priv *priv = netdev_priv(dev);
6529 
6530 	rtnl_lock();
6531 
6532 	/* Create per netdev entries */
6533 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6534 
6535 	/* Entry to report DMA RX/TX rings */
6536 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6537 			    &stmmac_rings_status_fops);
6538 
6539 	/* Entry to report the DMA HW features */
6540 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6541 			    &stmmac_dma_cap_fops);
6542 
6543 	rtnl_unlock();
6544 }
6545 
6546 static void stmmac_exit_fs(struct net_device *dev)
6547 {
6548 	struct stmmac_priv *priv = netdev_priv(dev);
6549 
6550 	debugfs_remove_recursive(priv->dbgfs_dir);
6551 }
6552 #endif /* CONFIG_DEBUG_FS */
6553 
6554 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6555 {
6556 	unsigned char *data = (unsigned char *)&vid_le;
6557 	unsigned char data_byte = 0;
6558 	u32 crc = ~0x0;
6559 	u32 temp = 0;
6560 	int i, bits;
6561 
6562 	bits = get_bitmask_order(VLAN_VID_MASK);
6563 	for (i = 0; i < bits; i++) {
6564 		if ((i % 8) == 0)
6565 			data_byte = data[i / 8];
6566 
6567 		temp = ((crc & 1) ^ data_byte) & 1;
6568 		crc >>= 1;
6569 		data_byte >>= 1;
6570 
6571 		if (temp)
6572 			crc ^= 0xedb88320;
6573 	}
6574 
6575 	return crc;
6576 }
6577 
6578 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6579 {
6580 	u32 crc, hash = 0;
6581 	u16 pmatch = 0;
6582 	int count = 0;
6583 	u16 vid = 0;
6584 
6585 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6586 		__le16 vid_le = cpu_to_le16(vid);
6587 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6588 		hash |= (1 << crc);
6589 		count++;
6590 	}
6591 
6592 	if (!priv->dma_cap.vlhash) {
6593 		if (count > 2) /* VID = 0 always passes filter */
6594 			return -EOPNOTSUPP;
6595 
6596 		pmatch = vid;
6597 		hash = 0;
6598 	}
6599 
6600 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6601 }
6602 
6603 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6604 {
6605 	struct stmmac_priv *priv = netdev_priv(ndev);
6606 	bool is_double = false;
6607 	int ret;
6608 
6609 	ret = pm_runtime_resume_and_get(priv->device);
6610 	if (ret < 0)
6611 		return ret;
6612 
6613 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6614 		is_double = true;
6615 
6616 	set_bit(vid, priv->active_vlans);
6617 	ret = stmmac_vlan_update(priv, is_double);
6618 	if (ret) {
6619 		clear_bit(vid, priv->active_vlans);
6620 		goto err_pm_put;
6621 	}
6622 
6623 	if (priv->hw->num_vlan) {
6624 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6625 		if (ret)
6626 			goto err_pm_put;
6627 	}
6628 err_pm_put:
6629 	pm_runtime_put(priv->device);
6630 
6631 	return ret;
6632 }
6633 
6634 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6635 {
6636 	struct stmmac_priv *priv = netdev_priv(ndev);
6637 	bool is_double = false;
6638 	int ret;
6639 
6640 	ret = pm_runtime_resume_and_get(priv->device);
6641 	if (ret < 0)
6642 		return ret;
6643 
6644 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6645 		is_double = true;
6646 
6647 	clear_bit(vid, priv->active_vlans);
6648 
6649 	if (priv->hw->num_vlan) {
6650 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6651 		if (ret)
6652 			goto del_vlan_error;
6653 	}
6654 
6655 	ret = stmmac_vlan_update(priv, is_double);
6656 
6657 del_vlan_error:
6658 	pm_runtime_put(priv->device);
6659 
6660 	return ret;
6661 }
6662 
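/* ndo_bpf entry point: dispatch XDP program attach/detach and XSK pool
 * setup/teardown requests to the stmmac_xdp helpers.
 */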
6663 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6664 {
6665 	struct stmmac_priv *priv = netdev_priv(dev);
6666 
6667 	switch (bpf->command) {
6668 	case XDP_SETUP_PROG:
6669 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6670 	case XDP_SETUP_XSK_POOL:
6671 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6672 					     bpf->xsk.queue_id);
6673 	default:
6674 		return -EOPNOTSUPP;
6675 	}
6676 }
6677 
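/* ndo_xdp_xmit entry point: transmit a batch of XDP frames on this CPU's
 * XDP TX queue while holding the netdev TX queue lock shared with the slow
 * path, optionally flushing the descriptors and arming the TX timer.
 */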
6678 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6679 			   struct xdp_frame **frames, u32 flags)
6680 {
6681 	struct stmmac_priv *priv = netdev_priv(dev);
6682 	int cpu = smp_processor_id();
6683 	struct netdev_queue *nq;
6684 	int i, nxmit = 0;
6685 	int queue;
6686 
6687 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6688 		return -ENETDOWN;
6689 
6690 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6691 		return -EINVAL;
6692 
6693 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6694 	nq = netdev_get_tx_queue(priv->dev, queue);
6695 
6696 	__netif_tx_lock(nq, cpu);
6697 	/* Avoid TX time-outs, as we are sharing the queue with the slow path */
6698 	txq_trans_cond_update(nq);
6699 
6700 	for (i = 0; i < num_frames; i++) {
6701 		int res;
6702 
6703 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6704 		if (res == STMMAC_XDP_CONSUMED)
6705 			break;
6706 
6707 		nxmit++;
6708 	}
6709 
6710 	if (flags & XDP_XMIT_FLUSH) {
6711 		stmmac_flush_tx_descriptors(priv, queue);
6712 		stmmac_tx_timer_arm(priv, queue);
6713 	}
6714 
6715 	__netif_tx_unlock(nq);
6716 
6717 	return nxmit;
6718 }
6719 
6720 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6721 {
6722 	struct stmmac_channel *ch = &priv->channel[queue];
6723 	unsigned long flags;
6724 
6725 	spin_lock_irqsave(&ch->lock, flags);
6726 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6727 	spin_unlock_irqrestore(&ch->lock, flags);
6728 
6729 	stmmac_stop_rx_dma(priv, queue);
6730 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6731 }
6732 
6733 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6734 {
6735 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6736 	struct stmmac_channel *ch = &priv->channel[queue];
6737 	unsigned long flags;
6738 	u32 buf_size;
6739 	int ret;
6740 
6741 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6742 	if (ret) {
6743 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6744 		return;
6745 	}
6746 
6747 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6748 	if (ret) {
6749 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6750 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6751 		return;
6752 	}
6753 
6754 	stmmac_reset_rx_queue(priv, queue);
6755 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6756 
6757 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6758 			    rx_q->dma_rx_phy, rx_q->queue_index);
6759 
6760 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6761 			     sizeof(struct dma_desc));
6762 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6763 			       rx_q->rx_tail_addr, rx_q->queue_index);
6764 
6765 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6766 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6767 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6768 				      buf_size,
6769 				      rx_q->queue_index);
6770 	} else {
6771 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6772 				      priv->dma_conf.dma_buf_sz,
6773 				      rx_q->queue_index);
6774 	}
6775 
6776 	stmmac_start_rx_dma(priv, queue);
6777 
6778 	spin_lock_irqsave(&ch->lock, flags);
6779 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6780 	spin_unlock_irqrestore(&ch->lock, flags);
6781 }
6782 
6783 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6784 {
6785 	struct stmmac_channel *ch = &priv->channel[queue];
6786 	unsigned long flags;
6787 
6788 	spin_lock_irqsave(&ch->lock, flags);
6789 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6790 	spin_unlock_irqrestore(&ch->lock, flags);
6791 
6792 	stmmac_stop_tx_dma(priv, queue);
6793 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6794 }
6795 
6796 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6797 {
6798 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6799 	struct stmmac_channel *ch = &priv->channel[queue];
6800 	unsigned long flags;
6801 	int ret;
6802 
6803 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6804 	if (ret) {
6805 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6806 		return;
6807 	}
6808 
6809 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6810 	if (ret) {
6811 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6812 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6813 		return;
6814 	}
6815 
6816 	stmmac_reset_tx_queue(priv, queue);
6817 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6818 
6819 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6820 			    tx_q->dma_tx_phy, tx_q->queue_index);
6821 
6822 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6823 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6824 
6825 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6826 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6827 			       tx_q->tx_tail_addr, tx_q->queue_index);
6828 
6829 	stmmac_start_tx_dma(priv, queue);
6830 
6831 	spin_lock_irqsave(&ch->lock, flags);
6832 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6833 	spin_unlock_irqrestore(&ch->lock, flags);
6834 }
6835 
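/* Quiesce the interface for an XDP reconfiguration: stop the TX queues,
 * NAPI, timers, IRQs and DMA, free the descriptor resources and disable
 * the MAC.
 */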
6836 void stmmac_xdp_release(struct net_device *dev)
6837 {
6838 	struct stmmac_priv *priv = netdev_priv(dev);
6839 	u32 chan;
6840 
6841 	/* Ensure tx function is not running */
6842 	netif_tx_disable(dev);
6843 
6844 	/* Disable NAPI process */
6845 	stmmac_disable_all_queues(priv);
6846 
6847 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6848 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6849 
6850 	/* Free the IRQ lines */
6851 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6852 
6853 	/* Stop TX/RX DMA channels */
6854 	stmmac_stop_all_dma(priv);
6855 
6856 	/* Release and free the Rx/Tx resources */
6857 	free_dma_desc_resources(priv, &priv->dma_conf);
6858 
6859 	/* Disable the MAC Rx/Tx */
6860 	stmmac_mac_set(priv, priv->ioaddr, false);
6861 
6862 	/* set trans_start so we don't get spurious
6863 	 * watchdogs during reset
6864 	 */
6865 	netif_trans_update(dev);
6866 	netif_carrier_off(dev);
6867 }
6868 
6869 int stmmac_xdp_open(struct net_device *dev)
6870 {
6871 	struct stmmac_priv *priv = netdev_priv(dev);
6872 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6873 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6874 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6875 	struct stmmac_rx_queue *rx_q;
6876 	struct stmmac_tx_queue *tx_q;
6877 	u32 buf_size;
6878 	bool sph_en;
6879 	u32 chan;
6880 	int ret;
6881 
6882 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6883 	if (ret < 0) {
6884 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6885 			   __func__);
6886 		goto dma_desc_error;
6887 	}
6888 
6889 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6890 	if (ret < 0) {
6891 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6892 			   __func__);
6893 		goto init_error;
6894 	}
6895 
6896 	stmmac_reset_queues_param(priv);
6897 
6898 	/* DMA CSR Channel configuration */
6899 	for (chan = 0; chan < dma_csr_ch; chan++) {
6900 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6901 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6902 	}
6903 
6904 	/* Adjust Split header */
6905 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6906 
6907 	/* DMA RX Channel Configuration */
6908 	for (chan = 0; chan < rx_cnt; chan++) {
6909 		rx_q = &priv->dma_conf.rx_queue[chan];
6910 
6911 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6912 				    rx_q->dma_rx_phy, chan);
6913 
6914 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6915 				     (rx_q->buf_alloc_num *
6916 				      sizeof(struct dma_desc));
6917 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6918 				       rx_q->rx_tail_addr, chan);
6919 
6920 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6921 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6922 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6923 					      buf_size,
6924 					      rx_q->queue_index);
6925 		} else {
6926 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6927 					      priv->dma_conf.dma_buf_sz,
6928 					      rx_q->queue_index);
6929 		}
6930 
6931 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6932 	}
6933 
6934 	/* DMA TX Channel Configuration */
6935 	for (chan = 0; chan < tx_cnt; chan++) {
6936 		tx_q = &priv->dma_conf.tx_queue[chan];
6937 
6938 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6939 				    tx_q->dma_tx_phy, chan);
6940 
6941 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6942 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6943 				       tx_q->tx_tail_addr, chan);
6944 
6945 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6946 		tx_q->txtimer.function = stmmac_tx_timer;
6947 	}
6948 
6949 	/* Enable the MAC Rx/Tx */
6950 	stmmac_mac_set(priv, priv->ioaddr, true);
6951 
6952 	/* Start Rx & Tx DMA Channels */
6953 	stmmac_start_all_dma(priv);
6954 
6955 	ret = stmmac_request_irq(dev);
6956 	if (ret)
6957 		goto irq_error;
6958 
6959 	/* Enable NAPI process */
6960 	stmmac_enable_all_queues(priv);
6961 	netif_carrier_on(dev);
6962 	netif_tx_start_all_queues(dev);
6963 	stmmac_enable_all_dma_irq(priv);
6964 
6965 	return 0;
6966 
6967 irq_error:
6968 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6969 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6970 
6971 	stmmac_hw_teardown(dev);
6972 init_error:
6973 	free_dma_desc_resources(priv, &priv->dma_conf);
6974 dma_desc_error:
6975 	return ret;
6976 }
6977 
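/* ndo_xsk_wakeup: invoked by the AF_XDP core when user space asks the
 * driver to make progress on an XSK RX or TX ring.  The EQoS core has no
 * per-channel software interrupt to kick, so the combined rxtx NAPI is
 * simply scheduled; if it is already running,
 * napi_if_scheduled_mark_missed() makes it poll one more time instead.
 */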
6978 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6979 {
6980 	struct stmmac_priv *priv = netdev_priv(dev);
6981 	struct stmmac_rx_queue *rx_q;
6982 	struct stmmac_tx_queue *tx_q;
6983 	struct stmmac_channel *ch;
6984 
6985 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6986 	    !netif_carrier_ok(priv->dev))
6987 		return -ENETDOWN;
6988 
6989 	if (!stmmac_xdp_is_enabled(priv))
6990 		return -EINVAL;
6991 
6992 	if (queue >= priv->plat->rx_queues_to_use ||
6993 	    queue >= priv->plat->tx_queues_to_use)
6994 		return -EINVAL;
6995 
6996 	rx_q = &priv->dma_conf.rx_queue[queue];
6997 	tx_q = &priv->dma_conf.tx_queue[queue];
6998 	ch = &priv->channel[queue];
6999 
7000 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7001 		return -EINVAL;
7002 
7003 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7004 		/* EQoS does not have a per-DMA channel SW interrupt,
7005 		 * so we schedule the RX/TX NAPI straight away.
7006 		 */
7007 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7008 			__napi_schedule(&ch->rxtx_napi);
7009 	}
7010 
7011 	return 0;
7012 }
7013 
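/* Aggregate the per-queue software counters into rtnl_link_stats64.
 * Each counter group is sampled inside a u64_stats fetch/retry loop so
 * that 64-bit values read consistently on 32-bit hosts: the read is
 * repeated until the sequence count seen by u64_stats_fetch_begin() has
 * not changed.  TX bytes and TX packets sit under different sync points
 * because they are updated from the xmit path and from NAPI completion
 * respectively.
 */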
7014 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7015 {
7016 	struct stmmac_priv *priv = netdev_priv(dev);
7017 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7018 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7019 	unsigned int start;
7020 	int q;
7021 
7022 	for (q = 0; q < tx_cnt; q++) {
7023 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7024 		u64 tx_packets;
7025 		u64 tx_bytes;
7026 
7027 		do {
7028 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7029 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7030 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7031 		do {
7032 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7033 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7034 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7035 
7036 		stats->tx_packets += tx_packets;
7037 		stats->tx_bytes += tx_bytes;
7038 	}
7039 
7040 	for (q = 0; q < rx_cnt; q++) {
7041 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7042 		u64 rx_packets;
7043 		u64 rx_bytes;
7044 
7045 		do {
7046 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7047 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7048 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7049 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7050 
7051 		stats->rx_packets += rx_packets;
7052 		stats->rx_bytes += rx_bytes;
7053 	}
7054 
7055 	stats->rx_dropped = priv->xstats.rx_dropped;
7056 	stats->rx_errors = priv->xstats.rx_errors;
7057 	stats->tx_dropped = priv->xstats.tx_dropped;
7058 	stats->tx_errors = priv->xstats.tx_errors;
7059 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7060 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7061 	stats->rx_length_errors = priv->xstats.rx_length;
7062 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7063 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7064 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7065 }
7066 
7067 static const struct net_device_ops stmmac_netdev_ops = {
7068 	.ndo_open = stmmac_open,
7069 	.ndo_start_xmit = stmmac_xmit,
7070 	.ndo_stop = stmmac_release,
7071 	.ndo_change_mtu = stmmac_change_mtu,
7072 	.ndo_fix_features = stmmac_fix_features,
7073 	.ndo_set_features = stmmac_set_features,
7074 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7075 	.ndo_tx_timeout = stmmac_tx_timeout,
7076 	.ndo_eth_ioctl = stmmac_ioctl,
7077 	.ndo_get_stats64 = stmmac_get_stats64,
7078 	.ndo_setup_tc = stmmac_setup_tc,
7079 	.ndo_select_queue = stmmac_select_queue,
7080 	.ndo_set_mac_address = stmmac_set_mac_address,
7081 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7082 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7083 	.ndo_bpf = stmmac_bpf,
7084 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7085 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7086 };
7087 
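/* Deferred error recovery.  Fault paths (e.g. a TX watchdog timeout)
 * only set STMMAC_RESET_REQUESTED and schedule the service work; the
 * actual recovery, a full dev_close()/dev_open() cycle, runs here in
 * process context under rtnl_lock where sleeping is allowed.
 */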
7088 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7089 {
7090 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7091 		return;
7092 	if (test_bit(STMMAC_DOWN, &priv->state))
7093 		return;
7094 
7095 	netdev_err(priv->dev, "Reset adapter.\n");
7096 
7097 	rtnl_lock();
7098 	netif_trans_update(priv->dev);
7099 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7100 		usleep_range(1000, 2000);
7101 
7102 	set_bit(STMMAC_DOWN, &priv->state);
7103 	dev_close(priv->dev);
7104 	dev_open(priv->dev, NULL);
7105 	clear_bit(STMMAC_DOWN, &priv->state);
7106 	clear_bit(STMMAC_RESETING, &priv->state);
7107 	rtnl_unlock();
7108 }
7109 
7110 static void stmmac_service_task(struct work_struct *work)
7111 {
7112 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7113 			service_task);
7114 
7115 	stmmac_reset_subtask(priv);
7116 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7117 }
7118 
7119 /**
7120  *  stmmac_hw_init - Init the MAC device
7121  *  @priv: driver private structure
7122  *  Description: this function is to configure the MAC device according to
7123  *  some platform parameters or the HW capability register. It prepares the
7124  *  driver to use either ring or chain modes and to setup either enhanced or
7125  *  normal descriptors.
7126  */
7127 static int stmmac_hw_init(struct stmmac_priv *priv)
7128 {
7129 	int ret;
7130 
7131 	/* dwmac-sun8i only works in chain mode */
7132 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7133 		chain_mode = 1;
7134 	priv->chain_mode = chain_mode;
7135 
7136 	/* Initialize HW Interface */
7137 	ret = stmmac_hwif_init(priv);
7138 	if (ret)
7139 		return ret;
7140 
7141 	/* Get the HW capability (GMAC cores newer than 3.50a) */
7142 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7143 	if (priv->hw_cap_support) {
7144 		dev_info(priv->device, "DMA HW capability register supported\n");
7145 
7146 		/* We can override some gmac/dma configuration fields that
7147 		 * are passed through the platform (e.g. enh_desc, tx_coe)
7148 		 * with the values from the HW capability register, if
7149 		 * supported.
7150 		 */
7151 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7152 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7153 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7154 		priv->hw->pmt = priv->plat->pmt;
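		/* hash_tb_sz is an encoded size: the number of multicast
		 * filter bins computed below is BIT(hash_tb_sz) * 32, e.g.
		 * a value of 2 yields 128 bins and mcast_bits_log2 = 7.
		 */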
7155 		if (priv->dma_cap.hash_tb_sz) {
7156 			priv->hw->multicast_filter_bins =
7157 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7158 			priv->hw->mcast_bits_log2 =
7159 					ilog2(priv->hw->multicast_filter_bins);
7160 		}
7161 
7162 		/* TXCOE doesn't work in thresh DMA mode */
7163 		if (priv->plat->force_thresh_dma_mode)
7164 			priv->plat->tx_coe = 0;
7165 		else
7166 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7167 
7168 		/* In case of GMAC4, rx_coe comes from the HW cap register. */
7169 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7170 
7171 		if (priv->dma_cap.rx_coe_type2)
7172 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7173 		else if (priv->dma_cap.rx_coe_type1)
7174 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7175 
7176 	} else {
7177 		dev_info(priv->device, "No HW DMA feature register supported\n");
7178 	}
7179 
7180 	if (priv->plat->rx_coe) {
7181 		priv->hw->rx_csum = priv->plat->rx_coe;
7182 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7183 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7184 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7185 	}
7186 	if (priv->plat->tx_coe)
7187 		dev_info(priv->device, "TX Checksum insertion supported\n");
7188 
7189 	if (priv->plat->pmt) {
7190 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7191 		device_set_wakeup_capable(priv->device, 1);
7192 	}
7193 
7194 	if (priv->dma_cap.tsoen)
7195 		dev_info(priv->device, "TSO supported\n");
7196 
7197 	priv->hw->vlan_fail_q_en =
7198 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7199 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7200 
7201 	/* Run HW quirks, if any */
7202 	if (priv->hwif_quirks) {
7203 		ret = priv->hwif_quirks(priv);
7204 		if (ret)
7205 			return ret;
7206 	}
7207 
7208 	/* Rx Watchdog is available in cores newer than 3.40.
7209 	 * In some cases, for example on buggy HW, this feature
7210 	 * has to be disabled; this can be done by passing the
7211 	 * riwt_off field from the platform.
7212 	 */
7213 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7214 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7215 		priv->use_riwt = 1;
7216 		dev_info(priv->device,
7217 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7218 	}
7219 
7220 	return 0;
7221 }
7222 
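/* Register the NAPI contexts for every channel.  Each channel can have
 * up to three: rx_napi and tx_napi for the regular datapath plus, when
 * the index is valid for both directions, a combined rxtx_napi that is
 * used by the AF_XDP zero-copy path (see stmmac_xsk_wakeup() above).
 */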
7223 static void stmmac_napi_add(struct net_device *dev)
7224 {
7225 	struct stmmac_priv *priv = netdev_priv(dev);
7226 	u32 queue, maxq;
7227 
7228 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7229 
7230 	for (queue = 0; queue < maxq; queue++) {
7231 		struct stmmac_channel *ch = &priv->channel[queue];
7232 
7233 		ch->priv_data = priv;
7234 		ch->index = queue;
7235 		spin_lock_init(&ch->lock);
7236 
7237 		if (queue < priv->plat->rx_queues_to_use) {
7238 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7239 		}
7240 		if (queue < priv->plat->tx_queues_to_use) {
7241 			netif_napi_add_tx(dev, &ch->tx_napi,
7242 					  stmmac_napi_poll_tx);
7243 		}
7244 		if (queue < priv->plat->rx_queues_to_use &&
7245 		    queue < priv->plat->tx_queues_to_use) {
7246 			netif_napi_add(dev, &ch->rxtx_napi,
7247 				       stmmac_napi_poll_rxtx);
7248 		}
7249 	}
7250 }
7251 
7252 static void stmmac_napi_del(struct net_device *dev)
7253 {
7254 	struct stmmac_priv *priv = netdev_priv(dev);
7255 	u32 queue, maxq;
7256 
7257 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7258 
7259 	for (queue = 0; queue < maxq; queue++) {
7260 		struct stmmac_channel *ch = &priv->channel[queue];
7261 
7262 		if (queue < priv->plat->rx_queues_to_use)
7263 			netif_napi_del(&ch->rx_napi);
7264 		if (queue < priv->plat->tx_queues_to_use)
7265 			netif_napi_del(&ch->tx_napi);
7266 		if (queue < priv->plat->rx_queues_to_use &&
7267 		    queue < priv->plat->tx_queues_to_use) {
7268 			netif_napi_del(&ch->rxtx_napi);
7269 		}
7270 	}
7271 }
7272 
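/* Change the number of active RX/TX queues at runtime (typically via
 * ethtool's channel configuration).  A running interface is closed
 * first, the NAPI contexts are re-created for the new counts, the RSS
 * indirection table is refilled with the round-robin default unless the
 * user has configured one explicitly, and the interface is then
 * reopened.
 */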
7273 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7274 {
7275 	struct stmmac_priv *priv = netdev_priv(dev);
7276 	int ret = 0, i;
7277 
7278 	if (netif_running(dev))
7279 		stmmac_release(dev);
7280 
7281 	stmmac_napi_del(dev);
7282 
7283 	priv->plat->rx_queues_to_use = rx_cnt;
7284 	priv->plat->tx_queues_to_use = tx_cnt;
7285 	if (!netif_is_rxfh_configured(dev))
7286 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7287 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7288 									rx_cnt);
7289 
7290 	stmmac_napi_add(dev);
7291 
7292 	if (netif_running(dev))
7293 		ret = stmmac_open(dev);
7294 
7295 	return ret;
7296 }
7297 
7298 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7299 {
7300 	struct stmmac_priv *priv = netdev_priv(dev);
7301 	int ret = 0;
7302 
7303 	if (netif_running(dev))
7304 		stmmac_release(dev);
7305 
7306 	priv->dma_conf.dma_rx_size = rx_size;
7307 	priv->dma_conf.dma_tx_size = tx_size;
7308 
7309 	if (netif_running(dev))
7310 		ret = stmmac_open(dev);
7311 
7312 	return ret;
7313 }
7314 
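/* XDP RX metadata hook, reached from the bpf_xdp_metadata_rx_timestamp()
 * kfunc.  It reports the hardware RX timestamp for the frame, applying
 * the same CDC error adjustment as the regular (skb) RX timestamping
 * path; on GMAC4/XGMAC the timestamp is carried in the context
 * descriptor that follows the data descriptor.
 */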
7315 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7316 {
7317 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7318 	struct dma_desc *desc_contains_ts = ctx->desc;
7319 	struct stmmac_priv *priv = ctx->priv;
7320 	struct dma_desc *ndesc = ctx->ndesc;
7321 	struct dma_desc *desc = ctx->desc;
7322 	u64 ns = 0;
7323 
7324 	if (!priv->hwts_rx_en)
7325 		return -ENODATA;
7326 
7327 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7328 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7329 		desc_contains_ts = ndesc;
7330 
7331 	/* Check if timestamp is available */
7332 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7333 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7334 		ns -= priv->plat->cdc_error_adj;
7335 		*timestamp = ns_to_ktime(ns);
7336 		return 0;
7337 	}
7338 
7339 	return -ENODATA;
7340 }
7341 
7342 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7343 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7344 };
7345 
7346 /**
7347  * stmmac_dvr_probe
7348  * @device: device pointer
7349  * @plat_dat: platform data pointer
7350  * @res: stmmac resource pointer
7351  * Description: this is the main probe function; it calls alloc_etherdev
7352  * and allocates the driver private structure.
7353  * Return:
7354  * 0 on success, otherwise a negative errno.
7355  */
7356 int stmmac_dvr_probe(struct device *device,
7357 		     struct plat_stmmacenet_data *plat_dat,
7358 		     struct stmmac_resources *res)
7359 {
7360 	struct net_device *ndev = NULL;
7361 	struct stmmac_priv *priv;
7362 	u32 rxq;
7363 	int i, ret = 0;
7364 
7365 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7366 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7367 	if (!ndev)
7368 		return -ENOMEM;
7369 
7370 	SET_NETDEV_DEV(ndev, device);
7371 
7372 	priv = netdev_priv(ndev);
7373 	priv->device = device;
7374 	priv->dev = ndev;
7375 
7376 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7377 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7378 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7379 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7380 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7381 	}
7382 
7383 	priv->xstats.pcpu_stats =
7384 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7385 	if (!priv->xstats.pcpu_stats)
7386 		return -ENOMEM;
7387 
7388 	stmmac_set_ethtool_ops(ndev);
7389 	priv->pause = pause;
7390 	priv->plat = plat_dat;
7391 	priv->ioaddr = res->addr;
7392 	priv->dev->base_addr = (unsigned long)res->addr;
7393 	priv->plat->dma_cfg->multi_msi_en =
7394 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7395 
7396 	priv->dev->irq = res->irq;
7397 	priv->wol_irq = res->wol_irq;
7398 	priv->lpi_irq = res->lpi_irq;
7399 	priv->sfty_irq = res->sfty_irq;
7400 	priv->sfty_ce_irq = res->sfty_ce_irq;
7401 	priv->sfty_ue_irq = res->sfty_ue_irq;
7402 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7403 		priv->rx_irq[i] = res->rx_irq[i];
7404 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7405 		priv->tx_irq[i] = res->tx_irq[i];
7406 
7407 	if (!is_zero_ether_addr(res->mac))
7408 		eth_hw_addr_set(priv->dev, res->mac);
7409 
7410 	dev_set_drvdata(device, priv->dev);
7411 
7412 	/* Verify driver arguments */
7413 	stmmac_verify_args();
7414 
7415 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7416 	if (!priv->af_xdp_zc_qps)
7417 		return -ENOMEM;
7418 
7419 	/* Allocate workqueue */
7420 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7421 	if (!priv->wq) {
7422 		dev_err(priv->device, "failed to create workqueue\n");
7423 		ret = -ENOMEM;
7424 		goto error_wq_init;
7425 	}
7426 
7427 	INIT_WORK(&priv->service_task, stmmac_service_task);
7428 
7429 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7430 
7431 	/* Override with the kernel parameter if supplied.
7432 	 * XXX CRS XXX: this needs to support multiple instances.
7433 	 */
7434 	if ((phyaddr >= 0) && (phyaddr <= 31))
7435 		priv->plat->phy_addr = phyaddr;
7436 
7437 	if (priv->plat->stmmac_rst) {
7438 		ret = reset_control_assert(priv->plat->stmmac_rst);
7439 		reset_control_deassert(priv->plat->stmmac_rst);
7440 		/* Some reset controllers provide only a reset callback instead
7441 		 * of an assert + deassert callback pair.
7442 		 */
7443 		if (ret == -ENOTSUPP)
7444 			reset_control_reset(priv->plat->stmmac_rst);
7445 	}
7446 
7447 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7448 	if (ret == -ENOTSUPP)
7449 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7450 			ERR_PTR(ret));
7451 
7452 	/* Wait a bit for the reset to take effect */
7453 	udelay(10);
7454 
7455 	/* Init MAC and get the capabilities */
7456 	ret = stmmac_hw_init(priv);
7457 	if (ret)
7458 		goto error_hw_init;
7459 
7460 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7461 	 */
7462 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7463 		priv->plat->dma_cfg->dche = false;
7464 
7465 	stmmac_check_ether_addr(priv);
7466 
7467 	ndev->netdev_ops = &stmmac_netdev_ops;
7468 
7469 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7470 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7471 
7472 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7473 			    NETIF_F_RXCSUM;
7474 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7475 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7476 
7477 	ret = stmmac_tc_init(priv, priv);
7478 	if (!ret)
7479 		ndev->hw_features |= NETIF_F_HW_TC;
7481 
7482 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7483 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7484 		if (priv->plat->has_gmac4)
7485 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7486 		priv->tso = true;
7487 		dev_info(priv->device, "TSO feature enabled\n");
7488 	}
7489 
7490 	if (priv->dma_cap.sphen &&
7491 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7492 		ndev->hw_features |= NETIF_F_GRO;
7493 		priv->sph_cap = true;
7494 		priv->sph = priv->sph_cap;
7495 		dev_info(priv->device, "SPH feature enabled\n");
7496 	}
7497 
7498 	/* Ideally our host DMA address width is the same as for the
7499 	 * device. However, it may differ and then we have to use our
7500 	 * host DMA width for allocation and the device DMA width for
7501 	 * register handling.
7502 	 */
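	/* For example, a core reporting addr64 = 40 behind an interconnect
	 * that can only master 32-bit addresses would set
	 * plat->host_dma_width = 32: buffers are then allocated with a
	 * 32-bit DMA mask while descriptor/register programming keeps
	 * using the 40-bit device view.
	 */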
7503 	if (priv->plat->host_dma_width)
7504 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7505 	else
7506 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7507 
7508 	if (priv->dma_cap.host_dma_width) {
7509 		ret = dma_set_mask_and_coherent(device,
7510 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7511 		if (!ret) {
7512 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7513 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7514 
7515 			/*
7516 			 * If more than 32 bits can be addressed, make sure to
7517 			 * enable enhanced addressing mode.
7518 			 */
7519 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7520 				priv->plat->dma_cfg->eame = true;
7521 		} else {
7522 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7523 			if (ret) {
7524 				dev_err(priv->device, "Failed to set DMA Mask\n");
7525 				goto error_hw_init;
7526 			}
7527 
7528 			priv->dma_cap.host_dma_width = 32;
7529 		}
7530 	}
7531 
7532 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7533 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7534 #ifdef STMMAC_VLAN_TAG_USED
7535 	/* Both mac100 and gmac support receive VLAN tag detection */
7536 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7537 	if (priv->plat->has_gmac4) {
7538 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7539 		priv->hw->hw_vlan_en = true;
7540 	}
7541 	if (priv->dma_cap.vlhash) {
7542 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7543 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7544 	}
7545 	if (priv->dma_cap.vlins) {
7546 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7547 		if (priv->dma_cap.dvlan)
7548 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7549 	}
7550 #endif
7551 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7552 
7553 	priv->xstats.threshold = tc;
7554 
7555 	/* Initialize RSS */
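	/* Each indirection table entry defaults to (index % rxq), so with
	 * e.g. four RX queues the table cycles 0, 1, 2, 3, 0, ...  A
	 * user-supplied table survives queue reconfiguration (see
	 * stmmac_reinit_queues() above).
	 */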
7556 	rxq = priv->plat->rx_queues_to_use;
7557 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7558 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7559 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7560 
7561 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7562 		ndev->features |= NETIF_F_RXHASH;
7563 
7564 	ndev->vlan_features |= ndev->features;
7565 
7566 	/* MTU range: 46 - hw-specific max */
7567 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7568 	if (priv->plat->has_xgmac)
7569 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7570 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7571 		ndev->max_mtu = JUMBO_LEN;
7572 	else
7573 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7574 	/* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
7575 	 * ndev->max_mtu or less than ndev->min_mtu, which is an invalid range.
7576 	 */
7577 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7578 	    (priv->plat->maxmtu >= ndev->min_mtu))
7579 		ndev->max_mtu = priv->plat->maxmtu;
7580 	else if (priv->plat->maxmtu < ndev->min_mtu)
7581 		dev_warn(priv->device,
7582 			 "%s: warning: maxmtu having invalid value (%d)\n",
7583 			 __func__, priv->plat->maxmtu);
7584 
7585 	if (flow_ctrl)
7586 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7587 
7588 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7589 
7590 	/* Setup channels NAPI */
7591 	stmmac_napi_add(ndev);
7592 
7593 	mutex_init(&priv->lock);
7594 
7595 	stmmac_fpe_init(priv);
7596 
7597 	/* If a specific clk_csr value is passed from the platform, the CSR
7598 	 * Clock Range selection is fixed and cannot be changed at run-time.
7599 	 * Otherwise the driver tries to set the MDC clock dynamically
7600 	 * according to the actual CSR clock input.
7601 	 */
7603 	if (priv->plat->clk_csr >= 0)
7604 		priv->clk_csr = priv->plat->clk_csr;
7605 	else
7606 		stmmac_clk_csr_set(priv);
7607 
7608 	stmmac_check_pcs_mode(priv);
7609 
7610 	pm_runtime_get_noresume(device);
7611 	pm_runtime_set_active(device);
7612 	if (!pm_runtime_enabled(device))
7613 		pm_runtime_enable(device);
7614 
7615 	ret = stmmac_mdio_register(ndev);
7616 	if (ret < 0) {
7617 		dev_err_probe(priv->device, ret,
7618 			      "MDIO bus (id: %d) registration failed\n",
7619 			      priv->plat->bus_id);
7620 		goto error_mdio_register;
7621 	}
7622 
7623 	if (priv->plat->speed_mode_2500)
7624 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7625 
7626 	ret = stmmac_pcs_setup(ndev);
7627 	if (ret)
7628 		goto error_pcs_setup;
7629 
7630 	ret = stmmac_phy_setup(priv);
7631 	if (ret) {
7632 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7633 		goto error_phy_setup;
7634 	}
7635 
7636 	ret = register_netdev(ndev);
7637 	if (ret) {
7638 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7639 			__func__, ret);
7640 		goto error_netdev_register;
7641 	}
7642 
7643 #ifdef CONFIG_DEBUG_FS
7644 	stmmac_init_fs(ndev);
7645 #endif
7646 
7647 	if (priv->plat->dump_debug_regs)
7648 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7649 
7650 	/* Let pm_runtime_put() disable the clocks.
7651 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7652 	 */
7653 	pm_runtime_put(device);
7654 
7655 	return ret;
7656 
7657 error_netdev_register:
7658 	phylink_destroy(priv->phylink);
7659 error_phy_setup:
7660 	stmmac_pcs_clean(ndev);
7661 error_pcs_setup:
7662 	stmmac_mdio_unregister(ndev);
7663 error_mdio_register:
7664 	stmmac_napi_del(ndev);
7665 error_hw_init:
7666 	destroy_workqueue(priv->wq);
7667 error_wq_init:
7668 	bitmap_free(priv->af_xdp_zc_qps);
7669 
7670 	return ret;
7671 }
7672 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7673 
7674 /**
7675  * stmmac_dvr_remove
7676  * @dev: device pointer
7677  * Description: this function resets the TX/RX processes, disables the MAC
7678  * RX/TX, changes the link status and releases the DMA descriptor rings.
7679  */
7680 void stmmac_dvr_remove(struct device *dev)
7681 {
7682 	struct net_device *ndev = dev_get_drvdata(dev);
7683 	struct stmmac_priv *priv = netdev_priv(ndev);
7684 
7685 	netdev_info(priv->dev, "%s: removing driver", __func__);
7686 
7687 	pm_runtime_get_sync(dev);
7688 
7689 	stmmac_stop_all_dma(priv);
7690 	stmmac_mac_set(priv, priv->ioaddr, false);
7691 	unregister_netdev(ndev);
7692 
7693 #ifdef CONFIG_DEBUG_FS
7694 	stmmac_exit_fs(ndev);
7695 #endif
7696 	phylink_destroy(priv->phylink);
7697 	if (priv->plat->stmmac_rst)
7698 		reset_control_assert(priv->plat->stmmac_rst);
7699 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7700 
7701 	stmmac_pcs_clean(ndev);
7702 	stmmac_mdio_unregister(ndev);
7703 
7704 	destroy_workqueue(priv->wq);
7705 	mutex_destroy(&priv->lock);
7706 	bitmap_free(priv->af_xdp_zc_qps);
7707 
7708 	pm_runtime_disable(dev);
7709 	pm_runtime_put_noidle(dev);
7710 }
7711 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7712 
7713 /**
7714  * stmmac_suspend - suspend callback
7715  * @dev: device pointer
7716  * Description: this function suspends the device. It is called by the
7717  * platform driver to stop the network queue, release the resources,
7718  * program the PMT register (for WoL) and clean up driver resources.
7719  */
7720 int stmmac_suspend(struct device *dev)
7721 {
7722 	struct net_device *ndev = dev_get_drvdata(dev);
7723 	struct stmmac_priv *priv = netdev_priv(ndev);
7724 	u32 chan;
7725 
7726 	if (!ndev || !netif_running(ndev))
7727 		return 0;
7728 
7729 	mutex_lock(&priv->lock);
7730 
7731 	netif_device_detach(ndev);
7732 
7733 	stmmac_disable_all_queues(priv);
7734 
7735 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7736 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7737 
7738 	if (priv->eee_sw_timer_en) {
7739 		priv->tx_path_in_lpi_mode = false;
7740 		del_timer_sync(&priv->eee_ctrl_timer);
7741 	}
7742 
7743 	/* Stop TX/RX DMA */
7744 	stmmac_stop_all_dma(priv);
7745 
7746 	if (priv->plat->serdes_powerdown)
7747 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7748 
7749 	/* Enable Power down mode by programming the PMT regs */
7750 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7751 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7752 		priv->irq_wake = 1;
7753 	} else {
7754 		stmmac_mac_set(priv, priv->ioaddr, false);
7755 		pinctrl_pm_select_sleep_state(priv->device);
7756 	}
7757 
7758 	mutex_unlock(&priv->lock);
7759 
7760 	rtnl_lock();
7761 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7762 		phylink_suspend(priv->phylink, true);
7763 	} else {
7764 		if (device_may_wakeup(priv->device))
7765 			phylink_speed_down(priv->phylink, false);
7766 		phylink_suspend(priv->phylink, false);
7767 	}
7768 	rtnl_unlock();
7769 
7770 	if (stmmac_fpe_supported(priv))
7771 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7772 
7773 	priv->speed = SPEED_UNKNOWN;
7774 	return 0;
7775 }
7776 EXPORT_SYMBOL_GPL(stmmac_suspend);
7777 
7778 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7779 {
7780 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7781 
7782 	rx_q->cur_rx = 0;
7783 	rx_q->dirty_rx = 0;
7784 }
7785 
7786 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7787 {
7788 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7789 
7790 	tx_q->cur_tx = 0;
7791 	tx_q->dirty_tx = 0;
7792 	tx_q->mss = 0;
7793 
7794 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7795 }
7796 
7797 /**
7798  * stmmac_reset_queues_param - reset queue parameters
7799  * @priv: device pointer
7800  */
7801 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7802 {
7803 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7804 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7805 	u32 queue;
7806 
7807 	for (queue = 0; queue < rx_cnt; queue++)
7808 		stmmac_reset_rx_queue(priv, queue);
7809 
7810 	for (queue = 0; queue < tx_cnt; queue++)
7811 		stmmac_reset_tx_queue(priv, queue);
7812 }
7813 
7814 /**
7815  * stmmac_resume - resume callback
7816  * @dev: device pointer
7817  * Description: on resume, this function is invoked to set up the DMA and
7818  * the core in a usable state.
7819  */
7820 int stmmac_resume(struct device *dev)
7821 {
7822 	struct net_device *ndev = dev_get_drvdata(dev);
7823 	struct stmmac_priv *priv = netdev_priv(ndev);
7824 	int ret;
7825 
7826 	if (!netif_running(ndev))
7827 		return 0;
7828 
7829 	/* The Power Down bit in the PMT register is cleared automatically
7830 	 * as soon as a magic packet or a Wake-up frame is received.
7831 	 * Nevertheless, it is better to clear this bit manually because it
7832 	 * can cause problems while resuming from other devices
7833 	 * (e.g. a serial console).
7834 	 */
7835 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7836 		mutex_lock(&priv->lock);
7837 		stmmac_pmt(priv, priv->hw, 0);
7838 		mutex_unlock(&priv->lock);
7839 		priv->irq_wake = 0;
7840 	} else {
7841 		pinctrl_pm_select_default_state(priv->device);
7842 		/* reset the phy so that it's ready */
7843 		if (priv->mii)
7844 			stmmac_mdio_reset(priv->mii);
7845 	}
7846 
7847 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7848 	    priv->plat->serdes_powerup) {
7849 		ret = priv->plat->serdes_powerup(ndev,
7850 						 priv->plat->bsp_priv);
7851 
7852 		if (ret < 0)
7853 			return ret;
7854 	}
7855 
7856 	rtnl_lock();
7857 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7858 		phylink_resume(priv->phylink);
7859 	} else {
7860 		phylink_resume(priv->phylink);
7861 		if (device_may_wakeup(priv->device))
7862 			phylink_speed_up(priv->phylink);
7863 	}
7864 	rtnl_unlock();
7865 
7866 	rtnl_lock();
7867 	mutex_lock(&priv->lock);
7868 
7869 	stmmac_reset_queues_param(priv);
7870 
7871 	stmmac_free_tx_skbufs(priv);
7872 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7873 
7874 	stmmac_hw_setup(ndev, false);
7875 	stmmac_init_coalesce(priv);
7876 	stmmac_set_rx_mode(ndev);
7877 
7878 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7879 
7880 	stmmac_enable_all_queues(priv);
7881 	stmmac_enable_all_dma_irq(priv);
7882 
7883 	mutex_unlock(&priv->lock);
7884 	rtnl_unlock();
7885 
7886 	netif_device_attach(ndev);
7887 
7888 	return 0;
7889 }
7890 EXPORT_SYMBOL_GPL(stmmac_resume);
7891 
7892 #ifndef MODULE
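/* Built-in (non-modular) configuration: parse "stmmaceth=" options from
 * the kernel command line as a comma-separated list of name:value pairs,
 * for example
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000
 *
 * Each recognised token overrides the corresponding module parameter.
 */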
7893 static int __init stmmac_cmdline_opt(char *str)
7894 {
7895 	char *opt;
7896 
7897 	if (!str || !*str)
7898 		return 1;
7899 	while ((opt = strsep(&str, ",")) != NULL) {
7900 		if (!strncmp(opt, "debug:", 6)) {
7901 			if (kstrtoint(opt + 6, 0, &debug))
7902 				goto err;
7903 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7904 			if (kstrtoint(opt + 8, 0, &phyaddr))
7905 				goto err;
7906 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7907 			if (kstrtoint(opt + 7, 0, &buf_sz))
7908 				goto err;
7909 		} else if (!strncmp(opt, "tc:", 3)) {
7910 			if (kstrtoint(opt + 3, 0, &tc))
7911 				goto err;
7912 		} else if (!strncmp(opt, "watchdog:", 9)) {
7913 			if (kstrtoint(opt + 9, 0, &watchdog))
7914 				goto err;
7915 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7916 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7917 				goto err;
7918 		} else if (!strncmp(opt, "pause:", 6)) {
7919 			if (kstrtoint(opt + 6, 0, &pause))
7920 				goto err;
7921 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7922 			if (kstrtoint(opt + 10, 0, &eee_timer))
7923 				goto err;
7924 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7925 			if (kstrtoint(opt + 11, 0, &chain_mode))
7926 				goto err;
7927 		}
7928 	}
7929 	return 1;
7930 
7931 err:
7932 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7933 	return 1;
7934 }
7935 
7936 __setup("stmmaceth=", stmmac_cmdline_opt);
7937 #endif /* MODULE */
7938 
7939 static int __init stmmac_init(void)
7940 {
7941 #ifdef CONFIG_DEBUG_FS
7942 	/* Create debugfs main directory if it doesn't exist yet */
7943 	if (!stmmac_fs_dir)
7944 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7945 	register_netdevice_notifier(&stmmac_notifier);
7946 #endif
7947 
7948 	return 0;
7949 }
7950 
7951 static void __exit stmmac_exit(void)
7952 {
7953 #ifdef CONFIG_DEBUG_FS
7954 	unregister_netdevice_notifier(&stmmac_notifier);
7955 	debugfs_remove_recursive(stmmac_fs_dir);
7956 #endif
7957 }
7958 
7959 module_init(stmmac_init)
7960 module_exit(stmmac_exit)
7961 
7962 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7963 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7964 MODULE_LICENSE("GPL");
7965