xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision a35d00d5512accd337510fa4de756b743d331a87)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54 
55 /* As long as the interface is active, we keep the timestamping counter enabled
56  * with fine resolution and binary rollover. This avoid non-monotonic behavior
57  * (clock jumps) when changing timestamping settings at runtime.
58  */
59 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 				 PTP_TCR_TSCTRLSSR)
61 
62 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
64 
65 /* Module parameters */
66 #define TX_TIMEO	5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70 
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74 
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78 
79 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
80 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
81 
82 /* Limit to make sure XDP TX and slow path can coexist */
83 #define STMMAC_XSK_TX_BUDGET_MAX	256
84 #define STMMAC_TX_XSK_AVAIL		16
85 #define STMMAC_RX_FILL_BATCH		16
86 
87 #define STMMAC_XDP_PASS		0
88 #define STMMAC_XDP_CONSUMED	BIT(0)
89 #define STMMAC_XDP_TX		BIT(1)
90 #define STMMAC_XDP_REDIRECT	BIT(2)
91 
92 static int flow_ctrl = FLOW_AUTO;
93 module_param(flow_ctrl, int, 0644);
94 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
95 
96 static int pause = PAUSE_TIME;
97 module_param(pause, int, 0644);
98 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
99 
100 #define TC_DEFAULT 64
101 static int tc = TC_DEFAULT;
102 module_param(tc, int, 0644);
103 MODULE_PARM_DESC(tc, "DMA threshold control value");
104 
105 #define	DEFAULT_BUFSIZE	1536
106 static int buf_sz = DEFAULT_BUFSIZE;
107 module_param(buf_sz, int, 0644);
108 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
109 
110 #define	STMMAC_RX_COPYBREAK	256
111 
112 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
113 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
114 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
115 
116 #define STMMAC_DEFAULT_LPI_TIMER	1000
117 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
118 module_param(eee_timer, int, 0644);
119 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
120 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
121 
122 /* By default the driver will use the ring mode to manage tx and rx descriptors,
123  * but allow user to force to use the chain instead of the ring
124  */
125 static unsigned int chain_mode;
126 module_param(chain_mode, int, 0444);
127 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
128 
129 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
130 /* For MSI interrupts handling */
131 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
133 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
134 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
135 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
138 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
139 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
140 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
141 					  u32 rxmode, u32 chan);
142 
143 #ifdef CONFIG_DEBUG_FS
144 static const struct net_device_ops stmmac_netdev_ops;
145 static void stmmac_init_fs(struct net_device *dev);
146 static void stmmac_exit_fs(struct net_device *dev);
147 #endif
148 
149 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
150 
151 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
152 {
153 	int ret = 0;
154 
155 	if (enabled) {
156 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
157 		if (ret)
158 			return ret;
159 		ret = clk_prepare_enable(priv->plat->pclk);
160 		if (ret) {
161 			clk_disable_unprepare(priv->plat->stmmac_clk);
162 			return ret;
163 		}
164 		if (priv->plat->clks_config) {
165 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
166 			if (ret) {
167 				clk_disable_unprepare(priv->plat->stmmac_clk);
168 				clk_disable_unprepare(priv->plat->pclk);
169 				return ret;
170 			}
171 		}
172 	} else {
173 		clk_disable_unprepare(priv->plat->stmmac_clk);
174 		clk_disable_unprepare(priv->plat->pclk);
175 		if (priv->plat->clks_config)
176 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
177 	}
178 
179 	return ret;
180 }
181 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
182 
183 /**
184  * stmmac_verify_args - verify the driver parameters.
185  * Description: it checks the driver parameters and set a default in case of
186  * errors.
187  */
188 static void stmmac_verify_args(void)
189 {
190 	if (unlikely(watchdog < 0))
191 		watchdog = TX_TIMEO;
192 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
193 		buf_sz = DEFAULT_BUFSIZE;
194 	if (unlikely(flow_ctrl > 1))
195 		flow_ctrl = FLOW_AUTO;
196 	else if (likely(flow_ctrl < 0))
197 		flow_ctrl = FLOW_OFF;
198 	if (unlikely((pause < 0) || (pause > 0xffff)))
199 		pause = PAUSE_TIME;
200 	if (eee_timer < 0)
201 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
202 }
203 
204 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
205 {
206 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
207 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
208 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
209 	u32 queue;
210 
211 	for (queue = 0; queue < maxq; queue++) {
212 		struct stmmac_channel *ch = &priv->channel[queue];
213 
214 		if (stmmac_xdp_is_enabled(priv) &&
215 		    test_bit(queue, priv->af_xdp_zc_qps)) {
216 			napi_disable(&ch->rxtx_napi);
217 			continue;
218 		}
219 
220 		if (queue < rx_queues_cnt)
221 			napi_disable(&ch->rx_napi);
222 		if (queue < tx_queues_cnt)
223 			napi_disable(&ch->tx_napi);
224 	}
225 }
226 
227 /**
228  * stmmac_disable_all_queues - Disable all queues
229  * @priv: driver private structure
230  */
231 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
232 {
233 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
234 	struct stmmac_rx_queue *rx_q;
235 	u32 queue;
236 
237 	/* synchronize_rcu() needed for pending XDP buffers to drain */
238 	for (queue = 0; queue < rx_queues_cnt; queue++) {
239 		rx_q = &priv->dma_conf.rx_queue[queue];
240 		if (rx_q->xsk_pool) {
241 			synchronize_rcu();
242 			break;
243 		}
244 	}
245 
246 	__stmmac_disable_all_queues(priv);
247 }
248 
249 /**
250  * stmmac_enable_all_queues - Enable all queues
251  * @priv: driver private structure
252  */
253 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
254 {
255 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
256 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
257 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
258 	u32 queue;
259 
260 	for (queue = 0; queue < maxq; queue++) {
261 		struct stmmac_channel *ch = &priv->channel[queue];
262 
263 		if (stmmac_xdp_is_enabled(priv) &&
264 		    test_bit(queue, priv->af_xdp_zc_qps)) {
265 			napi_enable(&ch->rxtx_napi);
266 			continue;
267 		}
268 
269 		if (queue < rx_queues_cnt)
270 			napi_enable(&ch->rx_napi);
271 		if (queue < tx_queues_cnt)
272 			napi_enable(&ch->tx_napi);
273 	}
274 }
275 
276 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
277 {
278 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
279 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
280 		queue_work(priv->wq, &priv->service_task);
281 }
282 
283 static void stmmac_global_err(struct stmmac_priv *priv)
284 {
285 	netif_carrier_off(priv->dev);
286 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
287 	stmmac_service_event_schedule(priv);
288 }
289 
290 /**
291  * stmmac_clk_csr_set - dynamically set the MDC clock
292  * @priv: driver private structure
293  * Description: this is to dynamically set the MDC clock according to the csr
294  * clock input.
295  * Note:
296  *	If a specific clk_csr value is passed from the platform
297  *	this means that the CSR Clock Range selection cannot be
298  *	changed at run-time and it is fixed (as reported in the driver
299  *	documentation). Viceversa the driver will try to set the MDC
300  *	clock dynamically according to the actual clock input.
301  */
302 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
303 {
304 	unsigned long clk_rate;
305 
306 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
307 
308 	/* Platform provided default clk_csr would be assumed valid
309 	 * for all other cases except for the below mentioned ones.
310 	 * For values higher than the IEEE 802.3 specified frequency
311 	 * we can not estimate the proper divider as it is not known
312 	 * the frequency of clk_csr_i. So we do not change the default
313 	 * divider.
314 	 */
315 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
316 		if (clk_rate < CSR_F_35M)
317 			priv->clk_csr = STMMAC_CSR_20_35M;
318 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
319 			priv->clk_csr = STMMAC_CSR_35_60M;
320 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
321 			priv->clk_csr = STMMAC_CSR_60_100M;
322 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
323 			priv->clk_csr = STMMAC_CSR_100_150M;
324 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
325 			priv->clk_csr = STMMAC_CSR_150_250M;
326 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
327 			priv->clk_csr = STMMAC_CSR_250_300M;
328 		else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
329 			priv->clk_csr = STMMAC_CSR_300_500M;
330 		else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
331 			priv->clk_csr = STMMAC_CSR_500_800M;
332 	}
333 
334 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
335 		if (clk_rate > 160000000)
336 			priv->clk_csr = 0x03;
337 		else if (clk_rate > 80000000)
338 			priv->clk_csr = 0x02;
339 		else if (clk_rate > 40000000)
340 			priv->clk_csr = 0x01;
341 		else
342 			priv->clk_csr = 0;
343 	}
344 
345 	if (priv->plat->has_xgmac) {
346 		if (clk_rate > 400000000)
347 			priv->clk_csr = 0x5;
348 		else if (clk_rate > 350000000)
349 			priv->clk_csr = 0x4;
350 		else if (clk_rate > 300000000)
351 			priv->clk_csr = 0x3;
352 		else if (clk_rate > 250000000)
353 			priv->clk_csr = 0x2;
354 		else if (clk_rate > 150000000)
355 			priv->clk_csr = 0x1;
356 		else
357 			priv->clk_csr = 0x0;
358 	}
359 }
360 
361 static void print_pkt(unsigned char *buf, int len)
362 {
363 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
364 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
365 }
366 
367 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
368 {
369 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
370 	u32 avail;
371 
372 	if (tx_q->dirty_tx > tx_q->cur_tx)
373 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
374 	else
375 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
376 
377 	return avail;
378 }
379 
380 /**
381  * stmmac_rx_dirty - Get RX queue dirty
382  * @priv: driver private structure
383  * @queue: RX queue index
384  */
385 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
386 {
387 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
388 	u32 dirty;
389 
390 	if (rx_q->dirty_rx <= rx_q->cur_rx)
391 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
392 	else
393 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
394 
395 	return dirty;
396 }
397 
398 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
399 {
400 	int tx_lpi_timer;
401 
402 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
403 	priv->eee_sw_timer_en = en ? 0 : 1;
404 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
405 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
406 }
407 
408 /**
409  * stmmac_enable_eee_mode - check and enter in LPI mode
410  * @priv: driver private structure
411  * Description: this function is to verify and enter in LPI mode in case of
412  * EEE.
413  */
414 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
415 {
416 	u32 tx_cnt = priv->plat->tx_queues_to_use;
417 	u32 queue;
418 
419 	/* check if all TX queues have the work finished */
420 	for (queue = 0; queue < tx_cnt; queue++) {
421 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
422 
423 		if (tx_q->dirty_tx != tx_q->cur_tx)
424 			return -EBUSY; /* still unfinished work */
425 	}
426 
427 	/* Check and enter in LPI mode */
428 	if (!priv->tx_path_in_lpi_mode)
429 		stmmac_set_eee_mode(priv, priv->hw,
430 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
431 	return 0;
432 }
433 
434 /**
435  * stmmac_disable_eee_mode - disable and exit from LPI mode
436  * @priv: driver private structure
437  * Description: this function is to exit and disable EEE in case of
438  * LPI state is true. This is called by the xmit.
439  */
440 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
441 {
442 	if (!priv->eee_sw_timer_en) {
443 		stmmac_lpi_entry_timer_config(priv, 0);
444 		return;
445 	}
446 
447 	stmmac_reset_eee_mode(priv, priv->hw);
448 	del_timer_sync(&priv->eee_ctrl_timer);
449 	priv->tx_path_in_lpi_mode = false;
450 }
451 
452 /**
453  * stmmac_eee_ctrl_timer - EEE TX SW timer.
454  * @t:  timer_list struct containing private info
455  * Description:
456  *  if there is no data transfer and if we are not in LPI state,
457  *  then MAC Transmitter can be moved to LPI state.
458  */
459 static void stmmac_eee_ctrl_timer(struct timer_list *t)
460 {
461 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
462 
463 	if (stmmac_enable_eee_mode(priv))
464 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
465 }
466 
467 /**
468  * stmmac_eee_init - init EEE
469  * @priv: driver private structure
470  * Description:
471  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
472  *  can also manage EEE, this function enable the LPI state and start related
473  *  timer.
474  */
475 bool stmmac_eee_init(struct stmmac_priv *priv)
476 {
477 	int eee_tw_timer = priv->eee_tw_timer;
478 
479 	/* Check if MAC core supports the EEE feature. */
480 	if (!priv->dma_cap.eee)
481 		return false;
482 
483 	mutex_lock(&priv->lock);
484 
485 	/* Check if it needs to be deactivated */
486 	if (!priv->eee_active) {
487 		if (priv->eee_enabled) {
488 			netdev_dbg(priv->dev, "disable EEE\n");
489 			stmmac_lpi_entry_timer_config(priv, 0);
490 			del_timer_sync(&priv->eee_ctrl_timer);
491 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
492 			if (priv->hw->xpcs)
493 				xpcs_config_eee(priv->hw->xpcs,
494 						priv->plat->mult_fact_100ns,
495 						false);
496 		}
497 		mutex_unlock(&priv->lock);
498 		return false;
499 	}
500 
501 	if (priv->eee_active && !priv->eee_enabled) {
502 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
503 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
504 				     eee_tw_timer);
505 		if (priv->hw->xpcs)
506 			xpcs_config_eee(priv->hw->xpcs,
507 					priv->plat->mult_fact_100ns,
508 					true);
509 	}
510 
511 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
512 		del_timer_sync(&priv->eee_ctrl_timer);
513 		priv->tx_path_in_lpi_mode = false;
514 		stmmac_lpi_entry_timer_config(priv, 1);
515 	} else {
516 		stmmac_lpi_entry_timer_config(priv, 0);
517 		mod_timer(&priv->eee_ctrl_timer,
518 			  STMMAC_LPI_T(priv->tx_lpi_timer));
519 	}
520 
521 	mutex_unlock(&priv->lock);
522 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
523 	return true;
524 }
525 
526 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
527  * @priv: driver private structure
528  * @p : descriptor pointer
529  * @skb : the socket buffer
530  * Description :
531  * This function will read timestamp from the descriptor & pass it to stack.
532  * and also perform some sanity checks.
533  */
534 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
535 				   struct dma_desc *p, struct sk_buff *skb)
536 {
537 	struct skb_shared_hwtstamps shhwtstamp;
538 	bool found = false;
539 	u64 ns = 0;
540 
541 	if (!priv->hwts_tx_en)
542 		return;
543 
544 	/* exit if skb doesn't support hw tstamp */
545 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
546 		return;
547 
548 	/* check tx tstamp status */
549 	if (stmmac_get_tx_timestamp_status(priv, p)) {
550 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
551 		found = true;
552 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
553 		found = true;
554 	}
555 
556 	if (found) {
557 		ns -= priv->plat->cdc_error_adj;
558 
559 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
560 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
561 
562 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
563 		/* pass tstamp to stack */
564 		skb_tstamp_tx(skb, &shhwtstamp);
565 	}
566 }
567 
568 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
569  * @priv: driver private structure
570  * @p : descriptor pointer
571  * @np : next descriptor pointer
572  * @skb : the socket buffer
573  * Description :
574  * This function will read received packet's timestamp from the descriptor
575  * and pass it to stack. It also perform some sanity checks.
576  */
577 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
578 				   struct dma_desc *np, struct sk_buff *skb)
579 {
580 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
581 	struct dma_desc *desc = p;
582 	u64 ns = 0;
583 
584 	if (!priv->hwts_rx_en)
585 		return;
586 	/* For GMAC4, the valid timestamp is from CTX next desc. */
587 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
588 		desc = np;
589 
590 	/* Check if timestamp is available */
591 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
592 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
593 
594 		ns -= priv->plat->cdc_error_adj;
595 
596 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
597 		shhwtstamp = skb_hwtstamps(skb);
598 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
599 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
600 	} else  {
601 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
602 	}
603 }
604 
605 /**
606  *  stmmac_hwtstamp_set - control hardware timestamping.
607  *  @dev: device pointer.
608  *  @ifr: An IOCTL specific structure, that can contain a pointer to
609  *  a proprietary structure used to pass information to the driver.
610  *  Description:
611  *  This function configures the MAC to enable/disable both outgoing(TX)
612  *  and incoming(RX) packets time stamping based on user input.
613  *  Return Value:
614  *  0 on success and an appropriate -ve integer on failure.
615  */
616 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
617 {
618 	struct stmmac_priv *priv = netdev_priv(dev);
619 	struct hwtstamp_config config;
620 	u32 ptp_v2 = 0;
621 	u32 tstamp_all = 0;
622 	u32 ptp_over_ipv4_udp = 0;
623 	u32 ptp_over_ipv6_udp = 0;
624 	u32 ptp_over_ethernet = 0;
625 	u32 snap_type_sel = 0;
626 	u32 ts_master_en = 0;
627 	u32 ts_event_en = 0;
628 
629 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
630 		netdev_alert(priv->dev, "No support for HW time stamping\n");
631 		priv->hwts_tx_en = 0;
632 		priv->hwts_rx_en = 0;
633 
634 		return -EOPNOTSUPP;
635 	}
636 
637 	if (copy_from_user(&config, ifr->ifr_data,
638 			   sizeof(config)))
639 		return -EFAULT;
640 
641 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
642 		   __func__, config.flags, config.tx_type, config.rx_filter);
643 
644 	if (config.tx_type != HWTSTAMP_TX_OFF &&
645 	    config.tx_type != HWTSTAMP_TX_ON)
646 		return -ERANGE;
647 
648 	if (priv->adv_ts) {
649 		switch (config.rx_filter) {
650 		case HWTSTAMP_FILTER_NONE:
651 			/* time stamp no incoming packet at all */
652 			config.rx_filter = HWTSTAMP_FILTER_NONE;
653 			break;
654 
655 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
656 			/* PTP v1, UDP, any kind of event packet */
657 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
658 			/* 'xmac' hardware can support Sync, Pdelay_Req and
659 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
660 			 * This leaves Delay_Req timestamps out.
661 			 * Enable all events *and* general purpose message
662 			 * timestamping
663 			 */
664 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
665 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
666 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
667 			break;
668 
669 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
670 			/* PTP v1, UDP, Sync packet */
671 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
672 			/* take time stamp for SYNC messages only */
673 			ts_event_en = PTP_TCR_TSEVNTENA;
674 
675 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
676 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
677 			break;
678 
679 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
680 			/* PTP v1, UDP, Delay_req packet */
681 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
682 			/* take time stamp for Delay_Req messages only */
683 			ts_master_en = PTP_TCR_TSMSTRENA;
684 			ts_event_en = PTP_TCR_TSEVNTENA;
685 
686 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
687 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
688 			break;
689 
690 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
691 			/* PTP v2, UDP, any kind of event packet */
692 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
693 			ptp_v2 = PTP_TCR_TSVER2ENA;
694 			/* take time stamp for all event messages */
695 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
696 
697 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
698 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
699 			break;
700 
701 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
702 			/* PTP v2, UDP, Sync packet */
703 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
704 			ptp_v2 = PTP_TCR_TSVER2ENA;
705 			/* take time stamp for SYNC messages only */
706 			ts_event_en = PTP_TCR_TSEVNTENA;
707 
708 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
709 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
710 			break;
711 
712 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
713 			/* PTP v2, UDP, Delay_req packet */
714 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
715 			ptp_v2 = PTP_TCR_TSVER2ENA;
716 			/* take time stamp for Delay_Req messages only */
717 			ts_master_en = PTP_TCR_TSMSTRENA;
718 			ts_event_en = PTP_TCR_TSEVNTENA;
719 
720 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
721 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
722 			break;
723 
724 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
725 			/* PTP v2/802.AS1 any layer, any kind of event packet */
726 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
727 			ptp_v2 = PTP_TCR_TSVER2ENA;
728 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
729 			if (priv->synopsys_id < DWMAC_CORE_4_10)
730 				ts_event_en = PTP_TCR_TSEVNTENA;
731 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
732 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
733 			ptp_over_ethernet = PTP_TCR_TSIPENA;
734 			break;
735 
736 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
737 			/* PTP v2/802.AS1, any layer, Sync packet */
738 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
739 			ptp_v2 = PTP_TCR_TSVER2ENA;
740 			/* take time stamp for SYNC messages only */
741 			ts_event_en = PTP_TCR_TSEVNTENA;
742 
743 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
744 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
745 			ptp_over_ethernet = PTP_TCR_TSIPENA;
746 			break;
747 
748 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
749 			/* PTP v2/802.AS1, any layer, Delay_req packet */
750 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
751 			ptp_v2 = PTP_TCR_TSVER2ENA;
752 			/* take time stamp for Delay_Req messages only */
753 			ts_master_en = PTP_TCR_TSMSTRENA;
754 			ts_event_en = PTP_TCR_TSEVNTENA;
755 
756 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
757 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
758 			ptp_over_ethernet = PTP_TCR_TSIPENA;
759 			break;
760 
761 		case HWTSTAMP_FILTER_NTP_ALL:
762 		case HWTSTAMP_FILTER_ALL:
763 			/* time stamp any incoming packet */
764 			config.rx_filter = HWTSTAMP_FILTER_ALL;
765 			tstamp_all = PTP_TCR_TSENALL;
766 			break;
767 
768 		default:
769 			return -ERANGE;
770 		}
771 	} else {
772 		switch (config.rx_filter) {
773 		case HWTSTAMP_FILTER_NONE:
774 			config.rx_filter = HWTSTAMP_FILTER_NONE;
775 			break;
776 		default:
777 			/* PTP v1, UDP, any kind of event packet */
778 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
779 			break;
780 		}
781 	}
782 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
783 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
784 
785 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
786 
787 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
788 		priv->systime_flags |= tstamp_all | ptp_v2 |
789 				       ptp_over_ethernet | ptp_over_ipv6_udp |
790 				       ptp_over_ipv4_udp | ts_event_en |
791 				       ts_master_en | snap_type_sel;
792 	}
793 
794 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
795 
796 	memcpy(&priv->tstamp_config, &config, sizeof(config));
797 
798 	return copy_to_user(ifr->ifr_data, &config,
799 			    sizeof(config)) ? -EFAULT : 0;
800 }
801 
802 /**
803  *  stmmac_hwtstamp_get - read hardware timestamping.
804  *  @dev: device pointer.
805  *  @ifr: An IOCTL specific structure, that can contain a pointer to
806  *  a proprietary structure used to pass information to the driver.
807  *  Description:
808  *  This function obtain the current hardware timestamping settings
809  *  as requested.
810  */
811 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
812 {
813 	struct stmmac_priv *priv = netdev_priv(dev);
814 	struct hwtstamp_config *config = &priv->tstamp_config;
815 
816 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
817 		return -EOPNOTSUPP;
818 
819 	return copy_to_user(ifr->ifr_data, config,
820 			    sizeof(*config)) ? -EFAULT : 0;
821 }
822 
823 /**
824  * stmmac_init_tstamp_counter - init hardware timestamping counter
825  * @priv: driver private structure
826  * @systime_flags: timestamping flags
827  * Description:
828  * Initialize hardware counter for packet timestamping.
829  * This is valid as long as the interface is open and not suspended.
830  * Will be rerun after resuming from suspend, case in which the timestamping
831  * flags updated by stmmac_hwtstamp_set() also need to be restored.
832  */
833 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
834 {
835 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
836 	struct timespec64 now;
837 	u32 sec_inc = 0;
838 	u64 temp = 0;
839 
840 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
841 		return -EOPNOTSUPP;
842 
843 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
844 	priv->systime_flags = systime_flags;
845 
846 	/* program Sub Second Increment reg */
847 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
848 					   priv->plat->clk_ptp_rate,
849 					   xmac, &sec_inc);
850 	temp = div_u64(1000000000ULL, sec_inc);
851 
852 	/* Store sub second increment for later use */
853 	priv->sub_second_inc = sec_inc;
854 
855 	/* calculate default added value:
856 	 * formula is :
857 	 * addend = (2^32)/freq_div_ratio;
858 	 * where, freq_div_ratio = 1e9ns/sec_inc
859 	 */
860 	temp = (u64)(temp << 32);
861 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
862 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
863 
864 	/* initialize system time */
865 	ktime_get_real_ts64(&now);
866 
867 	/* lower 32 bits of tv_sec are safe until y2106 */
868 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
869 
870 	return 0;
871 }
872 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
873 
874 /**
875  * stmmac_init_ptp - init PTP
876  * @priv: driver private structure
877  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
878  * This is done by looking at the HW cap. register.
879  * This function also registers the ptp driver.
880  */
881 static int stmmac_init_ptp(struct stmmac_priv *priv)
882 {
883 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
884 	int ret;
885 
886 	if (priv->plat->ptp_clk_freq_config)
887 		priv->plat->ptp_clk_freq_config(priv);
888 
889 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
890 	if (ret)
891 		return ret;
892 
893 	priv->adv_ts = 0;
894 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
895 	if (xmac && priv->dma_cap.atime_stamp)
896 		priv->adv_ts = 1;
897 	/* Dwmac 3.x core with extend_desc can support adv_ts */
898 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
899 		priv->adv_ts = 1;
900 
901 	if (priv->dma_cap.time_stamp)
902 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
903 
904 	if (priv->adv_ts)
905 		netdev_info(priv->dev,
906 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
907 
908 	priv->hwts_tx_en = 0;
909 	priv->hwts_rx_en = 0;
910 
911 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
912 		stmmac_hwtstamp_correct_latency(priv, priv);
913 
914 	return 0;
915 }
916 
917 static void stmmac_release_ptp(struct stmmac_priv *priv)
918 {
919 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
920 	stmmac_ptp_unregister(priv);
921 }
922 
923 /**
924  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
925  *  @priv: driver private structure
926  *  @duplex: duplex passed to the next function
927  *  Description: It is used for configuring the flow control in all queues
928  */
929 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
930 {
931 	u32 tx_cnt = priv->plat->tx_queues_to_use;
932 
933 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
934 			priv->pause, tx_cnt);
935 }
936 
937 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
938 					 phy_interface_t interface)
939 {
940 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
941 
942 	/* Refresh the MAC-specific capabilities */
943 	stmmac_mac_update_caps(priv);
944 
945 	config->mac_capabilities = priv->hw->link.caps;
946 
947 	if (priv->plat->max_speed)
948 		phylink_limit_mac_speed(config, priv->plat->max_speed);
949 
950 	return config->mac_capabilities;
951 }
952 
953 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
954 						 phy_interface_t interface)
955 {
956 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
957 	struct phylink_pcs *pcs;
958 
959 	if (priv->plat->select_pcs) {
960 		pcs = priv->plat->select_pcs(priv, interface);
961 		if (!IS_ERR(pcs))
962 			return pcs;
963 	}
964 
965 	return NULL;
966 }
967 
968 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
969 			      const struct phylink_link_state *state)
970 {
971 	/* Nothing to do, xpcs_config() handles everything */
972 }
973 
974 static void stmmac_mac_link_down(struct phylink_config *config,
975 				 unsigned int mode, phy_interface_t interface)
976 {
977 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
978 
979 	stmmac_mac_set(priv, priv->ioaddr, false);
980 	priv->eee_active = false;
981 	priv->tx_lpi_enabled = false;
982 	priv->eee_enabled = stmmac_eee_init(priv);
983 	stmmac_set_eee_pls(priv, priv->hw, false);
984 
985 	if (stmmac_fpe_supported(priv))
986 		stmmac_fpe_link_state_handle(priv, false);
987 }
988 
989 static void stmmac_mac_link_up(struct phylink_config *config,
990 			       struct phy_device *phy,
991 			       unsigned int mode, phy_interface_t interface,
992 			       int speed, int duplex,
993 			       bool tx_pause, bool rx_pause)
994 {
995 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
996 	u32 old_ctrl, ctrl;
997 
998 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
999 	    priv->plat->serdes_powerup)
1000 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1001 
1002 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1003 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1004 
1005 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1006 		switch (speed) {
1007 		case SPEED_10000:
1008 			ctrl |= priv->hw->link.xgmii.speed10000;
1009 			break;
1010 		case SPEED_5000:
1011 			ctrl |= priv->hw->link.xgmii.speed5000;
1012 			break;
1013 		case SPEED_2500:
1014 			ctrl |= priv->hw->link.xgmii.speed2500;
1015 			break;
1016 		default:
1017 			return;
1018 		}
1019 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1020 		switch (speed) {
1021 		case SPEED_100000:
1022 			ctrl |= priv->hw->link.xlgmii.speed100000;
1023 			break;
1024 		case SPEED_50000:
1025 			ctrl |= priv->hw->link.xlgmii.speed50000;
1026 			break;
1027 		case SPEED_40000:
1028 			ctrl |= priv->hw->link.xlgmii.speed40000;
1029 			break;
1030 		case SPEED_25000:
1031 			ctrl |= priv->hw->link.xlgmii.speed25000;
1032 			break;
1033 		case SPEED_10000:
1034 			ctrl |= priv->hw->link.xgmii.speed10000;
1035 			break;
1036 		case SPEED_2500:
1037 			ctrl |= priv->hw->link.speed2500;
1038 			break;
1039 		case SPEED_1000:
1040 			ctrl |= priv->hw->link.speed1000;
1041 			break;
1042 		default:
1043 			return;
1044 		}
1045 	} else {
1046 		switch (speed) {
1047 		case SPEED_2500:
1048 			ctrl |= priv->hw->link.speed2500;
1049 			break;
1050 		case SPEED_1000:
1051 			ctrl |= priv->hw->link.speed1000;
1052 			break;
1053 		case SPEED_100:
1054 			ctrl |= priv->hw->link.speed100;
1055 			break;
1056 		case SPEED_10:
1057 			ctrl |= priv->hw->link.speed10;
1058 			break;
1059 		default:
1060 			return;
1061 		}
1062 	}
1063 
1064 	priv->speed = speed;
1065 
1066 	if (priv->plat->fix_mac_speed)
1067 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1068 
1069 	if (!duplex)
1070 		ctrl &= ~priv->hw->link.duplex;
1071 	else
1072 		ctrl |= priv->hw->link.duplex;
1073 
1074 	/* Flow Control operation */
1075 	if (rx_pause && tx_pause)
1076 		priv->flow_ctrl = FLOW_AUTO;
1077 	else if (rx_pause && !tx_pause)
1078 		priv->flow_ctrl = FLOW_RX;
1079 	else if (!rx_pause && tx_pause)
1080 		priv->flow_ctrl = FLOW_TX;
1081 	else
1082 		priv->flow_ctrl = FLOW_OFF;
1083 
1084 	stmmac_mac_flow_ctrl(priv, duplex);
1085 
1086 	if (ctrl != old_ctrl)
1087 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1088 
1089 	stmmac_mac_set(priv, priv->ioaddr, true);
1090 	if (phy && priv->dma_cap.eee) {
1091 		priv->eee_active =
1092 			phy_init_eee(phy, !(priv->plat->flags &
1093 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1094 		priv->eee_enabled = stmmac_eee_init(priv);
1095 		priv->tx_lpi_enabled = priv->eee_enabled;
1096 		stmmac_set_eee_pls(priv, priv->hw, true);
1097 	}
1098 
1099 	if (stmmac_fpe_supported(priv))
1100 		stmmac_fpe_link_state_handle(priv, true);
1101 
1102 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1103 		stmmac_hwtstamp_correct_latency(priv, priv);
1104 }
1105 
1106 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1107 	.mac_get_caps = stmmac_mac_get_caps,
1108 	.mac_select_pcs = stmmac_mac_select_pcs,
1109 	.mac_config = stmmac_mac_config,
1110 	.mac_link_down = stmmac_mac_link_down,
1111 	.mac_link_up = stmmac_mac_link_up,
1112 };
1113 
1114 /**
1115  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116  * @priv: driver private structure
1117  * Description: this is to verify if the HW supports the PCS.
1118  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1119  * configured for the TBI, RTBI, or SGMII PHY interface.
1120  */
1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122 {
1123 	int interface = priv->plat->mac_interface;
1124 
1125 	if (priv->dma_cap.pcs) {
1126 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_RGMII;
1132 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134 			priv->hw->pcs = STMMAC_PCS_SGMII;
1135 		}
1136 	}
1137 }
1138 
1139 /**
1140  * stmmac_init_phy - PHY initialization
1141  * @dev: net device structure
1142  * Description: it initializes the driver's PHY state, and attaches the PHY
1143  * to the mac driver.
1144  *  Return value:
1145  *  0 on success
1146  */
1147 static int stmmac_init_phy(struct net_device *dev)
1148 {
1149 	struct stmmac_priv *priv = netdev_priv(dev);
1150 	struct fwnode_handle *phy_fwnode;
1151 	struct fwnode_handle *fwnode;
1152 	int ret;
1153 
1154 	if (!phylink_expects_phy(priv->phylink))
1155 		return 0;
1156 
1157 	fwnode = priv->plat->port_node;
1158 	if (!fwnode)
1159 		fwnode = dev_fwnode(priv->device);
1160 
1161 	if (fwnode)
1162 		phy_fwnode = fwnode_get_phy_node(fwnode);
1163 	else
1164 		phy_fwnode = NULL;
1165 
1166 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1167 	 * manually parse it
1168 	 */
1169 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170 		int addr = priv->plat->phy_addr;
1171 		struct phy_device *phydev;
1172 
1173 		if (addr < 0) {
1174 			netdev_err(priv->dev, "no phy found\n");
1175 			return -ENODEV;
1176 		}
1177 
1178 		phydev = mdiobus_get_phy(priv->mii, addr);
1179 		if (!phydev) {
1180 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181 			return -ENODEV;
1182 		}
1183 
1184 		if (priv->dma_cap.eee)
1185 			phy_support_eee(phydev);
1186 
1187 		ret = phylink_connect_phy(priv->phylink, phydev);
1188 	} else {
1189 		fwnode_handle_put(phy_fwnode);
1190 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1191 	}
1192 
1193 	if (!priv->plat->pmt) {
1194 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1195 
1196 		phylink_ethtool_get_wol(priv->phylink, &wol);
1197 		device_set_wakeup_capable(priv->device, !!wol.supported);
1198 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1199 	}
1200 
1201 	return ret;
1202 }
1203 
1204 static int stmmac_phy_setup(struct stmmac_priv *priv)
1205 {
1206 	struct stmmac_mdio_bus_data *mdio_bus_data;
1207 	int mode = priv->plat->phy_interface;
1208 	struct fwnode_handle *fwnode;
1209 	struct phylink *phylink;
1210 
1211 	priv->phylink_config.dev = &priv->dev->dev;
1212 	priv->phylink_config.type = PHYLINK_NETDEV;
1213 	priv->phylink_config.mac_managed_pm = true;
1214 
1215 	/* Stmmac always requires an RX clock for hardware initialization */
1216 	priv->phylink_config.mac_requires_rxc = true;
1217 
1218 	mdio_bus_data = priv->plat->mdio_bus_data;
1219 	if (mdio_bus_data)
1220 		priv->phylink_config.default_an_inband =
1221 			mdio_bus_data->default_an_inband;
1222 
1223 	/* Set the platform/firmware specified interface mode. Note, phylink
1224 	 * deals with the PHY interface mode, not the MAC interface mode.
1225 	 */
1226 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1227 
1228 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1229 	if (priv->hw->xpcs)
1230 		xpcs_get_interfaces(priv->hw->xpcs,
1231 				    priv->phylink_config.supported_interfaces);
1232 
1233 	fwnode = priv->plat->port_node;
1234 	if (!fwnode)
1235 		fwnode = dev_fwnode(priv->device);
1236 
1237 	phylink = phylink_create(&priv->phylink_config, fwnode,
1238 				 mode, &stmmac_phylink_mac_ops);
1239 	if (IS_ERR(phylink))
1240 		return PTR_ERR(phylink);
1241 
1242 	priv->phylink = phylink;
1243 	return 0;
1244 }
1245 
1246 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1247 				    struct stmmac_dma_conf *dma_conf)
1248 {
1249 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1250 	unsigned int desc_size;
1251 	void *head_rx;
1252 	u32 queue;
1253 
1254 	/* Display RX rings */
1255 	for (queue = 0; queue < rx_cnt; queue++) {
1256 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1257 
1258 		pr_info("\tRX Queue %u rings\n", queue);
1259 
1260 		if (priv->extend_desc) {
1261 			head_rx = (void *)rx_q->dma_erx;
1262 			desc_size = sizeof(struct dma_extended_desc);
1263 		} else {
1264 			head_rx = (void *)rx_q->dma_rx;
1265 			desc_size = sizeof(struct dma_desc);
1266 		}
1267 
1268 		/* Display RX ring */
1269 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1270 				    rx_q->dma_rx_phy, desc_size);
1271 	}
1272 }
1273 
1274 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1275 				    struct stmmac_dma_conf *dma_conf)
1276 {
1277 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1278 	unsigned int desc_size;
1279 	void *head_tx;
1280 	u32 queue;
1281 
1282 	/* Display TX rings */
1283 	for (queue = 0; queue < tx_cnt; queue++) {
1284 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1285 
1286 		pr_info("\tTX Queue %d rings\n", queue);
1287 
1288 		if (priv->extend_desc) {
1289 			head_tx = (void *)tx_q->dma_etx;
1290 			desc_size = sizeof(struct dma_extended_desc);
1291 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1292 			head_tx = (void *)tx_q->dma_entx;
1293 			desc_size = sizeof(struct dma_edesc);
1294 		} else {
1295 			head_tx = (void *)tx_q->dma_tx;
1296 			desc_size = sizeof(struct dma_desc);
1297 		}
1298 
1299 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1300 				    tx_q->dma_tx_phy, desc_size);
1301 	}
1302 }
1303 
1304 static void stmmac_display_rings(struct stmmac_priv *priv,
1305 				 struct stmmac_dma_conf *dma_conf)
1306 {
1307 	/* Display RX ring */
1308 	stmmac_display_rx_rings(priv, dma_conf);
1309 
1310 	/* Display TX ring */
1311 	stmmac_display_tx_rings(priv, dma_conf);
1312 }
1313 
1314 static int stmmac_set_bfsize(int mtu, int bufsize)
1315 {
1316 	int ret = bufsize;
1317 
1318 	if (mtu >= BUF_SIZE_8KiB)
1319 		ret = BUF_SIZE_16KiB;
1320 	else if (mtu >= BUF_SIZE_4KiB)
1321 		ret = BUF_SIZE_8KiB;
1322 	else if (mtu >= BUF_SIZE_2KiB)
1323 		ret = BUF_SIZE_4KiB;
1324 	else if (mtu > DEFAULT_BUFSIZE)
1325 		ret = BUF_SIZE_2KiB;
1326 	else
1327 		ret = DEFAULT_BUFSIZE;
1328 
1329 	return ret;
1330 }
1331 
1332 /**
1333  * stmmac_clear_rx_descriptors - clear RX descriptors
1334  * @priv: driver private structure
1335  * @dma_conf: structure to take the dma data
1336  * @queue: RX queue index
1337  * Description: this function is called to clear the RX descriptors
1338  * in case of both basic and extended descriptors are used.
1339  */
1340 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1341 					struct stmmac_dma_conf *dma_conf,
1342 					u32 queue)
1343 {
1344 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1345 	int i;
1346 
1347 	/* Clear the RX descriptors */
1348 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1349 		if (priv->extend_desc)
1350 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1351 					priv->use_riwt, priv->mode,
1352 					(i == dma_conf->dma_rx_size - 1),
1353 					dma_conf->dma_buf_sz);
1354 		else
1355 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1356 					priv->use_riwt, priv->mode,
1357 					(i == dma_conf->dma_rx_size - 1),
1358 					dma_conf->dma_buf_sz);
1359 }
1360 
1361 /**
1362  * stmmac_clear_tx_descriptors - clear tx descriptors
1363  * @priv: driver private structure
1364  * @dma_conf: structure to take the dma data
1365  * @queue: TX queue index.
1366  * Description: this function is called to clear the TX descriptors
1367  * in case of both basic and extended descriptors are used.
1368  */
1369 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1370 					struct stmmac_dma_conf *dma_conf,
1371 					u32 queue)
1372 {
1373 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1374 	int i;
1375 
1376 	/* Clear the TX descriptors */
1377 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1378 		int last = (i == (dma_conf->dma_tx_size - 1));
1379 		struct dma_desc *p;
1380 
1381 		if (priv->extend_desc)
1382 			p = &tx_q->dma_etx[i].basic;
1383 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1384 			p = &tx_q->dma_entx[i].basic;
1385 		else
1386 			p = &tx_q->dma_tx[i];
1387 
1388 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1389 	}
1390 }
1391 
1392 /**
1393  * stmmac_clear_descriptors - clear descriptors
1394  * @priv: driver private structure
1395  * @dma_conf: structure to take the dma data
1396  * Description: this function is called to clear the TX and RX descriptors
1397  * in case of both basic and extended descriptors are used.
1398  */
1399 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1400 				     struct stmmac_dma_conf *dma_conf)
1401 {
1402 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1403 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1404 	u32 queue;
1405 
1406 	/* Clear the RX descriptors */
1407 	for (queue = 0; queue < rx_queue_cnt; queue++)
1408 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1409 
1410 	/* Clear the TX descriptors */
1411 	for (queue = 0; queue < tx_queue_cnt; queue++)
1412 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1413 }
1414 
1415 /**
1416  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1417  * @priv: driver private structure
1418  * @dma_conf: structure to take the dma data
1419  * @p: descriptor pointer
1420  * @i: descriptor index
1421  * @flags: gfp flag
1422  * @queue: RX queue index
1423  * Description: this function is called to allocate a receive buffer, perform
1424  * the DMA mapping and init the descriptor.
1425  */
1426 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1427 				  struct stmmac_dma_conf *dma_conf,
1428 				  struct dma_desc *p,
1429 				  int i, gfp_t flags, u32 queue)
1430 {
1431 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1432 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1433 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1434 
1435 	if (priv->dma_cap.host_dma_width <= 32)
1436 		gfp |= GFP_DMA32;
1437 
1438 	if (!buf->page) {
1439 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1440 		if (!buf->page)
1441 			return -ENOMEM;
1442 		buf->page_offset = stmmac_rx_offset(priv);
1443 	}
1444 
1445 	if (priv->sph && !buf->sec_page) {
1446 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1447 		if (!buf->sec_page)
1448 			return -ENOMEM;
1449 
1450 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1451 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1452 	} else {
1453 		buf->sec_page = NULL;
1454 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1455 	}
1456 
1457 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1458 
1459 	stmmac_set_desc_addr(priv, p, buf->addr);
1460 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1461 		stmmac_init_desc3(priv, p);
1462 
1463 	return 0;
1464 }
1465 
1466 /**
1467  * stmmac_free_rx_buffer - free RX dma buffers
1468  * @priv: private structure
1469  * @rx_q: RX queue
1470  * @i: buffer index.
1471  */
1472 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1473 				  struct stmmac_rx_queue *rx_q,
1474 				  int i)
1475 {
1476 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1477 
1478 	if (buf->page)
1479 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1480 	buf->page = NULL;
1481 
1482 	if (buf->sec_page)
1483 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1484 	buf->sec_page = NULL;
1485 }
1486 
1487 /**
1488  * stmmac_free_tx_buffer - free RX dma buffers
1489  * @priv: private structure
1490  * @dma_conf: structure to take the dma data
1491  * @queue: RX queue index
1492  * @i: buffer index.
1493  */
1494 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1495 				  struct stmmac_dma_conf *dma_conf,
1496 				  u32 queue, int i)
1497 {
1498 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1499 
1500 	if (tx_q->tx_skbuff_dma[i].buf &&
1501 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1502 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1503 			dma_unmap_page(priv->device,
1504 				       tx_q->tx_skbuff_dma[i].buf,
1505 				       tx_q->tx_skbuff_dma[i].len,
1506 				       DMA_TO_DEVICE);
1507 		else
1508 			dma_unmap_single(priv->device,
1509 					 tx_q->tx_skbuff_dma[i].buf,
1510 					 tx_q->tx_skbuff_dma[i].len,
1511 					 DMA_TO_DEVICE);
1512 	}
1513 
1514 	if (tx_q->xdpf[i] &&
1515 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1516 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1517 		xdp_return_frame(tx_q->xdpf[i]);
1518 		tx_q->xdpf[i] = NULL;
1519 	}
1520 
1521 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1522 		tx_q->xsk_frames_done++;
1523 
1524 	if (tx_q->tx_skbuff[i] &&
1525 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1526 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1527 		tx_q->tx_skbuff[i] = NULL;
1528 	}
1529 
1530 	tx_q->tx_skbuff_dma[i].buf = 0;
1531 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1532 }
1533 
1534 /**
1535  * dma_free_rx_skbufs - free RX dma buffers
1536  * @priv: private structure
1537  * @dma_conf: structure to take the dma data
1538  * @queue: RX queue index
1539  */
1540 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1541 			       struct stmmac_dma_conf *dma_conf,
1542 			       u32 queue)
1543 {
1544 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1545 	int i;
1546 
1547 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1548 		stmmac_free_rx_buffer(priv, rx_q, i);
1549 }
1550 
1551 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1552 				   struct stmmac_dma_conf *dma_conf,
1553 				   u32 queue, gfp_t flags)
1554 {
1555 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1556 	int i;
1557 
1558 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1559 		struct dma_desc *p;
1560 		int ret;
1561 
1562 		if (priv->extend_desc)
1563 			p = &((rx_q->dma_erx + i)->basic);
1564 		else
1565 			p = rx_q->dma_rx + i;
1566 
1567 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1568 					     queue);
1569 		if (ret)
1570 			return ret;
1571 
1572 		rx_q->buf_alloc_num++;
1573 	}
1574 
1575 	return 0;
1576 }
1577 
1578 /**
1579  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1580  * @priv: private structure
1581  * @dma_conf: structure to take the dma data
1582  * @queue: RX queue index
1583  */
1584 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1585 				struct stmmac_dma_conf *dma_conf,
1586 				u32 queue)
1587 {
1588 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1589 	int i;
1590 
1591 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1592 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1593 
1594 		if (!buf->xdp)
1595 			continue;
1596 
1597 		xsk_buff_free(buf->xdp);
1598 		buf->xdp = NULL;
1599 	}
1600 }
1601 
1602 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1603 				      struct stmmac_dma_conf *dma_conf,
1604 				      u32 queue)
1605 {
1606 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1607 	int i;
1608 
1609 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1610 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1611 	 * use this macro to make sure no size violations.
1612 	 */
1613 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1614 
1615 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1616 		struct stmmac_rx_buffer *buf;
1617 		dma_addr_t dma_addr;
1618 		struct dma_desc *p;
1619 
1620 		if (priv->extend_desc)
1621 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1622 		else
1623 			p = rx_q->dma_rx + i;
1624 
1625 		buf = &rx_q->buf_pool[i];
1626 
1627 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1628 		if (!buf->xdp)
1629 			return -ENOMEM;
1630 
1631 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1632 		stmmac_set_desc_addr(priv, p, dma_addr);
1633 		rx_q->buf_alloc_num++;
1634 	}
1635 
1636 	return 0;
1637 }
1638 
1639 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1640 {
1641 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1642 		return NULL;
1643 
1644 	return xsk_get_pool_from_qid(priv->dev, queue);
1645 }
1646 
1647 /**
1648  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1649  * @priv: driver private structure
1650  * @dma_conf: structure to take the dma data
1651  * @queue: RX queue index
1652  * @flags: gfp flag.
1653  * Description: this function initializes the DMA RX descriptors
1654  * and allocates the socket buffers. It supports the chained and ring
1655  * modes.
1656  */
1657 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1658 				    struct stmmac_dma_conf *dma_conf,
1659 				    u32 queue, gfp_t flags)
1660 {
1661 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1662 	int ret;
1663 
1664 	netif_dbg(priv, probe, priv->dev,
1665 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1666 		  (u32)rx_q->dma_rx_phy);
1667 
1668 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1669 
1670 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1671 
1672 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1673 
1674 	if (rx_q->xsk_pool) {
1675 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1676 						   MEM_TYPE_XSK_BUFF_POOL,
1677 						   NULL));
1678 		netdev_info(priv->dev,
1679 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1680 			    rx_q->queue_index);
1681 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1682 	} else {
1683 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1684 						   MEM_TYPE_PAGE_POOL,
1685 						   rx_q->page_pool));
1686 		netdev_info(priv->dev,
1687 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1688 			    rx_q->queue_index);
1689 	}
1690 
1691 	if (rx_q->xsk_pool) {
1692 		/* RX XDP ZC buffer pool may not be populated, e.g.
1693 		 * xdpsock TX-only.
1694 		 */
1695 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1696 	} else {
1697 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1698 		if (ret < 0)
1699 			return -ENOMEM;
1700 	}
1701 
1702 	/* Setup the chained descriptor addresses */
1703 	if (priv->mode == STMMAC_CHAIN_MODE) {
1704 		if (priv->extend_desc)
1705 			stmmac_mode_init(priv, rx_q->dma_erx,
1706 					 rx_q->dma_rx_phy,
1707 					 dma_conf->dma_rx_size, 1);
1708 		else
1709 			stmmac_mode_init(priv, rx_q->dma_rx,
1710 					 rx_q->dma_rx_phy,
1711 					 dma_conf->dma_rx_size, 0);
1712 	}
1713 
1714 	return 0;
1715 }
1716 
1717 static int init_dma_rx_desc_rings(struct net_device *dev,
1718 				  struct stmmac_dma_conf *dma_conf,
1719 				  gfp_t flags)
1720 {
1721 	struct stmmac_priv *priv = netdev_priv(dev);
1722 	u32 rx_count = priv->plat->rx_queues_to_use;
1723 	int queue;
1724 	int ret;
1725 
1726 	/* RX INITIALIZATION */
1727 	netif_dbg(priv, probe, priv->dev,
1728 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1729 
1730 	for (queue = 0; queue < rx_count; queue++) {
1731 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1732 		if (ret)
1733 			goto err_init_rx_buffers;
1734 	}
1735 
1736 	return 0;
1737 
1738 err_init_rx_buffers:
1739 	while (queue >= 0) {
1740 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1741 
1742 		if (rx_q->xsk_pool)
1743 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1744 		else
1745 			dma_free_rx_skbufs(priv, dma_conf, queue);
1746 
1747 		rx_q->buf_alloc_num = 0;
1748 		rx_q->xsk_pool = NULL;
1749 
1750 		queue--;
1751 	}
1752 
1753 	return ret;
1754 }
1755 
1756 /**
1757  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1758  * @priv: driver private structure
1759  * @dma_conf: structure to take the dma data
1760  * @queue: TX queue index
1761  * Description: this function initializes the DMA TX descriptors
1762  * and allocates the socket buffers. It supports the chained and ring
1763  * modes.
1764  */
1765 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1766 				    struct stmmac_dma_conf *dma_conf,
1767 				    u32 queue)
1768 {
1769 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1770 	int i;
1771 
1772 	netif_dbg(priv, probe, priv->dev,
1773 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1774 		  (u32)tx_q->dma_tx_phy);
1775 
1776 	/* Setup the chained descriptor addresses */
1777 	if (priv->mode == STMMAC_CHAIN_MODE) {
1778 		if (priv->extend_desc)
1779 			stmmac_mode_init(priv, tx_q->dma_etx,
1780 					 tx_q->dma_tx_phy,
1781 					 dma_conf->dma_tx_size, 1);
1782 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1783 			stmmac_mode_init(priv, tx_q->dma_tx,
1784 					 tx_q->dma_tx_phy,
1785 					 dma_conf->dma_tx_size, 0);
1786 	}
1787 
1788 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1789 
1790 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1791 		struct dma_desc *p;
1792 
1793 		if (priv->extend_desc)
1794 			p = &((tx_q->dma_etx + i)->basic);
1795 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1796 			p = &((tx_q->dma_entx + i)->basic);
1797 		else
1798 			p = tx_q->dma_tx + i;
1799 
1800 		stmmac_clear_desc(priv, p);
1801 
1802 		tx_q->tx_skbuff_dma[i].buf = 0;
1803 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1804 		tx_q->tx_skbuff_dma[i].len = 0;
1805 		tx_q->tx_skbuff_dma[i].last_segment = false;
1806 		tx_q->tx_skbuff[i] = NULL;
1807 	}
1808 
1809 	return 0;
1810 }
1811 
1812 static int init_dma_tx_desc_rings(struct net_device *dev,
1813 				  struct stmmac_dma_conf *dma_conf)
1814 {
1815 	struct stmmac_priv *priv = netdev_priv(dev);
1816 	u32 tx_queue_cnt;
1817 	u32 queue;
1818 
1819 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1820 
1821 	for (queue = 0; queue < tx_queue_cnt; queue++)
1822 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1823 
1824 	return 0;
1825 }
1826 
1827 /**
1828  * init_dma_desc_rings - init the RX/TX descriptor rings
1829  * @dev: net device structure
1830  * @dma_conf: structure to take the dma data
1831  * @flags: gfp flag.
1832  * Description: this function initializes the DMA RX/TX descriptors
1833  * and allocates the socket buffers. It supports the chained and ring
1834  * modes.
1835  */
1836 static int init_dma_desc_rings(struct net_device *dev,
1837 			       struct stmmac_dma_conf *dma_conf,
1838 			       gfp_t flags)
1839 {
1840 	struct stmmac_priv *priv = netdev_priv(dev);
1841 	int ret;
1842 
1843 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1844 	if (ret)
1845 		return ret;
1846 
1847 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1848 
1849 	stmmac_clear_descriptors(priv, dma_conf);
1850 
1851 	if (netif_msg_hw(priv))
1852 		stmmac_display_rings(priv, dma_conf);
1853 
1854 	return ret;
1855 }
1856 
1857 /**
1858  * dma_free_tx_skbufs - free TX dma buffers
1859  * @priv: private structure
1860  * @dma_conf: structure to take the dma data
1861  * @queue: TX queue index
1862  */
1863 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1864 			       struct stmmac_dma_conf *dma_conf,
1865 			       u32 queue)
1866 {
1867 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1868 	int i;
1869 
1870 	tx_q->xsk_frames_done = 0;
1871 
1872 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1873 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1874 
1875 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1876 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1877 		tx_q->xsk_frames_done = 0;
1878 		tx_q->xsk_pool = NULL;
1879 	}
1880 }
1881 
1882 /**
1883  * stmmac_free_tx_skbufs - free TX skb buffers
1884  * @priv: private structure
1885  */
1886 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1887 {
1888 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1889 	u32 queue;
1890 
1891 	for (queue = 0; queue < tx_queue_cnt; queue++)
1892 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1893 }
1894 
1895 /**
1896  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1897  * @priv: private structure
1898  * @dma_conf: structure to take the dma data
1899  * @queue: RX queue index
1900  */
1901 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1902 					 struct stmmac_dma_conf *dma_conf,
1903 					 u32 queue)
1904 {
1905 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1906 
1907 	/* Release the DMA RX socket buffers */
1908 	if (rx_q->xsk_pool)
1909 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1910 	else
1911 		dma_free_rx_skbufs(priv, dma_conf, queue);
1912 
1913 	rx_q->buf_alloc_num = 0;
1914 	rx_q->xsk_pool = NULL;
1915 
1916 	/* Free DMA regions of consistent memory previously allocated */
1917 	if (!priv->extend_desc)
1918 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1919 				  sizeof(struct dma_desc),
1920 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1921 	else
1922 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1923 				  sizeof(struct dma_extended_desc),
1924 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1925 
1926 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1927 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1928 
1929 	kfree(rx_q->buf_pool);
1930 	if (rx_q->page_pool)
1931 		page_pool_destroy(rx_q->page_pool);
1932 }
1933 
1934 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1935 				       struct stmmac_dma_conf *dma_conf)
1936 {
1937 	u32 rx_count = priv->plat->rx_queues_to_use;
1938 	u32 queue;
1939 
1940 	/* Free RX queue resources */
1941 	for (queue = 0; queue < rx_count; queue++)
1942 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1943 }
1944 
1945 /**
1946  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1947  * @priv: private structure
1948  * @dma_conf: structure to take the dma data
1949  * @queue: TX queue index
1950  */
1951 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1952 					 struct stmmac_dma_conf *dma_conf,
1953 					 u32 queue)
1954 {
1955 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1956 	size_t size;
1957 	void *addr;
1958 
1959 	/* Release the DMA TX socket buffers */
1960 	dma_free_tx_skbufs(priv, dma_conf, queue);
1961 
1962 	if (priv->extend_desc) {
1963 		size = sizeof(struct dma_extended_desc);
1964 		addr = tx_q->dma_etx;
1965 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1966 		size = sizeof(struct dma_edesc);
1967 		addr = tx_q->dma_entx;
1968 	} else {
1969 		size = sizeof(struct dma_desc);
1970 		addr = tx_q->dma_tx;
1971 	}
1972 
1973 	size *= dma_conf->dma_tx_size;
1974 
1975 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1976 
1977 	kfree(tx_q->tx_skbuff_dma);
1978 	kfree(tx_q->tx_skbuff);
1979 }
1980 
1981 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1982 				       struct stmmac_dma_conf *dma_conf)
1983 {
1984 	u32 tx_count = priv->plat->tx_queues_to_use;
1985 	u32 queue;
1986 
1987 	/* Free TX queue resources */
1988 	for (queue = 0; queue < tx_count; queue++)
1989 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1990 }
1991 
1992 /**
1993  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1994  * @priv: private structure
1995  * @dma_conf: structure to take the dma data
1996  * @queue: RX queue index
1997  * Description: according to which descriptor can be used (extend or basic)
1998  * this function allocates the resources for TX and RX paths. In case of
1999  * reception, for example, it pre-allocated the RX socket buffer in order to
2000  * allow zero-copy mechanism.
2001  */
2002 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2003 					 struct stmmac_dma_conf *dma_conf,
2004 					 u32 queue)
2005 {
2006 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2007 	struct stmmac_channel *ch = &priv->channel[queue];
2008 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2009 	struct page_pool_params pp_params = { 0 };
2010 	unsigned int num_pages;
2011 	unsigned int napi_id;
2012 	int ret;
2013 
2014 	rx_q->queue_index = queue;
2015 	rx_q->priv_data = priv;
2016 
2017 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2018 	pp_params.pool_size = dma_conf->dma_rx_size;
2019 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2020 	pp_params.order = ilog2(num_pages);
2021 	pp_params.nid = dev_to_node(priv->device);
2022 	pp_params.dev = priv->device;
2023 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2024 	pp_params.offset = stmmac_rx_offset(priv);
2025 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2026 
2027 	rx_q->page_pool = page_pool_create(&pp_params);
2028 	if (IS_ERR(rx_q->page_pool)) {
2029 		ret = PTR_ERR(rx_q->page_pool);
2030 		rx_q->page_pool = NULL;
2031 		return ret;
2032 	}
2033 
2034 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2035 				 sizeof(*rx_q->buf_pool),
2036 				 GFP_KERNEL);
2037 	if (!rx_q->buf_pool)
2038 		return -ENOMEM;
2039 
2040 	if (priv->extend_desc) {
2041 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2042 						   dma_conf->dma_rx_size *
2043 						   sizeof(struct dma_extended_desc),
2044 						   &rx_q->dma_rx_phy,
2045 						   GFP_KERNEL);
2046 		if (!rx_q->dma_erx)
2047 			return -ENOMEM;
2048 
2049 	} else {
2050 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2051 						  dma_conf->dma_rx_size *
2052 						  sizeof(struct dma_desc),
2053 						  &rx_q->dma_rx_phy,
2054 						  GFP_KERNEL);
2055 		if (!rx_q->dma_rx)
2056 			return -ENOMEM;
2057 	}
2058 
2059 	if (stmmac_xdp_is_enabled(priv) &&
2060 	    test_bit(queue, priv->af_xdp_zc_qps))
2061 		napi_id = ch->rxtx_napi.napi_id;
2062 	else
2063 		napi_id = ch->rx_napi.napi_id;
2064 
2065 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2066 			       rx_q->queue_index,
2067 			       napi_id);
2068 	if (ret) {
2069 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2070 		return -EINVAL;
2071 	}
2072 
2073 	return 0;
2074 }
2075 
2076 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2077 				       struct stmmac_dma_conf *dma_conf)
2078 {
2079 	u32 rx_count = priv->plat->rx_queues_to_use;
2080 	u32 queue;
2081 	int ret;
2082 
2083 	/* RX queues buffers and DMA */
2084 	for (queue = 0; queue < rx_count; queue++) {
2085 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2086 		if (ret)
2087 			goto err_dma;
2088 	}
2089 
2090 	return 0;
2091 
2092 err_dma:
2093 	free_dma_rx_desc_resources(priv, dma_conf);
2094 
2095 	return ret;
2096 }
2097 
2098 /**
2099  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2100  * @priv: private structure
2101  * @dma_conf: structure to take the dma data
2102  * @queue: TX queue index
2103  * Description: according to which descriptor can be used (extend or basic)
2104  * this function allocates the resources for TX and RX paths. In case of
2105  * reception, for example, it pre-allocated the RX socket buffer in order to
2106  * allow zero-copy mechanism.
2107  */
2108 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2109 					 struct stmmac_dma_conf *dma_conf,
2110 					 u32 queue)
2111 {
2112 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2113 	size_t size;
2114 	void *addr;
2115 
2116 	tx_q->queue_index = queue;
2117 	tx_q->priv_data = priv;
2118 
2119 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2120 				      sizeof(*tx_q->tx_skbuff_dma),
2121 				      GFP_KERNEL);
2122 	if (!tx_q->tx_skbuff_dma)
2123 		return -ENOMEM;
2124 
2125 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2126 				  sizeof(struct sk_buff *),
2127 				  GFP_KERNEL);
2128 	if (!tx_q->tx_skbuff)
2129 		return -ENOMEM;
2130 
2131 	if (priv->extend_desc)
2132 		size = sizeof(struct dma_extended_desc);
2133 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2134 		size = sizeof(struct dma_edesc);
2135 	else
2136 		size = sizeof(struct dma_desc);
2137 
2138 	size *= dma_conf->dma_tx_size;
2139 
2140 	addr = dma_alloc_coherent(priv->device, size,
2141 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2142 	if (!addr)
2143 		return -ENOMEM;
2144 
2145 	if (priv->extend_desc)
2146 		tx_q->dma_etx = addr;
2147 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2148 		tx_q->dma_entx = addr;
2149 	else
2150 		tx_q->dma_tx = addr;
2151 
2152 	return 0;
2153 }
2154 
2155 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2156 				       struct stmmac_dma_conf *dma_conf)
2157 {
2158 	u32 tx_count = priv->plat->tx_queues_to_use;
2159 	u32 queue;
2160 	int ret;
2161 
2162 	/* TX queues buffers and DMA */
2163 	for (queue = 0; queue < tx_count; queue++) {
2164 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2165 		if (ret)
2166 			goto err_dma;
2167 	}
2168 
2169 	return 0;
2170 
2171 err_dma:
2172 	free_dma_tx_desc_resources(priv, dma_conf);
2173 	return ret;
2174 }
2175 
2176 /**
2177  * alloc_dma_desc_resources - alloc TX/RX resources.
2178  * @priv: private structure
2179  * @dma_conf: structure to take the dma data
2180  * Description: according to which descriptor can be used (extend or basic)
2181  * this function allocates the resources for TX and RX paths. In case of
2182  * reception, for example, it pre-allocated the RX socket buffer in order to
2183  * allow zero-copy mechanism.
2184  */
2185 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2186 				    struct stmmac_dma_conf *dma_conf)
2187 {
2188 	/* RX Allocation */
2189 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2190 
2191 	if (ret)
2192 		return ret;
2193 
2194 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2195 
2196 	return ret;
2197 }
2198 
2199 /**
2200  * free_dma_desc_resources - free dma desc resources
2201  * @priv: private structure
2202  * @dma_conf: structure to take the dma data
2203  */
2204 static void free_dma_desc_resources(struct stmmac_priv *priv,
2205 				    struct stmmac_dma_conf *dma_conf)
2206 {
2207 	/* Release the DMA TX socket buffers */
2208 	free_dma_tx_desc_resources(priv, dma_conf);
2209 
2210 	/* Release the DMA RX socket buffers later
2211 	 * to ensure all pending XDP_TX buffers are returned.
2212 	 */
2213 	free_dma_rx_desc_resources(priv, dma_conf);
2214 }
2215 
2216 /**
2217  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2218  *  @priv: driver private structure
2219  *  Description: It is used for enabling the rx queues in the MAC
2220  */
2221 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2222 {
2223 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2224 	int queue;
2225 	u8 mode;
2226 
2227 	for (queue = 0; queue < rx_queues_count; queue++) {
2228 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2229 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2230 	}
2231 }
2232 
2233 /**
2234  * stmmac_start_rx_dma - start RX DMA channel
2235  * @priv: driver private structure
2236  * @chan: RX channel index
2237  * Description:
2238  * This starts a RX DMA channel
2239  */
2240 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2241 {
2242 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2243 	stmmac_start_rx(priv, priv->ioaddr, chan);
2244 }
2245 
2246 /**
2247  * stmmac_start_tx_dma - start TX DMA channel
2248  * @priv: driver private structure
2249  * @chan: TX channel index
2250  * Description:
2251  * This starts a TX DMA channel
2252  */
2253 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2254 {
2255 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2256 	stmmac_start_tx(priv, priv->ioaddr, chan);
2257 }
2258 
2259 /**
2260  * stmmac_stop_rx_dma - stop RX DMA channel
2261  * @priv: driver private structure
2262  * @chan: RX channel index
2263  * Description:
2264  * This stops a RX DMA channel
2265  */
2266 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2267 {
2268 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2269 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2270 }
2271 
2272 /**
2273  * stmmac_stop_tx_dma - stop TX DMA channel
2274  * @priv: driver private structure
2275  * @chan: TX channel index
2276  * Description:
2277  * This stops a TX DMA channel
2278  */
2279 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2280 {
2281 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2282 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2283 }
2284 
2285 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2286 {
2287 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2288 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2289 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2290 	u32 chan;
2291 
2292 	for (chan = 0; chan < dma_csr_ch; chan++) {
2293 		struct stmmac_channel *ch = &priv->channel[chan];
2294 		unsigned long flags;
2295 
2296 		spin_lock_irqsave(&ch->lock, flags);
2297 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2298 		spin_unlock_irqrestore(&ch->lock, flags);
2299 	}
2300 }
2301 
2302 /**
2303  * stmmac_start_all_dma - start all RX and TX DMA channels
2304  * @priv: driver private structure
2305  * Description:
2306  * This starts all the RX and TX DMA channels
2307  */
2308 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2309 {
2310 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2311 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2312 	u32 chan = 0;
2313 
2314 	for (chan = 0; chan < rx_channels_count; chan++)
2315 		stmmac_start_rx_dma(priv, chan);
2316 
2317 	for (chan = 0; chan < tx_channels_count; chan++)
2318 		stmmac_start_tx_dma(priv, chan);
2319 }
2320 
2321 /**
2322  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2323  * @priv: driver private structure
2324  * Description:
2325  * This stops the RX and TX DMA channels
2326  */
2327 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2328 {
2329 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2330 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2331 	u32 chan = 0;
2332 
2333 	for (chan = 0; chan < rx_channels_count; chan++)
2334 		stmmac_stop_rx_dma(priv, chan);
2335 
2336 	for (chan = 0; chan < tx_channels_count; chan++)
2337 		stmmac_stop_tx_dma(priv, chan);
2338 }
2339 
2340 /**
2341  *  stmmac_dma_operation_mode - HW DMA operation mode
2342  *  @priv: driver private structure
2343  *  Description: it is used for configuring the DMA operation mode register in
2344  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2345  */
2346 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2347 {
2348 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2349 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2350 	int rxfifosz = priv->plat->rx_fifo_size;
2351 	int txfifosz = priv->plat->tx_fifo_size;
2352 	u32 txmode = 0;
2353 	u32 rxmode = 0;
2354 	u32 chan = 0;
2355 	u8 qmode = 0;
2356 
2357 	if (rxfifosz == 0)
2358 		rxfifosz = priv->dma_cap.rx_fifo_size;
2359 	if (txfifosz == 0)
2360 		txfifosz = priv->dma_cap.tx_fifo_size;
2361 
2362 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2363 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2364 		rxfifosz /= rx_channels_count;
2365 		txfifosz /= tx_channels_count;
2366 	}
2367 
2368 	if (priv->plat->force_thresh_dma_mode) {
2369 		txmode = tc;
2370 		rxmode = tc;
2371 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2372 		/*
2373 		 * In case of GMAC, SF mode can be enabled
2374 		 * to perform the TX COE in HW. This depends on:
2375 		 * 1) TX COE if actually supported
2376 		 * 2) There is no bugged Jumbo frame support
2377 		 *    that needs to not insert csum in the TDES.
2378 		 */
2379 		txmode = SF_DMA_MODE;
2380 		rxmode = SF_DMA_MODE;
2381 		priv->xstats.threshold = SF_DMA_MODE;
2382 	} else {
2383 		txmode = tc;
2384 		rxmode = SF_DMA_MODE;
2385 	}
2386 
2387 	/* configure all channels */
2388 	for (chan = 0; chan < rx_channels_count; chan++) {
2389 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2390 		u32 buf_size;
2391 
2392 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2393 
2394 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2395 				rxfifosz, qmode);
2396 
2397 		if (rx_q->xsk_pool) {
2398 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2399 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2400 					      buf_size,
2401 					      chan);
2402 		} else {
2403 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2404 					      priv->dma_conf.dma_buf_sz,
2405 					      chan);
2406 		}
2407 	}
2408 
2409 	for (chan = 0; chan < tx_channels_count; chan++) {
2410 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2411 
2412 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2413 				txfifosz, qmode);
2414 	}
2415 }
2416 
2417 static void stmmac_xsk_request_timestamp(void *_priv)
2418 {
2419 	struct stmmac_metadata_request *meta_req = _priv;
2420 
2421 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2422 	*meta_req->set_ic = true;
2423 }
2424 
2425 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2426 {
2427 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2428 	struct stmmac_priv *priv = tx_compl->priv;
2429 	struct dma_desc *desc = tx_compl->desc;
2430 	bool found = false;
2431 	u64 ns = 0;
2432 
2433 	if (!priv->hwts_tx_en)
2434 		return 0;
2435 
2436 	/* check tx tstamp status */
2437 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2438 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2439 		found = true;
2440 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2441 		found = true;
2442 	}
2443 
2444 	if (found) {
2445 		ns -= priv->plat->cdc_error_adj;
2446 		return ns_to_ktime(ns);
2447 	}
2448 
2449 	return 0;
2450 }
2451 
2452 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2453 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2454 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2455 };
2456 
2457 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2458 {
2459 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2460 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2461 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2462 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2463 	unsigned int entry = tx_q->cur_tx;
2464 	struct dma_desc *tx_desc = NULL;
2465 	struct xdp_desc xdp_desc;
2466 	bool work_done = true;
2467 	u32 tx_set_ic_bit = 0;
2468 
2469 	/* Avoids TX time-out as we are sharing with slow path */
2470 	txq_trans_cond_update(nq);
2471 
2472 	budget = min(budget, stmmac_tx_avail(priv, queue));
2473 
2474 	while (budget-- > 0) {
2475 		struct stmmac_metadata_request meta_req;
2476 		struct xsk_tx_metadata *meta = NULL;
2477 		dma_addr_t dma_addr;
2478 		bool set_ic;
2479 
2480 		/* We are sharing with slow path and stop XSK TX desc submission when
2481 		 * available TX ring is less than threshold.
2482 		 */
2483 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2484 		    !netif_carrier_ok(priv->dev)) {
2485 			work_done = false;
2486 			break;
2487 		}
2488 
2489 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2490 			break;
2491 
2492 		if (priv->est && priv->est->enable &&
2493 		    priv->est->max_sdu[queue] &&
2494 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2495 			priv->xstats.max_sdu_txq_drop[queue]++;
2496 			continue;
2497 		}
2498 
2499 		if (likely(priv->extend_desc))
2500 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2501 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2502 			tx_desc = &tx_q->dma_entx[entry].basic;
2503 		else
2504 			tx_desc = tx_q->dma_tx + entry;
2505 
2506 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2507 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2508 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2509 
2510 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2511 
2512 		/* To return XDP buffer to XSK pool, we simple call
2513 		 * xsk_tx_completed(), so we don't need to fill up
2514 		 * 'buf' and 'xdpf'.
2515 		 */
2516 		tx_q->tx_skbuff_dma[entry].buf = 0;
2517 		tx_q->xdpf[entry] = NULL;
2518 
2519 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2520 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2521 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2522 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2523 
2524 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2525 
2526 		tx_q->tx_count_frames++;
2527 
2528 		if (!priv->tx_coal_frames[queue])
2529 			set_ic = false;
2530 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2531 			set_ic = true;
2532 		else
2533 			set_ic = false;
2534 
2535 		meta_req.priv = priv;
2536 		meta_req.tx_desc = tx_desc;
2537 		meta_req.set_ic = &set_ic;
2538 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2539 					&meta_req);
2540 		if (set_ic) {
2541 			tx_q->tx_count_frames = 0;
2542 			stmmac_set_tx_ic(priv, tx_desc);
2543 			tx_set_ic_bit++;
2544 		}
2545 
2546 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2547 				       true, priv->mode, true, true,
2548 				       xdp_desc.len);
2549 
2550 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2551 
2552 		xsk_tx_metadata_to_compl(meta,
2553 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2554 
2555 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2556 		entry = tx_q->cur_tx;
2557 	}
2558 	u64_stats_update_begin(&txq_stats->napi_syncp);
2559 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2560 	u64_stats_update_end(&txq_stats->napi_syncp);
2561 
2562 	if (tx_desc) {
2563 		stmmac_flush_tx_descriptors(priv, queue);
2564 		xsk_tx_release(pool);
2565 	}
2566 
2567 	/* Return true if all of the 3 conditions are met
2568 	 *  a) TX Budget is still available
2569 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2570 	 *     pending XSK TX for transmission)
2571 	 */
2572 	return !!budget && work_done;
2573 }
2574 
2575 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2576 {
2577 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2578 		tc += 64;
2579 
2580 		if (priv->plat->force_thresh_dma_mode)
2581 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2582 		else
2583 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2584 						      chan);
2585 
2586 		priv->xstats.threshold = tc;
2587 	}
2588 }
2589 
2590 /**
2591  * stmmac_tx_clean - to manage the transmission completion
2592  * @priv: driver private structure
2593  * @budget: napi budget limiting this functions packet handling
2594  * @queue: TX queue index
2595  * @pending_packets: signal to arm the TX coal timer
2596  * Description: it reclaims the transmit resources after transmission completes.
2597  * If some packets still needs to be handled, due to TX coalesce, set
2598  * pending_packets to true to make NAPI arm the TX coal timer.
2599  */
2600 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2601 			   bool *pending_packets)
2602 {
2603 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2604 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2605 	unsigned int bytes_compl = 0, pkts_compl = 0;
2606 	unsigned int entry, xmits = 0, count = 0;
2607 	u32 tx_packets = 0, tx_errors = 0;
2608 
2609 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2610 
2611 	tx_q->xsk_frames_done = 0;
2612 
2613 	entry = tx_q->dirty_tx;
2614 
2615 	/* Try to clean all TX complete frame in 1 shot */
2616 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2617 		struct xdp_frame *xdpf;
2618 		struct sk_buff *skb;
2619 		struct dma_desc *p;
2620 		int status;
2621 
2622 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2623 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2624 			xdpf = tx_q->xdpf[entry];
2625 			skb = NULL;
2626 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2627 			xdpf = NULL;
2628 			skb = tx_q->tx_skbuff[entry];
2629 		} else {
2630 			xdpf = NULL;
2631 			skb = NULL;
2632 		}
2633 
2634 		if (priv->extend_desc)
2635 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2636 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2637 			p = &tx_q->dma_entx[entry].basic;
2638 		else
2639 			p = tx_q->dma_tx + entry;
2640 
2641 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2642 		/* Check if the descriptor is owned by the DMA */
2643 		if (unlikely(status & tx_dma_own))
2644 			break;
2645 
2646 		count++;
2647 
2648 		/* Make sure descriptor fields are read after reading
2649 		 * the own bit.
2650 		 */
2651 		dma_rmb();
2652 
2653 		/* Just consider the last segment and ...*/
2654 		if (likely(!(status & tx_not_ls))) {
2655 			/* ... verify the status error condition */
2656 			if (unlikely(status & tx_err)) {
2657 				tx_errors++;
2658 				if (unlikely(status & tx_err_bump_tc))
2659 					stmmac_bump_dma_threshold(priv, queue);
2660 			} else {
2661 				tx_packets++;
2662 			}
2663 			if (skb) {
2664 				stmmac_get_tx_hwtstamp(priv, p, skb);
2665 			} else if (tx_q->xsk_pool &&
2666 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2667 				struct stmmac_xsk_tx_complete tx_compl = {
2668 					.priv = priv,
2669 					.desc = p,
2670 				};
2671 
2672 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2673 							 &stmmac_xsk_tx_metadata_ops,
2674 							 &tx_compl);
2675 			}
2676 		}
2677 
2678 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2679 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2680 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2681 				dma_unmap_page(priv->device,
2682 					       tx_q->tx_skbuff_dma[entry].buf,
2683 					       tx_q->tx_skbuff_dma[entry].len,
2684 					       DMA_TO_DEVICE);
2685 			else
2686 				dma_unmap_single(priv->device,
2687 						 tx_q->tx_skbuff_dma[entry].buf,
2688 						 tx_q->tx_skbuff_dma[entry].len,
2689 						 DMA_TO_DEVICE);
2690 			tx_q->tx_skbuff_dma[entry].buf = 0;
2691 			tx_q->tx_skbuff_dma[entry].len = 0;
2692 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2693 		}
2694 
2695 		stmmac_clean_desc3(priv, tx_q, p);
2696 
2697 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2698 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2699 
2700 		if (xdpf &&
2701 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2702 			xdp_return_frame_rx_napi(xdpf);
2703 			tx_q->xdpf[entry] = NULL;
2704 		}
2705 
2706 		if (xdpf &&
2707 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2708 			xdp_return_frame(xdpf);
2709 			tx_q->xdpf[entry] = NULL;
2710 		}
2711 
2712 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2713 			tx_q->xsk_frames_done++;
2714 
2715 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2716 			if (likely(skb)) {
2717 				pkts_compl++;
2718 				bytes_compl += skb->len;
2719 				dev_consume_skb_any(skb);
2720 				tx_q->tx_skbuff[entry] = NULL;
2721 			}
2722 		}
2723 
2724 		stmmac_release_tx_desc(priv, p, priv->mode);
2725 
2726 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2727 	}
2728 	tx_q->dirty_tx = entry;
2729 
2730 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2731 				  pkts_compl, bytes_compl);
2732 
2733 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2734 								queue))) &&
2735 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2736 
2737 		netif_dbg(priv, tx_done, priv->dev,
2738 			  "%s: restart transmit\n", __func__);
2739 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2740 	}
2741 
2742 	if (tx_q->xsk_pool) {
2743 		bool work_done;
2744 
2745 		if (tx_q->xsk_frames_done)
2746 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2747 
2748 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2749 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2750 
2751 		/* For XSK TX, we try to send as many as possible.
2752 		 * If XSK work done (XSK TX desc empty and budget still
2753 		 * available), return "budget - 1" to reenable TX IRQ.
2754 		 * Else, return "budget" to make NAPI continue polling.
2755 		 */
2756 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2757 					       STMMAC_XSK_TX_BUDGET_MAX);
2758 		if (work_done)
2759 			xmits = budget - 1;
2760 		else
2761 			xmits = budget;
2762 	}
2763 
2764 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2765 	    priv->eee_sw_timer_en) {
2766 		if (stmmac_enable_eee_mode(priv))
2767 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2768 	}
2769 
2770 	/* We still have pending packets, let's call for a new scheduling */
2771 	if (tx_q->dirty_tx != tx_q->cur_tx)
2772 		*pending_packets = true;
2773 
2774 	u64_stats_update_begin(&txq_stats->napi_syncp);
2775 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2776 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2777 	u64_stats_inc(&txq_stats->napi.tx_clean);
2778 	u64_stats_update_end(&txq_stats->napi_syncp);
2779 
2780 	priv->xstats.tx_errors += tx_errors;
2781 
2782 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2783 
2784 	/* Combine decisions from TX clean and XSK TX */
2785 	return max(count, xmits);
2786 }
2787 
2788 /**
2789  * stmmac_tx_err - to manage the tx error
2790  * @priv: driver private structure
2791  * @chan: channel index
2792  * Description: it cleans the descriptors and restarts the transmission
2793  * in case of transmission errors.
2794  */
2795 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2796 {
2797 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2798 
2799 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2800 
2801 	stmmac_stop_tx_dma(priv, chan);
2802 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2803 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2804 	stmmac_reset_tx_queue(priv, chan);
2805 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2806 			    tx_q->dma_tx_phy, chan);
2807 	stmmac_start_tx_dma(priv, chan);
2808 
2809 	priv->xstats.tx_errors++;
2810 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2811 }
2812 
2813 /**
2814  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2815  *  @priv: driver private structure
2816  *  @txmode: TX operating mode
2817  *  @rxmode: RX operating mode
2818  *  @chan: channel index
2819  *  Description: it is used for configuring of the DMA operation mode in
2820  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2821  *  mode.
2822  */
2823 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2824 					  u32 rxmode, u32 chan)
2825 {
2826 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2827 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2828 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2829 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2830 	int rxfifosz = priv->plat->rx_fifo_size;
2831 	int txfifosz = priv->plat->tx_fifo_size;
2832 
2833 	if (rxfifosz == 0)
2834 		rxfifosz = priv->dma_cap.rx_fifo_size;
2835 	if (txfifosz == 0)
2836 		txfifosz = priv->dma_cap.tx_fifo_size;
2837 
2838 	/* Adjust for real per queue fifo size */
2839 	rxfifosz /= rx_channels_count;
2840 	txfifosz /= tx_channels_count;
2841 
2842 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2843 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2844 }
2845 
2846 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2847 {
2848 	int ret;
2849 
2850 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2851 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2852 	if (ret && (ret != -EINVAL)) {
2853 		stmmac_global_err(priv);
2854 		return true;
2855 	}
2856 
2857 	return false;
2858 }
2859 
2860 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2861 {
2862 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2863 						 &priv->xstats, chan, dir);
2864 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2865 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2866 	struct stmmac_channel *ch = &priv->channel[chan];
2867 	struct napi_struct *rx_napi;
2868 	struct napi_struct *tx_napi;
2869 	unsigned long flags;
2870 
2871 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2872 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2873 
2874 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2875 		if (napi_schedule_prep(rx_napi)) {
2876 			spin_lock_irqsave(&ch->lock, flags);
2877 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2878 			spin_unlock_irqrestore(&ch->lock, flags);
2879 			__napi_schedule(rx_napi);
2880 		}
2881 	}
2882 
2883 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2884 		if (napi_schedule_prep(tx_napi)) {
2885 			spin_lock_irqsave(&ch->lock, flags);
2886 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2887 			spin_unlock_irqrestore(&ch->lock, flags);
2888 			__napi_schedule(tx_napi);
2889 		}
2890 	}
2891 
2892 	return status;
2893 }
2894 
2895 /**
2896  * stmmac_dma_interrupt - DMA ISR
2897  * @priv: driver private structure
2898  * Description: this is the DMA ISR. It is called by the main ISR.
2899  * It calls the dwmac dma routine and schedule poll method in case of some
2900  * work can be done.
2901  */
2902 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2903 {
2904 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2905 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2906 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2907 				tx_channel_count : rx_channel_count;
2908 	u32 chan;
2909 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2910 
2911 	/* Make sure we never check beyond our status buffer. */
2912 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2913 		channels_to_check = ARRAY_SIZE(status);
2914 
2915 	for (chan = 0; chan < channels_to_check; chan++)
2916 		status[chan] = stmmac_napi_check(priv, chan,
2917 						 DMA_DIR_RXTX);
2918 
2919 	for (chan = 0; chan < tx_channel_count; chan++) {
2920 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2921 			/* Try to bump up the dma threshold on this failure */
2922 			stmmac_bump_dma_threshold(priv, chan);
2923 		} else if (unlikely(status[chan] == tx_hard_error)) {
2924 			stmmac_tx_err(priv, chan);
2925 		}
2926 	}
2927 }
2928 
2929 /**
2930  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2931  * @priv: driver private structure
2932  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2933  */
2934 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2935 {
2936 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2937 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2938 
2939 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2940 
2941 	if (priv->dma_cap.rmon) {
2942 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2943 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2944 	} else
2945 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2946 }
2947 
2948 /**
2949  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2950  * @priv: driver private structure
2951  * Description:
2952  *  new GMAC chip generations have a new register to indicate the
2953  *  presence of the optional feature/functions.
2954  *  This can be also used to override the value passed through the
2955  *  platform and necessary for old MAC10/100 and GMAC chips.
2956  */
2957 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2958 {
2959 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2960 }
2961 
2962 /**
2963  * stmmac_check_ether_addr - check if the MAC addr is valid
2964  * @priv: driver private structure
2965  * Description:
2966  * it is to verify if the MAC address is valid, in case of failures it
2967  * generates a random MAC address
2968  */
2969 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2970 {
2971 	u8 addr[ETH_ALEN];
2972 
2973 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2974 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2975 		if (is_valid_ether_addr(addr))
2976 			eth_hw_addr_set(priv->dev, addr);
2977 		else
2978 			eth_hw_addr_random(priv->dev);
2979 		dev_info(priv->device, "device MAC address %pM\n",
2980 			 priv->dev->dev_addr);
2981 	}
2982 }
2983 
2984 /**
2985  * stmmac_init_dma_engine - DMA init.
2986  * @priv: driver private structure
2987  * Description:
2988  * It inits the DMA invoking the specific MAC/GMAC callback.
2989  * Some DMA parameters can be passed from the platform;
2990  * in case of these are not passed a default is kept for the MAC or GMAC.
2991  */
2992 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2993 {
2994 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2995 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2996 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2997 	struct stmmac_rx_queue *rx_q;
2998 	struct stmmac_tx_queue *tx_q;
2999 	u32 chan = 0;
3000 	int ret = 0;
3001 
3002 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3003 		dev_err(priv->device, "Invalid DMA configuration\n");
3004 		return -EINVAL;
3005 	}
3006 
3007 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3008 		priv->plat->dma_cfg->atds = 1;
3009 
3010 	ret = stmmac_reset(priv, priv->ioaddr);
3011 	if (ret) {
3012 		dev_err(priv->device, "Failed to reset the dma\n");
3013 		return ret;
3014 	}
3015 
3016 	/* DMA Configuration */
3017 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3018 
3019 	if (priv->plat->axi)
3020 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3021 
3022 	/* DMA CSR Channel configuration */
3023 	for (chan = 0; chan < dma_csr_ch; chan++) {
3024 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3025 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3026 	}
3027 
3028 	/* DMA RX Channel Configuration */
3029 	for (chan = 0; chan < rx_channels_count; chan++) {
3030 		rx_q = &priv->dma_conf.rx_queue[chan];
3031 
3032 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3033 				    rx_q->dma_rx_phy, chan);
3034 
3035 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3036 				     (rx_q->buf_alloc_num *
3037 				      sizeof(struct dma_desc));
3038 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3039 				       rx_q->rx_tail_addr, chan);
3040 	}
3041 
3042 	/* DMA TX Channel Configuration */
3043 	for (chan = 0; chan < tx_channels_count; chan++) {
3044 		tx_q = &priv->dma_conf.tx_queue[chan];
3045 
3046 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3047 				    tx_q->dma_tx_phy, chan);
3048 
3049 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3050 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3051 				       tx_q->tx_tail_addr, chan);
3052 	}
3053 
3054 	return ret;
3055 }
3056 
3057 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3058 {
3059 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3060 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3061 	struct stmmac_channel *ch;
3062 	struct napi_struct *napi;
3063 
3064 	if (!tx_coal_timer)
3065 		return;
3066 
3067 	ch = &priv->channel[tx_q->queue_index];
3068 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3069 
3070 	/* Arm timer only if napi is not already scheduled.
3071 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3072 	 * again in the next scheduled napi.
3073 	 */
3074 	if (unlikely(!napi_is_scheduled(napi)))
3075 		hrtimer_start(&tx_q->txtimer,
3076 			      STMMAC_COAL_TIMER(tx_coal_timer),
3077 			      HRTIMER_MODE_REL);
3078 	else
3079 		hrtimer_try_to_cancel(&tx_q->txtimer);
3080 }
3081 
3082 /**
3083  * stmmac_tx_timer - mitigation sw timer for tx.
3084  * @t: data pointer
3085  * Description:
3086  * This is the timer handler to directly invoke the stmmac_tx_clean.
3087  */
3088 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3089 {
3090 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3091 	struct stmmac_priv *priv = tx_q->priv_data;
3092 	struct stmmac_channel *ch;
3093 	struct napi_struct *napi;
3094 
3095 	ch = &priv->channel[tx_q->queue_index];
3096 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3097 
3098 	if (likely(napi_schedule_prep(napi))) {
3099 		unsigned long flags;
3100 
3101 		spin_lock_irqsave(&ch->lock, flags);
3102 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3103 		spin_unlock_irqrestore(&ch->lock, flags);
3104 		__napi_schedule(napi);
3105 	}
3106 
3107 	return HRTIMER_NORESTART;
3108 }
3109 
3110 /**
3111  * stmmac_init_coalesce - init mitigation options.
3112  * @priv: driver private structure
3113  * Description:
3114  * This inits the coalesce parameters: i.e. timer rate,
3115  * timer handler and default threshold used for enabling the
3116  * interrupt on completion bit.
3117  */
3118 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3119 {
3120 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3121 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3122 	u32 chan;
3123 
3124 	for (chan = 0; chan < tx_channel_count; chan++) {
3125 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3126 
3127 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3128 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3129 
3130 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3131 		tx_q->txtimer.function = stmmac_tx_timer;
3132 	}
3133 
3134 	for (chan = 0; chan < rx_channel_count; chan++)
3135 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3136 }
3137 
3138 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3139 {
3140 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3141 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3142 	u32 chan;
3143 
3144 	/* set TX ring length */
3145 	for (chan = 0; chan < tx_channels_count; chan++)
3146 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3147 				       (priv->dma_conf.dma_tx_size - 1), chan);
3148 
3149 	/* set RX ring length */
3150 	for (chan = 0; chan < rx_channels_count; chan++)
3151 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3152 				       (priv->dma_conf.dma_rx_size - 1), chan);
3153 }
3154 
3155 /**
3156  *  stmmac_set_tx_queue_weight - Set TX queue weight
3157  *  @priv: driver private structure
3158  *  Description: It is used for setting TX queues weight
3159  */
3160 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3161 {
3162 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3163 	u32 weight;
3164 	u32 queue;
3165 
3166 	for (queue = 0; queue < tx_queues_count; queue++) {
3167 		weight = priv->plat->tx_queues_cfg[queue].weight;
3168 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3169 	}
3170 }
3171 
3172 /**
3173  *  stmmac_configure_cbs - Configure CBS in TX queue
3174  *  @priv: driver private structure
3175  *  Description: It is used for configuring CBS in AVB TX queues
3176  */
3177 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3178 {
3179 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3180 	u32 mode_to_use;
3181 	u32 queue;
3182 
3183 	/* queue 0 is reserved for legacy traffic */
3184 	for (queue = 1; queue < tx_queues_count; queue++) {
3185 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3186 		if (mode_to_use == MTL_QUEUE_DCB)
3187 			continue;
3188 
3189 		stmmac_config_cbs(priv, priv->hw,
3190 				priv->plat->tx_queues_cfg[queue].send_slope,
3191 				priv->plat->tx_queues_cfg[queue].idle_slope,
3192 				priv->plat->tx_queues_cfg[queue].high_credit,
3193 				priv->plat->tx_queues_cfg[queue].low_credit,
3194 				queue);
3195 	}
3196 }
3197 
3198 /**
3199  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3200  *  @priv: driver private structure
3201  *  Description: It is used for mapping RX queues to RX dma channels
3202  */
3203 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3204 {
3205 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3206 	u32 queue;
3207 	u32 chan;
3208 
3209 	for (queue = 0; queue < rx_queues_count; queue++) {
3210 		chan = priv->plat->rx_queues_cfg[queue].chan;
3211 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3212 	}
3213 }
3214 
3215 /**
3216  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3217  *  @priv: driver private structure
3218  *  Description: It is used for configuring the RX Queue Priority
3219  */
3220 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3221 {
3222 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3223 	u32 queue;
3224 	u32 prio;
3225 
3226 	for (queue = 0; queue < rx_queues_count; queue++) {
3227 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3228 			continue;
3229 
3230 		prio = priv->plat->rx_queues_cfg[queue].prio;
3231 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3232 	}
3233 }
3234 
3235 /**
3236  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3237  *  @priv: driver private structure
3238  *  Description: It is used for configuring the TX Queue Priority
3239  */
3240 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3241 {
3242 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3243 	u32 queue;
3244 	u32 prio;
3245 
3246 	for (queue = 0; queue < tx_queues_count; queue++) {
3247 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3248 			continue;
3249 
3250 		prio = priv->plat->tx_queues_cfg[queue].prio;
3251 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3252 	}
3253 }
3254 
3255 /**
3256  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3257  *  @priv: driver private structure
3258  *  Description: It is used for configuring the RX queue routing
3259  */
3260 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3261 {
3262 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3263 	u32 queue;
3264 	u8 packet;
3265 
3266 	for (queue = 0; queue < rx_queues_count; queue++) {
3267 		/* no specific packet type routing specified for the queue */
3268 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3269 			continue;
3270 
3271 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3272 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3273 	}
3274 }
3275 
3276 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3277 {
3278 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3279 		priv->rss.enable = false;
3280 		return;
3281 	}
3282 
3283 	if (priv->dev->features & NETIF_F_RXHASH)
3284 		priv->rss.enable = true;
3285 	else
3286 		priv->rss.enable = false;
3287 
3288 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3289 			     priv->plat->rx_queues_to_use);
3290 }
3291 
3292 /**
3293  *  stmmac_mtl_configuration - Configure MTL
3294  *  @priv: driver private structure
3295  *  Description: It is used for configurring MTL
3296  */
3297 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3298 {
3299 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3300 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3301 
3302 	if (tx_queues_count > 1)
3303 		stmmac_set_tx_queue_weight(priv);
3304 
3305 	/* Configure MTL RX algorithms */
3306 	if (rx_queues_count > 1)
3307 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3308 				priv->plat->rx_sched_algorithm);
3309 
3310 	/* Configure MTL TX algorithms */
3311 	if (tx_queues_count > 1)
3312 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3313 				priv->plat->tx_sched_algorithm);
3314 
3315 	/* Configure CBS in AVB TX queues */
3316 	if (tx_queues_count > 1)
3317 		stmmac_configure_cbs(priv);
3318 
3319 	/* Map RX MTL to DMA channels */
3320 	stmmac_rx_queue_dma_chan_map(priv);
3321 
3322 	/* Enable MAC RX Queues */
3323 	stmmac_mac_enable_rx_queues(priv);
3324 
3325 	/* Set RX priorities */
3326 	if (rx_queues_count > 1)
3327 		stmmac_mac_config_rx_queues_prio(priv);
3328 
3329 	/* Set TX priorities */
3330 	if (tx_queues_count > 1)
3331 		stmmac_mac_config_tx_queues_prio(priv);
3332 
3333 	/* Set RX routing */
3334 	if (rx_queues_count > 1)
3335 		stmmac_mac_config_rx_queues_routing(priv);
3336 
3337 	/* Receive Side Scaling */
3338 	if (rx_queues_count > 1)
3339 		stmmac_mac_config_rss(priv);
3340 }
3341 
3342 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3343 {
3344 	if (priv->dma_cap.asp) {
3345 		netdev_info(priv->dev, "Enabling Safety Features\n");
3346 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3347 					  priv->plat->safety_feat_cfg);
3348 	} else {
3349 		netdev_info(priv->dev, "No Safety Features support found\n");
3350 	}
3351 }
3352 
3353 /**
3354  * stmmac_hw_setup - setup mac in a usable state.
3355  *  @dev : pointer to the device structure.
3356  *  @ptp_register: register PTP if set
3357  *  Description:
3358  *  this is the main function to setup the HW in a usable state because the
3359  *  dma engine is reset, the core registers are configured (e.g. AXI,
3360  *  Checksum features, timers). The DMA is ready to start receiving and
3361  *  transmitting.
3362  *  Return value:
3363  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3364  *  file on failure.
3365  */
3366 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3367 {
3368 	struct stmmac_priv *priv = netdev_priv(dev);
3369 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3370 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3371 	bool sph_en;
3372 	u32 chan;
3373 	int ret;
3374 
3375 	/* Make sure RX clock is enabled */
3376 	if (priv->hw->phylink_pcs)
3377 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3378 
3379 	/* DMA initialization and SW reset */
3380 	ret = stmmac_init_dma_engine(priv);
3381 	if (ret < 0) {
3382 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3383 			   __func__);
3384 		return ret;
3385 	}
3386 
3387 	/* Copy the MAC addr into the HW  */
3388 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3389 
3390 	/* PS and related bits will be programmed according to the speed */
3391 	if (priv->hw->pcs) {
3392 		int speed = priv->plat->mac_port_sel_speed;
3393 
3394 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3395 		    (speed == SPEED_1000)) {
3396 			priv->hw->ps = speed;
3397 		} else {
3398 			dev_warn(priv->device, "invalid port speed\n");
3399 			priv->hw->ps = 0;
3400 		}
3401 	}
3402 
3403 	/* Initialize the MAC Core */
3404 	stmmac_core_init(priv, priv->hw, dev);
3405 
3406 	/* Initialize MTL*/
3407 	stmmac_mtl_configuration(priv);
3408 
3409 	/* Initialize Safety Features */
3410 	stmmac_safety_feat_configuration(priv);
3411 
3412 	ret = stmmac_rx_ipc(priv, priv->hw);
3413 	if (!ret) {
3414 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3415 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3416 		priv->hw->rx_csum = 0;
3417 	}
3418 
3419 	/* Enable the MAC Rx/Tx */
3420 	stmmac_mac_set(priv, priv->ioaddr, true);
3421 
3422 	/* Set the HW DMA mode and the COE */
3423 	stmmac_dma_operation_mode(priv);
3424 
3425 	stmmac_mmc_setup(priv);
3426 
3427 	if (ptp_register) {
3428 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3429 		if (ret < 0)
3430 			netdev_warn(priv->dev,
3431 				    "failed to enable PTP reference clock: %pe\n",
3432 				    ERR_PTR(ret));
3433 	}
3434 
3435 	ret = stmmac_init_ptp(priv);
3436 	if (ret == -EOPNOTSUPP)
3437 		netdev_info(priv->dev, "PTP not supported by HW\n");
3438 	else if (ret)
3439 		netdev_warn(priv->dev, "PTP init failed\n");
3440 	else if (ptp_register)
3441 		stmmac_ptp_register(priv);
3442 
3443 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3444 
3445 	/* Convert the timer from msec to usec */
3446 	if (!priv->tx_lpi_timer)
3447 		priv->tx_lpi_timer = eee_timer * 1000;
3448 
3449 	if (priv->use_riwt) {
3450 		u32 queue;
3451 
3452 		for (queue = 0; queue < rx_cnt; queue++) {
3453 			if (!priv->rx_riwt[queue])
3454 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3455 
3456 			stmmac_rx_watchdog(priv, priv->ioaddr,
3457 					   priv->rx_riwt[queue], queue);
3458 		}
3459 	}
3460 
3461 	if (priv->hw->pcs)
3462 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3463 
3464 	/* set TX and RX rings length */
3465 	stmmac_set_rings_length(priv);
3466 
3467 	/* Enable TSO */
3468 	if (priv->tso) {
3469 		for (chan = 0; chan < tx_cnt; chan++) {
3470 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3471 
3472 			/* TSO and TBS cannot co-exist */
3473 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3474 				continue;
3475 
3476 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3477 		}
3478 	}
3479 
3480 	/* Enable Split Header */
3481 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3482 	for (chan = 0; chan < rx_cnt; chan++)
3483 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3484 
3485 
3486 	/* VLAN Tag Insertion */
3487 	if (priv->dma_cap.vlins)
3488 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3489 
3490 	/* TBS */
3491 	for (chan = 0; chan < tx_cnt; chan++) {
3492 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3493 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3494 
3495 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3496 	}
3497 
3498 	/* Configure real RX and TX queues */
3499 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3500 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3501 
3502 	/* Start the ball rolling... */
3503 	stmmac_start_all_dma(priv);
3504 
3505 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3506 
3507 	return 0;
3508 }
3509 
3510 static void stmmac_hw_teardown(struct net_device *dev)
3511 {
3512 	struct stmmac_priv *priv = netdev_priv(dev);
3513 
3514 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3515 }
3516 
3517 static void stmmac_free_irq(struct net_device *dev,
3518 			    enum request_irq_err irq_err, int irq_idx)
3519 {
3520 	struct stmmac_priv *priv = netdev_priv(dev);
3521 	int j;
3522 
3523 	switch (irq_err) {
3524 	case REQ_IRQ_ERR_ALL:
3525 		irq_idx = priv->plat->tx_queues_to_use;
3526 		fallthrough;
3527 	case REQ_IRQ_ERR_TX:
3528 		for (j = irq_idx - 1; j >= 0; j--) {
3529 			if (priv->tx_irq[j] > 0) {
3530 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3531 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3532 			}
3533 		}
3534 		irq_idx = priv->plat->rx_queues_to_use;
3535 		fallthrough;
3536 	case REQ_IRQ_ERR_RX:
3537 		for (j = irq_idx - 1; j >= 0; j--) {
3538 			if (priv->rx_irq[j] > 0) {
3539 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3540 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3541 			}
3542 		}
3543 
3544 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3545 			free_irq(priv->sfty_ue_irq, dev);
3546 		fallthrough;
3547 	case REQ_IRQ_ERR_SFTY_UE:
3548 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3549 			free_irq(priv->sfty_ce_irq, dev);
3550 		fallthrough;
3551 	case REQ_IRQ_ERR_SFTY_CE:
3552 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3553 			free_irq(priv->lpi_irq, dev);
3554 		fallthrough;
3555 	case REQ_IRQ_ERR_LPI:
3556 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3557 			free_irq(priv->wol_irq, dev);
3558 		fallthrough;
3559 	case REQ_IRQ_ERR_SFTY:
3560 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3561 			free_irq(priv->sfty_irq, dev);
3562 		fallthrough;
3563 	case REQ_IRQ_ERR_WOL:
3564 		free_irq(dev->irq, dev);
3565 		fallthrough;
3566 	case REQ_IRQ_ERR_MAC:
3567 	case REQ_IRQ_ERR_NO:
3568 		/* If MAC IRQ request error, no more IRQ to free */
3569 		break;
3570 	}
3571 }
3572 
3573 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3574 {
3575 	struct stmmac_priv *priv = netdev_priv(dev);
3576 	enum request_irq_err irq_err;
3577 	cpumask_t cpu_mask;
3578 	int irq_idx = 0;
3579 	char *int_name;
3580 	int ret;
3581 	int i;
3582 
3583 	/* For common interrupt */
3584 	int_name = priv->int_name_mac;
3585 	sprintf(int_name, "%s:%s", dev->name, "mac");
3586 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3587 			  0, int_name, dev);
3588 	if (unlikely(ret < 0)) {
3589 		netdev_err(priv->dev,
3590 			   "%s: alloc mac MSI %d (error: %d)\n",
3591 			   __func__, dev->irq, ret);
3592 		irq_err = REQ_IRQ_ERR_MAC;
3593 		goto irq_error;
3594 	}
3595 
3596 	/* Request the Wake IRQ in case of another line
3597 	 * is used for WoL
3598 	 */
3599 	priv->wol_irq_disabled = true;
3600 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3601 		int_name = priv->int_name_wol;
3602 		sprintf(int_name, "%s:%s", dev->name, "wol");
3603 		ret = request_irq(priv->wol_irq,
3604 				  stmmac_mac_interrupt,
3605 				  0, int_name, dev);
3606 		if (unlikely(ret < 0)) {
3607 			netdev_err(priv->dev,
3608 				   "%s: alloc wol MSI %d (error: %d)\n",
3609 				   __func__, priv->wol_irq, ret);
3610 			irq_err = REQ_IRQ_ERR_WOL;
3611 			goto irq_error;
3612 		}
3613 	}
3614 
3615 	/* Request the LPI IRQ in case of another line
3616 	 * is used for LPI
3617 	 */
3618 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3619 		int_name = priv->int_name_lpi;
3620 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3621 		ret = request_irq(priv->lpi_irq,
3622 				  stmmac_mac_interrupt,
3623 				  0, int_name, dev);
3624 		if (unlikely(ret < 0)) {
3625 			netdev_err(priv->dev,
3626 				   "%s: alloc lpi MSI %d (error: %d)\n",
3627 				   __func__, priv->lpi_irq, ret);
3628 			irq_err = REQ_IRQ_ERR_LPI;
3629 			goto irq_error;
3630 		}
3631 	}
3632 
3633 	/* Request the common Safety Feature Correctible/Uncorrectible
3634 	 * Error line in case of another line is used
3635 	 */
3636 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3637 		int_name = priv->int_name_sfty;
3638 		sprintf(int_name, "%s:%s", dev->name, "safety");
3639 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3640 				  0, int_name, dev);
3641 		if (unlikely(ret < 0)) {
3642 			netdev_err(priv->dev,
3643 				   "%s: alloc sfty MSI %d (error: %d)\n",
3644 				   __func__, priv->sfty_irq, ret);
3645 			irq_err = REQ_IRQ_ERR_SFTY;
3646 			goto irq_error;
3647 		}
3648 	}
3649 
3650 	/* Request the Safety Feature Correctible Error line in
3651 	 * case of another line is used
3652 	 */
3653 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3654 		int_name = priv->int_name_sfty_ce;
3655 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3656 		ret = request_irq(priv->sfty_ce_irq,
3657 				  stmmac_safety_interrupt,
3658 				  0, int_name, dev);
3659 		if (unlikely(ret < 0)) {
3660 			netdev_err(priv->dev,
3661 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3662 				   __func__, priv->sfty_ce_irq, ret);
3663 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3664 			goto irq_error;
3665 		}
3666 	}
3667 
3668 	/* Request the Safety Feature Uncorrectible Error line in
3669 	 * case of another line is used
3670 	 */
3671 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3672 		int_name = priv->int_name_sfty_ue;
3673 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3674 		ret = request_irq(priv->sfty_ue_irq,
3675 				  stmmac_safety_interrupt,
3676 				  0, int_name, dev);
3677 		if (unlikely(ret < 0)) {
3678 			netdev_err(priv->dev,
3679 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3680 				   __func__, priv->sfty_ue_irq, ret);
3681 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3682 			goto irq_error;
3683 		}
3684 	}
3685 
3686 	/* Request Rx MSI irq */
3687 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3688 		if (i >= MTL_MAX_RX_QUEUES)
3689 			break;
3690 		if (priv->rx_irq[i] == 0)
3691 			continue;
3692 
3693 		int_name = priv->int_name_rx_irq[i];
3694 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3695 		ret = request_irq(priv->rx_irq[i],
3696 				  stmmac_msi_intr_rx,
3697 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3698 		if (unlikely(ret < 0)) {
3699 			netdev_err(priv->dev,
3700 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3701 				   __func__, i, priv->rx_irq[i], ret);
3702 			irq_err = REQ_IRQ_ERR_RX;
3703 			irq_idx = i;
3704 			goto irq_error;
3705 		}
3706 		cpumask_clear(&cpu_mask);
3707 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3708 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3709 	}
3710 
3711 	/* Request Tx MSI irq */
3712 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3713 		if (i >= MTL_MAX_TX_QUEUES)
3714 			break;
3715 		if (priv->tx_irq[i] == 0)
3716 			continue;
3717 
3718 		int_name = priv->int_name_tx_irq[i];
3719 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3720 		ret = request_irq(priv->tx_irq[i],
3721 				  stmmac_msi_intr_tx,
3722 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3723 		if (unlikely(ret < 0)) {
3724 			netdev_err(priv->dev,
3725 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3726 				   __func__, i, priv->tx_irq[i], ret);
3727 			irq_err = REQ_IRQ_ERR_TX;
3728 			irq_idx = i;
3729 			goto irq_error;
3730 		}
3731 		cpumask_clear(&cpu_mask);
3732 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3733 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3734 	}
3735 
3736 	return 0;
3737 
3738 irq_error:
3739 	stmmac_free_irq(dev, irq_err, irq_idx);
3740 	return ret;
3741 }
3742 
3743 static int stmmac_request_irq_single(struct net_device *dev)
3744 {
3745 	struct stmmac_priv *priv = netdev_priv(dev);
3746 	enum request_irq_err irq_err;
3747 	int ret;
3748 
3749 	ret = request_irq(dev->irq, stmmac_interrupt,
3750 			  IRQF_SHARED, dev->name, dev);
3751 	if (unlikely(ret < 0)) {
3752 		netdev_err(priv->dev,
3753 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3754 			   __func__, dev->irq, ret);
3755 		irq_err = REQ_IRQ_ERR_MAC;
3756 		goto irq_error;
3757 	}
3758 
3759 	/* Request the Wake IRQ in case of another line
3760 	 * is used for WoL
3761 	 */
3762 	priv->wol_irq_disabled = true;
3763 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3764 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3765 				  IRQF_SHARED, dev->name, dev);
3766 		if (unlikely(ret < 0)) {
3767 			netdev_err(priv->dev,
3768 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3769 				   __func__, priv->wol_irq, ret);
3770 			irq_err = REQ_IRQ_ERR_WOL;
3771 			goto irq_error;
3772 		}
3773 	}
3774 
3775 	/* Request the IRQ lines */
3776 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3777 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3778 				  IRQF_SHARED, dev->name, dev);
3779 		if (unlikely(ret < 0)) {
3780 			netdev_err(priv->dev,
3781 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3782 				   __func__, priv->lpi_irq, ret);
3783 			irq_err = REQ_IRQ_ERR_LPI;
3784 			goto irq_error;
3785 		}
3786 	}
3787 
3788 	/* Request the common Safety Feature Correctible/Uncorrectible
3789 	 * Error line in case of another line is used
3790 	 */
3791 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3792 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3793 				  IRQF_SHARED, dev->name, dev);
3794 		if (unlikely(ret < 0)) {
3795 			netdev_err(priv->dev,
3796 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3797 				   __func__, priv->sfty_irq, ret);
3798 			irq_err = REQ_IRQ_ERR_SFTY;
3799 			goto irq_error;
3800 		}
3801 	}
3802 
3803 	return 0;
3804 
3805 irq_error:
3806 	stmmac_free_irq(dev, irq_err, 0);
3807 	return ret;
3808 }
3809 
3810 static int stmmac_request_irq(struct net_device *dev)
3811 {
3812 	struct stmmac_priv *priv = netdev_priv(dev);
3813 	int ret;
3814 
3815 	/* Request the IRQ lines */
3816 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3817 		ret = stmmac_request_irq_multi_msi(dev);
3818 	else
3819 		ret = stmmac_request_irq_single(dev);
3820 
3821 	return ret;
3822 }
3823 
3824 /**
3825  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3826  *  @priv: driver private structure
3827  *  @mtu: MTU to setup the dma queue and buf with
3828  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3829  *  Allocate the Tx/Rx DMA queue and init them.
3830  *  Return value:
3831  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3832  */
3833 static struct stmmac_dma_conf *
3834 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3835 {
3836 	struct stmmac_dma_conf *dma_conf;
3837 	int chan, bfsize, ret;
3838 
3839 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3840 	if (!dma_conf) {
3841 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3842 			   __func__);
3843 		return ERR_PTR(-ENOMEM);
3844 	}
3845 
3846 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3847 	if (bfsize < 0)
3848 		bfsize = 0;
3849 
3850 	if (bfsize < BUF_SIZE_16KiB)
3851 		bfsize = stmmac_set_bfsize(mtu, 0);
3852 
3853 	dma_conf->dma_buf_sz = bfsize;
3854 	/* Chose the tx/rx size from the already defined one in the
3855 	 * priv struct. (if defined)
3856 	 */
3857 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3858 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3859 
3860 	if (!dma_conf->dma_tx_size)
3861 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3862 	if (!dma_conf->dma_rx_size)
3863 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3864 
3865 	/* Earlier check for TBS */
3866 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3867 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3868 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3869 
3870 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3871 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3872 	}
3873 
3874 	ret = alloc_dma_desc_resources(priv, dma_conf);
3875 	if (ret < 0) {
3876 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3877 			   __func__);
3878 		goto alloc_error;
3879 	}
3880 
3881 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3882 	if (ret < 0) {
3883 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3884 			   __func__);
3885 		goto init_error;
3886 	}
3887 
3888 	return dma_conf;
3889 
3890 init_error:
3891 	free_dma_desc_resources(priv, dma_conf);
3892 alloc_error:
3893 	kfree(dma_conf);
3894 	return ERR_PTR(ret);
3895 }
3896 
3897 /**
3898  *  __stmmac_open - open entry point of the driver
3899  *  @dev : pointer to the device structure.
3900  *  @dma_conf :  structure to take the dma data
3901  *  Description:
3902  *  This function is the open entry point of the driver.
3903  *  Return value:
3904  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3905  *  file on failure.
3906  */
3907 static int __stmmac_open(struct net_device *dev,
3908 			 struct stmmac_dma_conf *dma_conf)
3909 {
3910 	struct stmmac_priv *priv = netdev_priv(dev);
3911 	int mode = priv->plat->phy_interface;
3912 	u32 chan;
3913 	int ret;
3914 
3915 	ret = pm_runtime_resume_and_get(priv->device);
3916 	if (ret < 0)
3917 		return ret;
3918 
3919 	if ((!priv->hw->xpcs ||
3920 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3921 		ret = stmmac_init_phy(dev);
3922 		if (ret) {
3923 			netdev_err(priv->dev,
3924 				   "%s: Cannot attach to PHY (error: %d)\n",
3925 				   __func__, ret);
3926 			goto init_phy_error;
3927 		}
3928 	}
3929 
3930 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3931 
3932 	buf_sz = dma_conf->dma_buf_sz;
3933 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3934 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3935 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3936 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3937 
3938 	stmmac_reset_queues_param(priv);
3939 
3940 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3941 	    priv->plat->serdes_powerup) {
3942 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3943 		if (ret < 0) {
3944 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3945 				   __func__);
3946 			goto init_error;
3947 		}
3948 	}
3949 
3950 	ret = stmmac_hw_setup(dev, true);
3951 	if (ret < 0) {
3952 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3953 		goto init_error;
3954 	}
3955 
3956 	stmmac_init_coalesce(priv);
3957 
3958 	phylink_start(priv->phylink);
3959 	/* We may have called phylink_speed_down before */
3960 	phylink_speed_up(priv->phylink);
3961 
3962 	ret = stmmac_request_irq(dev);
3963 	if (ret)
3964 		goto irq_error;
3965 
3966 	stmmac_enable_all_queues(priv);
3967 	netif_tx_start_all_queues(priv->dev);
3968 	stmmac_enable_all_dma_irq(priv);
3969 
3970 	return 0;
3971 
3972 irq_error:
3973 	phylink_stop(priv->phylink);
3974 
3975 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3976 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3977 
3978 	stmmac_hw_teardown(dev);
3979 init_error:
3980 	phylink_disconnect_phy(priv->phylink);
3981 init_phy_error:
3982 	pm_runtime_put(priv->device);
3983 	return ret;
3984 }
3985 
3986 static int stmmac_open(struct net_device *dev)
3987 {
3988 	struct stmmac_priv *priv = netdev_priv(dev);
3989 	struct stmmac_dma_conf *dma_conf;
3990 	int ret;
3991 
3992 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3993 	if (IS_ERR(dma_conf))
3994 		return PTR_ERR(dma_conf);
3995 
3996 	ret = __stmmac_open(dev, dma_conf);
3997 	if (ret)
3998 		free_dma_desc_resources(priv, dma_conf);
3999 
4000 	kfree(dma_conf);
4001 	return ret;
4002 }
4003 
4004 /**
4005  *  stmmac_release - close entry point of the driver
4006  *  @dev : device pointer.
4007  *  Description:
4008  *  This is the stop entry point of the driver.
4009  */
4010 static int stmmac_release(struct net_device *dev)
4011 {
4012 	struct stmmac_priv *priv = netdev_priv(dev);
4013 	u32 chan;
4014 
4015 	if (device_may_wakeup(priv->device))
4016 		phylink_speed_down(priv->phylink, false);
4017 	/* Stop and disconnect the PHY */
4018 	phylink_stop(priv->phylink);
4019 	phylink_disconnect_phy(priv->phylink);
4020 
4021 	stmmac_disable_all_queues(priv);
4022 
4023 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4024 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4025 
4026 	netif_tx_disable(dev);
4027 
4028 	/* Free the IRQ lines */
4029 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4030 
4031 	if (priv->eee_enabled) {
4032 		priv->tx_path_in_lpi_mode = false;
4033 		del_timer_sync(&priv->eee_ctrl_timer);
4034 	}
4035 
4036 	/* Stop TX/RX DMA and clear the descriptors */
4037 	stmmac_stop_all_dma(priv);
4038 
4039 	/* Release and free the Rx/Tx resources */
4040 	free_dma_desc_resources(priv, &priv->dma_conf);
4041 
4042 	/* Disable the MAC Rx/Tx */
4043 	stmmac_mac_set(priv, priv->ioaddr, false);
4044 
4045 	/* Powerdown Serdes if there is */
4046 	if (priv->plat->serdes_powerdown)
4047 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4048 
4049 	stmmac_release_ptp(priv);
4050 
4051 	if (stmmac_fpe_supported(priv))
4052 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4053 
4054 	pm_runtime_put(priv->device);
4055 
4056 	return 0;
4057 }
4058 
4059 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4060 			       struct stmmac_tx_queue *tx_q)
4061 {
4062 	u16 tag = 0x0, inner_tag = 0x0;
4063 	u32 inner_type = 0x0;
4064 	struct dma_desc *p;
4065 
4066 	if (!priv->dma_cap.vlins)
4067 		return false;
4068 	if (!skb_vlan_tag_present(skb))
4069 		return false;
4070 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4071 		inner_tag = skb_vlan_tag_get(skb);
4072 		inner_type = STMMAC_VLAN_INSERT;
4073 	}
4074 
4075 	tag = skb_vlan_tag_get(skb);
4076 
4077 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4078 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4079 	else
4080 		p = &tx_q->dma_tx[tx_q->cur_tx];
4081 
4082 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4083 		return false;
4084 
4085 	stmmac_set_tx_owner(priv, p);
4086 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4087 	return true;
4088 }
4089 
4090 /**
4091  *  stmmac_tso_allocator - close entry point of the driver
4092  *  @priv: driver private structure
4093  *  @des: buffer start address
4094  *  @total_len: total length to fill in descriptors
4095  *  @last_segment: condition for the last descriptor
4096  *  @queue: TX queue index
4097  *  Description:
4098  *  This function fills descriptor and request new descriptors according to
4099  *  buffer length to fill
4100  */
4101 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4102 				 int total_len, bool last_segment, u32 queue)
4103 {
4104 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4105 	struct dma_desc *desc;
4106 	u32 buff_size;
4107 	int tmp_len;
4108 
4109 	tmp_len = total_len;
4110 
4111 	while (tmp_len > 0) {
4112 		dma_addr_t curr_addr;
4113 
4114 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4115 						priv->dma_conf.dma_tx_size);
4116 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4117 
4118 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4119 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4120 		else
4121 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4122 
4123 		curr_addr = des + (total_len - tmp_len);
4124 		if (priv->dma_cap.addr64 <= 32)
4125 			desc->des0 = cpu_to_le32(curr_addr);
4126 		else
4127 			stmmac_set_desc_addr(priv, desc, curr_addr);
4128 
4129 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4130 			    TSO_MAX_BUFF_SIZE : tmp_len;
4131 
4132 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4133 				0, 1,
4134 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4135 				0, 0);
4136 
4137 		tmp_len -= TSO_MAX_BUFF_SIZE;
4138 	}
4139 }
4140 
4141 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4142 {
4143 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4144 	int desc_size;
4145 
4146 	if (likely(priv->extend_desc))
4147 		desc_size = sizeof(struct dma_extended_desc);
4148 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4149 		desc_size = sizeof(struct dma_edesc);
4150 	else
4151 		desc_size = sizeof(struct dma_desc);
4152 
4153 	/* The own bit must be the latest setting done when prepare the
4154 	 * descriptor and then barrier is needed to make sure that
4155 	 * all is coherent before granting the DMA engine.
4156 	 */
4157 	wmb();
4158 
4159 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4160 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4161 }
4162 
4163 /**
4164  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4165  *  @skb : the socket buffer
4166  *  @dev : device pointer
4167  *  Description: this is the transmit function that is called on TSO frames
4168  *  (support available on GMAC4 and newer chips).
4169  *  Diagram below show the ring programming in case of TSO frames:
4170  *
4171  *  First Descriptor
4172  *   --------
4173  *   | DES0 |---> buffer1 = L2/L3/L4 header
4174  *   | DES1 |---> TCP Payload (can continue on next descr...)
4175  *   | DES2 |---> buffer 1 and 2 len
4176  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4177  *   --------
4178  *	|
4179  *     ...
4180  *	|
4181  *   --------
4182  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4183  *   | DES1 | --|
4184  *   | DES2 | --> buffer 1 and 2 len
4185  *   | DES3 |
4186  *   --------
4187  *
4188  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4189  */
4190 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4191 {
4192 	struct dma_desc *desc, *first, *mss_desc = NULL;
4193 	struct stmmac_priv *priv = netdev_priv(dev);
4194 	int tmp_pay_len = 0, first_tx, nfrags;
4195 	unsigned int first_entry, tx_packets;
4196 	struct stmmac_txq_stats *txq_stats;
4197 	struct stmmac_tx_queue *tx_q;
4198 	u32 pay_len, mss, queue;
4199 	dma_addr_t tso_des, des;
4200 	u8 proto_hdr_len, hdr;
4201 	bool set_ic;
4202 	int i;
4203 
4204 	/* Always insert VLAN tag to SKB payload for TSO frames.
4205 	 *
4206 	 * Never insert VLAN tag by HW, since segments splited by
4207 	 * TSO engine will be un-tagged by mistake.
4208 	 */
4209 	if (skb_vlan_tag_present(skb)) {
4210 		skb = __vlan_hwaccel_push_inside(skb);
4211 		if (unlikely(!skb)) {
4212 			priv->xstats.tx_dropped++;
4213 			return NETDEV_TX_OK;
4214 		}
4215 	}
4216 
4217 	nfrags = skb_shinfo(skb)->nr_frags;
4218 	queue = skb_get_queue_mapping(skb);
4219 
4220 	tx_q = &priv->dma_conf.tx_queue[queue];
4221 	txq_stats = &priv->xstats.txq_stats[queue];
4222 	first_tx = tx_q->cur_tx;
4223 
4224 	/* Compute header lengths */
4225 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4226 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4227 		hdr = sizeof(struct udphdr);
4228 	} else {
4229 		proto_hdr_len = skb_tcp_all_headers(skb);
4230 		hdr = tcp_hdrlen(skb);
4231 	}
4232 
4233 	/* Desc availability based on threshold should be enough safe */
4234 	if (unlikely(stmmac_tx_avail(priv, queue) <
4235 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4236 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4237 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4238 								queue));
4239 			/* This is a hard error, log it. */
4240 			netdev_err(priv->dev,
4241 				   "%s: Tx Ring full when queue awake\n",
4242 				   __func__);
4243 		}
4244 		return NETDEV_TX_BUSY;
4245 	}
4246 
4247 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4248 
4249 	mss = skb_shinfo(skb)->gso_size;
4250 
4251 	/* set new MSS value if needed */
4252 	if (mss != tx_q->mss) {
4253 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4254 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4255 		else
4256 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4257 
4258 		stmmac_set_mss(priv, mss_desc, mss);
4259 		tx_q->mss = mss;
4260 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4261 						priv->dma_conf.dma_tx_size);
4262 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4263 	}
4264 
4265 	if (netif_msg_tx_queued(priv)) {
4266 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4267 			__func__, hdr, proto_hdr_len, pay_len, mss);
4268 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4269 			skb->data_len);
4270 	}
4271 
4272 	first_entry = tx_q->cur_tx;
4273 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4274 
4275 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4276 		desc = &tx_q->dma_entx[first_entry].basic;
4277 	else
4278 		desc = &tx_q->dma_tx[first_entry];
4279 	first = desc;
4280 
4281 	/* first descriptor: fill Headers on Buf1 */
4282 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4283 			     DMA_TO_DEVICE);
4284 	if (dma_mapping_error(priv->device, des))
4285 		goto dma_map_err;
4286 
4287 	if (priv->dma_cap.addr64 <= 32) {
4288 		first->des0 = cpu_to_le32(des);
4289 
4290 		/* Fill start of payload in buff2 of first descriptor */
4291 		if (pay_len)
4292 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4293 
4294 		/* If needed take extra descriptors to fill the remaining payload */
4295 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4296 		tso_des = des;
4297 	} else {
4298 		stmmac_set_desc_addr(priv, first, des);
4299 		tmp_pay_len = pay_len;
4300 		tso_des = des + proto_hdr_len;
4301 		pay_len = 0;
4302 	}
4303 
4304 	stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
4305 
4306 	/* In case two or more DMA transmit descriptors are allocated for this
4307 	 * non-paged SKB data, the DMA buffer address should be saved to
4308 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4309 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4310 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4311 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4312 	 * sooner or later.
4313 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4314 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4315 	 * this DMA buffer right after the DMA engine completely finishes the
4316 	 * full buffer transmission.
4317 	 */
4318 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4319 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4320 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4321 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4322 
4323 	/* Prepare fragments */
4324 	for (i = 0; i < nfrags; i++) {
4325 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4326 
4327 		des = skb_frag_dma_map(priv->device, frag, 0,
4328 				       skb_frag_size(frag),
4329 				       DMA_TO_DEVICE);
4330 		if (dma_mapping_error(priv->device, des))
4331 			goto dma_map_err;
4332 
4333 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4334 				     (i == nfrags - 1), queue);
4335 
4336 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4337 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4338 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4339 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4340 	}
4341 
4342 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4343 
4344 	/* Only the last descriptor gets to point to the skb. */
4345 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4346 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4347 
4348 	/* Manage tx mitigation */
4349 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4350 	tx_q->tx_count_frames += tx_packets;
4351 
4352 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4353 		set_ic = true;
4354 	else if (!priv->tx_coal_frames[queue])
4355 		set_ic = false;
4356 	else if (tx_packets > priv->tx_coal_frames[queue])
4357 		set_ic = true;
4358 	else if ((tx_q->tx_count_frames %
4359 		  priv->tx_coal_frames[queue]) < tx_packets)
4360 		set_ic = true;
4361 	else
4362 		set_ic = false;
4363 
4364 	if (set_ic) {
4365 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4366 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4367 		else
4368 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4369 
4370 		tx_q->tx_count_frames = 0;
4371 		stmmac_set_tx_ic(priv, desc);
4372 	}
4373 
4374 	/* We've used all descriptors we need for this skb, however,
4375 	 * advance cur_tx so that it references a fresh descriptor.
4376 	 * ndo_start_xmit will fill this descriptor the next time it's
4377 	 * called and stmmac_tx_clean may clean up to this descriptor.
4378 	 */
4379 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4380 
4381 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4382 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4383 			  __func__);
4384 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4385 	}
4386 
4387 	u64_stats_update_begin(&txq_stats->q_syncp);
4388 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4389 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4390 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4391 	if (set_ic)
4392 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4393 	u64_stats_update_end(&txq_stats->q_syncp);
4394 
4395 	if (priv->sarc_type)
4396 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4397 
4398 	skb_tx_timestamp(skb);
4399 
4400 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4401 		     priv->hwts_tx_en)) {
4402 		/* declare that device is doing timestamping */
4403 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4404 		stmmac_enable_tx_timestamp(priv, first);
4405 	}
4406 
4407 	/* Complete the first descriptor before granting the DMA */
4408 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4409 			proto_hdr_len,
4410 			pay_len,
4411 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4412 			hdr / 4, (skb->len - proto_hdr_len));
4413 
4414 	/* If context desc is used to change MSS */
4415 	if (mss_desc) {
4416 		/* Make sure that first descriptor has been completely
4417 		 * written, including its own bit. This is because MSS is
4418 		 * actually before first descriptor, so we need to make
4419 		 * sure that MSS's own bit is the last thing written.
4420 		 */
4421 		dma_wmb();
4422 		stmmac_set_tx_owner(priv, mss_desc);
4423 	}
4424 
4425 	if (netif_msg_pktdata(priv)) {
4426 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4427 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4428 			tx_q->cur_tx, first, nfrags);
4429 		pr_info(">>> frame to be transmitted: ");
4430 		print_pkt(skb->data, skb_headlen(skb));
4431 	}
4432 
4433 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4434 
4435 	stmmac_flush_tx_descriptors(priv, queue);
4436 	stmmac_tx_timer_arm(priv, queue);
4437 
4438 	return NETDEV_TX_OK;
4439 
4440 dma_map_err:
4441 	dev_err(priv->device, "Tx dma map failed\n");
4442 	dev_kfree_skb(skb);
4443 	priv->xstats.tx_dropped++;
4444 	return NETDEV_TX_OK;
4445 }
4446 
4447 /**
4448  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4449  * @skb: socket buffer to check
4450  *
4451  * Check if a packet has an ethertype that will trigger the IP header checks
4452  * and IP/TCP checksum engine of the stmmac core.
4453  *
4454  * Return: true if the ethertype can trigger the checksum engine, false
4455  * otherwise
4456  */
4457 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4458 {
4459 	int depth = 0;
4460 	__be16 proto;
4461 
4462 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4463 				    &depth);
4464 
4465 	return (depth <= ETH_HLEN) &&
4466 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4467 }
4468 
4469 /**
4470  *  stmmac_xmit - Tx entry point of the driver
4471  *  @skb : the socket buffer
4472  *  @dev : device pointer
4473  *  Description : this is the tx entry point of the driver.
4474  *  It programs the chain or the ring and supports oversized frames
4475  *  and SG feature.
4476  */
4477 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4478 {
4479 	unsigned int first_entry, tx_packets, enh_desc;
4480 	struct stmmac_priv *priv = netdev_priv(dev);
4481 	unsigned int nopaged_len = skb_headlen(skb);
4482 	int i, csum_insertion = 0, is_jumbo = 0;
4483 	u32 queue = skb_get_queue_mapping(skb);
4484 	int nfrags = skb_shinfo(skb)->nr_frags;
4485 	int gso = skb_shinfo(skb)->gso_type;
4486 	struct stmmac_txq_stats *txq_stats;
4487 	struct dma_edesc *tbs_desc = NULL;
4488 	struct dma_desc *desc, *first;
4489 	struct stmmac_tx_queue *tx_q;
4490 	bool has_vlan, set_ic;
4491 	int entry, first_tx;
4492 	dma_addr_t des;
4493 
4494 	tx_q = &priv->dma_conf.tx_queue[queue];
4495 	txq_stats = &priv->xstats.txq_stats[queue];
4496 	first_tx = tx_q->cur_tx;
4497 
4498 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4499 		stmmac_disable_eee_mode(priv);
4500 
4501 	/* Manage oversized TCP frames for GMAC4 device */
4502 	if (skb_is_gso(skb) && priv->tso) {
4503 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4504 			return stmmac_tso_xmit(skb, dev);
4505 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4506 			return stmmac_tso_xmit(skb, dev);
4507 	}
4508 
4509 	if (priv->est && priv->est->enable &&
4510 	    priv->est->max_sdu[queue] &&
4511 	    skb->len > priv->est->max_sdu[queue]){
4512 		priv->xstats.max_sdu_txq_drop[queue]++;
4513 		goto max_sdu_err;
4514 	}
4515 
4516 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4517 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4518 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4519 								queue));
4520 			/* This is a hard error, log it. */
4521 			netdev_err(priv->dev,
4522 				   "%s: Tx Ring full when queue awake\n",
4523 				   __func__);
4524 		}
4525 		return NETDEV_TX_BUSY;
4526 	}
4527 
4528 	/* Check if VLAN can be inserted by HW */
4529 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4530 
4531 	entry = tx_q->cur_tx;
4532 	first_entry = entry;
4533 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4534 
4535 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4536 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4537 	 * queues. In that case, checksum offloading for those queues that don't
4538 	 * support tx coe needs to fallback to software checksum calculation.
4539 	 *
4540 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4541 	 * also have to be checksummed in software.
4542 	 */
4543 	if (csum_insertion &&
4544 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4545 	     !stmmac_has_ip_ethertype(skb))) {
4546 		if (unlikely(skb_checksum_help(skb)))
4547 			goto dma_map_err;
4548 		csum_insertion = !csum_insertion;
4549 	}
4550 
4551 	if (likely(priv->extend_desc))
4552 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4553 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4554 		desc = &tx_q->dma_entx[entry].basic;
4555 	else
4556 		desc = tx_q->dma_tx + entry;
4557 
4558 	first = desc;
4559 
4560 	if (has_vlan)
4561 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4562 
4563 	enh_desc = priv->plat->enh_desc;
4564 	/* To program the descriptors according to the size of the frame */
4565 	if (enh_desc)
4566 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4567 
4568 	if (unlikely(is_jumbo)) {
4569 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4570 		if (unlikely(entry < 0) && (entry != -EINVAL))
4571 			goto dma_map_err;
4572 	}
4573 
4574 	for (i = 0; i < nfrags; i++) {
4575 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4576 		int len = skb_frag_size(frag);
4577 		bool last_segment = (i == (nfrags - 1));
4578 
4579 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4580 		WARN_ON(tx_q->tx_skbuff[entry]);
4581 
4582 		if (likely(priv->extend_desc))
4583 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4584 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4585 			desc = &tx_q->dma_entx[entry].basic;
4586 		else
4587 			desc = tx_q->dma_tx + entry;
4588 
4589 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4590 				       DMA_TO_DEVICE);
4591 		if (dma_mapping_error(priv->device, des))
4592 			goto dma_map_err; /* should reuse desc w/o issues */
4593 
4594 		tx_q->tx_skbuff_dma[entry].buf = des;
4595 
4596 		stmmac_set_desc_addr(priv, desc, des);
4597 
4598 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4599 		tx_q->tx_skbuff_dma[entry].len = len;
4600 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4601 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4602 
4603 		/* Prepare the descriptor and set the own bit too */
4604 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4605 				priv->mode, 1, last_segment, skb->len);
4606 	}
4607 
4608 	/* Only the last descriptor gets to point to the skb. */
4609 	tx_q->tx_skbuff[entry] = skb;
4610 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4611 
4612 	/* According to the coalesce parameter the IC bit for the latest
4613 	 * segment is reset and the timer re-started to clean the tx status.
4614 	 * This approach takes care about the fragments: desc is the first
4615 	 * element in case of no SG.
4616 	 */
4617 	tx_packets = (entry + 1) - first_tx;
4618 	tx_q->tx_count_frames += tx_packets;
4619 
4620 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4621 		set_ic = true;
4622 	else if (!priv->tx_coal_frames[queue])
4623 		set_ic = false;
4624 	else if (tx_packets > priv->tx_coal_frames[queue])
4625 		set_ic = true;
4626 	else if ((tx_q->tx_count_frames %
4627 		  priv->tx_coal_frames[queue]) < tx_packets)
4628 		set_ic = true;
4629 	else
4630 		set_ic = false;
4631 
4632 	if (set_ic) {
4633 		if (likely(priv->extend_desc))
4634 			desc = &tx_q->dma_etx[entry].basic;
4635 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4636 			desc = &tx_q->dma_entx[entry].basic;
4637 		else
4638 			desc = &tx_q->dma_tx[entry];
4639 
4640 		tx_q->tx_count_frames = 0;
4641 		stmmac_set_tx_ic(priv, desc);
4642 	}
4643 
4644 	/* We've used all descriptors we need for this skb, however,
4645 	 * advance cur_tx so that it references a fresh descriptor.
4646 	 * ndo_start_xmit will fill this descriptor the next time it's
4647 	 * called and stmmac_tx_clean may clean up to this descriptor.
4648 	 */
4649 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4650 	tx_q->cur_tx = entry;
4651 
4652 	if (netif_msg_pktdata(priv)) {
4653 		netdev_dbg(priv->dev,
4654 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4655 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4656 			   entry, first, nfrags);
4657 
4658 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4659 		print_pkt(skb->data, skb->len);
4660 	}
4661 
4662 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4663 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4664 			  __func__);
4665 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4666 	}
4667 
4668 	u64_stats_update_begin(&txq_stats->q_syncp);
4669 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4670 	if (set_ic)
4671 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4672 	u64_stats_update_end(&txq_stats->q_syncp);
4673 
4674 	if (priv->sarc_type)
4675 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4676 
4677 	skb_tx_timestamp(skb);
4678 
4679 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4680 	 * problems because all the descriptors are actually ready to be
4681 	 * passed to the DMA engine.
4682 	 */
4683 	if (likely(!is_jumbo)) {
4684 		bool last_segment = (nfrags == 0);
4685 
4686 		des = dma_map_single(priv->device, skb->data,
4687 				     nopaged_len, DMA_TO_DEVICE);
4688 		if (dma_mapping_error(priv->device, des))
4689 			goto dma_map_err;
4690 
4691 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4692 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4693 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4694 
4695 		stmmac_set_desc_addr(priv, first, des);
4696 
4697 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4698 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4699 
4700 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4701 			     priv->hwts_tx_en)) {
4702 			/* declare that device is doing timestamping */
4703 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4704 			stmmac_enable_tx_timestamp(priv, first);
4705 		}
4706 
4707 		/* Prepare the first descriptor setting the OWN bit too */
4708 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4709 				csum_insertion, priv->mode, 0, last_segment,
4710 				skb->len);
4711 	}
4712 
4713 	if (tx_q->tbs & STMMAC_TBS_EN) {
4714 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4715 
4716 		tbs_desc = &tx_q->dma_entx[first_entry];
4717 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4718 	}
4719 
4720 	stmmac_set_tx_owner(priv, first);
4721 
4722 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4723 
4724 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4725 
4726 	stmmac_flush_tx_descriptors(priv, queue);
4727 	stmmac_tx_timer_arm(priv, queue);
4728 
4729 	return NETDEV_TX_OK;
4730 
4731 dma_map_err:
4732 	netdev_err(priv->dev, "Tx DMA map failed\n");
4733 max_sdu_err:
4734 	dev_kfree_skb(skb);
4735 	priv->xstats.tx_dropped++;
4736 	return NETDEV_TX_OK;
4737 }
4738 
4739 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4740 {
4741 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4742 	__be16 vlan_proto = veth->h_vlan_proto;
4743 	u16 vlanid;
4744 
4745 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4746 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4747 	    (vlan_proto == htons(ETH_P_8021AD) &&
4748 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4749 		/* pop the vlan tag */
4750 		vlanid = ntohs(veth->h_vlan_TCI);
4751 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4752 		skb_pull(skb, VLAN_HLEN);
4753 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4754 	}
4755 }
4756 
4757 /**
4758  * stmmac_rx_refill - refill used skb preallocated buffers
4759  * @priv: driver private structure
4760  * @queue: RX queue index
4761  * Description : this is to reallocate the skb for the reception process
4762  * that is based on zero-copy.
4763  */
4764 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4765 {
4766 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4767 	int dirty = stmmac_rx_dirty(priv, queue);
4768 	unsigned int entry = rx_q->dirty_rx;
4769 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4770 
4771 	if (priv->dma_cap.host_dma_width <= 32)
4772 		gfp |= GFP_DMA32;
4773 
4774 	while (dirty-- > 0) {
4775 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4776 		struct dma_desc *p;
4777 		bool use_rx_wd;
4778 
4779 		if (priv->extend_desc)
4780 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4781 		else
4782 			p = rx_q->dma_rx + entry;
4783 
4784 		if (!buf->page) {
4785 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4786 			if (!buf->page)
4787 				break;
4788 		}
4789 
4790 		if (priv->sph && !buf->sec_page) {
4791 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4792 			if (!buf->sec_page)
4793 				break;
4794 
4795 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4796 		}
4797 
4798 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4799 
4800 		stmmac_set_desc_addr(priv, p, buf->addr);
4801 		if (priv->sph)
4802 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4803 		else
4804 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4805 		stmmac_refill_desc3(priv, rx_q, p);
4806 
4807 		rx_q->rx_count_frames++;
4808 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4809 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4810 			rx_q->rx_count_frames = 0;
4811 
4812 		use_rx_wd = !priv->rx_coal_frames[queue];
4813 		use_rx_wd |= rx_q->rx_count_frames > 0;
4814 		if (!priv->use_riwt)
4815 			use_rx_wd = false;
4816 
4817 		dma_wmb();
4818 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4819 
4820 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4821 	}
4822 	rx_q->dirty_rx = entry;
4823 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4824 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4825 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4826 }
4827 
4828 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4829 				       struct dma_desc *p,
4830 				       int status, unsigned int len)
4831 {
4832 	unsigned int plen = 0, hlen = 0;
4833 	int coe = priv->hw->rx_csum;
4834 
4835 	/* Not first descriptor, buffer is always zero */
4836 	if (priv->sph && len)
4837 		return 0;
4838 
4839 	/* First descriptor, get split header length */
4840 	stmmac_get_rx_header_len(priv, p, &hlen);
4841 	if (priv->sph && hlen) {
4842 		priv->xstats.rx_split_hdr_pkt_n++;
4843 		return hlen;
4844 	}
4845 
4846 	/* First descriptor, not last descriptor and not split header */
4847 	if (status & rx_not_ls)
4848 		return priv->dma_conf.dma_buf_sz;
4849 
4850 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4851 
4852 	/* First descriptor and last descriptor and not split header */
4853 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4854 }
4855 
4856 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4857 				       struct dma_desc *p,
4858 				       int status, unsigned int len)
4859 {
4860 	int coe = priv->hw->rx_csum;
4861 	unsigned int plen = 0;
4862 
4863 	/* Not split header, buffer is not available */
4864 	if (!priv->sph)
4865 		return 0;
4866 
4867 	/* Not last descriptor */
4868 	if (status & rx_not_ls)
4869 		return priv->dma_conf.dma_buf_sz;
4870 
4871 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4872 
4873 	/* Last descriptor */
4874 	return plen - len;
4875 }
4876 
4877 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4878 				struct xdp_frame *xdpf, bool dma_map)
4879 {
4880 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4881 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4882 	unsigned int entry = tx_q->cur_tx;
4883 	struct dma_desc *tx_desc;
4884 	dma_addr_t dma_addr;
4885 	bool set_ic;
4886 
4887 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4888 		return STMMAC_XDP_CONSUMED;
4889 
4890 	if (priv->est && priv->est->enable &&
4891 	    priv->est->max_sdu[queue] &&
4892 	    xdpf->len > priv->est->max_sdu[queue]) {
4893 		priv->xstats.max_sdu_txq_drop[queue]++;
4894 		return STMMAC_XDP_CONSUMED;
4895 	}
4896 
4897 	if (likely(priv->extend_desc))
4898 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4899 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4900 		tx_desc = &tx_q->dma_entx[entry].basic;
4901 	else
4902 		tx_desc = tx_q->dma_tx + entry;
4903 
4904 	if (dma_map) {
4905 		dma_addr = dma_map_single(priv->device, xdpf->data,
4906 					  xdpf->len, DMA_TO_DEVICE);
4907 		if (dma_mapping_error(priv->device, dma_addr))
4908 			return STMMAC_XDP_CONSUMED;
4909 
4910 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4911 	} else {
4912 		struct page *page = virt_to_page(xdpf->data);
4913 
4914 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4915 			   xdpf->headroom;
4916 		dma_sync_single_for_device(priv->device, dma_addr,
4917 					   xdpf->len, DMA_BIDIRECTIONAL);
4918 
4919 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4920 	}
4921 
4922 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4923 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4924 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4925 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4926 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4927 
4928 	tx_q->xdpf[entry] = xdpf;
4929 
4930 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4931 
4932 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4933 			       true, priv->mode, true, true,
4934 			       xdpf->len);
4935 
4936 	tx_q->tx_count_frames++;
4937 
4938 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4939 		set_ic = true;
4940 	else
4941 		set_ic = false;
4942 
4943 	if (set_ic) {
4944 		tx_q->tx_count_frames = 0;
4945 		stmmac_set_tx_ic(priv, tx_desc);
4946 		u64_stats_update_begin(&txq_stats->q_syncp);
4947 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4948 		u64_stats_update_end(&txq_stats->q_syncp);
4949 	}
4950 
4951 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4952 
4953 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4954 	tx_q->cur_tx = entry;
4955 
4956 	return STMMAC_XDP_TX;
4957 }
4958 
4959 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4960 				   int cpu)
4961 {
4962 	int index = cpu;
4963 
4964 	if (unlikely(index < 0))
4965 		index = 0;
4966 
4967 	while (index >= priv->plat->tx_queues_to_use)
4968 		index -= priv->plat->tx_queues_to_use;
4969 
4970 	return index;
4971 }
4972 
4973 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4974 				struct xdp_buff *xdp)
4975 {
4976 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4977 	int cpu = smp_processor_id();
4978 	struct netdev_queue *nq;
4979 	int queue;
4980 	int res;
4981 
4982 	if (unlikely(!xdpf))
4983 		return STMMAC_XDP_CONSUMED;
4984 
4985 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4986 	nq = netdev_get_tx_queue(priv->dev, queue);
4987 
4988 	__netif_tx_lock(nq, cpu);
4989 	/* Avoids TX time-out as we are sharing with slow path */
4990 	txq_trans_cond_update(nq);
4991 
4992 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4993 	if (res == STMMAC_XDP_TX)
4994 		stmmac_flush_tx_descriptors(priv, queue);
4995 
4996 	__netif_tx_unlock(nq);
4997 
4998 	return res;
4999 }
5000 
5001 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5002 				 struct bpf_prog *prog,
5003 				 struct xdp_buff *xdp)
5004 {
5005 	u32 act;
5006 	int res;
5007 
5008 	act = bpf_prog_run_xdp(prog, xdp);
5009 	switch (act) {
5010 	case XDP_PASS:
5011 		res = STMMAC_XDP_PASS;
5012 		break;
5013 	case XDP_TX:
5014 		res = stmmac_xdp_xmit_back(priv, xdp);
5015 		break;
5016 	case XDP_REDIRECT:
5017 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5018 			res = STMMAC_XDP_CONSUMED;
5019 		else
5020 			res = STMMAC_XDP_REDIRECT;
5021 		break;
5022 	default:
5023 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5024 		fallthrough;
5025 	case XDP_ABORTED:
5026 		trace_xdp_exception(priv->dev, prog, act);
5027 		fallthrough;
5028 	case XDP_DROP:
5029 		res = STMMAC_XDP_CONSUMED;
5030 		break;
5031 	}
5032 
5033 	return res;
5034 }
5035 
5036 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5037 					   struct xdp_buff *xdp)
5038 {
5039 	struct bpf_prog *prog;
5040 	int res;
5041 
5042 	prog = READ_ONCE(priv->xdp_prog);
5043 	if (!prog) {
5044 		res = STMMAC_XDP_PASS;
5045 		goto out;
5046 	}
5047 
5048 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5049 out:
5050 	return ERR_PTR(-res);
5051 }
5052 
5053 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5054 				   int xdp_status)
5055 {
5056 	int cpu = smp_processor_id();
5057 	int queue;
5058 
5059 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5060 
5061 	if (xdp_status & STMMAC_XDP_TX)
5062 		stmmac_tx_timer_arm(priv, queue);
5063 
5064 	if (xdp_status & STMMAC_XDP_REDIRECT)
5065 		xdp_do_flush();
5066 }
5067 
5068 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5069 					       struct xdp_buff *xdp)
5070 {
5071 	unsigned int metasize = xdp->data - xdp->data_meta;
5072 	unsigned int datasize = xdp->data_end - xdp->data;
5073 	struct sk_buff *skb;
5074 
5075 	skb = napi_alloc_skb(&ch->rxtx_napi,
5076 			     xdp->data_end - xdp->data_hard_start);
5077 	if (unlikely(!skb))
5078 		return NULL;
5079 
5080 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5081 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5082 	if (metasize)
5083 		skb_metadata_set(skb, metasize);
5084 
5085 	return skb;
5086 }
5087 
5088 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5089 				   struct dma_desc *p, struct dma_desc *np,
5090 				   struct xdp_buff *xdp)
5091 {
5092 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5093 	struct stmmac_channel *ch = &priv->channel[queue];
5094 	unsigned int len = xdp->data_end - xdp->data;
5095 	enum pkt_hash_types hash_type;
5096 	int coe = priv->hw->rx_csum;
5097 	struct sk_buff *skb;
5098 	u32 hash;
5099 
5100 	skb = stmmac_construct_skb_zc(ch, xdp);
5101 	if (!skb) {
5102 		priv->xstats.rx_dropped++;
5103 		return;
5104 	}
5105 
5106 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5107 	if (priv->hw->hw_vlan_en)
5108 		/* MAC level stripping. */
5109 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5110 	else
5111 		/* Driver level stripping. */
5112 		stmmac_rx_vlan(priv->dev, skb);
5113 	skb->protocol = eth_type_trans(skb, priv->dev);
5114 
5115 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5116 		skb_checksum_none_assert(skb);
5117 	else
5118 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5119 
5120 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5121 		skb_set_hash(skb, hash, hash_type);
5122 
5123 	skb_record_rx_queue(skb, queue);
5124 	napi_gro_receive(&ch->rxtx_napi, skb);
5125 
5126 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5127 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5128 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5129 	u64_stats_update_end(&rxq_stats->napi_syncp);
5130 }
5131 
5132 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5133 {
5134 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5135 	unsigned int entry = rx_q->dirty_rx;
5136 	struct dma_desc *rx_desc = NULL;
5137 	bool ret = true;
5138 
5139 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5140 
5141 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5142 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5143 		dma_addr_t dma_addr;
5144 		bool use_rx_wd;
5145 
5146 		if (!buf->xdp) {
5147 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5148 			if (!buf->xdp) {
5149 				ret = false;
5150 				break;
5151 			}
5152 		}
5153 
5154 		if (priv->extend_desc)
5155 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5156 		else
5157 			rx_desc = rx_q->dma_rx + entry;
5158 
5159 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5160 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5161 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5162 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5163 
5164 		rx_q->rx_count_frames++;
5165 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5166 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5167 			rx_q->rx_count_frames = 0;
5168 
5169 		use_rx_wd = !priv->rx_coal_frames[queue];
5170 		use_rx_wd |= rx_q->rx_count_frames > 0;
5171 		if (!priv->use_riwt)
5172 			use_rx_wd = false;
5173 
5174 		dma_wmb();
5175 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5176 
5177 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5178 	}
5179 
5180 	if (rx_desc) {
5181 		rx_q->dirty_rx = entry;
5182 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5183 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5184 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5185 	}
5186 
5187 	return ret;
5188 }
5189 
5190 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5191 {
5192 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5193 	 * to represent incoming packet, whereas cb field in the same structure
5194 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5195 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5196 	 */
5197 	return (struct stmmac_xdp_buff *)xdp;
5198 }
5199 
5200 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5201 {
5202 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5203 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5204 	unsigned int count = 0, error = 0, len = 0;
5205 	int dirty = stmmac_rx_dirty(priv, queue);
5206 	unsigned int next_entry = rx_q->cur_rx;
5207 	u32 rx_errors = 0, rx_dropped = 0;
5208 	unsigned int desc_size;
5209 	struct bpf_prog *prog;
5210 	bool failure = false;
5211 	int xdp_status = 0;
5212 	int status = 0;
5213 
5214 	if (netif_msg_rx_status(priv)) {
5215 		void *rx_head;
5216 
5217 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5218 		if (priv->extend_desc) {
5219 			rx_head = (void *)rx_q->dma_erx;
5220 			desc_size = sizeof(struct dma_extended_desc);
5221 		} else {
5222 			rx_head = (void *)rx_q->dma_rx;
5223 			desc_size = sizeof(struct dma_desc);
5224 		}
5225 
5226 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5227 				    rx_q->dma_rx_phy, desc_size);
5228 	}
5229 	while (count < limit) {
5230 		struct stmmac_rx_buffer *buf;
5231 		struct stmmac_xdp_buff *ctx;
5232 		unsigned int buf1_len = 0;
5233 		struct dma_desc *np, *p;
5234 		int entry;
5235 		int res;
5236 
5237 		if (!count && rx_q->state_saved) {
5238 			error = rx_q->state.error;
5239 			len = rx_q->state.len;
5240 		} else {
5241 			rx_q->state_saved = false;
5242 			error = 0;
5243 			len = 0;
5244 		}
5245 
5246 		if (count >= limit)
5247 			break;
5248 
5249 read_again:
5250 		buf1_len = 0;
5251 		entry = next_entry;
5252 		buf = &rx_q->buf_pool[entry];
5253 
5254 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5255 			failure = failure ||
5256 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5257 			dirty = 0;
5258 		}
5259 
5260 		if (priv->extend_desc)
5261 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5262 		else
5263 			p = rx_q->dma_rx + entry;
5264 
5265 		/* read the status of the incoming frame */
5266 		status = stmmac_rx_status(priv, &priv->xstats, p);
5267 		/* check if managed by the DMA otherwise go ahead */
5268 		if (unlikely(status & dma_own))
5269 			break;
5270 
5271 		/* Prefetch the next RX descriptor */
5272 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5273 						priv->dma_conf.dma_rx_size);
5274 		next_entry = rx_q->cur_rx;
5275 
5276 		if (priv->extend_desc)
5277 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5278 		else
5279 			np = rx_q->dma_rx + next_entry;
5280 
5281 		prefetch(np);
5282 
5283 		/* Ensure a valid XSK buffer before proceed */
5284 		if (!buf->xdp)
5285 			break;
5286 
5287 		if (priv->extend_desc)
5288 			stmmac_rx_extended_status(priv, &priv->xstats,
5289 						  rx_q->dma_erx + entry);
5290 		if (unlikely(status == discard_frame)) {
5291 			xsk_buff_free(buf->xdp);
5292 			buf->xdp = NULL;
5293 			dirty++;
5294 			error = 1;
5295 			if (!priv->hwts_rx_en)
5296 				rx_errors++;
5297 		}
5298 
5299 		if (unlikely(error && (status & rx_not_ls)))
5300 			goto read_again;
5301 		if (unlikely(error)) {
5302 			count++;
5303 			continue;
5304 		}
5305 
5306 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5307 		if (likely(status & rx_not_ls)) {
5308 			xsk_buff_free(buf->xdp);
5309 			buf->xdp = NULL;
5310 			dirty++;
5311 			count++;
5312 			goto read_again;
5313 		}
5314 
5315 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5316 		ctx->priv = priv;
5317 		ctx->desc = p;
5318 		ctx->ndesc = np;
5319 
5320 		/* XDP ZC Frame only support primary buffers for now */
5321 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5322 		len += buf1_len;
5323 
5324 		/* ACS is disabled; strip manually. */
5325 		if (likely(!(status & rx_not_ls))) {
5326 			buf1_len -= ETH_FCS_LEN;
5327 			len -= ETH_FCS_LEN;
5328 		}
5329 
5330 		/* RX buffer is good and fit into a XSK pool buffer */
5331 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5332 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5333 
5334 		prog = READ_ONCE(priv->xdp_prog);
5335 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5336 
5337 		switch (res) {
5338 		case STMMAC_XDP_PASS:
5339 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5340 			xsk_buff_free(buf->xdp);
5341 			break;
5342 		case STMMAC_XDP_CONSUMED:
5343 			xsk_buff_free(buf->xdp);
5344 			rx_dropped++;
5345 			break;
5346 		case STMMAC_XDP_TX:
5347 		case STMMAC_XDP_REDIRECT:
5348 			xdp_status |= res;
5349 			break;
5350 		}
5351 
5352 		buf->xdp = NULL;
5353 		dirty++;
5354 		count++;
5355 	}
5356 
5357 	if (status & rx_not_ls) {
5358 		rx_q->state_saved = true;
5359 		rx_q->state.error = error;
5360 		rx_q->state.len = len;
5361 	}
5362 
5363 	stmmac_finalize_xdp_rx(priv, xdp_status);
5364 
5365 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5366 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5367 	u64_stats_update_end(&rxq_stats->napi_syncp);
5368 
5369 	priv->xstats.rx_dropped += rx_dropped;
5370 	priv->xstats.rx_errors += rx_errors;
5371 
5372 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5373 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5374 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5375 		else
5376 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5377 
5378 		return (int)count;
5379 	}
5380 
5381 	return failure ? limit : (int)count;
5382 }
5383 
5384 /**
5385  * stmmac_rx - manage the receive process
5386  * @priv: driver private structure
5387  * @limit: napi bugget
5388  * @queue: RX queue index.
5389  * Description :  this the function called by the napi poll method.
5390  * It gets all the frames inside the ring.
5391  */
5392 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5393 {
5394 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5395 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5396 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5397 	struct stmmac_channel *ch = &priv->channel[queue];
5398 	unsigned int count = 0, error = 0, len = 0;
5399 	int status = 0, coe = priv->hw->rx_csum;
5400 	unsigned int next_entry = rx_q->cur_rx;
5401 	enum dma_data_direction dma_dir;
5402 	unsigned int desc_size;
5403 	struct sk_buff *skb = NULL;
5404 	struct stmmac_xdp_buff ctx;
5405 	int xdp_status = 0;
5406 	int buf_sz;
5407 
5408 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5409 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5410 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5411 
5412 	if (netif_msg_rx_status(priv)) {
5413 		void *rx_head;
5414 
5415 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5416 		if (priv->extend_desc) {
5417 			rx_head = (void *)rx_q->dma_erx;
5418 			desc_size = sizeof(struct dma_extended_desc);
5419 		} else {
5420 			rx_head = (void *)rx_q->dma_rx;
5421 			desc_size = sizeof(struct dma_desc);
5422 		}
5423 
5424 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5425 				    rx_q->dma_rx_phy, desc_size);
5426 	}
5427 	while (count < limit) {
5428 		unsigned int buf1_len = 0, buf2_len = 0;
5429 		enum pkt_hash_types hash_type;
5430 		struct stmmac_rx_buffer *buf;
5431 		struct dma_desc *np, *p;
5432 		int entry;
5433 		u32 hash;
5434 
5435 		if (!count && rx_q->state_saved) {
5436 			skb = rx_q->state.skb;
5437 			error = rx_q->state.error;
5438 			len = rx_q->state.len;
5439 		} else {
5440 			rx_q->state_saved = false;
5441 			skb = NULL;
5442 			error = 0;
5443 			len = 0;
5444 		}
5445 
5446 read_again:
5447 		if (count >= limit)
5448 			break;
5449 
5450 		buf1_len = 0;
5451 		buf2_len = 0;
5452 		entry = next_entry;
5453 		buf = &rx_q->buf_pool[entry];
5454 
5455 		if (priv->extend_desc)
5456 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5457 		else
5458 			p = rx_q->dma_rx + entry;
5459 
5460 		/* read the status of the incoming frame */
5461 		status = stmmac_rx_status(priv, &priv->xstats, p);
5462 		/* check if managed by the DMA otherwise go ahead */
5463 		if (unlikely(status & dma_own))
5464 			break;
5465 
5466 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5467 						priv->dma_conf.dma_rx_size);
5468 		next_entry = rx_q->cur_rx;
5469 
5470 		if (priv->extend_desc)
5471 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5472 		else
5473 			np = rx_q->dma_rx + next_entry;
5474 
5475 		prefetch(np);
5476 
5477 		if (priv->extend_desc)
5478 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5479 		if (unlikely(status == discard_frame)) {
5480 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5481 			buf->page = NULL;
5482 			error = 1;
5483 			if (!priv->hwts_rx_en)
5484 				rx_errors++;
5485 		}
5486 
5487 		if (unlikely(error && (status & rx_not_ls)))
5488 			goto read_again;
5489 		if (unlikely(error)) {
5490 			dev_kfree_skb(skb);
5491 			skb = NULL;
5492 			count++;
5493 			continue;
5494 		}
5495 
5496 		/* Buffer is good. Go on. */
5497 
5498 		prefetch(page_address(buf->page) + buf->page_offset);
5499 		if (buf->sec_page)
5500 			prefetch(page_address(buf->sec_page));
5501 
5502 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5503 		len += buf1_len;
5504 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5505 		len += buf2_len;
5506 
5507 		/* ACS is disabled; strip manually. */
5508 		if (likely(!(status & rx_not_ls))) {
5509 			if (buf2_len) {
5510 				buf2_len -= ETH_FCS_LEN;
5511 				len -= ETH_FCS_LEN;
5512 			} else if (buf1_len) {
5513 				buf1_len -= ETH_FCS_LEN;
5514 				len -= ETH_FCS_LEN;
5515 			}
5516 		}
5517 
5518 		if (!skb) {
5519 			unsigned int pre_len, sync_len;
5520 
5521 			dma_sync_single_for_cpu(priv->device, buf->addr,
5522 						buf1_len, dma_dir);
5523 
5524 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5525 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5526 					 buf->page_offset, buf1_len, true);
5527 
5528 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5529 				  buf->page_offset;
5530 
5531 			ctx.priv = priv;
5532 			ctx.desc = p;
5533 			ctx.ndesc = np;
5534 
5535 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5536 			/* Due xdp_adjust_tail: DMA sync for_device
5537 			 * cover max len CPU touch
5538 			 */
5539 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5540 				   buf->page_offset;
5541 			sync_len = max(sync_len, pre_len);
5542 
5543 			/* For Not XDP_PASS verdict */
5544 			if (IS_ERR(skb)) {
5545 				unsigned int xdp_res = -PTR_ERR(skb);
5546 
5547 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5548 					page_pool_put_page(rx_q->page_pool,
5549 							   virt_to_head_page(ctx.xdp.data),
5550 							   sync_len, true);
5551 					buf->page = NULL;
5552 					rx_dropped++;
5553 
5554 					/* Clear skb as it was set as
5555 					 * status by XDP program.
5556 					 */
5557 					skb = NULL;
5558 
5559 					if (unlikely((status & rx_not_ls)))
5560 						goto read_again;
5561 
5562 					count++;
5563 					continue;
5564 				} else if (xdp_res & (STMMAC_XDP_TX |
5565 						      STMMAC_XDP_REDIRECT)) {
5566 					xdp_status |= xdp_res;
5567 					buf->page = NULL;
5568 					skb = NULL;
5569 					count++;
5570 					continue;
5571 				}
5572 			}
5573 		}
5574 
5575 		if (!skb) {
5576 			/* XDP program may expand or reduce tail */
5577 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5578 
5579 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5580 			if (!skb) {
5581 				rx_dropped++;
5582 				count++;
5583 				goto drain_data;
5584 			}
5585 
5586 			/* XDP program may adjust header */
5587 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5588 			skb_put(skb, buf1_len);
5589 
5590 			/* Data payload copied into SKB, page ready for recycle */
5591 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5592 			buf->page = NULL;
5593 		} else if (buf1_len) {
5594 			dma_sync_single_for_cpu(priv->device, buf->addr,
5595 						buf1_len, dma_dir);
5596 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5597 					buf->page, buf->page_offset, buf1_len,
5598 					priv->dma_conf.dma_buf_sz);
5599 
5600 			/* Data payload appended into SKB */
5601 			skb_mark_for_recycle(skb);
5602 			buf->page = NULL;
5603 		}
5604 
5605 		if (buf2_len) {
5606 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5607 						buf2_len, dma_dir);
5608 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5609 					buf->sec_page, 0, buf2_len,
5610 					priv->dma_conf.dma_buf_sz);
5611 
5612 			/* Data payload appended into SKB */
5613 			skb_mark_for_recycle(skb);
5614 			buf->sec_page = NULL;
5615 		}
5616 
5617 drain_data:
5618 		if (likely(status & rx_not_ls))
5619 			goto read_again;
5620 		if (!skb)
5621 			continue;
5622 
5623 		/* Got entire packet into SKB. Finish it. */
5624 
5625 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5626 
5627 		if (priv->hw->hw_vlan_en)
5628 			/* MAC level stripping. */
5629 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5630 		else
5631 			/* Driver level stripping. */
5632 			stmmac_rx_vlan(priv->dev, skb);
5633 
5634 		skb->protocol = eth_type_trans(skb, priv->dev);
5635 
5636 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5637 			skb_checksum_none_assert(skb);
5638 		else
5639 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5640 
5641 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5642 			skb_set_hash(skb, hash, hash_type);
5643 
5644 		skb_record_rx_queue(skb, queue);
5645 		napi_gro_receive(&ch->rx_napi, skb);
5646 		skb = NULL;
5647 
5648 		rx_packets++;
5649 		rx_bytes += len;
5650 		count++;
5651 	}
5652 
5653 	if (status & rx_not_ls || skb) {
5654 		rx_q->state_saved = true;
5655 		rx_q->state.skb = skb;
5656 		rx_q->state.error = error;
5657 		rx_q->state.len = len;
5658 	}
5659 
5660 	stmmac_finalize_xdp_rx(priv, xdp_status);
5661 
5662 	stmmac_rx_refill(priv, queue);
5663 
5664 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5665 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5666 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5667 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5668 	u64_stats_update_end(&rxq_stats->napi_syncp);
5669 
5670 	priv->xstats.rx_dropped += rx_dropped;
5671 	priv->xstats.rx_errors += rx_errors;
5672 
5673 	return count;
5674 }
5675 
5676 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5677 {
5678 	struct stmmac_channel *ch =
5679 		container_of(napi, struct stmmac_channel, rx_napi);
5680 	struct stmmac_priv *priv = ch->priv_data;
5681 	struct stmmac_rxq_stats *rxq_stats;
5682 	u32 chan = ch->index;
5683 	int work_done;
5684 
5685 	rxq_stats = &priv->xstats.rxq_stats[chan];
5686 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5687 	u64_stats_inc(&rxq_stats->napi.poll);
5688 	u64_stats_update_end(&rxq_stats->napi_syncp);
5689 
5690 	work_done = stmmac_rx(priv, budget, chan);
5691 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5692 		unsigned long flags;
5693 
5694 		spin_lock_irqsave(&ch->lock, flags);
5695 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5696 		spin_unlock_irqrestore(&ch->lock, flags);
5697 	}
5698 
5699 	return work_done;
5700 }
5701 
5702 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5703 {
5704 	struct stmmac_channel *ch =
5705 		container_of(napi, struct stmmac_channel, tx_napi);
5706 	struct stmmac_priv *priv = ch->priv_data;
5707 	struct stmmac_txq_stats *txq_stats;
5708 	bool pending_packets = false;
5709 	u32 chan = ch->index;
5710 	int work_done;
5711 
5712 	txq_stats = &priv->xstats.txq_stats[chan];
5713 	u64_stats_update_begin(&txq_stats->napi_syncp);
5714 	u64_stats_inc(&txq_stats->napi.poll);
5715 	u64_stats_update_end(&txq_stats->napi_syncp);
5716 
5717 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5718 	work_done = min(work_done, budget);
5719 
5720 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5721 		unsigned long flags;
5722 
5723 		spin_lock_irqsave(&ch->lock, flags);
5724 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5725 		spin_unlock_irqrestore(&ch->lock, flags);
5726 	}
5727 
5728 	/* TX still have packet to handle, check if we need to arm tx timer */
5729 	if (pending_packets)
5730 		stmmac_tx_timer_arm(priv, chan);
5731 
5732 	return work_done;
5733 }
5734 
5735 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5736 {
5737 	struct stmmac_channel *ch =
5738 		container_of(napi, struct stmmac_channel, rxtx_napi);
5739 	struct stmmac_priv *priv = ch->priv_data;
5740 	bool tx_pending_packets = false;
5741 	int rx_done, tx_done, rxtx_done;
5742 	struct stmmac_rxq_stats *rxq_stats;
5743 	struct stmmac_txq_stats *txq_stats;
5744 	u32 chan = ch->index;
5745 
5746 	rxq_stats = &priv->xstats.rxq_stats[chan];
5747 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5748 	u64_stats_inc(&rxq_stats->napi.poll);
5749 	u64_stats_update_end(&rxq_stats->napi_syncp);
5750 
5751 	txq_stats = &priv->xstats.txq_stats[chan];
5752 	u64_stats_update_begin(&txq_stats->napi_syncp);
5753 	u64_stats_inc(&txq_stats->napi.poll);
5754 	u64_stats_update_end(&txq_stats->napi_syncp);
5755 
5756 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5757 	tx_done = min(tx_done, budget);
5758 
5759 	rx_done = stmmac_rx_zc(priv, budget, chan);
5760 
5761 	rxtx_done = max(tx_done, rx_done);
5762 
5763 	/* If either TX or RX work is not complete, return budget
5764 	 * and keep pooling
5765 	 */
5766 	if (rxtx_done >= budget)
5767 		return budget;
5768 
5769 	/* all work done, exit the polling mode */
5770 	if (napi_complete_done(napi, rxtx_done)) {
5771 		unsigned long flags;
5772 
5773 		spin_lock_irqsave(&ch->lock, flags);
5774 		/* Both RX and TX work done are compelte,
5775 		 * so enable both RX & TX IRQs.
5776 		 */
5777 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5778 		spin_unlock_irqrestore(&ch->lock, flags);
5779 	}
5780 
5781 	/* TX still have packet to handle, check if we need to arm tx timer */
5782 	if (tx_pending_packets)
5783 		stmmac_tx_timer_arm(priv, chan);
5784 
5785 	return min(rxtx_done, budget - 1);
5786 }
5787 
5788 /**
5789  *  stmmac_tx_timeout
5790  *  @dev : Pointer to net device structure
5791  *  @txqueue: the index of the hanging transmit queue
5792  *  Description: this function is called when a packet transmission fails to
5793  *   complete within a reasonable time. The driver will mark the error in the
5794  *   netdev structure and arrange for the device to be reset to a sane state
5795  *   in order to transmit a new packet.
5796  */
5797 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5798 {
5799 	struct stmmac_priv *priv = netdev_priv(dev);
5800 
5801 	stmmac_global_err(priv);
5802 }
5803 
5804 /**
5805  *  stmmac_set_rx_mode - entry point for multicast addressing
5806  *  @dev : pointer to the device structure
5807  *  Description:
5808  *  This function is a driver entry point which gets called by the kernel
5809  *  whenever multicast addresses must be enabled/disabled.
5810  *  Return value:
5811  *  void.
5812  */
5813 static void stmmac_set_rx_mode(struct net_device *dev)
5814 {
5815 	struct stmmac_priv *priv = netdev_priv(dev);
5816 
5817 	stmmac_set_filter(priv, priv->hw, dev);
5818 }
5819 
5820 /**
5821  *  stmmac_change_mtu - entry point to change MTU size for the device.
5822  *  @dev : device pointer.
5823  *  @new_mtu : the new MTU size for the device.
5824  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5825  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5826  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5827  *  Return value:
5828  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5829  *  file on failure.
5830  */
5831 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5832 {
5833 	struct stmmac_priv *priv = netdev_priv(dev);
5834 	int txfifosz = priv->plat->tx_fifo_size;
5835 	struct stmmac_dma_conf *dma_conf;
5836 	const int mtu = new_mtu;
5837 	int ret;
5838 
5839 	if (txfifosz == 0)
5840 		txfifosz = priv->dma_cap.tx_fifo_size;
5841 
5842 	txfifosz /= priv->plat->tx_queues_to_use;
5843 
5844 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5845 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5846 		return -EINVAL;
5847 	}
5848 
5849 	new_mtu = STMMAC_ALIGN(new_mtu);
5850 
5851 	/* If condition true, FIFO is too small or MTU too large */
5852 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5853 		return -EINVAL;
5854 
5855 	if (netif_running(dev)) {
5856 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5857 		/* Try to allocate the new DMA conf with the new mtu */
5858 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5859 		if (IS_ERR(dma_conf)) {
5860 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5861 				   mtu);
5862 			return PTR_ERR(dma_conf);
5863 		}
5864 
5865 		stmmac_release(dev);
5866 
5867 		ret = __stmmac_open(dev, dma_conf);
5868 		if (ret) {
5869 			free_dma_desc_resources(priv, dma_conf);
5870 			kfree(dma_conf);
5871 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5872 			return ret;
5873 		}
5874 
5875 		kfree(dma_conf);
5876 
5877 		stmmac_set_rx_mode(dev);
5878 	}
5879 
5880 	WRITE_ONCE(dev->mtu, mtu);
5881 	netdev_update_features(dev);
5882 
5883 	return 0;
5884 }
5885 
5886 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5887 					     netdev_features_t features)
5888 {
5889 	struct stmmac_priv *priv = netdev_priv(dev);
5890 
5891 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5892 		features &= ~NETIF_F_RXCSUM;
5893 
5894 	if (!priv->plat->tx_coe)
5895 		features &= ~NETIF_F_CSUM_MASK;
5896 
5897 	/* Some GMAC devices have a bugged Jumbo frame support that
5898 	 * needs to have the Tx COE disabled for oversized frames
5899 	 * (due to limited buffer sizes). In this case we disable
5900 	 * the TX csum insertion in the TDES and not use SF.
5901 	 */
5902 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5903 		features &= ~NETIF_F_CSUM_MASK;
5904 
5905 	/* Disable tso if asked by ethtool */
5906 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5907 		if (features & NETIF_F_TSO)
5908 			priv->tso = true;
5909 		else
5910 			priv->tso = false;
5911 	}
5912 
5913 	return features;
5914 }
5915 
5916 static int stmmac_set_features(struct net_device *netdev,
5917 			       netdev_features_t features)
5918 {
5919 	struct stmmac_priv *priv = netdev_priv(netdev);
5920 
5921 	/* Keep the COE Type in case of csum is supporting */
5922 	if (features & NETIF_F_RXCSUM)
5923 		priv->hw->rx_csum = priv->plat->rx_coe;
5924 	else
5925 		priv->hw->rx_csum = 0;
5926 	/* No check needed because rx_coe has been set before and it will be
5927 	 * fixed in case of issue.
5928 	 */
5929 	stmmac_rx_ipc(priv, priv->hw);
5930 
5931 	if (priv->sph_cap) {
5932 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5933 		u32 chan;
5934 
5935 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5936 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5937 	}
5938 
5939 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5940 		priv->hw->hw_vlan_en = true;
5941 	else
5942 		priv->hw->hw_vlan_en = false;
5943 
5944 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5945 
5946 	return 0;
5947 }
5948 
5949 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5950 {
5951 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5952 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5953 	u32 queues_count;
5954 	u32 queue;
5955 	bool xmac;
5956 
5957 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5958 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5959 
5960 	if (priv->irq_wake)
5961 		pm_wakeup_event(priv->device, 0);
5962 
5963 	if (priv->dma_cap.estsel)
5964 		stmmac_est_irq_status(priv, priv, priv->dev,
5965 				      &priv->xstats, tx_cnt);
5966 
5967 	if (stmmac_fpe_supported(priv))
5968 		stmmac_fpe_irq_status(priv);
5969 
5970 	/* To handle GMAC own interrupts */
5971 	if ((priv->plat->has_gmac) || xmac) {
5972 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5973 
5974 		if (unlikely(status)) {
5975 			/* For LPI we need to save the tx status */
5976 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5977 				priv->tx_path_in_lpi_mode = true;
5978 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5979 				priv->tx_path_in_lpi_mode = false;
5980 		}
5981 
5982 		for (queue = 0; queue < queues_count; queue++)
5983 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
5984 
5985 		/* PCS link status */
5986 		if (priv->hw->pcs &&
5987 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5988 			if (priv->xstats.pcs_link)
5989 				netif_carrier_on(priv->dev);
5990 			else
5991 				netif_carrier_off(priv->dev);
5992 		}
5993 
5994 		stmmac_timestamp_interrupt(priv, priv);
5995 	}
5996 }
5997 
5998 /**
5999  *  stmmac_interrupt - main ISR
6000  *  @irq: interrupt number.
6001  *  @dev_id: to pass the net device pointer.
6002  *  Description: this is the main driver interrupt service routine.
6003  *  It can call:
6004  *  o DMA service routine (to manage incoming frame reception and transmission
6005  *    status)
6006  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6007  *    interrupts.
6008  */
6009 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6010 {
6011 	struct net_device *dev = (struct net_device *)dev_id;
6012 	struct stmmac_priv *priv = netdev_priv(dev);
6013 
6014 	/* Check if adapter is up */
6015 	if (test_bit(STMMAC_DOWN, &priv->state))
6016 		return IRQ_HANDLED;
6017 
6018 	/* Check ASP error if it isn't delivered via an individual IRQ */
6019 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6020 		return IRQ_HANDLED;
6021 
6022 	/* To handle Common interrupts */
6023 	stmmac_common_interrupt(priv);
6024 
6025 	/* To handle DMA interrupts */
6026 	stmmac_dma_interrupt(priv);
6027 
6028 	return IRQ_HANDLED;
6029 }
6030 
6031 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6032 {
6033 	struct net_device *dev = (struct net_device *)dev_id;
6034 	struct stmmac_priv *priv = netdev_priv(dev);
6035 
6036 	/* Check if adapter is up */
6037 	if (test_bit(STMMAC_DOWN, &priv->state))
6038 		return IRQ_HANDLED;
6039 
6040 	/* To handle Common interrupts */
6041 	stmmac_common_interrupt(priv);
6042 
6043 	return IRQ_HANDLED;
6044 }
6045 
6046 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6047 {
6048 	struct net_device *dev = (struct net_device *)dev_id;
6049 	struct stmmac_priv *priv = netdev_priv(dev);
6050 
6051 	/* Check if adapter is up */
6052 	if (test_bit(STMMAC_DOWN, &priv->state))
6053 		return IRQ_HANDLED;
6054 
6055 	/* Check if a fatal error happened */
6056 	stmmac_safety_feat_interrupt(priv);
6057 
6058 	return IRQ_HANDLED;
6059 }
6060 
6061 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6062 {
6063 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6064 	struct stmmac_dma_conf *dma_conf;
6065 	int chan = tx_q->queue_index;
6066 	struct stmmac_priv *priv;
6067 	int status;
6068 
6069 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6070 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6071 
6072 	/* Check if adapter is up */
6073 	if (test_bit(STMMAC_DOWN, &priv->state))
6074 		return IRQ_HANDLED;
6075 
6076 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6077 
6078 	if (unlikely(status & tx_hard_error_bump_tc)) {
6079 		/* Try to bump up the dma threshold on this failure */
6080 		stmmac_bump_dma_threshold(priv, chan);
6081 	} else if (unlikely(status == tx_hard_error)) {
6082 		stmmac_tx_err(priv, chan);
6083 	}
6084 
6085 	return IRQ_HANDLED;
6086 }
6087 
6088 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6089 {
6090 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6091 	struct stmmac_dma_conf *dma_conf;
6092 	int chan = rx_q->queue_index;
6093 	struct stmmac_priv *priv;
6094 
6095 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6096 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6097 
6098 	/* Check if adapter is up */
6099 	if (test_bit(STMMAC_DOWN, &priv->state))
6100 		return IRQ_HANDLED;
6101 
6102 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6103 
6104 	return IRQ_HANDLED;
6105 }
6106 
6107 /**
6108  *  stmmac_ioctl - Entry point for the Ioctl
6109  *  @dev: Device pointer.
6110  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6111  *  a proprietary structure used to pass information to the driver.
6112  *  @cmd: IOCTL command
6113  *  Description:
6114  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6115  */
6116 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6117 {
6118 	struct stmmac_priv *priv = netdev_priv (dev);
6119 	int ret = -EOPNOTSUPP;
6120 
6121 	if (!netif_running(dev))
6122 		return -EINVAL;
6123 
6124 	switch (cmd) {
6125 	case SIOCGMIIPHY:
6126 	case SIOCGMIIREG:
6127 	case SIOCSMIIREG:
6128 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6129 		break;
6130 	case SIOCSHWTSTAMP:
6131 		ret = stmmac_hwtstamp_set(dev, rq);
6132 		break;
6133 	case SIOCGHWTSTAMP:
6134 		ret = stmmac_hwtstamp_get(dev, rq);
6135 		break;
6136 	default:
6137 		break;
6138 	}
6139 
6140 	return ret;
6141 }
6142 
6143 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6144 				    void *cb_priv)
6145 {
6146 	struct stmmac_priv *priv = cb_priv;
6147 	int ret = -EOPNOTSUPP;
6148 
6149 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6150 		return ret;
6151 
6152 	__stmmac_disable_all_queues(priv);
6153 
6154 	switch (type) {
6155 	case TC_SETUP_CLSU32:
6156 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6157 		break;
6158 	case TC_SETUP_CLSFLOWER:
6159 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6160 		break;
6161 	default:
6162 		break;
6163 	}
6164 
6165 	stmmac_enable_all_queues(priv);
6166 	return ret;
6167 }
6168 
6169 static LIST_HEAD(stmmac_block_cb_list);
6170 
6171 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6172 			   void *type_data)
6173 {
6174 	struct stmmac_priv *priv = netdev_priv(ndev);
6175 
6176 	switch (type) {
6177 	case TC_QUERY_CAPS:
6178 		return stmmac_tc_query_caps(priv, priv, type_data);
6179 	case TC_SETUP_QDISC_MQPRIO:
6180 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6181 	case TC_SETUP_BLOCK:
6182 		return flow_block_cb_setup_simple(type_data,
6183 						  &stmmac_block_cb_list,
6184 						  stmmac_setup_tc_block_cb,
6185 						  priv, priv, true);
6186 	case TC_SETUP_QDISC_CBS:
6187 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6188 	case TC_SETUP_QDISC_TAPRIO:
6189 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6190 	case TC_SETUP_QDISC_ETF:
6191 		return stmmac_tc_setup_etf(priv, priv, type_data);
6192 	default:
6193 		return -EOPNOTSUPP;
6194 	}
6195 }
6196 
6197 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6198 			       struct net_device *sb_dev)
6199 {
6200 	int gso = skb_shinfo(skb)->gso_type;
6201 
6202 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6203 		/*
6204 		 * There is no way to determine the number of TSO/USO
6205 		 * capable Queues. Let's use always the Queue 0
6206 		 * because if TSO/USO is supported then at least this
6207 		 * one will be capable.
6208 		 */
6209 		return 0;
6210 	}
6211 
6212 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6213 }
6214 
6215 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6216 {
6217 	struct stmmac_priv *priv = netdev_priv(ndev);
6218 	int ret = 0;
6219 
6220 	ret = pm_runtime_resume_and_get(priv->device);
6221 	if (ret < 0)
6222 		return ret;
6223 
6224 	ret = eth_mac_addr(ndev, addr);
6225 	if (ret)
6226 		goto set_mac_error;
6227 
6228 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6229 
6230 set_mac_error:
6231 	pm_runtime_put(priv->device);
6232 
6233 	return ret;
6234 }
6235 
6236 #ifdef CONFIG_DEBUG_FS
6237 static struct dentry *stmmac_fs_dir;
6238 
6239 static void sysfs_display_ring(void *head, int size, int extend_desc,
6240 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6241 {
6242 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6243 	struct dma_desc *p = (struct dma_desc *)head;
6244 	unsigned int desc_size;
6245 	dma_addr_t dma_addr;
6246 	int i;
6247 
6248 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6249 	for (i = 0; i < size; i++) {
6250 		dma_addr = dma_phy_addr + i * desc_size;
6251 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6252 				i, &dma_addr,
6253 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6254 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6255 		if (extend_desc)
6256 			p = &(++ep)->basic;
6257 		else
6258 			p++;
6259 	}
6260 }
6261 
6262 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6263 {
6264 	struct net_device *dev = seq->private;
6265 	struct stmmac_priv *priv = netdev_priv(dev);
6266 	u32 rx_count = priv->plat->rx_queues_to_use;
6267 	u32 tx_count = priv->plat->tx_queues_to_use;
6268 	u32 queue;
6269 
6270 	if ((dev->flags & IFF_UP) == 0)
6271 		return 0;
6272 
6273 	for (queue = 0; queue < rx_count; queue++) {
6274 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6275 
6276 		seq_printf(seq, "RX Queue %d:\n", queue);
6277 
6278 		if (priv->extend_desc) {
6279 			seq_printf(seq, "Extended descriptor ring:\n");
6280 			sysfs_display_ring((void *)rx_q->dma_erx,
6281 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6282 		} else {
6283 			seq_printf(seq, "Descriptor ring:\n");
6284 			sysfs_display_ring((void *)rx_q->dma_rx,
6285 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6286 		}
6287 	}
6288 
6289 	for (queue = 0; queue < tx_count; queue++) {
6290 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6291 
6292 		seq_printf(seq, "TX Queue %d:\n", queue);
6293 
6294 		if (priv->extend_desc) {
6295 			seq_printf(seq, "Extended descriptor ring:\n");
6296 			sysfs_display_ring((void *)tx_q->dma_etx,
6297 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6298 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6299 			seq_printf(seq, "Descriptor ring:\n");
6300 			sysfs_display_ring((void *)tx_q->dma_tx,
6301 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6302 		}
6303 	}
6304 
6305 	return 0;
6306 }
6307 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6308 
6309 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6310 {
6311 	static const char * const dwxgmac_timestamp_source[] = {
6312 		"None",
6313 		"Internal",
6314 		"External",
6315 		"Both",
6316 	};
6317 	static const char * const dwxgmac_safety_feature_desc[] = {
6318 		"No",
6319 		"All Safety Features with ECC and Parity",
6320 		"All Safety Features without ECC or Parity",
6321 		"All Safety Features with Parity Only",
6322 		"ECC Only",
6323 		"UNDEFINED",
6324 		"UNDEFINED",
6325 		"UNDEFINED",
6326 	};
6327 	struct net_device *dev = seq->private;
6328 	struct stmmac_priv *priv = netdev_priv(dev);
6329 
6330 	if (!priv->hw_cap_support) {
6331 		seq_printf(seq, "DMA HW features not supported\n");
6332 		return 0;
6333 	}
6334 
6335 	seq_printf(seq, "==============================\n");
6336 	seq_printf(seq, "\tDMA HW features\n");
6337 	seq_printf(seq, "==============================\n");
6338 
6339 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6340 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6341 	seq_printf(seq, "\t1000 Mbps: %s\n",
6342 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6343 	seq_printf(seq, "\tHalf duplex: %s\n",
6344 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6345 	if (priv->plat->has_xgmac) {
6346 		seq_printf(seq,
6347 			   "\tNumber of Additional MAC address registers: %d\n",
6348 			   priv->dma_cap.multi_addr);
6349 	} else {
6350 		seq_printf(seq, "\tHash Filter: %s\n",
6351 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6352 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6353 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6354 	}
6355 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6356 		   (priv->dma_cap.pcs) ? "Y" : "N");
6357 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6358 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6359 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6360 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6361 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6362 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6363 	seq_printf(seq, "\tRMON module: %s\n",
6364 		   (priv->dma_cap.rmon) ? "Y" : "N");
6365 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6366 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6367 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6368 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6369 	if (priv->plat->has_xgmac)
6370 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6371 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6372 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6373 		   (priv->dma_cap.eee) ? "Y" : "N");
6374 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6375 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6376 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6377 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6378 	    priv->plat->has_xgmac) {
6379 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6380 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6381 	} else {
6382 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6383 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6384 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6385 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6386 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6387 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6388 	}
6389 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6390 		   priv->dma_cap.number_rx_channel);
6391 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6392 		   priv->dma_cap.number_tx_channel);
6393 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6394 		   priv->dma_cap.number_rx_queues);
6395 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6396 		   priv->dma_cap.number_tx_queues);
6397 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6398 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6399 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6400 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6401 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6402 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6403 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6404 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6405 		   priv->dma_cap.pps_out_num);
6406 	seq_printf(seq, "\tSafety Features: %s\n",
6407 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6408 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6409 		   priv->dma_cap.frpsel ? "Y" : "N");
6410 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6411 		   priv->dma_cap.host_dma_width);
6412 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6413 		   priv->dma_cap.rssen ? "Y" : "N");
6414 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6415 		   priv->dma_cap.vlhash ? "Y" : "N");
6416 	seq_printf(seq, "\tSplit Header: %s\n",
6417 		   priv->dma_cap.sphen ? "Y" : "N");
6418 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6419 		   priv->dma_cap.vlins ? "Y" : "N");
6420 	seq_printf(seq, "\tDouble VLAN: %s\n",
6421 		   priv->dma_cap.dvlan ? "Y" : "N");
6422 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6423 		   priv->dma_cap.l3l4fnum);
6424 	seq_printf(seq, "\tARP Offloading: %s\n",
6425 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6426 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6427 		   priv->dma_cap.estsel ? "Y" : "N");
6428 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6429 		   priv->dma_cap.fpesel ? "Y" : "N");
6430 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6431 		   priv->dma_cap.tbssel ? "Y" : "N");
6432 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6433 		   priv->dma_cap.tbs_ch_num);
6434 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6435 		   priv->dma_cap.sgfsel ? "Y" : "N");
6436 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6437 		   BIT(priv->dma_cap.ttsfd) >> 1);
6438 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6439 		   priv->dma_cap.numtc);
6440 	seq_printf(seq, "\tDCB Feature: %s\n",
6441 		   priv->dma_cap.dcben ? "Y" : "N");
6442 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6443 		   priv->dma_cap.advthword ? "Y" : "N");
6444 	seq_printf(seq, "\tPTP Offload: %s\n",
6445 		   priv->dma_cap.ptoen ? "Y" : "N");
6446 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6447 		   priv->dma_cap.osten ? "Y" : "N");
6448 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6449 		   priv->dma_cap.pfcen ? "Y" : "N");
6450 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6451 		   BIT(priv->dma_cap.frpes) << 6);
6452 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6453 		   BIT(priv->dma_cap.frpbs) << 6);
6454 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6455 		   priv->dma_cap.frppipe_num);
6456 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6457 		   priv->dma_cap.nrvf_num ?
6458 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6459 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6460 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6461 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6462 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6463 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6464 		   priv->dma_cap.cbtisel ? "Y" : "N");
6465 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6466 		   priv->dma_cap.aux_snapshot_n);
6467 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6468 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6469 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6470 		   priv->dma_cap.edma ? "Y" : "N");
6471 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6472 		   priv->dma_cap.ediffc ? "Y" : "N");
6473 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6474 		   priv->dma_cap.vxn ? "Y" : "N");
6475 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6476 		   priv->dma_cap.dbgmem ? "Y" : "N");
6477 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6478 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6479 	return 0;
6480 }
6481 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6482 
6483 /* Use network device events to rename debugfs file entries.
6484  */
6485 static int stmmac_device_event(struct notifier_block *unused,
6486 			       unsigned long event, void *ptr)
6487 {
6488 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6489 	struct stmmac_priv *priv = netdev_priv(dev);
6490 
6491 	if (dev->netdev_ops != &stmmac_netdev_ops)
6492 		goto done;
6493 
6494 	switch (event) {
6495 	case NETDEV_CHANGENAME:
6496 		if (priv->dbgfs_dir)
6497 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6498 							 priv->dbgfs_dir,
6499 							 stmmac_fs_dir,
6500 							 dev->name);
6501 		break;
6502 	}
6503 done:
6504 	return NOTIFY_DONE;
6505 }
6506 
6507 static struct notifier_block stmmac_notifier = {
6508 	.notifier_call = stmmac_device_event,
6509 };
6510 
6511 static void stmmac_init_fs(struct net_device *dev)
6512 {
6513 	struct stmmac_priv *priv = netdev_priv(dev);
6514 
6515 	rtnl_lock();
6516 
6517 	/* Create per netdev entries */
6518 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6519 
6520 	/* Entry to report DMA RX/TX rings */
6521 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6522 			    &stmmac_rings_status_fops);
6523 
6524 	/* Entry to report the DMA HW features */
6525 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6526 			    &stmmac_dma_cap_fops);
6527 
6528 	rtnl_unlock();
6529 }
6530 
6531 static void stmmac_exit_fs(struct net_device *dev)
6532 {
6533 	struct stmmac_priv *priv = netdev_priv(dev);
6534 
6535 	debugfs_remove_recursive(priv->dbgfs_dir);
6536 }
6537 #endif /* CONFIG_DEBUG_FS */
6538 
6539 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6540 {
6541 	unsigned char *data = (unsigned char *)&vid_le;
6542 	unsigned char data_byte = 0;
6543 	u32 crc = ~0x0;
6544 	u32 temp = 0;
6545 	int i, bits;
6546 
6547 	bits = get_bitmask_order(VLAN_VID_MASK);
6548 	for (i = 0; i < bits; i++) {
6549 		if ((i % 8) == 0)
6550 			data_byte = data[i / 8];
6551 
6552 		temp = ((crc & 1) ^ data_byte) & 1;
6553 		crc >>= 1;
6554 		data_byte >>= 1;
6555 
6556 		if (temp)
6557 			crc ^= 0xedb88320;
6558 	}
6559 
6560 	return crc;
6561 }
6562 
6563 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6564 {
6565 	u32 crc, hash = 0;
6566 	u16 pmatch = 0;
6567 	int count = 0;
6568 	u16 vid = 0;
6569 
6570 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6571 		__le16 vid_le = cpu_to_le16(vid);
6572 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6573 		hash |= (1 << crc);
6574 		count++;
6575 	}
6576 
6577 	if (!priv->dma_cap.vlhash) {
6578 		if (count > 2) /* VID = 0 always passes filter */
6579 			return -EOPNOTSUPP;
6580 
6581 		pmatch = vid;
6582 		hash = 0;
6583 	}
6584 
6585 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6586 }
6587 
6588 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6589 {
6590 	struct stmmac_priv *priv = netdev_priv(ndev);
6591 	bool is_double = false;
6592 	int ret;
6593 
6594 	ret = pm_runtime_resume_and_get(priv->device);
6595 	if (ret < 0)
6596 		return ret;
6597 
6598 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6599 		is_double = true;
6600 
6601 	set_bit(vid, priv->active_vlans);
6602 	ret = stmmac_vlan_update(priv, is_double);
6603 	if (ret) {
6604 		clear_bit(vid, priv->active_vlans);
6605 		goto err_pm_put;
6606 	}
6607 
6608 	if (priv->hw->num_vlan) {
6609 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6610 		if (ret)
6611 			goto err_pm_put;
6612 	}
6613 err_pm_put:
6614 	pm_runtime_put(priv->device);
6615 
6616 	return ret;
6617 }
6618 
6619 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6620 {
6621 	struct stmmac_priv *priv = netdev_priv(ndev);
6622 	bool is_double = false;
6623 	int ret;
6624 
6625 	ret = pm_runtime_resume_and_get(priv->device);
6626 	if (ret < 0)
6627 		return ret;
6628 
6629 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6630 		is_double = true;
6631 
6632 	clear_bit(vid, priv->active_vlans);
6633 
6634 	if (priv->hw->num_vlan) {
6635 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6636 		if (ret)
6637 			goto del_vlan_error;
6638 	}
6639 
6640 	ret = stmmac_vlan_update(priv, is_double);
6641 
6642 del_vlan_error:
6643 	pm_runtime_put(priv->device);
6644 
6645 	return ret;
6646 }
6647 
6648 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6649 {
6650 	struct stmmac_priv *priv = netdev_priv(dev);
6651 
6652 	switch (bpf->command) {
6653 	case XDP_SETUP_PROG:
6654 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6655 	case XDP_SETUP_XSK_POOL:
6656 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6657 					     bpf->xsk.queue_id);
6658 	default:
6659 		return -EOPNOTSUPP;
6660 	}
6661 }
6662 
6663 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6664 			   struct xdp_frame **frames, u32 flags)
6665 {
6666 	struct stmmac_priv *priv = netdev_priv(dev);
6667 	int cpu = smp_processor_id();
6668 	struct netdev_queue *nq;
6669 	int i, nxmit = 0;
6670 	int queue;
6671 
6672 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6673 		return -ENETDOWN;
6674 
6675 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6676 		return -EINVAL;
6677 
6678 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6679 	nq = netdev_get_tx_queue(priv->dev, queue);
6680 
6681 	__netif_tx_lock(nq, cpu);
6682 	/* Avoids TX time-out as we are sharing with slow path */
6683 	txq_trans_cond_update(nq);
6684 
6685 	for (i = 0; i < num_frames; i++) {
6686 		int res;
6687 
6688 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6689 		if (res == STMMAC_XDP_CONSUMED)
6690 			break;
6691 
6692 		nxmit++;
6693 	}
6694 
6695 	if (flags & XDP_XMIT_FLUSH) {
6696 		stmmac_flush_tx_descriptors(priv, queue);
6697 		stmmac_tx_timer_arm(priv, queue);
6698 	}
6699 
6700 	__netif_tx_unlock(nq);
6701 
6702 	return nxmit;
6703 }
6704 
6705 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6706 {
6707 	struct stmmac_channel *ch = &priv->channel[queue];
6708 	unsigned long flags;
6709 
6710 	spin_lock_irqsave(&ch->lock, flags);
6711 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6712 	spin_unlock_irqrestore(&ch->lock, flags);
6713 
6714 	stmmac_stop_rx_dma(priv, queue);
6715 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6716 }
6717 
6718 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6719 {
6720 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6721 	struct stmmac_channel *ch = &priv->channel[queue];
6722 	unsigned long flags;
6723 	u32 buf_size;
6724 	int ret;
6725 
6726 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6727 	if (ret) {
6728 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6729 		return;
6730 	}
6731 
6732 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6733 	if (ret) {
6734 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6735 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6736 		return;
6737 	}
6738 
6739 	stmmac_reset_rx_queue(priv, queue);
6740 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6741 
6742 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6743 			    rx_q->dma_rx_phy, rx_q->queue_index);
6744 
6745 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6746 			     sizeof(struct dma_desc));
6747 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6748 			       rx_q->rx_tail_addr, rx_q->queue_index);
6749 
6750 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6751 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6752 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6753 				      buf_size,
6754 				      rx_q->queue_index);
6755 	} else {
6756 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6757 				      priv->dma_conf.dma_buf_sz,
6758 				      rx_q->queue_index);
6759 	}
6760 
6761 	stmmac_start_rx_dma(priv, queue);
6762 
6763 	spin_lock_irqsave(&ch->lock, flags);
6764 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6765 	spin_unlock_irqrestore(&ch->lock, flags);
6766 }
6767 
6768 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6769 {
6770 	struct stmmac_channel *ch = &priv->channel[queue];
6771 	unsigned long flags;
6772 
6773 	spin_lock_irqsave(&ch->lock, flags);
6774 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6775 	spin_unlock_irqrestore(&ch->lock, flags);
6776 
6777 	stmmac_stop_tx_dma(priv, queue);
6778 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6779 }
6780 
6781 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6782 {
6783 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6784 	struct stmmac_channel *ch = &priv->channel[queue];
6785 	unsigned long flags;
6786 	int ret;
6787 
6788 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6789 	if (ret) {
6790 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6791 		return;
6792 	}
6793 
6794 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6795 	if (ret) {
6796 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6797 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6798 		return;
6799 	}
6800 
6801 	stmmac_reset_tx_queue(priv, queue);
6802 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6803 
6804 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6805 			    tx_q->dma_tx_phy, tx_q->queue_index);
6806 
6807 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6808 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6809 
6810 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6811 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6812 			       tx_q->tx_tail_addr, tx_q->queue_index);
6813 
6814 	stmmac_start_tx_dma(priv, queue);
6815 
6816 	spin_lock_irqsave(&ch->lock, flags);
6817 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6818 	spin_unlock_irqrestore(&ch->lock, flags);
6819 }
6820 
6821 void stmmac_xdp_release(struct net_device *dev)
6822 {
6823 	struct stmmac_priv *priv = netdev_priv(dev);
6824 	u32 chan;
6825 
6826 	/* Ensure tx function is not running */
6827 	netif_tx_disable(dev);
6828 
6829 	/* Disable NAPI process */
6830 	stmmac_disable_all_queues(priv);
6831 
6832 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6833 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6834 
6835 	/* Free the IRQ lines */
6836 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6837 
6838 	/* Stop TX/RX DMA channels */
6839 	stmmac_stop_all_dma(priv);
6840 
6841 	/* Release and free the Rx/Tx resources */
6842 	free_dma_desc_resources(priv, &priv->dma_conf);
6843 
6844 	/* Disable the MAC Rx/Tx */
6845 	stmmac_mac_set(priv, priv->ioaddr, false);
6846 
6847 	/* set trans_start so we don't get spurious
6848 	 * watchdogs during reset
6849 	 */
6850 	netif_trans_update(dev);
6851 	netif_carrier_off(dev);
6852 }
6853 
6854 int stmmac_xdp_open(struct net_device *dev)
6855 {
6856 	struct stmmac_priv *priv = netdev_priv(dev);
6857 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6858 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6859 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6860 	struct stmmac_rx_queue *rx_q;
6861 	struct stmmac_tx_queue *tx_q;
6862 	u32 buf_size;
6863 	bool sph_en;
6864 	u32 chan;
6865 	int ret;
6866 
6867 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6868 	if (ret < 0) {
6869 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6870 			   __func__);
6871 		goto dma_desc_error;
6872 	}
6873 
6874 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6875 	if (ret < 0) {
6876 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6877 			   __func__);
6878 		goto init_error;
6879 	}
6880 
6881 	stmmac_reset_queues_param(priv);
6882 
6883 	/* DMA CSR Channel configuration */
6884 	for (chan = 0; chan < dma_csr_ch; chan++) {
6885 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6886 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6887 	}
6888 
6889 	/* Adjust Split header */
6890 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6891 
6892 	/* DMA RX Channel Configuration */
6893 	for (chan = 0; chan < rx_cnt; chan++) {
6894 		rx_q = &priv->dma_conf.rx_queue[chan];
6895 
6896 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6897 				    rx_q->dma_rx_phy, chan);
6898 
6899 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6900 				     (rx_q->buf_alloc_num *
6901 				      sizeof(struct dma_desc));
6902 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6903 				       rx_q->rx_tail_addr, chan);
6904 
6905 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6906 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6907 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6908 					      buf_size,
6909 					      rx_q->queue_index);
6910 		} else {
6911 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6912 					      priv->dma_conf.dma_buf_sz,
6913 					      rx_q->queue_index);
6914 		}
6915 
6916 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6917 	}
6918 
6919 	/* DMA TX Channel Configuration */
6920 	for (chan = 0; chan < tx_cnt; chan++) {
6921 		tx_q = &priv->dma_conf.tx_queue[chan];
6922 
6923 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6924 				    tx_q->dma_tx_phy, chan);
6925 
6926 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6927 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6928 				       tx_q->tx_tail_addr, chan);
6929 
6930 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6931 		tx_q->txtimer.function = stmmac_tx_timer;
6932 	}
6933 
6934 	/* Enable the MAC Rx/Tx */
6935 	stmmac_mac_set(priv, priv->ioaddr, true);
6936 
6937 	/* Start Rx & Tx DMA Channels */
6938 	stmmac_start_all_dma(priv);
6939 
6940 	ret = stmmac_request_irq(dev);
6941 	if (ret)
6942 		goto irq_error;
6943 
6944 	/* Enable NAPI process*/
6945 	stmmac_enable_all_queues(priv);
6946 	netif_carrier_on(dev);
6947 	netif_tx_start_all_queues(dev);
6948 	stmmac_enable_all_dma_irq(priv);
6949 
6950 	return 0;
6951 
6952 irq_error:
6953 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6954 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6955 
6956 	stmmac_hw_teardown(dev);
6957 init_error:
6958 	free_dma_desc_resources(priv, &priv->dma_conf);
6959 dma_desc_error:
6960 	return ret;
6961 }
6962 
6963 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6964 {
6965 	struct stmmac_priv *priv = netdev_priv(dev);
6966 	struct stmmac_rx_queue *rx_q;
6967 	struct stmmac_tx_queue *tx_q;
6968 	struct stmmac_channel *ch;
6969 
6970 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6971 	    !netif_carrier_ok(priv->dev))
6972 		return -ENETDOWN;
6973 
6974 	if (!stmmac_xdp_is_enabled(priv))
6975 		return -EINVAL;
6976 
6977 	if (queue >= priv->plat->rx_queues_to_use ||
6978 	    queue >= priv->plat->tx_queues_to_use)
6979 		return -EINVAL;
6980 
6981 	rx_q = &priv->dma_conf.rx_queue[queue];
6982 	tx_q = &priv->dma_conf.tx_queue[queue];
6983 	ch = &priv->channel[queue];
6984 
6985 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6986 		return -EINVAL;
6987 
6988 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6989 		/* EQoS does not have per-DMA channel SW interrupt,
6990 		 * so we schedule RX Napi straight-away.
6991 		 */
6992 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6993 			__napi_schedule(&ch->rxtx_napi);
6994 	}
6995 
6996 	return 0;
6997 }
6998 
6999 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7000 {
7001 	struct stmmac_priv *priv = netdev_priv(dev);
7002 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7003 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7004 	unsigned int start;
7005 	int q;
7006 
7007 	for (q = 0; q < tx_cnt; q++) {
7008 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7009 		u64 tx_packets;
7010 		u64 tx_bytes;
7011 
7012 		do {
7013 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7014 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7015 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7016 		do {
7017 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7018 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7019 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7020 
7021 		stats->tx_packets += tx_packets;
7022 		stats->tx_bytes += tx_bytes;
7023 	}
7024 
7025 	for (q = 0; q < rx_cnt; q++) {
7026 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7027 		u64 rx_packets;
7028 		u64 rx_bytes;
7029 
7030 		do {
7031 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7032 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7033 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7034 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7035 
7036 		stats->rx_packets += rx_packets;
7037 		stats->rx_bytes += rx_bytes;
7038 	}
7039 
7040 	stats->rx_dropped = priv->xstats.rx_dropped;
7041 	stats->rx_errors = priv->xstats.rx_errors;
7042 	stats->tx_dropped = priv->xstats.tx_dropped;
7043 	stats->tx_errors = priv->xstats.tx_errors;
7044 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7045 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7046 	stats->rx_length_errors = priv->xstats.rx_length;
7047 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7048 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7049 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7050 }
7051 
7052 static const struct net_device_ops stmmac_netdev_ops = {
7053 	.ndo_open = stmmac_open,
7054 	.ndo_start_xmit = stmmac_xmit,
7055 	.ndo_stop = stmmac_release,
7056 	.ndo_change_mtu = stmmac_change_mtu,
7057 	.ndo_fix_features = stmmac_fix_features,
7058 	.ndo_set_features = stmmac_set_features,
7059 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7060 	.ndo_tx_timeout = stmmac_tx_timeout,
7061 	.ndo_eth_ioctl = stmmac_ioctl,
7062 	.ndo_get_stats64 = stmmac_get_stats64,
7063 	.ndo_setup_tc = stmmac_setup_tc,
7064 	.ndo_select_queue = stmmac_select_queue,
7065 	.ndo_set_mac_address = stmmac_set_mac_address,
7066 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7067 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7068 	.ndo_bpf = stmmac_bpf,
7069 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7070 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7071 };
7072 
7073 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7074 {
7075 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7076 		return;
7077 	if (test_bit(STMMAC_DOWN, &priv->state))
7078 		return;
7079 
7080 	netdev_err(priv->dev, "Reset adapter.\n");
7081 
7082 	rtnl_lock();
7083 	netif_trans_update(priv->dev);
7084 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7085 		usleep_range(1000, 2000);
7086 
7087 	set_bit(STMMAC_DOWN, &priv->state);
7088 	dev_close(priv->dev);
7089 	dev_open(priv->dev, NULL);
7090 	clear_bit(STMMAC_DOWN, &priv->state);
7091 	clear_bit(STMMAC_RESETING, &priv->state);
7092 	rtnl_unlock();
7093 }
7094 
7095 static void stmmac_service_task(struct work_struct *work)
7096 {
7097 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7098 			service_task);
7099 
7100 	stmmac_reset_subtask(priv);
7101 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7102 }
7103 
7104 /**
7105  *  stmmac_hw_init - Init the MAC device
7106  *  @priv: driver private structure
7107  *  Description: this function is to configure the MAC device according to
7108  *  some platform parameters or the HW capability register. It prepares the
7109  *  driver to use either ring or chain modes and to setup either enhanced or
7110  *  normal descriptors.
7111  */
7112 static int stmmac_hw_init(struct stmmac_priv *priv)
7113 {
7114 	int ret;
7115 
7116 	/* dwmac-sun8i only work in chain mode */
7117 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7118 		chain_mode = 1;
7119 	priv->chain_mode = chain_mode;
7120 
7121 	/* Initialize HW Interface */
7122 	ret = stmmac_hwif_init(priv);
7123 	if (ret)
7124 		return ret;
7125 
7126 	/* Get the HW capability (new GMAC newer than 3.50a) */
7127 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7128 	if (priv->hw_cap_support) {
7129 		dev_info(priv->device, "DMA HW capability register supported\n");
7130 
7131 		/* We can override some gmac/dma configuration fields: e.g.
7132 		 * enh_desc, tx_coe (e.g. that are passed through the
7133 		 * platform) with the values from the HW capability
7134 		 * register (if supported).
7135 		 */
7136 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7137 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7138 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7139 		priv->hw->pmt = priv->plat->pmt;
7140 		if (priv->dma_cap.hash_tb_sz) {
7141 			priv->hw->multicast_filter_bins =
7142 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7143 			priv->hw->mcast_bits_log2 =
7144 					ilog2(priv->hw->multicast_filter_bins);
7145 		}
7146 
7147 		/* TXCOE doesn't work in thresh DMA mode */
7148 		if (priv->plat->force_thresh_dma_mode)
7149 			priv->plat->tx_coe = 0;
7150 		else
7151 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7152 
7153 		/* In case of GMAC4 rx_coe is from HW cap register. */
7154 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7155 
7156 		if (priv->dma_cap.rx_coe_type2)
7157 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7158 		else if (priv->dma_cap.rx_coe_type1)
7159 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7160 
7161 	} else {
7162 		dev_info(priv->device, "No HW DMA feature register supported\n");
7163 	}
7164 
7165 	if (priv->plat->rx_coe) {
7166 		priv->hw->rx_csum = priv->plat->rx_coe;
7167 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7168 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7169 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7170 	}
7171 	if (priv->plat->tx_coe)
7172 		dev_info(priv->device, "TX Checksum insertion supported\n");
7173 
7174 	if (priv->plat->pmt) {
7175 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7176 		device_set_wakeup_capable(priv->device, 1);
7177 	}
7178 
7179 	if (priv->dma_cap.tsoen)
7180 		dev_info(priv->device, "TSO supported\n");
7181 
7182 	priv->hw->vlan_fail_q_en =
7183 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7184 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7185 
7186 	/* Run HW quirks, if any */
7187 	if (priv->hwif_quirks) {
7188 		ret = priv->hwif_quirks(priv);
7189 		if (ret)
7190 			return ret;
7191 	}
7192 
7193 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7194 	 * In some case, for example on bugged HW this feature
7195 	 * has to be disable and this can be done by passing the
7196 	 * riwt_off field from the platform.
7197 	 */
7198 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7199 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7200 		priv->use_riwt = 1;
7201 		dev_info(priv->device,
7202 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7203 	}
7204 
7205 	return 0;
7206 }
7207 
7208 static void stmmac_napi_add(struct net_device *dev)
7209 {
7210 	struct stmmac_priv *priv = netdev_priv(dev);
7211 	u32 queue, maxq;
7212 
7213 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7214 
7215 	for (queue = 0; queue < maxq; queue++) {
7216 		struct stmmac_channel *ch = &priv->channel[queue];
7217 
7218 		ch->priv_data = priv;
7219 		ch->index = queue;
7220 		spin_lock_init(&ch->lock);
7221 
7222 		if (queue < priv->plat->rx_queues_to_use) {
7223 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7224 		}
7225 		if (queue < priv->plat->tx_queues_to_use) {
7226 			netif_napi_add_tx(dev, &ch->tx_napi,
7227 					  stmmac_napi_poll_tx);
7228 		}
7229 		if (queue < priv->plat->rx_queues_to_use &&
7230 		    queue < priv->plat->tx_queues_to_use) {
7231 			netif_napi_add(dev, &ch->rxtx_napi,
7232 				       stmmac_napi_poll_rxtx);
7233 		}
7234 	}
7235 }
7236 
7237 static void stmmac_napi_del(struct net_device *dev)
7238 {
7239 	struct stmmac_priv *priv = netdev_priv(dev);
7240 	u32 queue, maxq;
7241 
7242 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7243 
7244 	for (queue = 0; queue < maxq; queue++) {
7245 		struct stmmac_channel *ch = &priv->channel[queue];
7246 
7247 		if (queue < priv->plat->rx_queues_to_use)
7248 			netif_napi_del(&ch->rx_napi);
7249 		if (queue < priv->plat->tx_queues_to_use)
7250 			netif_napi_del(&ch->tx_napi);
7251 		if (queue < priv->plat->rx_queues_to_use &&
7252 		    queue < priv->plat->tx_queues_to_use) {
7253 			netif_napi_del(&ch->rxtx_napi);
7254 		}
7255 	}
7256 }
7257 
7258 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7259 {
7260 	struct stmmac_priv *priv = netdev_priv(dev);
7261 	int ret = 0, i;
7262 
7263 	if (netif_running(dev))
7264 		stmmac_release(dev);
7265 
7266 	stmmac_napi_del(dev);
7267 
7268 	priv->plat->rx_queues_to_use = rx_cnt;
7269 	priv->plat->tx_queues_to_use = tx_cnt;
7270 	if (!netif_is_rxfh_configured(dev))
7271 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7272 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7273 									rx_cnt);
7274 
7275 	stmmac_napi_add(dev);
7276 
7277 	if (netif_running(dev))
7278 		ret = stmmac_open(dev);
7279 
7280 	return ret;
7281 }
7282 
7283 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7284 {
7285 	struct stmmac_priv *priv = netdev_priv(dev);
7286 	int ret = 0;
7287 
7288 	if (netif_running(dev))
7289 		stmmac_release(dev);
7290 
7291 	priv->dma_conf.dma_rx_size = rx_size;
7292 	priv->dma_conf.dma_tx_size = tx_size;
7293 
7294 	if (netif_running(dev))
7295 		ret = stmmac_open(dev);
7296 
7297 	return ret;
7298 }
7299 
7300 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7301 {
7302 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7303 	struct dma_desc *desc_contains_ts = ctx->desc;
7304 	struct stmmac_priv *priv = ctx->priv;
7305 	struct dma_desc *ndesc = ctx->ndesc;
7306 	struct dma_desc *desc = ctx->desc;
7307 	u64 ns = 0;
7308 
7309 	if (!priv->hwts_rx_en)
7310 		return -ENODATA;
7311 
7312 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7313 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7314 		desc_contains_ts = ndesc;
7315 
7316 	/* Check if timestamp is available */
7317 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7318 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7319 		ns -= priv->plat->cdc_error_adj;
7320 		*timestamp = ns_to_ktime(ns);
7321 		return 0;
7322 	}
7323 
7324 	return -ENODATA;
7325 }
7326 
7327 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7328 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7329 };
7330 
7331 /**
7332  * stmmac_dvr_probe
7333  * @device: device pointer
7334  * @plat_dat: platform data pointer
7335  * @res: stmmac resource pointer
7336  * Description: this is the main probe function used to
7337  * call the alloc_etherdev, allocate the priv structure.
7338  * Return:
7339  * returns 0 on success, otherwise errno.
7340  */
7341 int stmmac_dvr_probe(struct device *device,
7342 		     struct plat_stmmacenet_data *plat_dat,
7343 		     struct stmmac_resources *res)
7344 {
7345 	struct net_device *ndev = NULL;
7346 	struct stmmac_priv *priv;
7347 	u32 rxq;
7348 	int i, ret = 0;
7349 
7350 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7351 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7352 	if (!ndev)
7353 		return -ENOMEM;
7354 
7355 	SET_NETDEV_DEV(ndev, device);
7356 
7357 	priv = netdev_priv(ndev);
7358 	priv->device = device;
7359 	priv->dev = ndev;
7360 
7361 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7362 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7363 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7364 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7365 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7366 	}
7367 
7368 	priv->xstats.pcpu_stats =
7369 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7370 	if (!priv->xstats.pcpu_stats)
7371 		return -ENOMEM;
7372 
7373 	stmmac_set_ethtool_ops(ndev);
7374 	priv->pause = pause;
7375 	priv->plat = plat_dat;
7376 	priv->ioaddr = res->addr;
7377 	priv->dev->base_addr = (unsigned long)res->addr;
7378 	priv->plat->dma_cfg->multi_msi_en =
7379 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7380 
7381 	priv->dev->irq = res->irq;
7382 	priv->wol_irq = res->wol_irq;
7383 	priv->lpi_irq = res->lpi_irq;
7384 	priv->sfty_irq = res->sfty_irq;
7385 	priv->sfty_ce_irq = res->sfty_ce_irq;
7386 	priv->sfty_ue_irq = res->sfty_ue_irq;
7387 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7388 		priv->rx_irq[i] = res->rx_irq[i];
7389 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7390 		priv->tx_irq[i] = res->tx_irq[i];
7391 
7392 	if (!is_zero_ether_addr(res->mac))
7393 		eth_hw_addr_set(priv->dev, res->mac);
7394 
7395 	dev_set_drvdata(device, priv->dev);
7396 
7397 	/* Verify driver arguments */
7398 	stmmac_verify_args();
7399 
7400 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7401 	if (!priv->af_xdp_zc_qps)
7402 		return -ENOMEM;
7403 
7404 	/* Allocate workqueue */
7405 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7406 	if (!priv->wq) {
7407 		dev_err(priv->device, "failed to create workqueue\n");
7408 		ret = -ENOMEM;
7409 		goto error_wq_init;
7410 	}
7411 
7412 	INIT_WORK(&priv->service_task, stmmac_service_task);
7413 
7414 	/* Override with kernel parameters if supplied XXX CRS XXX
7415 	 * this needs to have multiple instances
7416 	 */
7417 	if ((phyaddr >= 0) && (phyaddr <= 31))
7418 		priv->plat->phy_addr = phyaddr;
7419 
7420 	if (priv->plat->stmmac_rst) {
7421 		ret = reset_control_assert(priv->plat->stmmac_rst);
7422 		reset_control_deassert(priv->plat->stmmac_rst);
7423 		/* Some reset controllers have only reset callback instead of
7424 		 * assert + deassert callbacks pair.
7425 		 */
7426 		if (ret == -ENOTSUPP)
7427 			reset_control_reset(priv->plat->stmmac_rst);
7428 	}
7429 
7430 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7431 	if (ret == -ENOTSUPP)
7432 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7433 			ERR_PTR(ret));
7434 
7435 	/* Wait a bit for the reset to take effect */
7436 	udelay(10);
7437 
7438 	/* Init MAC and get the capabilities */
7439 	ret = stmmac_hw_init(priv);
7440 	if (ret)
7441 		goto error_hw_init;
7442 
7443 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7444 	 */
7445 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7446 		priv->plat->dma_cfg->dche = false;
7447 
7448 	stmmac_check_ether_addr(priv);
7449 
7450 	ndev->netdev_ops = &stmmac_netdev_ops;
7451 
7452 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7453 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7454 
7455 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7456 			    NETIF_F_RXCSUM;
7457 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7458 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7459 
7460 	ret = stmmac_tc_init(priv, priv);
7461 	if (!ret) {
7462 		ndev->hw_features |= NETIF_F_HW_TC;
7463 	}
7464 
7465 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7466 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7467 		if (priv->plat->has_gmac4)
7468 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7469 		priv->tso = true;
7470 		dev_info(priv->device, "TSO feature enabled\n");
7471 	}
7472 
7473 	if (priv->dma_cap.sphen &&
7474 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7475 		ndev->hw_features |= NETIF_F_GRO;
7476 		priv->sph_cap = true;
7477 		priv->sph = priv->sph_cap;
7478 		dev_info(priv->device, "SPH feature enabled\n");
7479 	}
7480 
7481 	/* Ideally our host DMA address width is the same as for the
7482 	 * device. However, it may differ and then we have to use our
7483 	 * host DMA width for allocation and the device DMA width for
7484 	 * register handling.
7485 	 */
7486 	if (priv->plat->host_dma_width)
7487 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7488 	else
7489 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7490 
7491 	if (priv->dma_cap.host_dma_width) {
7492 		ret = dma_set_mask_and_coherent(device,
7493 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7494 		if (!ret) {
7495 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7496 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7497 
7498 			/*
7499 			 * If more than 32 bits can be addressed, make sure to
7500 			 * enable enhanced addressing mode.
7501 			 */
7502 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7503 				priv->plat->dma_cfg->eame = true;
7504 		} else {
7505 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7506 			if (ret) {
7507 				dev_err(priv->device, "Failed to set DMA Mask\n");
7508 				goto error_hw_init;
7509 			}
7510 
7511 			priv->dma_cap.host_dma_width = 32;
7512 		}
7513 	}
7514 
7515 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7516 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7517 #ifdef STMMAC_VLAN_TAG_USED
7518 	/* Both mac100 and gmac support receive VLAN tag detection */
7519 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7520 	if (priv->plat->has_gmac4) {
7521 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7522 		priv->hw->hw_vlan_en = true;
7523 	}
7524 	if (priv->dma_cap.vlhash) {
7525 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7526 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7527 	}
7528 	if (priv->dma_cap.vlins) {
7529 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7530 		if (priv->dma_cap.dvlan)
7531 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7532 	}
7533 #endif
7534 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7535 
7536 	priv->xstats.threshold = tc;
7537 
7538 	/* Initialize RSS */
7539 	rxq = priv->plat->rx_queues_to_use;
7540 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7541 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7542 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7543 
7544 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7545 		ndev->features |= NETIF_F_RXHASH;
7546 
7547 	ndev->vlan_features |= ndev->features;
7548 
7549 	/* MTU range: 46 - hw-specific max */
7550 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7551 	if (priv->plat->has_xgmac)
7552 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7553 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7554 		ndev->max_mtu = JUMBO_LEN;
7555 	else
7556 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7557 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7558 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7559 	 */
7560 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7561 	    (priv->plat->maxmtu >= ndev->min_mtu))
7562 		ndev->max_mtu = priv->plat->maxmtu;
7563 	else if (priv->plat->maxmtu < ndev->min_mtu)
7564 		dev_warn(priv->device,
7565 			 "%s: warning: maxmtu having invalid value (%d)\n",
7566 			 __func__, priv->plat->maxmtu);
7567 
7568 	if (flow_ctrl)
7569 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7570 
7571 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7572 
7573 	/* Setup channels NAPI */
7574 	stmmac_napi_add(ndev);
7575 
7576 	mutex_init(&priv->lock);
7577 
7578 	stmmac_fpe_init(priv);
7579 
7580 	/* If a specific clk_csr value is passed from the platform
7581 	 * this means that the CSR Clock Range selection cannot be
7582 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7583 	 * set the MDC clock dynamically according to the csr actual
7584 	 * clock input.
7585 	 */
7586 	if (priv->plat->clk_csr >= 0)
7587 		priv->clk_csr = priv->plat->clk_csr;
7588 	else
7589 		stmmac_clk_csr_set(priv);
7590 
7591 	stmmac_check_pcs_mode(priv);
7592 
7593 	pm_runtime_get_noresume(device);
7594 	pm_runtime_set_active(device);
7595 	if (!pm_runtime_enabled(device))
7596 		pm_runtime_enable(device);
7597 
7598 	ret = stmmac_mdio_register(ndev);
7599 	if (ret < 0) {
7600 		dev_err_probe(priv->device, ret,
7601 			      "MDIO bus (id: %d) registration failed\n",
7602 			      priv->plat->bus_id);
7603 		goto error_mdio_register;
7604 	}
7605 
7606 	if (priv->plat->speed_mode_2500)
7607 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7608 
7609 	ret = stmmac_pcs_setup(ndev);
7610 	if (ret)
7611 		goto error_pcs_setup;
7612 
7613 	ret = stmmac_phy_setup(priv);
7614 	if (ret) {
7615 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7616 		goto error_phy_setup;
7617 	}
7618 
7619 	ret = register_netdev(ndev);
7620 	if (ret) {
7621 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7622 			__func__, ret);
7623 		goto error_netdev_register;
7624 	}
7625 
7626 #ifdef CONFIG_DEBUG_FS
7627 	stmmac_init_fs(ndev);
7628 #endif
7629 
7630 	if (priv->plat->dump_debug_regs)
7631 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7632 
7633 	/* Let pm_runtime_put() disable the clocks.
7634 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7635 	 */
7636 	pm_runtime_put(device);
7637 
7638 	return ret;
7639 
7640 error_netdev_register:
7641 	phylink_destroy(priv->phylink);
7642 error_phy_setup:
7643 	stmmac_pcs_clean(ndev);
7644 error_pcs_setup:
7645 	stmmac_mdio_unregister(ndev);
7646 error_mdio_register:
7647 	stmmac_napi_del(ndev);
7648 error_hw_init:
7649 	destroy_workqueue(priv->wq);
7650 error_wq_init:
7651 	bitmap_free(priv->af_xdp_zc_qps);
7652 
7653 	return ret;
7654 }
7655 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7656 
7657 /**
7658  * stmmac_dvr_remove
7659  * @dev: device pointer
7660  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7661  * changes the link status, releases the DMA descriptor rings.
7662  */
7663 void stmmac_dvr_remove(struct device *dev)
7664 {
7665 	struct net_device *ndev = dev_get_drvdata(dev);
7666 	struct stmmac_priv *priv = netdev_priv(ndev);
7667 
7668 	netdev_info(priv->dev, "%s: removing driver", __func__);
7669 
7670 	pm_runtime_get_sync(dev);
7671 
7672 	stmmac_stop_all_dma(priv);
7673 	stmmac_mac_set(priv, priv->ioaddr, false);
7674 	unregister_netdev(ndev);
7675 
7676 #ifdef CONFIG_DEBUG_FS
7677 	stmmac_exit_fs(ndev);
7678 #endif
7679 	phylink_destroy(priv->phylink);
7680 	if (priv->plat->stmmac_rst)
7681 		reset_control_assert(priv->plat->stmmac_rst);
7682 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7683 
7684 	stmmac_pcs_clean(ndev);
7685 	stmmac_mdio_unregister(ndev);
7686 
7687 	destroy_workqueue(priv->wq);
7688 	mutex_destroy(&priv->lock);
7689 	bitmap_free(priv->af_xdp_zc_qps);
7690 
7691 	pm_runtime_disable(dev);
7692 	pm_runtime_put_noidle(dev);
7693 }
7694 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7695 
7696 /**
7697  * stmmac_suspend - suspend callback
7698  * @dev: device pointer
7699  * Description: this is the function to suspend the device and it is called
7700  * by the platform driver to stop the network queue, release the resources,
7701  * program the PMT register (for WoL), clean and release driver resources.
7702  */
7703 int stmmac_suspend(struct device *dev)
7704 {
7705 	struct net_device *ndev = dev_get_drvdata(dev);
7706 	struct stmmac_priv *priv = netdev_priv(ndev);
7707 	u32 chan;
7708 
7709 	if (!ndev || !netif_running(ndev))
7710 		return 0;
7711 
7712 	mutex_lock(&priv->lock);
7713 
7714 	netif_device_detach(ndev);
7715 
7716 	stmmac_disable_all_queues(priv);
7717 
7718 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7719 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7720 
7721 	if (priv->eee_enabled) {
7722 		priv->tx_path_in_lpi_mode = false;
7723 		del_timer_sync(&priv->eee_ctrl_timer);
7724 	}
7725 
7726 	/* Stop TX/RX DMA */
7727 	stmmac_stop_all_dma(priv);
7728 
7729 	if (priv->plat->serdes_powerdown)
7730 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7731 
7732 	/* Enable Power down mode by programming the PMT regs */
7733 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7734 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7735 		priv->irq_wake = 1;
7736 	} else {
7737 		stmmac_mac_set(priv, priv->ioaddr, false);
7738 		pinctrl_pm_select_sleep_state(priv->device);
7739 	}
7740 
7741 	mutex_unlock(&priv->lock);
7742 
7743 	rtnl_lock();
7744 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7745 		phylink_suspend(priv->phylink, true);
7746 	} else {
7747 		if (device_may_wakeup(priv->device))
7748 			phylink_speed_down(priv->phylink, false);
7749 		phylink_suspend(priv->phylink, false);
7750 	}
7751 	rtnl_unlock();
7752 
7753 	if (stmmac_fpe_supported(priv))
7754 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7755 
7756 	priv->speed = SPEED_UNKNOWN;
7757 	return 0;
7758 }
7759 EXPORT_SYMBOL_GPL(stmmac_suspend);
7760 
7761 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7762 {
7763 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7764 
7765 	rx_q->cur_rx = 0;
7766 	rx_q->dirty_rx = 0;
7767 }
7768 
7769 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7770 {
7771 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7772 
7773 	tx_q->cur_tx = 0;
7774 	tx_q->dirty_tx = 0;
7775 	tx_q->mss = 0;
7776 
7777 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7778 }
7779 
7780 /**
7781  * stmmac_reset_queues_param - reset queue parameters
7782  * @priv: device pointer
7783  */
7784 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7785 {
7786 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7787 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7788 	u32 queue;
7789 
7790 	for (queue = 0; queue < rx_cnt; queue++)
7791 		stmmac_reset_rx_queue(priv, queue);
7792 
7793 	for (queue = 0; queue < tx_cnt; queue++)
7794 		stmmac_reset_tx_queue(priv, queue);
7795 }
7796 
7797 /**
7798  * stmmac_resume - resume callback
7799  * @dev: device pointer
7800  * Description: when resume this function is invoked to setup the DMA and CORE
7801  * in a usable state.
7802  */
7803 int stmmac_resume(struct device *dev)
7804 {
7805 	struct net_device *ndev = dev_get_drvdata(dev);
7806 	struct stmmac_priv *priv = netdev_priv(ndev);
7807 	int ret;
7808 
7809 	if (!netif_running(ndev))
7810 		return 0;
7811 
7812 	/* Power Down bit, into the PM register, is cleared
7813 	 * automatically as soon as a magic packet or a Wake-up frame
7814 	 * is received. Anyway, it's better to manually clear
7815 	 * this bit because it can generate problems while resuming
7816 	 * from another devices (e.g. serial console).
7817 	 */
7818 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7819 		mutex_lock(&priv->lock);
7820 		stmmac_pmt(priv, priv->hw, 0);
7821 		mutex_unlock(&priv->lock);
7822 		priv->irq_wake = 0;
7823 	} else {
7824 		pinctrl_pm_select_default_state(priv->device);
7825 		/* reset the phy so that it's ready */
7826 		if (priv->mii)
7827 			stmmac_mdio_reset(priv->mii);
7828 	}
7829 
7830 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7831 	    priv->plat->serdes_powerup) {
7832 		ret = priv->plat->serdes_powerup(ndev,
7833 						 priv->plat->bsp_priv);
7834 
7835 		if (ret < 0)
7836 			return ret;
7837 	}
7838 
7839 	rtnl_lock();
7840 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7841 		phylink_resume(priv->phylink);
7842 	} else {
7843 		phylink_resume(priv->phylink);
7844 		if (device_may_wakeup(priv->device))
7845 			phylink_speed_up(priv->phylink);
7846 	}
7847 	rtnl_unlock();
7848 
7849 	rtnl_lock();
7850 	mutex_lock(&priv->lock);
7851 
7852 	stmmac_reset_queues_param(priv);
7853 
7854 	stmmac_free_tx_skbufs(priv);
7855 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7856 
7857 	stmmac_hw_setup(ndev, false);
7858 	stmmac_init_coalesce(priv);
7859 	stmmac_set_rx_mode(ndev);
7860 
7861 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7862 
7863 	stmmac_enable_all_queues(priv);
7864 	stmmac_enable_all_dma_irq(priv);
7865 
7866 	mutex_unlock(&priv->lock);
7867 	rtnl_unlock();
7868 
7869 	netif_device_attach(ndev);
7870 
7871 	return 0;
7872 }
7873 EXPORT_SYMBOL_GPL(stmmac_resume);
7874 
7875 #ifndef MODULE
7876 static int __init stmmac_cmdline_opt(char *str)
7877 {
7878 	char *opt;
7879 
7880 	if (!str || !*str)
7881 		return 1;
7882 	while ((opt = strsep(&str, ",")) != NULL) {
7883 		if (!strncmp(opt, "debug:", 6)) {
7884 			if (kstrtoint(opt + 6, 0, &debug))
7885 				goto err;
7886 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7887 			if (kstrtoint(opt + 8, 0, &phyaddr))
7888 				goto err;
7889 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7890 			if (kstrtoint(opt + 7, 0, &buf_sz))
7891 				goto err;
7892 		} else if (!strncmp(opt, "tc:", 3)) {
7893 			if (kstrtoint(opt + 3, 0, &tc))
7894 				goto err;
7895 		} else if (!strncmp(opt, "watchdog:", 9)) {
7896 			if (kstrtoint(opt + 9, 0, &watchdog))
7897 				goto err;
7898 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7899 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7900 				goto err;
7901 		} else if (!strncmp(opt, "pause:", 6)) {
7902 			if (kstrtoint(opt + 6, 0, &pause))
7903 				goto err;
7904 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7905 			if (kstrtoint(opt + 10, 0, &eee_timer))
7906 				goto err;
7907 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7908 			if (kstrtoint(opt + 11, 0, &chain_mode))
7909 				goto err;
7910 		}
7911 	}
7912 	return 1;
7913 
7914 err:
7915 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7916 	return 1;
7917 }
7918 
7919 __setup("stmmaceth=", stmmac_cmdline_opt);
7920 #endif /* MODULE */
7921 
7922 static int __init stmmac_init(void)
7923 {
7924 #ifdef CONFIG_DEBUG_FS
7925 	/* Create debugfs main directory if it doesn't exist yet */
7926 	if (!stmmac_fs_dir)
7927 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7928 	register_netdevice_notifier(&stmmac_notifier);
7929 #endif
7930 
7931 	return 0;
7932 }
7933 
7934 static void __exit stmmac_exit(void)
7935 {
7936 #ifdef CONFIG_DEBUG_FS
7937 	unregister_netdevice_notifier(&stmmac_notifier);
7938 	debugfs_remove_recursive(stmmac_fs_dir);
7939 #endif
7940 }
7941 
7942 module_init(stmmac_init)
7943 module_exit(stmmac_exit)
7944 
7945 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7946 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7947 MODULE_LICENSE("GPL");
7948