xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 14ea4cd1b19162888f629c4ce1ba268c683b0f12)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54 
55 /* As long as the interface is active, we keep the timestamping counter enabled
56  * with fine resolution and binary rollover. This avoid non-monotonic behavior
57  * (clock jumps) when changing timestamping settings at runtime.
58  */
59 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 				 PTP_TCR_TSCTRLSSR)
61 
62 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
64 
65 /* Module parameters */
66 #define TX_TIMEO	5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70 
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74 
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78 
79 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
110 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
111 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
112 
113 #define STMMAC_DEFAULT_LPI_TIMER	1000
114 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
115 module_param(eee_timer, int, 0644);
116 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
117 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
118 
119 /* By default the driver will use the ring mode to manage tx and rx descriptors,
120  * but allow user to force to use the chain instead of the ring
121  */
122 static unsigned int chain_mode;
123 module_param(chain_mode, int, 0444);
124 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
125 
126 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
127 /* For MSI interrupts handling */
128 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
129 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
131 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
132 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
133 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
135 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
137 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
138 					  u32 rxmode, u32 chan);
139 
140 #ifdef CONFIG_DEBUG_FS
141 static const struct net_device_ops stmmac_netdev_ops;
142 static void stmmac_init_fs(struct net_device *dev);
143 static void stmmac_exit_fs(struct net_device *dev);
144 #endif
145 
146 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
147 
148 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
149 {
150 	int ret = 0;
151 
152 	if (enabled) {
153 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
154 		if (ret)
155 			return ret;
156 		ret = clk_prepare_enable(priv->plat->pclk);
157 		if (ret) {
158 			clk_disable_unprepare(priv->plat->stmmac_clk);
159 			return ret;
160 		}
161 		if (priv->plat->clks_config) {
162 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
163 			if (ret) {
164 				clk_disable_unprepare(priv->plat->stmmac_clk);
165 				clk_disable_unprepare(priv->plat->pclk);
166 				return ret;
167 			}
168 		}
169 	} else {
170 		clk_disable_unprepare(priv->plat->stmmac_clk);
171 		clk_disable_unprepare(priv->plat->pclk);
172 		if (priv->plat->clks_config)
173 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
174 	}
175 
176 	return ret;
177 }
178 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
179 
180 /**
181  * stmmac_verify_args - verify the driver parameters.
182  * Description: it checks the driver parameters and set a default in case of
183  * errors.
184  */
185 static void stmmac_verify_args(void)
186 {
187 	if (unlikely(watchdog < 0))
188 		watchdog = TX_TIMEO;
189 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
190 		buf_sz = DEFAULT_BUFSIZE;
191 	if (unlikely(flow_ctrl > 1))
192 		flow_ctrl = FLOW_AUTO;
193 	else if (likely(flow_ctrl < 0))
194 		flow_ctrl = FLOW_OFF;
195 	if (unlikely((pause < 0) || (pause > 0xffff)))
196 		pause = PAUSE_TIME;
197 	if (eee_timer < 0)
198 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
199 }
200 
201 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
202 {
203 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
204 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
205 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
206 	u32 queue;
207 
208 	for (queue = 0; queue < maxq; queue++) {
209 		struct stmmac_channel *ch = &priv->channel[queue];
210 
211 		if (stmmac_xdp_is_enabled(priv) &&
212 		    test_bit(queue, priv->af_xdp_zc_qps)) {
213 			napi_disable(&ch->rxtx_napi);
214 			continue;
215 		}
216 
217 		if (queue < rx_queues_cnt)
218 			napi_disable(&ch->rx_napi);
219 		if (queue < tx_queues_cnt)
220 			napi_disable(&ch->tx_napi);
221 	}
222 }
223 
224 /**
225  * stmmac_disable_all_queues - Disable all queues
226  * @priv: driver private structure
227  */
228 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
229 {
230 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
231 	struct stmmac_rx_queue *rx_q;
232 	u32 queue;
233 
234 	/* synchronize_rcu() needed for pending XDP buffers to drain */
235 	for (queue = 0; queue < rx_queues_cnt; queue++) {
236 		rx_q = &priv->dma_conf.rx_queue[queue];
237 		if (rx_q->xsk_pool) {
238 			synchronize_rcu();
239 			break;
240 		}
241 	}
242 
243 	__stmmac_disable_all_queues(priv);
244 }
245 
246 /**
247  * stmmac_enable_all_queues - Enable all queues
248  * @priv: driver private structure
249  */
250 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
251 {
252 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
253 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
254 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
255 	u32 queue;
256 
257 	for (queue = 0; queue < maxq; queue++) {
258 		struct stmmac_channel *ch = &priv->channel[queue];
259 
260 		if (stmmac_xdp_is_enabled(priv) &&
261 		    test_bit(queue, priv->af_xdp_zc_qps)) {
262 			napi_enable(&ch->rxtx_napi);
263 			continue;
264 		}
265 
266 		if (queue < rx_queues_cnt)
267 			napi_enable(&ch->rx_napi);
268 		if (queue < tx_queues_cnt)
269 			napi_enable(&ch->tx_napi);
270 	}
271 }
272 
273 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
274 {
275 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
276 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
277 		queue_work(priv->wq, &priv->service_task);
278 }
279 
280 static void stmmac_global_err(struct stmmac_priv *priv)
281 {
282 	netif_carrier_off(priv->dev);
283 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
284 	stmmac_service_event_schedule(priv);
285 }
286 
287 /**
288  * stmmac_clk_csr_set - dynamically set the MDC clock
289  * @priv: driver private structure
290  * Description: this is to dynamically set the MDC clock according to the csr
291  * clock input.
292  * Note:
293  *	If a specific clk_csr value is passed from the platform
294  *	this means that the CSR Clock Range selection cannot be
295  *	changed at run-time and it is fixed (as reported in the driver
296  *	documentation). Viceversa the driver will try to set the MDC
297  *	clock dynamically according to the actual clock input.
298  */
299 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
300 {
301 	unsigned long clk_rate;
302 
303 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
304 
305 	/* Platform provided default clk_csr would be assumed valid
306 	 * for all other cases except for the below mentioned ones.
307 	 * For values higher than the IEEE 802.3 specified frequency
308 	 * we can not estimate the proper divider as it is not known
309 	 * the frequency of clk_csr_i. So we do not change the default
310 	 * divider.
311 	 */
312 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
313 		if (clk_rate < CSR_F_35M)
314 			priv->clk_csr = STMMAC_CSR_20_35M;
315 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
316 			priv->clk_csr = STMMAC_CSR_35_60M;
317 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
318 			priv->clk_csr = STMMAC_CSR_60_100M;
319 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
320 			priv->clk_csr = STMMAC_CSR_100_150M;
321 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
322 			priv->clk_csr = STMMAC_CSR_150_250M;
323 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
324 			priv->clk_csr = STMMAC_CSR_250_300M;
325 		else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
326 			priv->clk_csr = STMMAC_CSR_300_500M;
327 		else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
328 			priv->clk_csr = STMMAC_CSR_500_800M;
329 	}
330 
331 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
332 		if (clk_rate > 160000000)
333 			priv->clk_csr = 0x03;
334 		else if (clk_rate > 80000000)
335 			priv->clk_csr = 0x02;
336 		else if (clk_rate > 40000000)
337 			priv->clk_csr = 0x01;
338 		else
339 			priv->clk_csr = 0;
340 	}
341 
342 	if (priv->plat->has_xgmac) {
343 		if (clk_rate > 400000000)
344 			priv->clk_csr = 0x5;
345 		else if (clk_rate > 350000000)
346 			priv->clk_csr = 0x4;
347 		else if (clk_rate > 300000000)
348 			priv->clk_csr = 0x3;
349 		else if (clk_rate > 250000000)
350 			priv->clk_csr = 0x2;
351 		else if (clk_rate > 150000000)
352 			priv->clk_csr = 0x1;
353 		else
354 			priv->clk_csr = 0x0;
355 	}
356 }
357 
358 static void print_pkt(unsigned char *buf, int len)
359 {
360 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
361 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
362 }
363 
364 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
365 {
366 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
367 	u32 avail;
368 
369 	if (tx_q->dirty_tx > tx_q->cur_tx)
370 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
371 	else
372 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
373 
374 	return avail;
375 }
376 
377 /**
378  * stmmac_rx_dirty - Get RX queue dirty
379  * @priv: driver private structure
380  * @queue: RX queue index
381  */
382 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
383 {
384 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
385 	u32 dirty;
386 
387 	if (rx_q->dirty_rx <= rx_q->cur_rx)
388 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
389 	else
390 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
391 
392 	return dirty;
393 }
394 
395 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
396 {
397 	int tx_lpi_timer;
398 
399 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
400 	priv->eee_sw_timer_en = en ? 0 : 1;
401 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
402 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
403 }
404 
405 /**
406  * stmmac_enable_eee_mode - check and enter in LPI mode
407  * @priv: driver private structure
408  * Description: this function is to verify and enter in LPI mode in case of
409  * EEE.
410  */
411 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
412 {
413 	u32 tx_cnt = priv->plat->tx_queues_to_use;
414 	u32 queue;
415 
416 	/* check if all TX queues have the work finished */
417 	for (queue = 0; queue < tx_cnt; queue++) {
418 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
419 
420 		if (tx_q->dirty_tx != tx_q->cur_tx)
421 			return -EBUSY; /* still unfinished work */
422 	}
423 
424 	/* Check and enter in LPI mode */
425 	if (!priv->tx_path_in_lpi_mode)
426 		stmmac_set_eee_mode(priv, priv->hw,
427 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
428 	return 0;
429 }
430 
431 /**
432  * stmmac_disable_eee_mode - disable and exit from LPI mode
433  * @priv: driver private structure
434  * Description: this function is to exit and disable EEE in case of
435  * LPI state is true. This is called by the xmit.
436  */
437 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
438 {
439 	if (!priv->eee_sw_timer_en) {
440 		stmmac_lpi_entry_timer_config(priv, 0);
441 		return;
442 	}
443 
444 	stmmac_reset_eee_mode(priv, priv->hw);
445 	del_timer_sync(&priv->eee_ctrl_timer);
446 	priv->tx_path_in_lpi_mode = false;
447 }
448 
449 /**
450  * stmmac_eee_ctrl_timer - EEE TX SW timer.
451  * @t:  timer_list struct containing private info
452  * Description:
453  *  if there is no data transfer and if we are not in LPI state,
454  *  then MAC Transmitter can be moved to LPI state.
455  */
456 static void stmmac_eee_ctrl_timer(struct timer_list *t)
457 {
458 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
459 
460 	if (stmmac_enable_eee_mode(priv))
461 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
462 }
463 
464 /**
465  * stmmac_eee_init - init EEE
466  * @priv: driver private structure
467  * Description:
468  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
469  *  can also manage EEE, this function enable the LPI state and start related
470  *  timer.
471  */
472 bool stmmac_eee_init(struct stmmac_priv *priv)
473 {
474 	int eee_tw_timer = priv->eee_tw_timer;
475 
476 	/* Check if MAC core supports the EEE feature. */
477 	if (!priv->dma_cap.eee)
478 		return false;
479 
480 	mutex_lock(&priv->lock);
481 
482 	/* Check if it needs to be deactivated */
483 	if (!priv->eee_active) {
484 		if (priv->eee_enabled) {
485 			netdev_dbg(priv->dev, "disable EEE\n");
486 			stmmac_lpi_entry_timer_config(priv, 0);
487 			del_timer_sync(&priv->eee_ctrl_timer);
488 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
489 			if (priv->hw->xpcs)
490 				xpcs_config_eee(priv->hw->xpcs,
491 						priv->plat->mult_fact_100ns,
492 						false);
493 		}
494 		mutex_unlock(&priv->lock);
495 		return false;
496 	}
497 
498 	if (priv->eee_active && !priv->eee_enabled) {
499 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
500 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
501 				     eee_tw_timer);
502 		if (priv->hw->xpcs)
503 			xpcs_config_eee(priv->hw->xpcs,
504 					priv->plat->mult_fact_100ns,
505 					true);
506 	}
507 
508 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
509 		del_timer_sync(&priv->eee_ctrl_timer);
510 		priv->tx_path_in_lpi_mode = false;
511 		stmmac_lpi_entry_timer_config(priv, 1);
512 	} else {
513 		stmmac_lpi_entry_timer_config(priv, 0);
514 		mod_timer(&priv->eee_ctrl_timer,
515 			  STMMAC_LPI_T(priv->tx_lpi_timer));
516 	}
517 
518 	mutex_unlock(&priv->lock);
519 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
520 	return true;
521 }
522 
523 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
524  * @priv: driver private structure
525  * @p : descriptor pointer
526  * @skb : the socket buffer
527  * Description :
528  * This function will read timestamp from the descriptor & pass it to stack.
529  * and also perform some sanity checks.
530  */
531 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
532 				   struct dma_desc *p, struct sk_buff *skb)
533 {
534 	struct skb_shared_hwtstamps shhwtstamp;
535 	bool found = false;
536 	u64 ns = 0;
537 
538 	if (!priv->hwts_tx_en)
539 		return;
540 
541 	/* exit if skb doesn't support hw tstamp */
542 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
543 		return;
544 
545 	/* check tx tstamp status */
546 	if (stmmac_get_tx_timestamp_status(priv, p)) {
547 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
548 		found = true;
549 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
550 		found = true;
551 	}
552 
553 	if (found) {
554 		ns -= priv->plat->cdc_error_adj;
555 
556 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
557 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
558 
559 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
560 		/* pass tstamp to stack */
561 		skb_tstamp_tx(skb, &shhwtstamp);
562 	}
563 }
564 
565 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
566  * @priv: driver private structure
567  * @p : descriptor pointer
568  * @np : next descriptor pointer
569  * @skb : the socket buffer
570  * Description :
571  * This function will read received packet's timestamp from the descriptor
572  * and pass it to stack. It also perform some sanity checks.
573  */
574 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
575 				   struct dma_desc *np, struct sk_buff *skb)
576 {
577 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
578 	struct dma_desc *desc = p;
579 	u64 ns = 0;
580 
581 	if (!priv->hwts_rx_en)
582 		return;
583 	/* For GMAC4, the valid timestamp is from CTX next desc. */
584 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
585 		desc = np;
586 
587 	/* Check if timestamp is available */
588 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
589 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
590 
591 		ns -= priv->plat->cdc_error_adj;
592 
593 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
594 		shhwtstamp = skb_hwtstamps(skb);
595 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
596 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
597 	} else  {
598 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
599 	}
600 }
601 
602 /**
603  *  stmmac_hwtstamp_set - control hardware timestamping.
604  *  @dev: device pointer.
605  *  @ifr: An IOCTL specific structure, that can contain a pointer to
606  *  a proprietary structure used to pass information to the driver.
607  *  Description:
608  *  This function configures the MAC to enable/disable both outgoing(TX)
609  *  and incoming(RX) packets time stamping based on user input.
610  *  Return Value:
611  *  0 on success and an appropriate -ve integer on failure.
612  */
613 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
614 {
615 	struct stmmac_priv *priv = netdev_priv(dev);
616 	struct hwtstamp_config config;
617 	u32 ptp_v2 = 0;
618 	u32 tstamp_all = 0;
619 	u32 ptp_over_ipv4_udp = 0;
620 	u32 ptp_over_ipv6_udp = 0;
621 	u32 ptp_over_ethernet = 0;
622 	u32 snap_type_sel = 0;
623 	u32 ts_master_en = 0;
624 	u32 ts_event_en = 0;
625 
626 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
627 		netdev_alert(priv->dev, "No support for HW time stamping\n");
628 		priv->hwts_tx_en = 0;
629 		priv->hwts_rx_en = 0;
630 
631 		return -EOPNOTSUPP;
632 	}
633 
634 	if (copy_from_user(&config, ifr->ifr_data,
635 			   sizeof(config)))
636 		return -EFAULT;
637 
638 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
639 		   __func__, config.flags, config.tx_type, config.rx_filter);
640 
641 	if (config.tx_type != HWTSTAMP_TX_OFF &&
642 	    config.tx_type != HWTSTAMP_TX_ON)
643 		return -ERANGE;
644 
645 	if (priv->adv_ts) {
646 		switch (config.rx_filter) {
647 		case HWTSTAMP_FILTER_NONE:
648 			/* time stamp no incoming packet at all */
649 			config.rx_filter = HWTSTAMP_FILTER_NONE;
650 			break;
651 
652 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
653 			/* PTP v1, UDP, any kind of event packet */
654 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
655 			/* 'xmac' hardware can support Sync, Pdelay_Req and
656 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
657 			 * This leaves Delay_Req timestamps out.
658 			 * Enable all events *and* general purpose message
659 			 * timestamping
660 			 */
661 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
662 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
663 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
664 			break;
665 
666 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
667 			/* PTP v1, UDP, Sync packet */
668 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
669 			/* take time stamp for SYNC messages only */
670 			ts_event_en = PTP_TCR_TSEVNTENA;
671 
672 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
673 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
674 			break;
675 
676 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
677 			/* PTP v1, UDP, Delay_req packet */
678 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
679 			/* take time stamp for Delay_Req messages only */
680 			ts_master_en = PTP_TCR_TSMSTRENA;
681 			ts_event_en = PTP_TCR_TSEVNTENA;
682 
683 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
684 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
685 			break;
686 
687 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
688 			/* PTP v2, UDP, any kind of event packet */
689 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
690 			ptp_v2 = PTP_TCR_TSVER2ENA;
691 			/* take time stamp for all event messages */
692 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
693 
694 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
695 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
696 			break;
697 
698 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
699 			/* PTP v2, UDP, Sync packet */
700 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
701 			ptp_v2 = PTP_TCR_TSVER2ENA;
702 			/* take time stamp for SYNC messages only */
703 			ts_event_en = PTP_TCR_TSEVNTENA;
704 
705 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
706 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
707 			break;
708 
709 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
710 			/* PTP v2, UDP, Delay_req packet */
711 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
712 			ptp_v2 = PTP_TCR_TSVER2ENA;
713 			/* take time stamp for Delay_Req messages only */
714 			ts_master_en = PTP_TCR_TSMSTRENA;
715 			ts_event_en = PTP_TCR_TSEVNTENA;
716 
717 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
718 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
719 			break;
720 
721 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
722 			/* PTP v2/802.AS1 any layer, any kind of event packet */
723 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
724 			ptp_v2 = PTP_TCR_TSVER2ENA;
725 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
726 			if (priv->synopsys_id < DWMAC_CORE_4_10)
727 				ts_event_en = PTP_TCR_TSEVNTENA;
728 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
729 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
730 			ptp_over_ethernet = PTP_TCR_TSIPENA;
731 			break;
732 
733 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
734 			/* PTP v2/802.AS1, any layer, Sync packet */
735 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
736 			ptp_v2 = PTP_TCR_TSVER2ENA;
737 			/* take time stamp for SYNC messages only */
738 			ts_event_en = PTP_TCR_TSEVNTENA;
739 
740 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
741 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
742 			ptp_over_ethernet = PTP_TCR_TSIPENA;
743 			break;
744 
745 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
746 			/* PTP v2/802.AS1, any layer, Delay_req packet */
747 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
748 			ptp_v2 = PTP_TCR_TSVER2ENA;
749 			/* take time stamp for Delay_Req messages only */
750 			ts_master_en = PTP_TCR_TSMSTRENA;
751 			ts_event_en = PTP_TCR_TSEVNTENA;
752 
753 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
754 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
755 			ptp_over_ethernet = PTP_TCR_TSIPENA;
756 			break;
757 
758 		case HWTSTAMP_FILTER_NTP_ALL:
759 		case HWTSTAMP_FILTER_ALL:
760 			/* time stamp any incoming packet */
761 			config.rx_filter = HWTSTAMP_FILTER_ALL;
762 			tstamp_all = PTP_TCR_TSENALL;
763 			break;
764 
765 		default:
766 			return -ERANGE;
767 		}
768 	} else {
769 		switch (config.rx_filter) {
770 		case HWTSTAMP_FILTER_NONE:
771 			config.rx_filter = HWTSTAMP_FILTER_NONE;
772 			break;
773 		default:
774 			/* PTP v1, UDP, any kind of event packet */
775 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
776 			break;
777 		}
778 	}
779 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
780 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
781 
782 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
783 
784 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
785 		priv->systime_flags |= tstamp_all | ptp_v2 |
786 				       ptp_over_ethernet | ptp_over_ipv6_udp |
787 				       ptp_over_ipv4_udp | ts_event_en |
788 				       ts_master_en | snap_type_sel;
789 	}
790 
791 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
792 
793 	memcpy(&priv->tstamp_config, &config, sizeof(config));
794 
795 	return copy_to_user(ifr->ifr_data, &config,
796 			    sizeof(config)) ? -EFAULT : 0;
797 }
798 
799 /**
800  *  stmmac_hwtstamp_get - read hardware timestamping.
801  *  @dev: device pointer.
802  *  @ifr: An IOCTL specific structure, that can contain a pointer to
803  *  a proprietary structure used to pass information to the driver.
804  *  Description:
805  *  This function obtain the current hardware timestamping settings
806  *  as requested.
807  */
808 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
809 {
810 	struct stmmac_priv *priv = netdev_priv(dev);
811 	struct hwtstamp_config *config = &priv->tstamp_config;
812 
813 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
814 		return -EOPNOTSUPP;
815 
816 	return copy_to_user(ifr->ifr_data, config,
817 			    sizeof(*config)) ? -EFAULT : 0;
818 }
819 
820 /**
821  * stmmac_init_tstamp_counter - init hardware timestamping counter
822  * @priv: driver private structure
823  * @systime_flags: timestamping flags
824  * Description:
825  * Initialize hardware counter for packet timestamping.
826  * This is valid as long as the interface is open and not suspended.
827  * Will be rerun after resuming from suspend, case in which the timestamping
828  * flags updated by stmmac_hwtstamp_set() also need to be restored.
829  */
830 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
831 {
832 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
833 	struct timespec64 now;
834 	u32 sec_inc = 0;
835 	u64 temp = 0;
836 
837 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
838 		return -EOPNOTSUPP;
839 
840 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
841 	priv->systime_flags = systime_flags;
842 
843 	/* program Sub Second Increment reg */
844 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
845 					   priv->plat->clk_ptp_rate,
846 					   xmac, &sec_inc);
847 	temp = div_u64(1000000000ULL, sec_inc);
848 
849 	/* Store sub second increment for later use */
850 	priv->sub_second_inc = sec_inc;
851 
852 	/* calculate default added value:
853 	 * formula is :
854 	 * addend = (2^32)/freq_div_ratio;
855 	 * where, freq_div_ratio = 1e9ns/sec_inc
856 	 */
857 	temp = (u64)(temp << 32);
858 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
859 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
860 
861 	/* initialize system time */
862 	ktime_get_real_ts64(&now);
863 
864 	/* lower 32 bits of tv_sec are safe until y2106 */
865 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
866 
867 	return 0;
868 }
869 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
870 
871 /**
872  * stmmac_init_ptp - init PTP
873  * @priv: driver private structure
874  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
875  * This is done by looking at the HW cap. register.
876  * This function also registers the ptp driver.
877  */
878 static int stmmac_init_ptp(struct stmmac_priv *priv)
879 {
880 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
881 	int ret;
882 
883 	if (priv->plat->ptp_clk_freq_config)
884 		priv->plat->ptp_clk_freq_config(priv);
885 
886 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
887 	if (ret)
888 		return ret;
889 
890 	priv->adv_ts = 0;
891 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
892 	if (xmac && priv->dma_cap.atime_stamp)
893 		priv->adv_ts = 1;
894 	/* Dwmac 3.x core with extend_desc can support adv_ts */
895 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
896 		priv->adv_ts = 1;
897 
898 	if (priv->dma_cap.time_stamp)
899 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
900 
901 	if (priv->adv_ts)
902 		netdev_info(priv->dev,
903 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
904 
905 	priv->hwts_tx_en = 0;
906 	priv->hwts_rx_en = 0;
907 
908 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
909 		stmmac_hwtstamp_correct_latency(priv, priv);
910 
911 	return 0;
912 }
913 
914 static void stmmac_release_ptp(struct stmmac_priv *priv)
915 {
916 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
917 	stmmac_ptp_unregister(priv);
918 }
919 
920 /**
921  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
922  *  @priv: driver private structure
923  *  @duplex: duplex passed to the next function
924  *  Description: It is used for configuring the flow control in all queues
925  */
926 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
927 {
928 	u32 tx_cnt = priv->plat->tx_queues_to_use;
929 
930 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
931 			priv->pause, tx_cnt);
932 }
933 
934 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
935 					 phy_interface_t interface)
936 {
937 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
938 
939 	/* Refresh the MAC-specific capabilities */
940 	stmmac_mac_update_caps(priv);
941 
942 	config->mac_capabilities = priv->hw->link.caps;
943 
944 	if (priv->plat->max_speed)
945 		phylink_limit_mac_speed(config, priv->plat->max_speed);
946 
947 	return config->mac_capabilities;
948 }
949 
950 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
951 						 phy_interface_t interface)
952 {
953 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
954 	struct phylink_pcs *pcs;
955 
956 	if (priv->plat->select_pcs) {
957 		pcs = priv->plat->select_pcs(priv, interface);
958 		if (!IS_ERR(pcs))
959 			return pcs;
960 	}
961 
962 	return NULL;
963 }
964 
965 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
966 			      const struct phylink_link_state *state)
967 {
968 	/* Nothing to do, xpcs_config() handles everything */
969 }
970 
971 static void stmmac_mac_link_down(struct phylink_config *config,
972 				 unsigned int mode, phy_interface_t interface)
973 {
974 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
975 
976 	stmmac_mac_set(priv, priv->ioaddr, false);
977 	priv->eee_active = false;
978 	priv->tx_lpi_enabled = false;
979 	priv->eee_enabled = stmmac_eee_init(priv);
980 	stmmac_set_eee_pls(priv, priv->hw, false);
981 
982 	if (stmmac_fpe_supported(priv))
983 		stmmac_fpe_link_state_handle(priv, false);
984 }
985 
986 static void stmmac_mac_link_up(struct phylink_config *config,
987 			       struct phy_device *phy,
988 			       unsigned int mode, phy_interface_t interface,
989 			       int speed, int duplex,
990 			       bool tx_pause, bool rx_pause)
991 {
992 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
993 	u32 old_ctrl, ctrl;
994 
995 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
996 	    priv->plat->serdes_powerup)
997 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
998 
999 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1000 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1001 
1002 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1003 		switch (speed) {
1004 		case SPEED_10000:
1005 			ctrl |= priv->hw->link.xgmii.speed10000;
1006 			break;
1007 		case SPEED_5000:
1008 			ctrl |= priv->hw->link.xgmii.speed5000;
1009 			break;
1010 		case SPEED_2500:
1011 			ctrl |= priv->hw->link.xgmii.speed2500;
1012 			break;
1013 		default:
1014 			return;
1015 		}
1016 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1017 		switch (speed) {
1018 		case SPEED_100000:
1019 			ctrl |= priv->hw->link.xlgmii.speed100000;
1020 			break;
1021 		case SPEED_50000:
1022 			ctrl |= priv->hw->link.xlgmii.speed50000;
1023 			break;
1024 		case SPEED_40000:
1025 			ctrl |= priv->hw->link.xlgmii.speed40000;
1026 			break;
1027 		case SPEED_25000:
1028 			ctrl |= priv->hw->link.xlgmii.speed25000;
1029 			break;
1030 		case SPEED_10000:
1031 			ctrl |= priv->hw->link.xgmii.speed10000;
1032 			break;
1033 		case SPEED_2500:
1034 			ctrl |= priv->hw->link.speed2500;
1035 			break;
1036 		case SPEED_1000:
1037 			ctrl |= priv->hw->link.speed1000;
1038 			break;
1039 		default:
1040 			return;
1041 		}
1042 	} else {
1043 		switch (speed) {
1044 		case SPEED_2500:
1045 			ctrl |= priv->hw->link.speed2500;
1046 			break;
1047 		case SPEED_1000:
1048 			ctrl |= priv->hw->link.speed1000;
1049 			break;
1050 		case SPEED_100:
1051 			ctrl |= priv->hw->link.speed100;
1052 			break;
1053 		case SPEED_10:
1054 			ctrl |= priv->hw->link.speed10;
1055 			break;
1056 		default:
1057 			return;
1058 		}
1059 	}
1060 
1061 	priv->speed = speed;
1062 
1063 	if (priv->plat->fix_mac_speed)
1064 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1065 
1066 	if (!duplex)
1067 		ctrl &= ~priv->hw->link.duplex;
1068 	else
1069 		ctrl |= priv->hw->link.duplex;
1070 
1071 	/* Flow Control operation */
1072 	if (rx_pause && tx_pause)
1073 		priv->flow_ctrl = FLOW_AUTO;
1074 	else if (rx_pause && !tx_pause)
1075 		priv->flow_ctrl = FLOW_RX;
1076 	else if (!rx_pause && tx_pause)
1077 		priv->flow_ctrl = FLOW_TX;
1078 	else
1079 		priv->flow_ctrl = FLOW_OFF;
1080 
1081 	stmmac_mac_flow_ctrl(priv, duplex);
1082 
1083 	if (ctrl != old_ctrl)
1084 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1085 
1086 	stmmac_mac_set(priv, priv->ioaddr, true);
1087 	if (phy && priv->dma_cap.eee) {
1088 		priv->eee_active =
1089 			phy_init_eee(phy, !(priv->plat->flags &
1090 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1091 		priv->eee_enabled = stmmac_eee_init(priv);
1092 		priv->tx_lpi_enabled = priv->eee_enabled;
1093 		stmmac_set_eee_pls(priv, priv->hw, true);
1094 	}
1095 
1096 	if (stmmac_fpe_supported(priv))
1097 		stmmac_fpe_link_state_handle(priv, true);
1098 
1099 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1100 		stmmac_hwtstamp_correct_latency(priv, priv);
1101 }
1102 
1103 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1104 	.mac_get_caps = stmmac_mac_get_caps,
1105 	.mac_select_pcs = stmmac_mac_select_pcs,
1106 	.mac_config = stmmac_mac_config,
1107 	.mac_link_down = stmmac_mac_link_down,
1108 	.mac_link_up = stmmac_mac_link_up,
1109 };
1110 
1111 /**
1112  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1113  * @priv: driver private structure
1114  * Description: this is to verify if the HW supports the PCS.
1115  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1116  * configured for the TBI, RTBI, or SGMII PHY interface.
1117  */
1118 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1119 {
1120 	int interface = priv->plat->mac_interface;
1121 
1122 	if (priv->dma_cap.pcs) {
1123 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1124 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1125 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1127 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1128 			priv->hw->pcs = STMMAC_PCS_RGMII;
1129 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1130 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_SGMII;
1132 		}
1133 	}
1134 }
1135 
1136 /**
1137  * stmmac_init_phy - PHY initialization
1138  * @dev: net device structure
1139  * Description: it initializes the driver's PHY state, and attaches the PHY
1140  * to the mac driver.
1141  *  Return value:
1142  *  0 on success
1143  */
1144 static int stmmac_init_phy(struct net_device *dev)
1145 {
1146 	struct stmmac_priv *priv = netdev_priv(dev);
1147 	struct fwnode_handle *phy_fwnode;
1148 	struct fwnode_handle *fwnode;
1149 	int ret;
1150 
1151 	if (!phylink_expects_phy(priv->phylink))
1152 		return 0;
1153 
1154 	fwnode = priv->plat->port_node;
1155 	if (!fwnode)
1156 		fwnode = dev_fwnode(priv->device);
1157 
1158 	if (fwnode)
1159 		phy_fwnode = fwnode_get_phy_node(fwnode);
1160 	else
1161 		phy_fwnode = NULL;
1162 
1163 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1164 	 * manually parse it
1165 	 */
1166 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1167 		int addr = priv->plat->phy_addr;
1168 		struct phy_device *phydev;
1169 
1170 		if (addr < 0) {
1171 			netdev_err(priv->dev, "no phy found\n");
1172 			return -ENODEV;
1173 		}
1174 
1175 		phydev = mdiobus_get_phy(priv->mii, addr);
1176 		if (!phydev) {
1177 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1178 			return -ENODEV;
1179 		}
1180 
1181 		if (priv->dma_cap.eee)
1182 			phy_support_eee(phydev);
1183 
1184 		ret = phylink_connect_phy(priv->phylink, phydev);
1185 	} else {
1186 		fwnode_handle_put(phy_fwnode);
1187 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 	}
1189 
1190 	if (!priv->plat->pmt) {
1191 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192 
1193 		phylink_ethtool_get_wol(priv->phylink, &wol);
1194 		device_set_wakeup_capable(priv->device, !!wol.supported);
1195 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 	}
1197 
1198 	return ret;
1199 }
1200 
1201 static int stmmac_phy_setup(struct stmmac_priv *priv)
1202 {
1203 	struct stmmac_mdio_bus_data *mdio_bus_data;
1204 	int mode = priv->plat->phy_interface;
1205 	struct fwnode_handle *fwnode;
1206 	struct phylink_pcs *pcs;
1207 	struct phylink *phylink;
1208 
1209 	priv->phylink_config.dev = &priv->dev->dev;
1210 	priv->phylink_config.type = PHYLINK_NETDEV;
1211 	priv->phylink_config.mac_managed_pm = true;
1212 
1213 	/* Stmmac always requires an RX clock for hardware initialization */
1214 	priv->phylink_config.mac_requires_rxc = true;
1215 
1216 	mdio_bus_data = priv->plat->mdio_bus_data;
1217 	if (mdio_bus_data)
1218 		priv->phylink_config.default_an_inband =
1219 			mdio_bus_data->default_an_inband;
1220 
1221 	/* Set the platform/firmware specified interface mode. Note, phylink
1222 	 * deals with the PHY interface mode, not the MAC interface mode.
1223 	 */
1224 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1225 
1226 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1227 	if (priv->hw->xpcs)
1228 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1229 	else
1230 		pcs = priv->hw->phylink_pcs;
1231 
1232 	if (pcs)
1233 		phy_interface_or(priv->phylink_config.supported_interfaces,
1234 				 priv->phylink_config.supported_interfaces,
1235 				 pcs->supported_interfaces);
1236 
1237 	fwnode = priv->plat->port_node;
1238 	if (!fwnode)
1239 		fwnode = dev_fwnode(priv->device);
1240 
1241 	phylink = phylink_create(&priv->phylink_config, fwnode,
1242 				 mode, &stmmac_phylink_mac_ops);
1243 	if (IS_ERR(phylink))
1244 		return PTR_ERR(phylink);
1245 
1246 	priv->phylink = phylink;
1247 	return 0;
1248 }
1249 
1250 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1251 				    struct stmmac_dma_conf *dma_conf)
1252 {
1253 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1254 	unsigned int desc_size;
1255 	void *head_rx;
1256 	u32 queue;
1257 
1258 	/* Display RX rings */
1259 	for (queue = 0; queue < rx_cnt; queue++) {
1260 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1261 
1262 		pr_info("\tRX Queue %u rings\n", queue);
1263 
1264 		if (priv->extend_desc) {
1265 			head_rx = (void *)rx_q->dma_erx;
1266 			desc_size = sizeof(struct dma_extended_desc);
1267 		} else {
1268 			head_rx = (void *)rx_q->dma_rx;
1269 			desc_size = sizeof(struct dma_desc);
1270 		}
1271 
1272 		/* Display RX ring */
1273 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1274 				    rx_q->dma_rx_phy, desc_size);
1275 	}
1276 }
1277 
1278 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1279 				    struct stmmac_dma_conf *dma_conf)
1280 {
1281 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1282 	unsigned int desc_size;
1283 	void *head_tx;
1284 	u32 queue;
1285 
1286 	/* Display TX rings */
1287 	for (queue = 0; queue < tx_cnt; queue++) {
1288 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1289 
1290 		pr_info("\tTX Queue %d rings\n", queue);
1291 
1292 		if (priv->extend_desc) {
1293 			head_tx = (void *)tx_q->dma_etx;
1294 			desc_size = sizeof(struct dma_extended_desc);
1295 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1296 			head_tx = (void *)tx_q->dma_entx;
1297 			desc_size = sizeof(struct dma_edesc);
1298 		} else {
1299 			head_tx = (void *)tx_q->dma_tx;
1300 			desc_size = sizeof(struct dma_desc);
1301 		}
1302 
1303 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1304 				    tx_q->dma_tx_phy, desc_size);
1305 	}
1306 }
1307 
1308 static void stmmac_display_rings(struct stmmac_priv *priv,
1309 				 struct stmmac_dma_conf *dma_conf)
1310 {
1311 	/* Display RX ring */
1312 	stmmac_display_rx_rings(priv, dma_conf);
1313 
1314 	/* Display TX ring */
1315 	stmmac_display_tx_rings(priv, dma_conf);
1316 }
1317 
1318 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1319 {
1320 	if (stmmac_xdp_is_enabled(priv))
1321 		return XDP_PACKET_HEADROOM;
1322 
1323 	return 0;
1324 }
1325 
1326 static int stmmac_set_bfsize(int mtu, int bufsize)
1327 {
1328 	int ret = bufsize;
1329 
1330 	if (mtu >= BUF_SIZE_8KiB)
1331 		ret = BUF_SIZE_16KiB;
1332 	else if (mtu >= BUF_SIZE_4KiB)
1333 		ret = BUF_SIZE_8KiB;
1334 	else if (mtu >= BUF_SIZE_2KiB)
1335 		ret = BUF_SIZE_4KiB;
1336 	else if (mtu > DEFAULT_BUFSIZE)
1337 		ret = BUF_SIZE_2KiB;
1338 	else
1339 		ret = DEFAULT_BUFSIZE;
1340 
1341 	return ret;
1342 }
1343 
1344 /**
1345  * stmmac_clear_rx_descriptors - clear RX descriptors
1346  * @priv: driver private structure
1347  * @dma_conf: structure to take the dma data
1348  * @queue: RX queue index
1349  * Description: this function is called to clear the RX descriptors
1350  * in case of both basic and extended descriptors are used.
1351  */
1352 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1353 					struct stmmac_dma_conf *dma_conf,
1354 					u32 queue)
1355 {
1356 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1357 	int i;
1358 
1359 	/* Clear the RX descriptors */
1360 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1361 		if (priv->extend_desc)
1362 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1363 					priv->use_riwt, priv->mode,
1364 					(i == dma_conf->dma_rx_size - 1),
1365 					dma_conf->dma_buf_sz);
1366 		else
1367 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1368 					priv->use_riwt, priv->mode,
1369 					(i == dma_conf->dma_rx_size - 1),
1370 					dma_conf->dma_buf_sz);
1371 }
1372 
1373 /**
1374  * stmmac_clear_tx_descriptors - clear tx descriptors
1375  * @priv: driver private structure
1376  * @dma_conf: structure to take the dma data
1377  * @queue: TX queue index.
1378  * Description: this function is called to clear the TX descriptors
1379  * in case of both basic and extended descriptors are used.
1380  */
1381 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1382 					struct stmmac_dma_conf *dma_conf,
1383 					u32 queue)
1384 {
1385 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1386 	int i;
1387 
1388 	/* Clear the TX descriptors */
1389 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1390 		int last = (i == (dma_conf->dma_tx_size - 1));
1391 		struct dma_desc *p;
1392 
1393 		if (priv->extend_desc)
1394 			p = &tx_q->dma_etx[i].basic;
1395 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1396 			p = &tx_q->dma_entx[i].basic;
1397 		else
1398 			p = &tx_q->dma_tx[i];
1399 
1400 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1401 	}
1402 }
1403 
1404 /**
1405  * stmmac_clear_descriptors - clear descriptors
1406  * @priv: driver private structure
1407  * @dma_conf: structure to take the dma data
1408  * Description: this function is called to clear the TX and RX descriptors
1409  * in case of both basic and extended descriptors are used.
1410  */
1411 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1412 				     struct stmmac_dma_conf *dma_conf)
1413 {
1414 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1415 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1416 	u32 queue;
1417 
1418 	/* Clear the RX descriptors */
1419 	for (queue = 0; queue < rx_queue_cnt; queue++)
1420 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1421 
1422 	/* Clear the TX descriptors */
1423 	for (queue = 0; queue < tx_queue_cnt; queue++)
1424 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1425 }
1426 
1427 /**
1428  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1429  * @priv: driver private structure
1430  * @dma_conf: structure to take the dma data
1431  * @p: descriptor pointer
1432  * @i: descriptor index
1433  * @flags: gfp flag
1434  * @queue: RX queue index
1435  * Description: this function is called to allocate a receive buffer, perform
1436  * the DMA mapping and init the descriptor.
1437  */
1438 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1439 				  struct stmmac_dma_conf *dma_conf,
1440 				  struct dma_desc *p,
1441 				  int i, gfp_t flags, u32 queue)
1442 {
1443 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1444 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1445 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1446 
1447 	if (priv->dma_cap.host_dma_width <= 32)
1448 		gfp |= GFP_DMA32;
1449 
1450 	if (!buf->page) {
1451 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1452 		if (!buf->page)
1453 			return -ENOMEM;
1454 		buf->page_offset = stmmac_rx_offset(priv);
1455 	}
1456 
1457 	if (priv->sph && !buf->sec_page) {
1458 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1459 		if (!buf->sec_page)
1460 			return -ENOMEM;
1461 
1462 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1463 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1464 	} else {
1465 		buf->sec_page = NULL;
1466 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1467 	}
1468 
1469 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1470 
1471 	stmmac_set_desc_addr(priv, p, buf->addr);
1472 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1473 		stmmac_init_desc3(priv, p);
1474 
1475 	return 0;
1476 }
1477 
1478 /**
1479  * stmmac_free_rx_buffer - free RX dma buffers
1480  * @priv: private structure
1481  * @rx_q: RX queue
1482  * @i: buffer index.
1483  */
1484 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1485 				  struct stmmac_rx_queue *rx_q,
1486 				  int i)
1487 {
1488 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1489 
1490 	if (buf->page)
1491 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1492 	buf->page = NULL;
1493 
1494 	if (buf->sec_page)
1495 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1496 	buf->sec_page = NULL;
1497 }
1498 
1499 /**
1500  * stmmac_free_tx_buffer - free RX dma buffers
1501  * @priv: private structure
1502  * @dma_conf: structure to take the dma data
1503  * @queue: RX queue index
1504  * @i: buffer index.
1505  */
1506 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1507 				  struct stmmac_dma_conf *dma_conf,
1508 				  u32 queue, int i)
1509 {
1510 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1511 
1512 	if (tx_q->tx_skbuff_dma[i].buf &&
1513 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1514 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1515 			dma_unmap_page(priv->device,
1516 				       tx_q->tx_skbuff_dma[i].buf,
1517 				       tx_q->tx_skbuff_dma[i].len,
1518 				       DMA_TO_DEVICE);
1519 		else
1520 			dma_unmap_single(priv->device,
1521 					 tx_q->tx_skbuff_dma[i].buf,
1522 					 tx_q->tx_skbuff_dma[i].len,
1523 					 DMA_TO_DEVICE);
1524 	}
1525 
1526 	if (tx_q->xdpf[i] &&
1527 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1528 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1529 		xdp_return_frame(tx_q->xdpf[i]);
1530 		tx_q->xdpf[i] = NULL;
1531 	}
1532 
1533 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1534 		tx_q->xsk_frames_done++;
1535 
1536 	if (tx_q->tx_skbuff[i] &&
1537 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1538 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1539 		tx_q->tx_skbuff[i] = NULL;
1540 	}
1541 
1542 	tx_q->tx_skbuff_dma[i].buf = 0;
1543 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1544 }
1545 
1546 /**
1547  * dma_free_rx_skbufs - free RX dma buffers
1548  * @priv: private structure
1549  * @dma_conf: structure to take the dma data
1550  * @queue: RX queue index
1551  */
1552 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1553 			       struct stmmac_dma_conf *dma_conf,
1554 			       u32 queue)
1555 {
1556 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1557 	int i;
1558 
1559 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1560 		stmmac_free_rx_buffer(priv, rx_q, i);
1561 }
1562 
1563 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1564 				   struct stmmac_dma_conf *dma_conf,
1565 				   u32 queue, gfp_t flags)
1566 {
1567 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1568 	int i;
1569 
1570 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1571 		struct dma_desc *p;
1572 		int ret;
1573 
1574 		if (priv->extend_desc)
1575 			p = &((rx_q->dma_erx + i)->basic);
1576 		else
1577 			p = rx_q->dma_rx + i;
1578 
1579 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1580 					     queue);
1581 		if (ret)
1582 			return ret;
1583 
1584 		rx_q->buf_alloc_num++;
1585 	}
1586 
1587 	return 0;
1588 }
1589 
1590 /**
1591  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1592  * @priv: private structure
1593  * @dma_conf: structure to take the dma data
1594  * @queue: RX queue index
1595  */
1596 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1597 				struct stmmac_dma_conf *dma_conf,
1598 				u32 queue)
1599 {
1600 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1601 	int i;
1602 
1603 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1604 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1605 
1606 		if (!buf->xdp)
1607 			continue;
1608 
1609 		xsk_buff_free(buf->xdp);
1610 		buf->xdp = NULL;
1611 	}
1612 }
1613 
1614 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1615 				      struct stmmac_dma_conf *dma_conf,
1616 				      u32 queue)
1617 {
1618 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1619 	int i;
1620 
1621 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1622 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1623 	 * use this macro to make sure no size violations.
1624 	 */
1625 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1626 
1627 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1628 		struct stmmac_rx_buffer *buf;
1629 		dma_addr_t dma_addr;
1630 		struct dma_desc *p;
1631 
1632 		if (priv->extend_desc)
1633 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1634 		else
1635 			p = rx_q->dma_rx + i;
1636 
1637 		buf = &rx_q->buf_pool[i];
1638 
1639 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1640 		if (!buf->xdp)
1641 			return -ENOMEM;
1642 
1643 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1644 		stmmac_set_desc_addr(priv, p, dma_addr);
1645 		rx_q->buf_alloc_num++;
1646 	}
1647 
1648 	return 0;
1649 }
1650 
1651 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1652 {
1653 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1654 		return NULL;
1655 
1656 	return xsk_get_pool_from_qid(priv->dev, queue);
1657 }
1658 
1659 /**
1660  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1661  * @priv: driver private structure
1662  * @dma_conf: structure to take the dma data
1663  * @queue: RX queue index
1664  * @flags: gfp flag.
1665  * Description: this function initializes the DMA RX descriptors
1666  * and allocates the socket buffers. It supports the chained and ring
1667  * modes.
1668  */
1669 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1670 				    struct stmmac_dma_conf *dma_conf,
1671 				    u32 queue, gfp_t flags)
1672 {
1673 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1674 	int ret;
1675 
1676 	netif_dbg(priv, probe, priv->dev,
1677 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1678 		  (u32)rx_q->dma_rx_phy);
1679 
1680 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1681 
1682 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1683 
1684 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1685 
1686 	if (rx_q->xsk_pool) {
1687 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1688 						   MEM_TYPE_XSK_BUFF_POOL,
1689 						   NULL));
1690 		netdev_info(priv->dev,
1691 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1692 			    rx_q->queue_index);
1693 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1694 	} else {
1695 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1696 						   MEM_TYPE_PAGE_POOL,
1697 						   rx_q->page_pool));
1698 		netdev_info(priv->dev,
1699 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1700 			    rx_q->queue_index);
1701 	}
1702 
1703 	if (rx_q->xsk_pool) {
1704 		/* RX XDP ZC buffer pool may not be populated, e.g.
1705 		 * xdpsock TX-only.
1706 		 */
1707 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1708 	} else {
1709 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1710 		if (ret < 0)
1711 			return -ENOMEM;
1712 	}
1713 
1714 	/* Setup the chained descriptor addresses */
1715 	if (priv->mode == STMMAC_CHAIN_MODE) {
1716 		if (priv->extend_desc)
1717 			stmmac_mode_init(priv, rx_q->dma_erx,
1718 					 rx_q->dma_rx_phy,
1719 					 dma_conf->dma_rx_size, 1);
1720 		else
1721 			stmmac_mode_init(priv, rx_q->dma_rx,
1722 					 rx_q->dma_rx_phy,
1723 					 dma_conf->dma_rx_size, 0);
1724 	}
1725 
1726 	return 0;
1727 }
1728 
1729 static int init_dma_rx_desc_rings(struct net_device *dev,
1730 				  struct stmmac_dma_conf *dma_conf,
1731 				  gfp_t flags)
1732 {
1733 	struct stmmac_priv *priv = netdev_priv(dev);
1734 	u32 rx_count = priv->plat->rx_queues_to_use;
1735 	int queue;
1736 	int ret;
1737 
1738 	/* RX INITIALIZATION */
1739 	netif_dbg(priv, probe, priv->dev,
1740 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1741 
1742 	for (queue = 0; queue < rx_count; queue++) {
1743 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1744 		if (ret)
1745 			goto err_init_rx_buffers;
1746 	}
1747 
1748 	return 0;
1749 
1750 err_init_rx_buffers:
1751 	while (queue >= 0) {
1752 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1753 
1754 		if (rx_q->xsk_pool)
1755 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1756 		else
1757 			dma_free_rx_skbufs(priv, dma_conf, queue);
1758 
1759 		rx_q->buf_alloc_num = 0;
1760 		rx_q->xsk_pool = NULL;
1761 
1762 		queue--;
1763 	}
1764 
1765 	return ret;
1766 }
1767 
1768 /**
1769  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1770  * @priv: driver private structure
1771  * @dma_conf: structure to take the dma data
1772  * @queue: TX queue index
1773  * Description: this function initializes the DMA TX descriptors
1774  * and allocates the socket buffers. It supports the chained and ring
1775  * modes.
1776  */
1777 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1778 				    struct stmmac_dma_conf *dma_conf,
1779 				    u32 queue)
1780 {
1781 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1782 	int i;
1783 
1784 	netif_dbg(priv, probe, priv->dev,
1785 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1786 		  (u32)tx_q->dma_tx_phy);
1787 
1788 	/* Setup the chained descriptor addresses */
1789 	if (priv->mode == STMMAC_CHAIN_MODE) {
1790 		if (priv->extend_desc)
1791 			stmmac_mode_init(priv, tx_q->dma_etx,
1792 					 tx_q->dma_tx_phy,
1793 					 dma_conf->dma_tx_size, 1);
1794 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1795 			stmmac_mode_init(priv, tx_q->dma_tx,
1796 					 tx_q->dma_tx_phy,
1797 					 dma_conf->dma_tx_size, 0);
1798 	}
1799 
1800 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1801 
1802 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1803 		struct dma_desc *p;
1804 
1805 		if (priv->extend_desc)
1806 			p = &((tx_q->dma_etx + i)->basic);
1807 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1808 			p = &((tx_q->dma_entx + i)->basic);
1809 		else
1810 			p = tx_q->dma_tx + i;
1811 
1812 		stmmac_clear_desc(priv, p);
1813 
1814 		tx_q->tx_skbuff_dma[i].buf = 0;
1815 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1816 		tx_q->tx_skbuff_dma[i].len = 0;
1817 		tx_q->tx_skbuff_dma[i].last_segment = false;
1818 		tx_q->tx_skbuff[i] = NULL;
1819 	}
1820 
1821 	return 0;
1822 }
1823 
1824 static int init_dma_tx_desc_rings(struct net_device *dev,
1825 				  struct stmmac_dma_conf *dma_conf)
1826 {
1827 	struct stmmac_priv *priv = netdev_priv(dev);
1828 	u32 tx_queue_cnt;
1829 	u32 queue;
1830 
1831 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1832 
1833 	for (queue = 0; queue < tx_queue_cnt; queue++)
1834 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1835 
1836 	return 0;
1837 }
1838 
1839 /**
1840  * init_dma_desc_rings - init the RX/TX descriptor rings
1841  * @dev: net device structure
1842  * @dma_conf: structure to take the dma data
1843  * @flags: gfp flag.
1844  * Description: this function initializes the DMA RX/TX descriptors
1845  * and allocates the socket buffers. It supports the chained and ring
1846  * modes.
1847  */
1848 static int init_dma_desc_rings(struct net_device *dev,
1849 			       struct stmmac_dma_conf *dma_conf,
1850 			       gfp_t flags)
1851 {
1852 	struct stmmac_priv *priv = netdev_priv(dev);
1853 	int ret;
1854 
1855 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1856 	if (ret)
1857 		return ret;
1858 
1859 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1860 
1861 	stmmac_clear_descriptors(priv, dma_conf);
1862 
1863 	if (netif_msg_hw(priv))
1864 		stmmac_display_rings(priv, dma_conf);
1865 
1866 	return ret;
1867 }
1868 
1869 /**
1870  * dma_free_tx_skbufs - free TX dma buffers
1871  * @priv: private structure
1872  * @dma_conf: structure to take the dma data
1873  * @queue: TX queue index
1874  */
1875 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1876 			       struct stmmac_dma_conf *dma_conf,
1877 			       u32 queue)
1878 {
1879 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1880 	int i;
1881 
1882 	tx_q->xsk_frames_done = 0;
1883 
1884 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1885 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1886 
1887 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1888 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1889 		tx_q->xsk_frames_done = 0;
1890 		tx_q->xsk_pool = NULL;
1891 	}
1892 }
1893 
1894 /**
1895  * stmmac_free_tx_skbufs - free TX skb buffers
1896  * @priv: private structure
1897  */
1898 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1899 {
1900 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1901 	u32 queue;
1902 
1903 	for (queue = 0; queue < tx_queue_cnt; queue++)
1904 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1905 }
1906 
1907 /**
1908  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1909  * @priv: private structure
1910  * @dma_conf: structure to take the dma data
1911  * @queue: RX queue index
1912  */
1913 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1914 					 struct stmmac_dma_conf *dma_conf,
1915 					 u32 queue)
1916 {
1917 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1918 
1919 	/* Release the DMA RX socket buffers */
1920 	if (rx_q->xsk_pool)
1921 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1922 	else
1923 		dma_free_rx_skbufs(priv, dma_conf, queue);
1924 
1925 	rx_q->buf_alloc_num = 0;
1926 	rx_q->xsk_pool = NULL;
1927 
1928 	/* Free DMA regions of consistent memory previously allocated */
1929 	if (!priv->extend_desc)
1930 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1931 				  sizeof(struct dma_desc),
1932 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1933 	else
1934 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1935 				  sizeof(struct dma_extended_desc),
1936 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1937 
1938 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1939 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1940 
1941 	kfree(rx_q->buf_pool);
1942 	if (rx_q->page_pool)
1943 		page_pool_destroy(rx_q->page_pool);
1944 }
1945 
1946 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1947 				       struct stmmac_dma_conf *dma_conf)
1948 {
1949 	u32 rx_count = priv->plat->rx_queues_to_use;
1950 	u32 queue;
1951 
1952 	/* Free RX queue resources */
1953 	for (queue = 0; queue < rx_count; queue++)
1954 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1955 }
1956 
1957 /**
1958  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1959  * @priv: private structure
1960  * @dma_conf: structure to take the dma data
1961  * @queue: TX queue index
1962  */
1963 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1964 					 struct stmmac_dma_conf *dma_conf,
1965 					 u32 queue)
1966 {
1967 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1968 	size_t size;
1969 	void *addr;
1970 
1971 	/* Release the DMA TX socket buffers */
1972 	dma_free_tx_skbufs(priv, dma_conf, queue);
1973 
1974 	if (priv->extend_desc) {
1975 		size = sizeof(struct dma_extended_desc);
1976 		addr = tx_q->dma_etx;
1977 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1978 		size = sizeof(struct dma_edesc);
1979 		addr = tx_q->dma_entx;
1980 	} else {
1981 		size = sizeof(struct dma_desc);
1982 		addr = tx_q->dma_tx;
1983 	}
1984 
1985 	size *= dma_conf->dma_tx_size;
1986 
1987 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1988 
1989 	kfree(tx_q->tx_skbuff_dma);
1990 	kfree(tx_q->tx_skbuff);
1991 }
1992 
1993 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1994 				       struct stmmac_dma_conf *dma_conf)
1995 {
1996 	u32 tx_count = priv->plat->tx_queues_to_use;
1997 	u32 queue;
1998 
1999 	/* Free TX queue resources */
2000 	for (queue = 0; queue < tx_count; queue++)
2001 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2002 }
2003 
2004 /**
2005  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2006  * @priv: private structure
2007  * @dma_conf: structure to take the dma data
2008  * @queue: RX queue index
2009  * Description: according to which descriptor can be used (extend or basic)
2010  * this function allocates the resources for TX and RX paths. In case of
2011  * reception, for example, it pre-allocated the RX socket buffer in order to
2012  * allow zero-copy mechanism.
2013  */
2014 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2015 					 struct stmmac_dma_conf *dma_conf,
2016 					 u32 queue)
2017 {
2018 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2019 	struct stmmac_channel *ch = &priv->channel[queue];
2020 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2021 	struct page_pool_params pp_params = { 0 };
2022 	unsigned int num_pages;
2023 	unsigned int napi_id;
2024 	int ret;
2025 
2026 	rx_q->queue_index = queue;
2027 	rx_q->priv_data = priv;
2028 
2029 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2030 	pp_params.pool_size = dma_conf->dma_rx_size;
2031 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2032 	pp_params.order = ilog2(num_pages);
2033 	pp_params.nid = dev_to_node(priv->device);
2034 	pp_params.dev = priv->device;
2035 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2036 	pp_params.offset = stmmac_rx_offset(priv);
2037 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2038 
2039 	rx_q->page_pool = page_pool_create(&pp_params);
2040 	if (IS_ERR(rx_q->page_pool)) {
2041 		ret = PTR_ERR(rx_q->page_pool);
2042 		rx_q->page_pool = NULL;
2043 		return ret;
2044 	}
2045 
2046 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2047 				 sizeof(*rx_q->buf_pool),
2048 				 GFP_KERNEL);
2049 	if (!rx_q->buf_pool)
2050 		return -ENOMEM;
2051 
2052 	if (priv->extend_desc) {
2053 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2054 						   dma_conf->dma_rx_size *
2055 						   sizeof(struct dma_extended_desc),
2056 						   &rx_q->dma_rx_phy,
2057 						   GFP_KERNEL);
2058 		if (!rx_q->dma_erx)
2059 			return -ENOMEM;
2060 
2061 	} else {
2062 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2063 						  dma_conf->dma_rx_size *
2064 						  sizeof(struct dma_desc),
2065 						  &rx_q->dma_rx_phy,
2066 						  GFP_KERNEL);
2067 		if (!rx_q->dma_rx)
2068 			return -ENOMEM;
2069 	}
2070 
2071 	if (stmmac_xdp_is_enabled(priv) &&
2072 	    test_bit(queue, priv->af_xdp_zc_qps))
2073 		napi_id = ch->rxtx_napi.napi_id;
2074 	else
2075 		napi_id = ch->rx_napi.napi_id;
2076 
2077 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2078 			       rx_q->queue_index,
2079 			       napi_id);
2080 	if (ret) {
2081 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2082 		return -EINVAL;
2083 	}
2084 
2085 	return 0;
2086 }
2087 
2088 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2089 				       struct stmmac_dma_conf *dma_conf)
2090 {
2091 	u32 rx_count = priv->plat->rx_queues_to_use;
2092 	u32 queue;
2093 	int ret;
2094 
2095 	/* RX queues buffers and DMA */
2096 	for (queue = 0; queue < rx_count; queue++) {
2097 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2098 		if (ret)
2099 			goto err_dma;
2100 	}
2101 
2102 	return 0;
2103 
2104 err_dma:
2105 	free_dma_rx_desc_resources(priv, dma_conf);
2106 
2107 	return ret;
2108 }
2109 
2110 /**
2111  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2112  * @priv: private structure
2113  * @dma_conf: structure to take the dma data
2114  * @queue: TX queue index
2115  * Description: according to which descriptor can be used (extend or basic)
2116  * this function allocates the resources for TX and RX paths. In case of
2117  * reception, for example, it pre-allocated the RX socket buffer in order to
2118  * allow zero-copy mechanism.
2119  */
2120 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2121 					 struct stmmac_dma_conf *dma_conf,
2122 					 u32 queue)
2123 {
2124 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2125 	size_t size;
2126 	void *addr;
2127 
2128 	tx_q->queue_index = queue;
2129 	tx_q->priv_data = priv;
2130 
2131 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2132 				      sizeof(*tx_q->tx_skbuff_dma),
2133 				      GFP_KERNEL);
2134 	if (!tx_q->tx_skbuff_dma)
2135 		return -ENOMEM;
2136 
2137 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2138 				  sizeof(struct sk_buff *),
2139 				  GFP_KERNEL);
2140 	if (!tx_q->tx_skbuff)
2141 		return -ENOMEM;
2142 
2143 	if (priv->extend_desc)
2144 		size = sizeof(struct dma_extended_desc);
2145 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2146 		size = sizeof(struct dma_edesc);
2147 	else
2148 		size = sizeof(struct dma_desc);
2149 
2150 	size *= dma_conf->dma_tx_size;
2151 
2152 	addr = dma_alloc_coherent(priv->device, size,
2153 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2154 	if (!addr)
2155 		return -ENOMEM;
2156 
2157 	if (priv->extend_desc)
2158 		tx_q->dma_etx = addr;
2159 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2160 		tx_q->dma_entx = addr;
2161 	else
2162 		tx_q->dma_tx = addr;
2163 
2164 	return 0;
2165 }
2166 
2167 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2168 				       struct stmmac_dma_conf *dma_conf)
2169 {
2170 	u32 tx_count = priv->plat->tx_queues_to_use;
2171 	u32 queue;
2172 	int ret;
2173 
2174 	/* TX queues buffers and DMA */
2175 	for (queue = 0; queue < tx_count; queue++) {
2176 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2177 		if (ret)
2178 			goto err_dma;
2179 	}
2180 
2181 	return 0;
2182 
2183 err_dma:
2184 	free_dma_tx_desc_resources(priv, dma_conf);
2185 	return ret;
2186 }
2187 
2188 /**
2189  * alloc_dma_desc_resources - alloc TX/RX resources.
2190  * @priv: private structure
2191  * @dma_conf: structure to take the dma data
2192  * Description: according to which descriptor can be used (extend or basic)
2193  * this function allocates the resources for TX and RX paths. In case of
2194  * reception, for example, it pre-allocated the RX socket buffer in order to
2195  * allow zero-copy mechanism.
2196  */
2197 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2198 				    struct stmmac_dma_conf *dma_conf)
2199 {
2200 	/* RX Allocation */
2201 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2202 
2203 	if (ret)
2204 		return ret;
2205 
2206 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2207 
2208 	return ret;
2209 }
2210 
2211 /**
2212  * free_dma_desc_resources - free dma desc resources
2213  * @priv: private structure
2214  * @dma_conf: structure to take the dma data
2215  */
2216 static void free_dma_desc_resources(struct stmmac_priv *priv,
2217 				    struct stmmac_dma_conf *dma_conf)
2218 {
2219 	/* Release the DMA TX socket buffers */
2220 	free_dma_tx_desc_resources(priv, dma_conf);
2221 
2222 	/* Release the DMA RX socket buffers later
2223 	 * to ensure all pending XDP_TX buffers are returned.
2224 	 */
2225 	free_dma_rx_desc_resources(priv, dma_conf);
2226 }
2227 
2228 /**
2229  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2230  *  @priv: driver private structure
2231  *  Description: It is used for enabling the rx queues in the MAC
2232  */
2233 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2234 {
2235 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2236 	int queue;
2237 	u8 mode;
2238 
2239 	for (queue = 0; queue < rx_queues_count; queue++) {
2240 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2241 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2242 	}
2243 }
2244 
2245 /**
2246  * stmmac_start_rx_dma - start RX DMA channel
2247  * @priv: driver private structure
2248  * @chan: RX channel index
2249  * Description:
2250  * This starts a RX DMA channel
2251  */
2252 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2253 {
2254 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2255 	stmmac_start_rx(priv, priv->ioaddr, chan);
2256 }
2257 
2258 /**
2259  * stmmac_start_tx_dma - start TX DMA channel
2260  * @priv: driver private structure
2261  * @chan: TX channel index
2262  * Description:
2263  * This starts a TX DMA channel
2264  */
2265 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2266 {
2267 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2268 	stmmac_start_tx(priv, priv->ioaddr, chan);
2269 }
2270 
2271 /**
2272  * stmmac_stop_rx_dma - stop RX DMA channel
2273  * @priv: driver private structure
2274  * @chan: RX channel index
2275  * Description:
2276  * This stops a RX DMA channel
2277  */
2278 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2279 {
2280 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2281 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2282 }
2283 
2284 /**
2285  * stmmac_stop_tx_dma - stop TX DMA channel
2286  * @priv: driver private structure
2287  * @chan: TX channel index
2288  * Description:
2289  * This stops a TX DMA channel
2290  */
2291 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2292 {
2293 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2294 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2295 }
2296 
2297 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2298 {
2299 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2300 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2301 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2302 	u32 chan;
2303 
2304 	for (chan = 0; chan < dma_csr_ch; chan++) {
2305 		struct stmmac_channel *ch = &priv->channel[chan];
2306 		unsigned long flags;
2307 
2308 		spin_lock_irqsave(&ch->lock, flags);
2309 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2310 		spin_unlock_irqrestore(&ch->lock, flags);
2311 	}
2312 }
2313 
2314 /**
2315  * stmmac_start_all_dma - start all RX and TX DMA channels
2316  * @priv: driver private structure
2317  * Description:
2318  * This starts all the RX and TX DMA channels
2319  */
2320 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2321 {
2322 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2323 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2324 	u32 chan = 0;
2325 
2326 	for (chan = 0; chan < rx_channels_count; chan++)
2327 		stmmac_start_rx_dma(priv, chan);
2328 
2329 	for (chan = 0; chan < tx_channels_count; chan++)
2330 		stmmac_start_tx_dma(priv, chan);
2331 }
2332 
2333 /**
2334  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2335  * @priv: driver private structure
2336  * Description:
2337  * This stops the RX and TX DMA channels
2338  */
2339 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2340 {
2341 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2342 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2343 	u32 chan = 0;
2344 
2345 	for (chan = 0; chan < rx_channels_count; chan++)
2346 		stmmac_stop_rx_dma(priv, chan);
2347 
2348 	for (chan = 0; chan < tx_channels_count; chan++)
2349 		stmmac_stop_tx_dma(priv, chan);
2350 }
2351 
2352 /**
2353  *  stmmac_dma_operation_mode - HW DMA operation mode
2354  *  @priv: driver private structure
2355  *  Description: it is used for configuring the DMA operation mode register in
2356  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2357  */
2358 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2359 {
2360 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2361 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2362 	int rxfifosz = priv->plat->rx_fifo_size;
2363 	int txfifosz = priv->plat->tx_fifo_size;
2364 	u32 txmode = 0;
2365 	u32 rxmode = 0;
2366 	u32 chan = 0;
2367 	u8 qmode = 0;
2368 
2369 	if (rxfifosz == 0)
2370 		rxfifosz = priv->dma_cap.rx_fifo_size;
2371 	if (txfifosz == 0)
2372 		txfifosz = priv->dma_cap.tx_fifo_size;
2373 
2374 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2375 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2376 		rxfifosz /= rx_channels_count;
2377 		txfifosz /= tx_channels_count;
2378 	}
2379 
2380 	if (priv->plat->force_thresh_dma_mode) {
2381 		txmode = tc;
2382 		rxmode = tc;
2383 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2384 		/*
2385 		 * In case of GMAC, SF mode can be enabled
2386 		 * to perform the TX COE in HW. This depends on:
2387 		 * 1) TX COE if actually supported
2388 		 * 2) There is no bugged Jumbo frame support
2389 		 *    that needs to not insert csum in the TDES.
2390 		 */
2391 		txmode = SF_DMA_MODE;
2392 		rxmode = SF_DMA_MODE;
2393 		priv->xstats.threshold = SF_DMA_MODE;
2394 	} else {
2395 		txmode = tc;
2396 		rxmode = SF_DMA_MODE;
2397 	}
2398 
2399 	/* configure all channels */
2400 	for (chan = 0; chan < rx_channels_count; chan++) {
2401 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2402 		u32 buf_size;
2403 
2404 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2405 
2406 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2407 				rxfifosz, qmode);
2408 
2409 		if (rx_q->xsk_pool) {
2410 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2411 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2412 					      buf_size,
2413 					      chan);
2414 		} else {
2415 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2416 					      priv->dma_conf.dma_buf_sz,
2417 					      chan);
2418 		}
2419 	}
2420 
2421 	for (chan = 0; chan < tx_channels_count; chan++) {
2422 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2423 
2424 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2425 				txfifosz, qmode);
2426 	}
2427 }
2428 
2429 static void stmmac_xsk_request_timestamp(void *_priv)
2430 {
2431 	struct stmmac_metadata_request *meta_req = _priv;
2432 
2433 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2434 	*meta_req->set_ic = true;
2435 }
2436 
2437 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2438 {
2439 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2440 	struct stmmac_priv *priv = tx_compl->priv;
2441 	struct dma_desc *desc = tx_compl->desc;
2442 	bool found = false;
2443 	u64 ns = 0;
2444 
2445 	if (!priv->hwts_tx_en)
2446 		return 0;
2447 
2448 	/* check tx tstamp status */
2449 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2450 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2451 		found = true;
2452 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2453 		found = true;
2454 	}
2455 
2456 	if (found) {
2457 		ns -= priv->plat->cdc_error_adj;
2458 		return ns_to_ktime(ns);
2459 	}
2460 
2461 	return 0;
2462 }
2463 
2464 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2465 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2466 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2467 };
2468 
2469 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2470 {
2471 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2472 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2473 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2474 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2475 	unsigned int entry = tx_q->cur_tx;
2476 	struct dma_desc *tx_desc = NULL;
2477 	struct xdp_desc xdp_desc;
2478 	bool work_done = true;
2479 	u32 tx_set_ic_bit = 0;
2480 
2481 	/* Avoids TX time-out as we are sharing with slow path */
2482 	txq_trans_cond_update(nq);
2483 
2484 	budget = min(budget, stmmac_tx_avail(priv, queue));
2485 
2486 	while (budget-- > 0) {
2487 		struct stmmac_metadata_request meta_req;
2488 		struct xsk_tx_metadata *meta = NULL;
2489 		dma_addr_t dma_addr;
2490 		bool set_ic;
2491 
2492 		/* We are sharing with slow path and stop XSK TX desc submission when
2493 		 * available TX ring is less than threshold.
2494 		 */
2495 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2496 		    !netif_carrier_ok(priv->dev)) {
2497 			work_done = false;
2498 			break;
2499 		}
2500 
2501 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2502 			break;
2503 
2504 		if (priv->est && priv->est->enable &&
2505 		    priv->est->max_sdu[queue] &&
2506 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2507 			priv->xstats.max_sdu_txq_drop[queue]++;
2508 			continue;
2509 		}
2510 
2511 		if (likely(priv->extend_desc))
2512 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2513 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2514 			tx_desc = &tx_q->dma_entx[entry].basic;
2515 		else
2516 			tx_desc = tx_q->dma_tx + entry;
2517 
2518 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2519 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2520 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2521 
2522 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2523 
2524 		/* To return XDP buffer to XSK pool, we simple call
2525 		 * xsk_tx_completed(), so we don't need to fill up
2526 		 * 'buf' and 'xdpf'.
2527 		 */
2528 		tx_q->tx_skbuff_dma[entry].buf = 0;
2529 		tx_q->xdpf[entry] = NULL;
2530 
2531 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2532 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2533 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2534 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2535 
2536 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2537 
2538 		tx_q->tx_count_frames++;
2539 
2540 		if (!priv->tx_coal_frames[queue])
2541 			set_ic = false;
2542 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2543 			set_ic = true;
2544 		else
2545 			set_ic = false;
2546 
2547 		meta_req.priv = priv;
2548 		meta_req.tx_desc = tx_desc;
2549 		meta_req.set_ic = &set_ic;
2550 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2551 					&meta_req);
2552 		if (set_ic) {
2553 			tx_q->tx_count_frames = 0;
2554 			stmmac_set_tx_ic(priv, tx_desc);
2555 			tx_set_ic_bit++;
2556 		}
2557 
2558 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2559 				       true, priv->mode, true, true,
2560 				       xdp_desc.len);
2561 
2562 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2563 
2564 		xsk_tx_metadata_to_compl(meta,
2565 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2566 
2567 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2568 		entry = tx_q->cur_tx;
2569 	}
2570 	u64_stats_update_begin(&txq_stats->napi_syncp);
2571 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2572 	u64_stats_update_end(&txq_stats->napi_syncp);
2573 
2574 	if (tx_desc) {
2575 		stmmac_flush_tx_descriptors(priv, queue);
2576 		xsk_tx_release(pool);
2577 	}
2578 
2579 	/* Return true if all of the 3 conditions are met
2580 	 *  a) TX Budget is still available
2581 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2582 	 *     pending XSK TX for transmission)
2583 	 */
2584 	return !!budget && work_done;
2585 }
2586 
2587 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2588 {
2589 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2590 		tc += 64;
2591 
2592 		if (priv->plat->force_thresh_dma_mode)
2593 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2594 		else
2595 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2596 						      chan);
2597 
2598 		priv->xstats.threshold = tc;
2599 	}
2600 }
2601 
2602 /**
2603  * stmmac_tx_clean - to manage the transmission completion
2604  * @priv: driver private structure
2605  * @budget: napi budget limiting this functions packet handling
2606  * @queue: TX queue index
2607  * @pending_packets: signal to arm the TX coal timer
2608  * Description: it reclaims the transmit resources after transmission completes.
2609  * If some packets still needs to be handled, due to TX coalesce, set
2610  * pending_packets to true to make NAPI arm the TX coal timer.
2611  */
2612 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2613 			   bool *pending_packets)
2614 {
2615 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2616 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2617 	unsigned int bytes_compl = 0, pkts_compl = 0;
2618 	unsigned int entry, xmits = 0, count = 0;
2619 	u32 tx_packets = 0, tx_errors = 0;
2620 
2621 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2622 
2623 	tx_q->xsk_frames_done = 0;
2624 
2625 	entry = tx_q->dirty_tx;
2626 
2627 	/* Try to clean all TX complete frame in 1 shot */
2628 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2629 		struct xdp_frame *xdpf;
2630 		struct sk_buff *skb;
2631 		struct dma_desc *p;
2632 		int status;
2633 
2634 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2635 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2636 			xdpf = tx_q->xdpf[entry];
2637 			skb = NULL;
2638 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2639 			xdpf = NULL;
2640 			skb = tx_q->tx_skbuff[entry];
2641 		} else {
2642 			xdpf = NULL;
2643 			skb = NULL;
2644 		}
2645 
2646 		if (priv->extend_desc)
2647 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2648 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2649 			p = &tx_q->dma_entx[entry].basic;
2650 		else
2651 			p = tx_q->dma_tx + entry;
2652 
2653 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2654 		/* Check if the descriptor is owned by the DMA */
2655 		if (unlikely(status & tx_dma_own))
2656 			break;
2657 
2658 		count++;
2659 
2660 		/* Make sure descriptor fields are read after reading
2661 		 * the own bit.
2662 		 */
2663 		dma_rmb();
2664 
2665 		/* Just consider the last segment and ...*/
2666 		if (likely(!(status & tx_not_ls))) {
2667 			/* ... verify the status error condition */
2668 			if (unlikely(status & tx_err)) {
2669 				tx_errors++;
2670 				if (unlikely(status & tx_err_bump_tc))
2671 					stmmac_bump_dma_threshold(priv, queue);
2672 			} else {
2673 				tx_packets++;
2674 			}
2675 			if (skb) {
2676 				stmmac_get_tx_hwtstamp(priv, p, skb);
2677 			} else if (tx_q->xsk_pool &&
2678 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2679 				struct stmmac_xsk_tx_complete tx_compl = {
2680 					.priv = priv,
2681 					.desc = p,
2682 				};
2683 
2684 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2685 							 &stmmac_xsk_tx_metadata_ops,
2686 							 &tx_compl);
2687 			}
2688 		}
2689 
2690 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2691 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2692 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2693 				dma_unmap_page(priv->device,
2694 					       tx_q->tx_skbuff_dma[entry].buf,
2695 					       tx_q->tx_skbuff_dma[entry].len,
2696 					       DMA_TO_DEVICE);
2697 			else
2698 				dma_unmap_single(priv->device,
2699 						 tx_q->tx_skbuff_dma[entry].buf,
2700 						 tx_q->tx_skbuff_dma[entry].len,
2701 						 DMA_TO_DEVICE);
2702 			tx_q->tx_skbuff_dma[entry].buf = 0;
2703 			tx_q->tx_skbuff_dma[entry].len = 0;
2704 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2705 		}
2706 
2707 		stmmac_clean_desc3(priv, tx_q, p);
2708 
2709 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2710 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2711 
2712 		if (xdpf &&
2713 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2714 			xdp_return_frame_rx_napi(xdpf);
2715 			tx_q->xdpf[entry] = NULL;
2716 		}
2717 
2718 		if (xdpf &&
2719 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2720 			xdp_return_frame(xdpf);
2721 			tx_q->xdpf[entry] = NULL;
2722 		}
2723 
2724 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2725 			tx_q->xsk_frames_done++;
2726 
2727 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2728 			if (likely(skb)) {
2729 				pkts_compl++;
2730 				bytes_compl += skb->len;
2731 				dev_consume_skb_any(skb);
2732 				tx_q->tx_skbuff[entry] = NULL;
2733 			}
2734 		}
2735 
2736 		stmmac_release_tx_desc(priv, p, priv->mode);
2737 
2738 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2739 	}
2740 	tx_q->dirty_tx = entry;
2741 
2742 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2743 				  pkts_compl, bytes_compl);
2744 
2745 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2746 								queue))) &&
2747 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2748 
2749 		netif_dbg(priv, tx_done, priv->dev,
2750 			  "%s: restart transmit\n", __func__);
2751 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2752 	}
2753 
2754 	if (tx_q->xsk_pool) {
2755 		bool work_done;
2756 
2757 		if (tx_q->xsk_frames_done)
2758 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2759 
2760 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2761 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2762 
2763 		/* For XSK TX, we try to send as many as possible.
2764 		 * If XSK work done (XSK TX desc empty and budget still
2765 		 * available), return "budget - 1" to reenable TX IRQ.
2766 		 * Else, return "budget" to make NAPI continue polling.
2767 		 */
2768 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2769 					       STMMAC_XSK_TX_BUDGET_MAX);
2770 		if (work_done)
2771 			xmits = budget - 1;
2772 		else
2773 			xmits = budget;
2774 	}
2775 
2776 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2777 	    priv->eee_sw_timer_en) {
2778 		if (stmmac_enable_eee_mode(priv))
2779 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2780 	}
2781 
2782 	/* We still have pending packets, let's call for a new scheduling */
2783 	if (tx_q->dirty_tx != tx_q->cur_tx)
2784 		*pending_packets = true;
2785 
2786 	u64_stats_update_begin(&txq_stats->napi_syncp);
2787 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2788 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2789 	u64_stats_inc(&txq_stats->napi.tx_clean);
2790 	u64_stats_update_end(&txq_stats->napi_syncp);
2791 
2792 	priv->xstats.tx_errors += tx_errors;
2793 
2794 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2795 
2796 	/* Combine decisions from TX clean and XSK TX */
2797 	return max(count, xmits);
2798 }
2799 
2800 /**
2801  * stmmac_tx_err - to manage the tx error
2802  * @priv: driver private structure
2803  * @chan: channel index
2804  * Description: it cleans the descriptors and restarts the transmission
2805  * in case of transmission errors.
2806  */
2807 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2808 {
2809 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2810 
2811 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2812 
2813 	stmmac_stop_tx_dma(priv, chan);
2814 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2815 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2816 	stmmac_reset_tx_queue(priv, chan);
2817 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2818 			    tx_q->dma_tx_phy, chan);
2819 	stmmac_start_tx_dma(priv, chan);
2820 
2821 	priv->xstats.tx_errors++;
2822 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2823 }
2824 
2825 /**
2826  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2827  *  @priv: driver private structure
2828  *  @txmode: TX operating mode
2829  *  @rxmode: RX operating mode
2830  *  @chan: channel index
2831  *  Description: it is used for configuring of the DMA operation mode in
2832  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2833  *  mode.
2834  */
2835 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2836 					  u32 rxmode, u32 chan)
2837 {
2838 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2839 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2840 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2841 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2842 	int rxfifosz = priv->plat->rx_fifo_size;
2843 	int txfifosz = priv->plat->tx_fifo_size;
2844 
2845 	if (rxfifosz == 0)
2846 		rxfifosz = priv->dma_cap.rx_fifo_size;
2847 	if (txfifosz == 0)
2848 		txfifosz = priv->dma_cap.tx_fifo_size;
2849 
2850 	/* Adjust for real per queue fifo size */
2851 	rxfifosz /= rx_channels_count;
2852 	txfifosz /= tx_channels_count;
2853 
2854 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2855 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2856 }
2857 
2858 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2859 {
2860 	int ret;
2861 
2862 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2863 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2864 	if (ret && (ret != -EINVAL)) {
2865 		stmmac_global_err(priv);
2866 		return true;
2867 	}
2868 
2869 	return false;
2870 }
2871 
2872 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2873 {
2874 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2875 						 &priv->xstats, chan, dir);
2876 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2877 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2878 	struct stmmac_channel *ch = &priv->channel[chan];
2879 	struct napi_struct *rx_napi;
2880 	struct napi_struct *tx_napi;
2881 	unsigned long flags;
2882 
2883 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2884 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2885 
2886 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2887 		if (napi_schedule_prep(rx_napi)) {
2888 			spin_lock_irqsave(&ch->lock, flags);
2889 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2890 			spin_unlock_irqrestore(&ch->lock, flags);
2891 			__napi_schedule(rx_napi);
2892 		}
2893 	}
2894 
2895 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2896 		if (napi_schedule_prep(tx_napi)) {
2897 			spin_lock_irqsave(&ch->lock, flags);
2898 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2899 			spin_unlock_irqrestore(&ch->lock, flags);
2900 			__napi_schedule(tx_napi);
2901 		}
2902 	}
2903 
2904 	return status;
2905 }
2906 
2907 /**
2908  * stmmac_dma_interrupt - DMA ISR
2909  * @priv: driver private structure
2910  * Description: this is the DMA ISR. It is called by the main ISR.
2911  * It calls the dwmac dma routine and schedule poll method in case of some
2912  * work can be done.
2913  */
2914 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2915 {
2916 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2917 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2918 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2919 				tx_channel_count : rx_channel_count;
2920 	u32 chan;
2921 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2922 
2923 	/* Make sure we never check beyond our status buffer. */
2924 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2925 		channels_to_check = ARRAY_SIZE(status);
2926 
2927 	for (chan = 0; chan < channels_to_check; chan++)
2928 		status[chan] = stmmac_napi_check(priv, chan,
2929 						 DMA_DIR_RXTX);
2930 
2931 	for (chan = 0; chan < tx_channel_count; chan++) {
2932 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2933 			/* Try to bump up the dma threshold on this failure */
2934 			stmmac_bump_dma_threshold(priv, chan);
2935 		} else if (unlikely(status[chan] == tx_hard_error)) {
2936 			stmmac_tx_err(priv, chan);
2937 		}
2938 	}
2939 }
2940 
2941 /**
2942  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2943  * @priv: driver private structure
2944  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2945  */
2946 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2947 {
2948 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2949 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2950 
2951 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2952 
2953 	if (priv->dma_cap.rmon) {
2954 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2955 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2956 	} else
2957 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2958 }
2959 
2960 /**
2961  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2962  * @priv: driver private structure
2963  * Description:
2964  *  new GMAC chip generations have a new register to indicate the
2965  *  presence of the optional feature/functions.
2966  *  This can be also used to override the value passed through the
2967  *  platform and necessary for old MAC10/100 and GMAC chips.
2968  */
2969 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2970 {
2971 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2972 }
2973 
2974 /**
2975  * stmmac_check_ether_addr - check if the MAC addr is valid
2976  * @priv: driver private structure
2977  * Description:
2978  * it is to verify if the MAC address is valid, in case of failures it
2979  * generates a random MAC address
2980  */
2981 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2982 {
2983 	u8 addr[ETH_ALEN];
2984 
2985 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2986 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2987 		if (is_valid_ether_addr(addr))
2988 			eth_hw_addr_set(priv->dev, addr);
2989 		else
2990 			eth_hw_addr_random(priv->dev);
2991 		dev_info(priv->device, "device MAC address %pM\n",
2992 			 priv->dev->dev_addr);
2993 	}
2994 }
2995 
2996 /**
2997  * stmmac_init_dma_engine - DMA init.
2998  * @priv: driver private structure
2999  * Description:
3000  * It inits the DMA invoking the specific MAC/GMAC callback.
3001  * Some DMA parameters can be passed from the platform;
3002  * in case of these are not passed a default is kept for the MAC or GMAC.
3003  */
3004 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3005 {
3006 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3007 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3008 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3009 	struct stmmac_rx_queue *rx_q;
3010 	struct stmmac_tx_queue *tx_q;
3011 	u32 chan = 0;
3012 	int ret = 0;
3013 
3014 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3015 		dev_err(priv->device, "Invalid DMA configuration\n");
3016 		return -EINVAL;
3017 	}
3018 
3019 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3020 		priv->plat->dma_cfg->atds = 1;
3021 
3022 	ret = stmmac_reset(priv, priv->ioaddr);
3023 	if (ret) {
3024 		dev_err(priv->device, "Failed to reset the dma\n");
3025 		return ret;
3026 	}
3027 
3028 	/* DMA Configuration */
3029 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3030 
3031 	if (priv->plat->axi)
3032 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3033 
3034 	/* DMA CSR Channel configuration */
3035 	for (chan = 0; chan < dma_csr_ch; chan++) {
3036 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3037 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3038 	}
3039 
3040 	/* DMA RX Channel Configuration */
3041 	for (chan = 0; chan < rx_channels_count; chan++) {
3042 		rx_q = &priv->dma_conf.rx_queue[chan];
3043 
3044 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3045 				    rx_q->dma_rx_phy, chan);
3046 
3047 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3048 				     (rx_q->buf_alloc_num *
3049 				      sizeof(struct dma_desc));
3050 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3051 				       rx_q->rx_tail_addr, chan);
3052 	}
3053 
3054 	/* DMA TX Channel Configuration */
3055 	for (chan = 0; chan < tx_channels_count; chan++) {
3056 		tx_q = &priv->dma_conf.tx_queue[chan];
3057 
3058 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3059 				    tx_q->dma_tx_phy, chan);
3060 
3061 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3062 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3063 				       tx_q->tx_tail_addr, chan);
3064 	}
3065 
3066 	return ret;
3067 }
3068 
3069 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3070 {
3071 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3072 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3073 	struct stmmac_channel *ch;
3074 	struct napi_struct *napi;
3075 
3076 	if (!tx_coal_timer)
3077 		return;
3078 
3079 	ch = &priv->channel[tx_q->queue_index];
3080 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3081 
3082 	/* Arm timer only if napi is not already scheduled.
3083 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3084 	 * again in the next scheduled napi.
3085 	 */
3086 	if (unlikely(!napi_is_scheduled(napi)))
3087 		hrtimer_start(&tx_q->txtimer,
3088 			      STMMAC_COAL_TIMER(tx_coal_timer),
3089 			      HRTIMER_MODE_REL);
3090 	else
3091 		hrtimer_try_to_cancel(&tx_q->txtimer);
3092 }
3093 
3094 /**
3095  * stmmac_tx_timer - mitigation sw timer for tx.
3096  * @t: data pointer
3097  * Description:
3098  * This is the timer handler to directly invoke the stmmac_tx_clean.
3099  */
3100 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3101 {
3102 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3103 	struct stmmac_priv *priv = tx_q->priv_data;
3104 	struct stmmac_channel *ch;
3105 	struct napi_struct *napi;
3106 
3107 	ch = &priv->channel[tx_q->queue_index];
3108 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3109 
3110 	if (likely(napi_schedule_prep(napi))) {
3111 		unsigned long flags;
3112 
3113 		spin_lock_irqsave(&ch->lock, flags);
3114 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3115 		spin_unlock_irqrestore(&ch->lock, flags);
3116 		__napi_schedule(napi);
3117 	}
3118 
3119 	return HRTIMER_NORESTART;
3120 }
3121 
3122 /**
3123  * stmmac_init_coalesce - init mitigation options.
3124  * @priv: driver private structure
3125  * Description:
3126  * This inits the coalesce parameters: i.e. timer rate,
3127  * timer handler and default threshold used for enabling the
3128  * interrupt on completion bit.
3129  */
3130 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3131 {
3132 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3133 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3134 	u32 chan;
3135 
3136 	for (chan = 0; chan < tx_channel_count; chan++) {
3137 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3138 
3139 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3140 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3141 
3142 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3143 		tx_q->txtimer.function = stmmac_tx_timer;
3144 	}
3145 
3146 	for (chan = 0; chan < rx_channel_count; chan++)
3147 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3148 }
3149 
3150 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3151 {
3152 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3153 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3154 	u32 chan;
3155 
3156 	/* set TX ring length */
3157 	for (chan = 0; chan < tx_channels_count; chan++)
3158 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3159 				       (priv->dma_conf.dma_tx_size - 1), chan);
3160 
3161 	/* set RX ring length */
3162 	for (chan = 0; chan < rx_channels_count; chan++)
3163 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3164 				       (priv->dma_conf.dma_rx_size - 1), chan);
3165 }
3166 
3167 /**
3168  *  stmmac_set_tx_queue_weight - Set TX queue weight
3169  *  @priv: driver private structure
3170  *  Description: It is used for setting TX queues weight
3171  */
3172 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3173 {
3174 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3175 	u32 weight;
3176 	u32 queue;
3177 
3178 	for (queue = 0; queue < tx_queues_count; queue++) {
3179 		weight = priv->plat->tx_queues_cfg[queue].weight;
3180 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3181 	}
3182 }
3183 
3184 /**
3185  *  stmmac_configure_cbs - Configure CBS in TX queue
3186  *  @priv: driver private structure
3187  *  Description: It is used for configuring CBS in AVB TX queues
3188  */
3189 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3190 {
3191 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3192 	u32 mode_to_use;
3193 	u32 queue;
3194 
3195 	/* queue 0 is reserved for legacy traffic */
3196 	for (queue = 1; queue < tx_queues_count; queue++) {
3197 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3198 		if (mode_to_use == MTL_QUEUE_DCB)
3199 			continue;
3200 
3201 		stmmac_config_cbs(priv, priv->hw,
3202 				priv->plat->tx_queues_cfg[queue].send_slope,
3203 				priv->plat->tx_queues_cfg[queue].idle_slope,
3204 				priv->plat->tx_queues_cfg[queue].high_credit,
3205 				priv->plat->tx_queues_cfg[queue].low_credit,
3206 				queue);
3207 	}
3208 }
3209 
3210 /**
3211  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3212  *  @priv: driver private structure
3213  *  Description: It is used for mapping RX queues to RX dma channels
3214  */
3215 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3216 {
3217 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3218 	u32 queue;
3219 	u32 chan;
3220 
3221 	for (queue = 0; queue < rx_queues_count; queue++) {
3222 		chan = priv->plat->rx_queues_cfg[queue].chan;
3223 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3224 	}
3225 }
3226 
3227 /**
3228  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3229  *  @priv: driver private structure
3230  *  Description: It is used for configuring the RX Queue Priority
3231  */
3232 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3233 {
3234 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3235 	u32 queue;
3236 	u32 prio;
3237 
3238 	for (queue = 0; queue < rx_queues_count; queue++) {
3239 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3240 			continue;
3241 
3242 		prio = priv->plat->rx_queues_cfg[queue].prio;
3243 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3244 	}
3245 }
3246 
3247 /**
3248  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3249  *  @priv: driver private structure
3250  *  Description: It is used for configuring the TX Queue Priority
3251  */
3252 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3253 {
3254 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3255 	u32 queue;
3256 	u32 prio;
3257 
3258 	for (queue = 0; queue < tx_queues_count; queue++) {
3259 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3260 			continue;
3261 
3262 		prio = priv->plat->tx_queues_cfg[queue].prio;
3263 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3264 	}
3265 }
3266 
3267 /**
3268  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3269  *  @priv: driver private structure
3270  *  Description: It is used for configuring the RX queue routing
3271  */
3272 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3273 {
3274 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3275 	u32 queue;
3276 	u8 packet;
3277 
3278 	for (queue = 0; queue < rx_queues_count; queue++) {
3279 		/* no specific packet type routing specified for the queue */
3280 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3281 			continue;
3282 
3283 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3284 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3285 	}
3286 }
3287 
3288 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3289 {
3290 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3291 		priv->rss.enable = false;
3292 		return;
3293 	}
3294 
3295 	if (priv->dev->features & NETIF_F_RXHASH)
3296 		priv->rss.enable = true;
3297 	else
3298 		priv->rss.enable = false;
3299 
3300 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3301 			     priv->plat->rx_queues_to_use);
3302 }
3303 
3304 /**
3305  *  stmmac_mtl_configuration - Configure MTL
3306  *  @priv: driver private structure
3307  *  Description: It is used for configurring MTL
3308  */
3309 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3310 {
3311 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3312 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3313 
3314 	if (tx_queues_count > 1)
3315 		stmmac_set_tx_queue_weight(priv);
3316 
3317 	/* Configure MTL RX algorithms */
3318 	if (rx_queues_count > 1)
3319 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3320 				priv->plat->rx_sched_algorithm);
3321 
3322 	/* Configure MTL TX algorithms */
3323 	if (tx_queues_count > 1)
3324 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3325 				priv->plat->tx_sched_algorithm);
3326 
3327 	/* Configure CBS in AVB TX queues */
3328 	if (tx_queues_count > 1)
3329 		stmmac_configure_cbs(priv);
3330 
3331 	/* Map RX MTL to DMA channels */
3332 	stmmac_rx_queue_dma_chan_map(priv);
3333 
3334 	/* Enable MAC RX Queues */
3335 	stmmac_mac_enable_rx_queues(priv);
3336 
3337 	/* Set RX priorities */
3338 	if (rx_queues_count > 1)
3339 		stmmac_mac_config_rx_queues_prio(priv);
3340 
3341 	/* Set TX priorities */
3342 	if (tx_queues_count > 1)
3343 		stmmac_mac_config_tx_queues_prio(priv);
3344 
3345 	/* Set RX routing */
3346 	if (rx_queues_count > 1)
3347 		stmmac_mac_config_rx_queues_routing(priv);
3348 
3349 	/* Receive Side Scaling */
3350 	if (rx_queues_count > 1)
3351 		stmmac_mac_config_rss(priv);
3352 }
3353 
3354 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3355 {
3356 	if (priv->dma_cap.asp) {
3357 		netdev_info(priv->dev, "Enabling Safety Features\n");
3358 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3359 					  priv->plat->safety_feat_cfg);
3360 	} else {
3361 		netdev_info(priv->dev, "No Safety Features support found\n");
3362 	}
3363 }
3364 
3365 /**
3366  * stmmac_hw_setup - setup mac in a usable state.
3367  *  @dev : pointer to the device structure.
3368  *  @ptp_register: register PTP if set
3369  *  Description:
3370  *  this is the main function to setup the HW in a usable state because the
3371  *  dma engine is reset, the core registers are configured (e.g. AXI,
3372  *  Checksum features, timers). The DMA is ready to start receiving and
3373  *  transmitting.
3374  *  Return value:
3375  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3376  *  file on failure.
3377  */
3378 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3379 {
3380 	struct stmmac_priv *priv = netdev_priv(dev);
3381 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3382 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3383 	bool sph_en;
3384 	u32 chan;
3385 	int ret;
3386 
3387 	/* Make sure RX clock is enabled */
3388 	if (priv->hw->phylink_pcs)
3389 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3390 
3391 	/* DMA initialization and SW reset */
3392 	ret = stmmac_init_dma_engine(priv);
3393 	if (ret < 0) {
3394 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3395 			   __func__);
3396 		return ret;
3397 	}
3398 
3399 	/* Copy the MAC addr into the HW  */
3400 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3401 
3402 	/* PS and related bits will be programmed according to the speed */
3403 	if (priv->hw->pcs) {
3404 		int speed = priv->plat->mac_port_sel_speed;
3405 
3406 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3407 		    (speed == SPEED_1000)) {
3408 			priv->hw->ps = speed;
3409 		} else {
3410 			dev_warn(priv->device, "invalid port speed\n");
3411 			priv->hw->ps = 0;
3412 		}
3413 	}
3414 
3415 	/* Initialize the MAC Core */
3416 	stmmac_core_init(priv, priv->hw, dev);
3417 
3418 	/* Initialize MTL*/
3419 	stmmac_mtl_configuration(priv);
3420 
3421 	/* Initialize Safety Features */
3422 	stmmac_safety_feat_configuration(priv);
3423 
3424 	ret = stmmac_rx_ipc(priv, priv->hw);
3425 	if (!ret) {
3426 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3427 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3428 		priv->hw->rx_csum = 0;
3429 	}
3430 
3431 	/* Enable the MAC Rx/Tx */
3432 	stmmac_mac_set(priv, priv->ioaddr, true);
3433 
3434 	/* Set the HW DMA mode and the COE */
3435 	stmmac_dma_operation_mode(priv);
3436 
3437 	stmmac_mmc_setup(priv);
3438 
3439 	if (ptp_register) {
3440 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3441 		if (ret < 0)
3442 			netdev_warn(priv->dev,
3443 				    "failed to enable PTP reference clock: %pe\n",
3444 				    ERR_PTR(ret));
3445 	}
3446 
3447 	ret = stmmac_init_ptp(priv);
3448 	if (ret == -EOPNOTSUPP)
3449 		netdev_info(priv->dev, "PTP not supported by HW\n");
3450 	else if (ret)
3451 		netdev_warn(priv->dev, "PTP init failed\n");
3452 	else if (ptp_register)
3453 		stmmac_ptp_register(priv);
3454 
3455 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3456 
3457 	/* Convert the timer from msec to usec */
3458 	if (!priv->tx_lpi_timer)
3459 		priv->tx_lpi_timer = eee_timer * 1000;
3460 
3461 	if (priv->use_riwt) {
3462 		u32 queue;
3463 
3464 		for (queue = 0; queue < rx_cnt; queue++) {
3465 			if (!priv->rx_riwt[queue])
3466 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3467 
3468 			stmmac_rx_watchdog(priv, priv->ioaddr,
3469 					   priv->rx_riwt[queue], queue);
3470 		}
3471 	}
3472 
3473 	if (priv->hw->pcs)
3474 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3475 
3476 	/* set TX and RX rings length */
3477 	stmmac_set_rings_length(priv);
3478 
3479 	/* Enable TSO */
3480 	if (priv->tso) {
3481 		for (chan = 0; chan < tx_cnt; chan++) {
3482 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3483 
3484 			/* TSO and TBS cannot co-exist */
3485 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3486 				continue;
3487 
3488 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3489 		}
3490 	}
3491 
3492 	/* Enable Split Header */
3493 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3494 	for (chan = 0; chan < rx_cnt; chan++)
3495 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3496 
3497 
3498 	/* VLAN Tag Insertion */
3499 	if (priv->dma_cap.vlins)
3500 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3501 
3502 	/* TBS */
3503 	for (chan = 0; chan < tx_cnt; chan++) {
3504 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3505 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3506 
3507 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3508 	}
3509 
3510 	/* Configure real RX and TX queues */
3511 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3512 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3513 
3514 	/* Start the ball rolling... */
3515 	stmmac_start_all_dma(priv);
3516 
3517 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3518 
3519 	return 0;
3520 }
3521 
3522 static void stmmac_hw_teardown(struct net_device *dev)
3523 {
3524 	struct stmmac_priv *priv = netdev_priv(dev);
3525 
3526 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3527 }
3528 
3529 static void stmmac_free_irq(struct net_device *dev,
3530 			    enum request_irq_err irq_err, int irq_idx)
3531 {
3532 	struct stmmac_priv *priv = netdev_priv(dev);
3533 	int j;
3534 
3535 	switch (irq_err) {
3536 	case REQ_IRQ_ERR_ALL:
3537 		irq_idx = priv->plat->tx_queues_to_use;
3538 		fallthrough;
3539 	case REQ_IRQ_ERR_TX:
3540 		for (j = irq_idx - 1; j >= 0; j--) {
3541 			if (priv->tx_irq[j] > 0) {
3542 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3543 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3544 			}
3545 		}
3546 		irq_idx = priv->plat->rx_queues_to_use;
3547 		fallthrough;
3548 	case REQ_IRQ_ERR_RX:
3549 		for (j = irq_idx - 1; j >= 0; j--) {
3550 			if (priv->rx_irq[j] > 0) {
3551 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3552 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3553 			}
3554 		}
3555 
3556 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3557 			free_irq(priv->sfty_ue_irq, dev);
3558 		fallthrough;
3559 	case REQ_IRQ_ERR_SFTY_UE:
3560 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3561 			free_irq(priv->sfty_ce_irq, dev);
3562 		fallthrough;
3563 	case REQ_IRQ_ERR_SFTY_CE:
3564 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3565 			free_irq(priv->lpi_irq, dev);
3566 		fallthrough;
3567 	case REQ_IRQ_ERR_LPI:
3568 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3569 			free_irq(priv->wol_irq, dev);
3570 		fallthrough;
3571 	case REQ_IRQ_ERR_SFTY:
3572 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3573 			free_irq(priv->sfty_irq, dev);
3574 		fallthrough;
3575 	case REQ_IRQ_ERR_WOL:
3576 		free_irq(dev->irq, dev);
3577 		fallthrough;
3578 	case REQ_IRQ_ERR_MAC:
3579 	case REQ_IRQ_ERR_NO:
3580 		/* If MAC IRQ request error, no more IRQ to free */
3581 		break;
3582 	}
3583 }
3584 
3585 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3586 {
3587 	struct stmmac_priv *priv = netdev_priv(dev);
3588 	enum request_irq_err irq_err;
3589 	cpumask_t cpu_mask;
3590 	int irq_idx = 0;
3591 	char *int_name;
3592 	int ret;
3593 	int i;
3594 
3595 	/* For common interrupt */
3596 	int_name = priv->int_name_mac;
3597 	sprintf(int_name, "%s:%s", dev->name, "mac");
3598 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3599 			  0, int_name, dev);
3600 	if (unlikely(ret < 0)) {
3601 		netdev_err(priv->dev,
3602 			   "%s: alloc mac MSI %d (error: %d)\n",
3603 			   __func__, dev->irq, ret);
3604 		irq_err = REQ_IRQ_ERR_MAC;
3605 		goto irq_error;
3606 	}
3607 
3608 	/* Request the Wake IRQ in case of another line
3609 	 * is used for WoL
3610 	 */
3611 	priv->wol_irq_disabled = true;
3612 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3613 		int_name = priv->int_name_wol;
3614 		sprintf(int_name, "%s:%s", dev->name, "wol");
3615 		ret = request_irq(priv->wol_irq,
3616 				  stmmac_mac_interrupt,
3617 				  0, int_name, dev);
3618 		if (unlikely(ret < 0)) {
3619 			netdev_err(priv->dev,
3620 				   "%s: alloc wol MSI %d (error: %d)\n",
3621 				   __func__, priv->wol_irq, ret);
3622 			irq_err = REQ_IRQ_ERR_WOL;
3623 			goto irq_error;
3624 		}
3625 	}
3626 
3627 	/* Request the LPI IRQ in case of another line
3628 	 * is used for LPI
3629 	 */
3630 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3631 		int_name = priv->int_name_lpi;
3632 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3633 		ret = request_irq(priv->lpi_irq,
3634 				  stmmac_mac_interrupt,
3635 				  0, int_name, dev);
3636 		if (unlikely(ret < 0)) {
3637 			netdev_err(priv->dev,
3638 				   "%s: alloc lpi MSI %d (error: %d)\n",
3639 				   __func__, priv->lpi_irq, ret);
3640 			irq_err = REQ_IRQ_ERR_LPI;
3641 			goto irq_error;
3642 		}
3643 	}
3644 
3645 	/* Request the common Safety Feature Correctible/Uncorrectible
3646 	 * Error line in case of another line is used
3647 	 */
3648 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3649 		int_name = priv->int_name_sfty;
3650 		sprintf(int_name, "%s:%s", dev->name, "safety");
3651 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3652 				  0, int_name, dev);
3653 		if (unlikely(ret < 0)) {
3654 			netdev_err(priv->dev,
3655 				   "%s: alloc sfty MSI %d (error: %d)\n",
3656 				   __func__, priv->sfty_irq, ret);
3657 			irq_err = REQ_IRQ_ERR_SFTY;
3658 			goto irq_error;
3659 		}
3660 	}
3661 
3662 	/* Request the Safety Feature Correctible Error line in
3663 	 * case of another line is used
3664 	 */
3665 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3666 		int_name = priv->int_name_sfty_ce;
3667 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3668 		ret = request_irq(priv->sfty_ce_irq,
3669 				  stmmac_safety_interrupt,
3670 				  0, int_name, dev);
3671 		if (unlikely(ret < 0)) {
3672 			netdev_err(priv->dev,
3673 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3674 				   __func__, priv->sfty_ce_irq, ret);
3675 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3676 			goto irq_error;
3677 		}
3678 	}
3679 
3680 	/* Request the Safety Feature Uncorrectible Error line in
3681 	 * case of another line is used
3682 	 */
3683 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3684 		int_name = priv->int_name_sfty_ue;
3685 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3686 		ret = request_irq(priv->sfty_ue_irq,
3687 				  stmmac_safety_interrupt,
3688 				  0, int_name, dev);
3689 		if (unlikely(ret < 0)) {
3690 			netdev_err(priv->dev,
3691 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3692 				   __func__, priv->sfty_ue_irq, ret);
3693 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3694 			goto irq_error;
3695 		}
3696 	}
3697 
3698 	/* Request Rx MSI irq */
3699 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3700 		if (i >= MTL_MAX_RX_QUEUES)
3701 			break;
3702 		if (priv->rx_irq[i] == 0)
3703 			continue;
3704 
3705 		int_name = priv->int_name_rx_irq[i];
3706 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3707 		ret = request_irq(priv->rx_irq[i],
3708 				  stmmac_msi_intr_rx,
3709 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3710 		if (unlikely(ret < 0)) {
3711 			netdev_err(priv->dev,
3712 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3713 				   __func__, i, priv->rx_irq[i], ret);
3714 			irq_err = REQ_IRQ_ERR_RX;
3715 			irq_idx = i;
3716 			goto irq_error;
3717 		}
3718 		cpumask_clear(&cpu_mask);
3719 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3720 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3721 	}
3722 
3723 	/* Request Tx MSI irq */
3724 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3725 		if (i >= MTL_MAX_TX_QUEUES)
3726 			break;
3727 		if (priv->tx_irq[i] == 0)
3728 			continue;
3729 
3730 		int_name = priv->int_name_tx_irq[i];
3731 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3732 		ret = request_irq(priv->tx_irq[i],
3733 				  stmmac_msi_intr_tx,
3734 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3735 		if (unlikely(ret < 0)) {
3736 			netdev_err(priv->dev,
3737 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3738 				   __func__, i, priv->tx_irq[i], ret);
3739 			irq_err = REQ_IRQ_ERR_TX;
3740 			irq_idx = i;
3741 			goto irq_error;
3742 		}
3743 		cpumask_clear(&cpu_mask);
3744 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3745 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3746 	}
3747 
3748 	return 0;
3749 
3750 irq_error:
3751 	stmmac_free_irq(dev, irq_err, irq_idx);
3752 	return ret;
3753 }
3754 
3755 static int stmmac_request_irq_single(struct net_device *dev)
3756 {
3757 	struct stmmac_priv *priv = netdev_priv(dev);
3758 	enum request_irq_err irq_err;
3759 	int ret;
3760 
3761 	ret = request_irq(dev->irq, stmmac_interrupt,
3762 			  IRQF_SHARED, dev->name, dev);
3763 	if (unlikely(ret < 0)) {
3764 		netdev_err(priv->dev,
3765 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3766 			   __func__, dev->irq, ret);
3767 		irq_err = REQ_IRQ_ERR_MAC;
3768 		goto irq_error;
3769 	}
3770 
3771 	/* Request the Wake IRQ in case of another line
3772 	 * is used for WoL
3773 	 */
3774 	priv->wol_irq_disabled = true;
3775 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3776 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3777 				  IRQF_SHARED, dev->name, dev);
3778 		if (unlikely(ret < 0)) {
3779 			netdev_err(priv->dev,
3780 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3781 				   __func__, priv->wol_irq, ret);
3782 			irq_err = REQ_IRQ_ERR_WOL;
3783 			goto irq_error;
3784 		}
3785 	}
3786 
3787 	/* Request the IRQ lines */
3788 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3789 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3790 				  IRQF_SHARED, dev->name, dev);
3791 		if (unlikely(ret < 0)) {
3792 			netdev_err(priv->dev,
3793 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3794 				   __func__, priv->lpi_irq, ret);
3795 			irq_err = REQ_IRQ_ERR_LPI;
3796 			goto irq_error;
3797 		}
3798 	}
3799 
3800 	/* Request the common Safety Feature Correctible/Uncorrectible
3801 	 * Error line in case of another line is used
3802 	 */
3803 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3804 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3805 				  IRQF_SHARED, dev->name, dev);
3806 		if (unlikely(ret < 0)) {
3807 			netdev_err(priv->dev,
3808 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3809 				   __func__, priv->sfty_irq, ret);
3810 			irq_err = REQ_IRQ_ERR_SFTY;
3811 			goto irq_error;
3812 		}
3813 	}
3814 
3815 	return 0;
3816 
3817 irq_error:
3818 	stmmac_free_irq(dev, irq_err, 0);
3819 	return ret;
3820 }
3821 
3822 static int stmmac_request_irq(struct net_device *dev)
3823 {
3824 	struct stmmac_priv *priv = netdev_priv(dev);
3825 	int ret;
3826 
3827 	/* Request the IRQ lines */
3828 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3829 		ret = stmmac_request_irq_multi_msi(dev);
3830 	else
3831 		ret = stmmac_request_irq_single(dev);
3832 
3833 	return ret;
3834 }
3835 
3836 /**
3837  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3838  *  @priv: driver private structure
3839  *  @mtu: MTU to setup the dma queue and buf with
3840  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3841  *  Allocate the Tx/Rx DMA queue and init them.
3842  *  Return value:
3843  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3844  */
3845 static struct stmmac_dma_conf *
3846 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3847 {
3848 	struct stmmac_dma_conf *dma_conf;
3849 	int chan, bfsize, ret;
3850 
3851 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3852 	if (!dma_conf) {
3853 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3854 			   __func__);
3855 		return ERR_PTR(-ENOMEM);
3856 	}
3857 
3858 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3859 	if (bfsize < 0)
3860 		bfsize = 0;
3861 
3862 	if (bfsize < BUF_SIZE_16KiB)
3863 		bfsize = stmmac_set_bfsize(mtu, 0);
3864 
3865 	dma_conf->dma_buf_sz = bfsize;
3866 	/* Chose the tx/rx size from the already defined one in the
3867 	 * priv struct. (if defined)
3868 	 */
3869 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3870 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3871 
3872 	if (!dma_conf->dma_tx_size)
3873 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3874 	if (!dma_conf->dma_rx_size)
3875 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3876 
3877 	/* Earlier check for TBS */
3878 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3879 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3880 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3881 
3882 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3883 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3884 	}
3885 
3886 	ret = alloc_dma_desc_resources(priv, dma_conf);
3887 	if (ret < 0) {
3888 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3889 			   __func__);
3890 		goto alloc_error;
3891 	}
3892 
3893 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3894 	if (ret < 0) {
3895 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3896 			   __func__);
3897 		goto init_error;
3898 	}
3899 
3900 	return dma_conf;
3901 
3902 init_error:
3903 	free_dma_desc_resources(priv, dma_conf);
3904 alloc_error:
3905 	kfree(dma_conf);
3906 	return ERR_PTR(ret);
3907 }
3908 
3909 /**
3910  *  __stmmac_open - open entry point of the driver
3911  *  @dev : pointer to the device structure.
3912  *  @dma_conf :  structure to take the dma data
3913  *  Description:
3914  *  This function is the open entry point of the driver.
3915  *  Return value:
3916  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3917  *  file on failure.
3918  */
3919 static int __stmmac_open(struct net_device *dev,
3920 			 struct stmmac_dma_conf *dma_conf)
3921 {
3922 	struct stmmac_priv *priv = netdev_priv(dev);
3923 	int mode = priv->plat->phy_interface;
3924 	u32 chan;
3925 	int ret;
3926 
3927 	ret = pm_runtime_resume_and_get(priv->device);
3928 	if (ret < 0)
3929 		return ret;
3930 
3931 	if ((!priv->hw->xpcs ||
3932 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3933 		ret = stmmac_init_phy(dev);
3934 		if (ret) {
3935 			netdev_err(priv->dev,
3936 				   "%s: Cannot attach to PHY (error: %d)\n",
3937 				   __func__, ret);
3938 			goto init_phy_error;
3939 		}
3940 	}
3941 
3942 	buf_sz = dma_conf->dma_buf_sz;
3943 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3944 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3945 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3946 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3947 
3948 	stmmac_reset_queues_param(priv);
3949 
3950 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3951 	    priv->plat->serdes_powerup) {
3952 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3953 		if (ret < 0) {
3954 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3955 				   __func__);
3956 			goto init_error;
3957 		}
3958 	}
3959 
3960 	ret = stmmac_hw_setup(dev, true);
3961 	if (ret < 0) {
3962 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3963 		goto init_error;
3964 	}
3965 
3966 	stmmac_init_coalesce(priv);
3967 
3968 	phylink_start(priv->phylink);
3969 	/* We may have called phylink_speed_down before */
3970 	phylink_speed_up(priv->phylink);
3971 
3972 	ret = stmmac_request_irq(dev);
3973 	if (ret)
3974 		goto irq_error;
3975 
3976 	stmmac_enable_all_queues(priv);
3977 	netif_tx_start_all_queues(priv->dev);
3978 	stmmac_enable_all_dma_irq(priv);
3979 
3980 	return 0;
3981 
3982 irq_error:
3983 	phylink_stop(priv->phylink);
3984 
3985 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3986 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3987 
3988 	stmmac_hw_teardown(dev);
3989 init_error:
3990 	phylink_disconnect_phy(priv->phylink);
3991 init_phy_error:
3992 	pm_runtime_put(priv->device);
3993 	return ret;
3994 }
3995 
3996 static int stmmac_open(struct net_device *dev)
3997 {
3998 	struct stmmac_priv *priv = netdev_priv(dev);
3999 	struct stmmac_dma_conf *dma_conf;
4000 	int ret;
4001 
4002 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4003 	if (IS_ERR(dma_conf))
4004 		return PTR_ERR(dma_conf);
4005 
4006 	ret = __stmmac_open(dev, dma_conf);
4007 	if (ret)
4008 		free_dma_desc_resources(priv, dma_conf);
4009 
4010 	kfree(dma_conf);
4011 	return ret;
4012 }
4013 
4014 /**
4015  *  stmmac_release - close entry point of the driver
4016  *  @dev : device pointer.
4017  *  Description:
4018  *  This is the stop entry point of the driver.
4019  */
4020 static int stmmac_release(struct net_device *dev)
4021 {
4022 	struct stmmac_priv *priv = netdev_priv(dev);
4023 	u32 chan;
4024 
4025 	if (device_may_wakeup(priv->device))
4026 		phylink_speed_down(priv->phylink, false);
4027 	/* Stop and disconnect the PHY */
4028 	phylink_stop(priv->phylink);
4029 	phylink_disconnect_phy(priv->phylink);
4030 
4031 	stmmac_disable_all_queues(priv);
4032 
4033 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4034 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4035 
4036 	netif_tx_disable(dev);
4037 
4038 	/* Free the IRQ lines */
4039 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4040 
4041 	if (priv->eee_enabled) {
4042 		priv->tx_path_in_lpi_mode = false;
4043 		del_timer_sync(&priv->eee_ctrl_timer);
4044 	}
4045 
4046 	/* Stop TX/RX DMA and clear the descriptors */
4047 	stmmac_stop_all_dma(priv);
4048 
4049 	/* Release and free the Rx/Tx resources */
4050 	free_dma_desc_resources(priv, &priv->dma_conf);
4051 
4052 	/* Disable the MAC Rx/Tx */
4053 	stmmac_mac_set(priv, priv->ioaddr, false);
4054 
4055 	/* Powerdown Serdes if there is */
4056 	if (priv->plat->serdes_powerdown)
4057 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4058 
4059 	stmmac_release_ptp(priv);
4060 
4061 	if (stmmac_fpe_supported(priv))
4062 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4063 
4064 	pm_runtime_put(priv->device);
4065 
4066 	return 0;
4067 }
4068 
4069 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4070 			       struct stmmac_tx_queue *tx_q)
4071 {
4072 	u16 tag = 0x0, inner_tag = 0x0;
4073 	u32 inner_type = 0x0;
4074 	struct dma_desc *p;
4075 
4076 	if (!priv->dma_cap.vlins)
4077 		return false;
4078 	if (!skb_vlan_tag_present(skb))
4079 		return false;
4080 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4081 		inner_tag = skb_vlan_tag_get(skb);
4082 		inner_type = STMMAC_VLAN_INSERT;
4083 	}
4084 
4085 	tag = skb_vlan_tag_get(skb);
4086 
4087 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4088 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4089 	else
4090 		p = &tx_q->dma_tx[tx_q->cur_tx];
4091 
4092 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4093 		return false;
4094 
4095 	stmmac_set_tx_owner(priv, p);
4096 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4097 	return true;
4098 }
4099 
4100 /**
4101  *  stmmac_tso_allocator - close entry point of the driver
4102  *  @priv: driver private structure
4103  *  @des: buffer start address
4104  *  @total_len: total length to fill in descriptors
4105  *  @last_segment: condition for the last descriptor
4106  *  @queue: TX queue index
4107  *  Description:
4108  *  This function fills descriptor and request new descriptors according to
4109  *  buffer length to fill
4110  */
4111 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4112 				 int total_len, bool last_segment, u32 queue)
4113 {
4114 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4115 	struct dma_desc *desc;
4116 	u32 buff_size;
4117 	int tmp_len;
4118 
4119 	tmp_len = total_len;
4120 
4121 	while (tmp_len > 0) {
4122 		dma_addr_t curr_addr;
4123 
4124 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4125 						priv->dma_conf.dma_tx_size);
4126 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4127 
4128 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4129 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4130 		else
4131 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4132 
4133 		curr_addr = des + (total_len - tmp_len);
4134 		stmmac_set_desc_addr(priv, desc, curr_addr);
4135 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4136 			    TSO_MAX_BUFF_SIZE : tmp_len;
4137 
4138 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4139 				0, 1,
4140 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4141 				0, 0);
4142 
4143 		tmp_len -= TSO_MAX_BUFF_SIZE;
4144 	}
4145 }
4146 
4147 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4148 {
4149 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4150 	int desc_size;
4151 
4152 	if (likely(priv->extend_desc))
4153 		desc_size = sizeof(struct dma_extended_desc);
4154 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4155 		desc_size = sizeof(struct dma_edesc);
4156 	else
4157 		desc_size = sizeof(struct dma_desc);
4158 
4159 	/* The own bit must be the latest setting done when prepare the
4160 	 * descriptor and then barrier is needed to make sure that
4161 	 * all is coherent before granting the DMA engine.
4162 	 */
4163 	wmb();
4164 
4165 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4166 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4167 }
4168 
4169 /**
4170  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4171  *  @skb : the socket buffer
4172  *  @dev : device pointer
4173  *  Description: this is the transmit function that is called on TSO frames
4174  *  (support available on GMAC4 and newer chips).
4175  *  Diagram below show the ring programming in case of TSO frames:
4176  *
4177  *  First Descriptor
4178  *   --------
4179  *   | DES0 |---> buffer1 = L2/L3/L4 header
4180  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4181  *   |      |     width is 32-bit, but we never use it.
4182  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4183  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4184  *   |      |     or 48-bit, and we always use it.
4185  *   | DES2 |---> buffer1 len
4186  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4187  *   --------
4188  *   --------
4189  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4190  *   | DES1 |---> same as the First Descriptor
4191  *   | DES2 |---> buffer1 len
4192  *   | DES3 |
4193  *   --------
4194  *	|
4195  *     ...
4196  *	|
4197  *   --------
4198  *   | DES0 |---> buffer1 = Split TCP Payload
4199  *   | DES1 |---> same as the First Descriptor
4200  *   | DES2 |---> buffer1 len
4201  *   | DES3 |
4202  *   --------
4203  *
4204  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4205  */
4206 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4207 {
4208 	struct dma_desc *desc, *first, *mss_desc = NULL;
4209 	struct stmmac_priv *priv = netdev_priv(dev);
4210 	unsigned int first_entry, tx_packets;
4211 	struct stmmac_txq_stats *txq_stats;
4212 	struct stmmac_tx_queue *tx_q;
4213 	u32 pay_len, mss, queue;
4214 	int i, first_tx, nfrags;
4215 	u8 proto_hdr_len, hdr;
4216 	dma_addr_t des;
4217 	bool set_ic;
4218 
4219 	/* Always insert VLAN tag to SKB payload for TSO frames.
4220 	 *
4221 	 * Never insert VLAN tag by HW, since segments splited by
4222 	 * TSO engine will be un-tagged by mistake.
4223 	 */
4224 	if (skb_vlan_tag_present(skb)) {
4225 		skb = __vlan_hwaccel_push_inside(skb);
4226 		if (unlikely(!skb)) {
4227 			priv->xstats.tx_dropped++;
4228 			return NETDEV_TX_OK;
4229 		}
4230 	}
4231 
4232 	nfrags = skb_shinfo(skb)->nr_frags;
4233 	queue = skb_get_queue_mapping(skb);
4234 
4235 	tx_q = &priv->dma_conf.tx_queue[queue];
4236 	txq_stats = &priv->xstats.txq_stats[queue];
4237 	first_tx = tx_q->cur_tx;
4238 
4239 	/* Compute header lengths */
4240 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4241 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4242 		hdr = sizeof(struct udphdr);
4243 	} else {
4244 		proto_hdr_len = skb_tcp_all_headers(skb);
4245 		hdr = tcp_hdrlen(skb);
4246 	}
4247 
4248 	/* Desc availability based on threshold should be enough safe */
4249 	if (unlikely(stmmac_tx_avail(priv, queue) <
4250 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4251 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4252 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4253 								queue));
4254 			/* This is a hard error, log it. */
4255 			netdev_err(priv->dev,
4256 				   "%s: Tx Ring full when queue awake\n",
4257 				   __func__);
4258 		}
4259 		return NETDEV_TX_BUSY;
4260 	}
4261 
4262 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4263 
4264 	mss = skb_shinfo(skb)->gso_size;
4265 
4266 	/* set new MSS value if needed */
4267 	if (mss != tx_q->mss) {
4268 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4269 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4270 		else
4271 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4272 
4273 		stmmac_set_mss(priv, mss_desc, mss);
4274 		tx_q->mss = mss;
4275 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4276 						priv->dma_conf.dma_tx_size);
4277 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4278 	}
4279 
4280 	if (netif_msg_tx_queued(priv)) {
4281 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4282 			__func__, hdr, proto_hdr_len, pay_len, mss);
4283 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4284 			skb->data_len);
4285 	}
4286 
4287 	first_entry = tx_q->cur_tx;
4288 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4289 
4290 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4291 		desc = &tx_q->dma_entx[first_entry].basic;
4292 	else
4293 		desc = &tx_q->dma_tx[first_entry];
4294 	first = desc;
4295 
4296 	/* first descriptor: fill Headers on Buf1 */
4297 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4298 			     DMA_TO_DEVICE);
4299 	if (dma_mapping_error(priv->device, des))
4300 		goto dma_map_err;
4301 
4302 	stmmac_set_desc_addr(priv, first, des);
4303 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4304 			     (nfrags == 0), queue);
4305 
4306 	/* In case two or more DMA transmit descriptors are allocated for this
4307 	 * non-paged SKB data, the DMA buffer address should be saved to
4308 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4309 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4310 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4311 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4312 	 * sooner or later.
4313 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4314 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4315 	 * this DMA buffer right after the DMA engine completely finishes the
4316 	 * full buffer transmission.
4317 	 */
4318 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4319 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4320 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4321 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4322 
4323 	/* Prepare fragments */
4324 	for (i = 0; i < nfrags; i++) {
4325 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4326 
4327 		des = skb_frag_dma_map(priv->device, frag, 0,
4328 				       skb_frag_size(frag),
4329 				       DMA_TO_DEVICE);
4330 		if (dma_mapping_error(priv->device, des))
4331 			goto dma_map_err;
4332 
4333 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4334 				     (i == nfrags - 1), queue);
4335 
4336 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4337 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4338 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4339 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4340 	}
4341 
4342 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4343 
4344 	/* Only the last descriptor gets to point to the skb. */
4345 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4346 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4347 
4348 	/* Manage tx mitigation */
4349 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4350 	tx_q->tx_count_frames += tx_packets;
4351 
4352 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4353 		set_ic = true;
4354 	else if (!priv->tx_coal_frames[queue])
4355 		set_ic = false;
4356 	else if (tx_packets > priv->tx_coal_frames[queue])
4357 		set_ic = true;
4358 	else if ((tx_q->tx_count_frames %
4359 		  priv->tx_coal_frames[queue]) < tx_packets)
4360 		set_ic = true;
4361 	else
4362 		set_ic = false;
4363 
4364 	if (set_ic) {
4365 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4366 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4367 		else
4368 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4369 
4370 		tx_q->tx_count_frames = 0;
4371 		stmmac_set_tx_ic(priv, desc);
4372 	}
4373 
4374 	/* We've used all descriptors we need for this skb, however,
4375 	 * advance cur_tx so that it references a fresh descriptor.
4376 	 * ndo_start_xmit will fill this descriptor the next time it's
4377 	 * called and stmmac_tx_clean may clean up to this descriptor.
4378 	 */
4379 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4380 
4381 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4382 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4383 			  __func__);
4384 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4385 	}
4386 
4387 	u64_stats_update_begin(&txq_stats->q_syncp);
4388 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4389 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4390 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4391 	if (set_ic)
4392 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4393 	u64_stats_update_end(&txq_stats->q_syncp);
4394 
4395 	if (priv->sarc_type)
4396 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4397 
4398 	skb_tx_timestamp(skb);
4399 
4400 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4401 		     priv->hwts_tx_en)) {
4402 		/* declare that device is doing timestamping */
4403 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4404 		stmmac_enable_tx_timestamp(priv, first);
4405 	}
4406 
4407 	/* Complete the first descriptor before granting the DMA */
4408 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4409 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4410 				   hdr / 4, (skb->len - proto_hdr_len));
4411 
4412 	/* If context desc is used to change MSS */
4413 	if (mss_desc) {
4414 		/* Make sure that first descriptor has been completely
4415 		 * written, including its own bit. This is because MSS is
4416 		 * actually before first descriptor, so we need to make
4417 		 * sure that MSS's own bit is the last thing written.
4418 		 */
4419 		dma_wmb();
4420 		stmmac_set_tx_owner(priv, mss_desc);
4421 	}
4422 
4423 	if (netif_msg_pktdata(priv)) {
4424 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4425 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4426 			tx_q->cur_tx, first, nfrags);
4427 		pr_info(">>> frame to be transmitted: ");
4428 		print_pkt(skb->data, skb_headlen(skb));
4429 	}
4430 
4431 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4432 
4433 	stmmac_flush_tx_descriptors(priv, queue);
4434 	stmmac_tx_timer_arm(priv, queue);
4435 
4436 	return NETDEV_TX_OK;
4437 
4438 dma_map_err:
4439 	dev_err(priv->device, "Tx dma map failed\n");
4440 	dev_kfree_skb(skb);
4441 	priv->xstats.tx_dropped++;
4442 	return NETDEV_TX_OK;
4443 }
4444 
4445 /**
4446  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4447  * @skb: socket buffer to check
4448  *
4449  * Check if a packet has an ethertype that will trigger the IP header checks
4450  * and IP/TCP checksum engine of the stmmac core.
4451  *
4452  * Return: true if the ethertype can trigger the checksum engine, false
4453  * otherwise
4454  */
4455 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4456 {
4457 	int depth = 0;
4458 	__be16 proto;
4459 
4460 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4461 				    &depth);
4462 
4463 	return (depth <= ETH_HLEN) &&
4464 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4465 }
4466 
4467 /**
4468  *  stmmac_xmit - Tx entry point of the driver
4469  *  @skb : the socket buffer
4470  *  @dev : device pointer
4471  *  Description : this is the tx entry point of the driver.
4472  *  It programs the chain or the ring and supports oversized frames
4473  *  and SG feature.
4474  */
4475 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4476 {
4477 	unsigned int first_entry, tx_packets, enh_desc;
4478 	struct stmmac_priv *priv = netdev_priv(dev);
4479 	unsigned int nopaged_len = skb_headlen(skb);
4480 	int i, csum_insertion = 0, is_jumbo = 0;
4481 	u32 queue = skb_get_queue_mapping(skb);
4482 	int nfrags = skb_shinfo(skb)->nr_frags;
4483 	int gso = skb_shinfo(skb)->gso_type;
4484 	struct stmmac_txq_stats *txq_stats;
4485 	struct dma_edesc *tbs_desc = NULL;
4486 	struct dma_desc *desc, *first;
4487 	struct stmmac_tx_queue *tx_q;
4488 	bool has_vlan, set_ic;
4489 	int entry, first_tx;
4490 	dma_addr_t des;
4491 
4492 	tx_q = &priv->dma_conf.tx_queue[queue];
4493 	txq_stats = &priv->xstats.txq_stats[queue];
4494 	first_tx = tx_q->cur_tx;
4495 
4496 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4497 		stmmac_disable_eee_mode(priv);
4498 
4499 	/* Manage oversized TCP frames for GMAC4 device */
4500 	if (skb_is_gso(skb) && priv->tso) {
4501 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4502 			return stmmac_tso_xmit(skb, dev);
4503 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4504 			return stmmac_tso_xmit(skb, dev);
4505 	}
4506 
4507 	if (priv->est && priv->est->enable &&
4508 	    priv->est->max_sdu[queue] &&
4509 	    skb->len > priv->est->max_sdu[queue]){
4510 		priv->xstats.max_sdu_txq_drop[queue]++;
4511 		goto max_sdu_err;
4512 	}
4513 
4514 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4515 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4516 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4517 								queue));
4518 			/* This is a hard error, log it. */
4519 			netdev_err(priv->dev,
4520 				   "%s: Tx Ring full when queue awake\n",
4521 				   __func__);
4522 		}
4523 		return NETDEV_TX_BUSY;
4524 	}
4525 
4526 	/* Check if VLAN can be inserted by HW */
4527 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4528 
4529 	entry = tx_q->cur_tx;
4530 	first_entry = entry;
4531 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4532 
4533 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4534 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4535 	 * queues. In that case, checksum offloading for those queues that don't
4536 	 * support tx coe needs to fallback to software checksum calculation.
4537 	 *
4538 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4539 	 * also have to be checksummed in software.
4540 	 */
4541 	if (csum_insertion &&
4542 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4543 	     !stmmac_has_ip_ethertype(skb))) {
4544 		if (unlikely(skb_checksum_help(skb)))
4545 			goto dma_map_err;
4546 		csum_insertion = !csum_insertion;
4547 	}
4548 
4549 	if (likely(priv->extend_desc))
4550 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4551 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4552 		desc = &tx_q->dma_entx[entry].basic;
4553 	else
4554 		desc = tx_q->dma_tx + entry;
4555 
4556 	first = desc;
4557 
4558 	if (has_vlan)
4559 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4560 
4561 	enh_desc = priv->plat->enh_desc;
4562 	/* To program the descriptors according to the size of the frame */
4563 	if (enh_desc)
4564 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4565 
4566 	if (unlikely(is_jumbo)) {
4567 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4568 		if (unlikely(entry < 0) && (entry != -EINVAL))
4569 			goto dma_map_err;
4570 	}
4571 
4572 	for (i = 0; i < nfrags; i++) {
4573 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4574 		int len = skb_frag_size(frag);
4575 		bool last_segment = (i == (nfrags - 1));
4576 
4577 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4578 		WARN_ON(tx_q->tx_skbuff[entry]);
4579 
4580 		if (likely(priv->extend_desc))
4581 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4582 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4583 			desc = &tx_q->dma_entx[entry].basic;
4584 		else
4585 			desc = tx_q->dma_tx + entry;
4586 
4587 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4588 				       DMA_TO_DEVICE);
4589 		if (dma_mapping_error(priv->device, des))
4590 			goto dma_map_err; /* should reuse desc w/o issues */
4591 
4592 		tx_q->tx_skbuff_dma[entry].buf = des;
4593 
4594 		stmmac_set_desc_addr(priv, desc, des);
4595 
4596 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4597 		tx_q->tx_skbuff_dma[entry].len = len;
4598 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4599 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4600 
4601 		/* Prepare the descriptor and set the own bit too */
4602 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4603 				priv->mode, 1, last_segment, skb->len);
4604 	}
4605 
4606 	/* Only the last descriptor gets to point to the skb. */
4607 	tx_q->tx_skbuff[entry] = skb;
4608 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4609 
4610 	/* According to the coalesce parameter the IC bit for the latest
4611 	 * segment is reset and the timer re-started to clean the tx status.
4612 	 * This approach takes care about the fragments: desc is the first
4613 	 * element in case of no SG.
4614 	 */
4615 	tx_packets = (entry + 1) - first_tx;
4616 	tx_q->tx_count_frames += tx_packets;
4617 
4618 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4619 		set_ic = true;
4620 	else if (!priv->tx_coal_frames[queue])
4621 		set_ic = false;
4622 	else if (tx_packets > priv->tx_coal_frames[queue])
4623 		set_ic = true;
4624 	else if ((tx_q->tx_count_frames %
4625 		  priv->tx_coal_frames[queue]) < tx_packets)
4626 		set_ic = true;
4627 	else
4628 		set_ic = false;
4629 
4630 	if (set_ic) {
4631 		if (likely(priv->extend_desc))
4632 			desc = &tx_q->dma_etx[entry].basic;
4633 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4634 			desc = &tx_q->dma_entx[entry].basic;
4635 		else
4636 			desc = &tx_q->dma_tx[entry];
4637 
4638 		tx_q->tx_count_frames = 0;
4639 		stmmac_set_tx_ic(priv, desc);
4640 	}
4641 
4642 	/* We've used all descriptors we need for this skb, however,
4643 	 * advance cur_tx so that it references a fresh descriptor.
4644 	 * ndo_start_xmit will fill this descriptor the next time it's
4645 	 * called and stmmac_tx_clean may clean up to this descriptor.
4646 	 */
4647 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4648 	tx_q->cur_tx = entry;
4649 
4650 	if (netif_msg_pktdata(priv)) {
4651 		netdev_dbg(priv->dev,
4652 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4653 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4654 			   entry, first, nfrags);
4655 
4656 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4657 		print_pkt(skb->data, skb->len);
4658 	}
4659 
4660 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4661 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4662 			  __func__);
4663 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4664 	}
4665 
4666 	u64_stats_update_begin(&txq_stats->q_syncp);
4667 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4668 	if (set_ic)
4669 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4670 	u64_stats_update_end(&txq_stats->q_syncp);
4671 
4672 	if (priv->sarc_type)
4673 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4674 
4675 	skb_tx_timestamp(skb);
4676 
4677 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4678 	 * problems because all the descriptors are actually ready to be
4679 	 * passed to the DMA engine.
4680 	 */
4681 	if (likely(!is_jumbo)) {
4682 		bool last_segment = (nfrags == 0);
4683 
4684 		des = dma_map_single(priv->device, skb->data,
4685 				     nopaged_len, DMA_TO_DEVICE);
4686 		if (dma_mapping_error(priv->device, des))
4687 			goto dma_map_err;
4688 
4689 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4690 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4691 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4692 
4693 		stmmac_set_desc_addr(priv, first, des);
4694 
4695 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4696 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4697 
4698 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4699 			     priv->hwts_tx_en)) {
4700 			/* declare that device is doing timestamping */
4701 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4702 			stmmac_enable_tx_timestamp(priv, first);
4703 		}
4704 
4705 		/* Prepare the first descriptor setting the OWN bit too */
4706 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4707 				csum_insertion, priv->mode, 0, last_segment,
4708 				skb->len);
4709 	}
4710 
4711 	if (tx_q->tbs & STMMAC_TBS_EN) {
4712 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4713 
4714 		tbs_desc = &tx_q->dma_entx[first_entry];
4715 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4716 	}
4717 
4718 	stmmac_set_tx_owner(priv, first);
4719 
4720 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4721 
4722 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4723 
4724 	stmmac_flush_tx_descriptors(priv, queue);
4725 	stmmac_tx_timer_arm(priv, queue);
4726 
4727 	return NETDEV_TX_OK;
4728 
4729 dma_map_err:
4730 	netdev_err(priv->dev, "Tx DMA map failed\n");
4731 max_sdu_err:
4732 	dev_kfree_skb(skb);
4733 	priv->xstats.tx_dropped++;
4734 	return NETDEV_TX_OK;
4735 }
4736 
4737 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4738 {
4739 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4740 	__be16 vlan_proto = veth->h_vlan_proto;
4741 	u16 vlanid;
4742 
4743 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4744 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4745 	    (vlan_proto == htons(ETH_P_8021AD) &&
4746 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4747 		/* pop the vlan tag */
4748 		vlanid = ntohs(veth->h_vlan_TCI);
4749 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4750 		skb_pull(skb, VLAN_HLEN);
4751 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4752 	}
4753 }
4754 
4755 /**
4756  * stmmac_rx_refill - refill used skb preallocated buffers
4757  * @priv: driver private structure
4758  * @queue: RX queue index
4759  * Description : this is to reallocate the skb for the reception process
4760  * that is based on zero-copy.
4761  */
4762 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4763 {
4764 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4765 	int dirty = stmmac_rx_dirty(priv, queue);
4766 	unsigned int entry = rx_q->dirty_rx;
4767 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4768 
4769 	if (priv->dma_cap.host_dma_width <= 32)
4770 		gfp |= GFP_DMA32;
4771 
4772 	while (dirty-- > 0) {
4773 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4774 		struct dma_desc *p;
4775 		bool use_rx_wd;
4776 
4777 		if (priv->extend_desc)
4778 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4779 		else
4780 			p = rx_q->dma_rx + entry;
4781 
4782 		if (!buf->page) {
4783 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4784 			if (!buf->page)
4785 				break;
4786 		}
4787 
4788 		if (priv->sph && !buf->sec_page) {
4789 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4790 			if (!buf->sec_page)
4791 				break;
4792 
4793 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4794 		}
4795 
4796 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4797 
4798 		stmmac_set_desc_addr(priv, p, buf->addr);
4799 		if (priv->sph)
4800 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4801 		else
4802 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4803 		stmmac_refill_desc3(priv, rx_q, p);
4804 
4805 		rx_q->rx_count_frames++;
4806 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4807 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4808 			rx_q->rx_count_frames = 0;
4809 
4810 		use_rx_wd = !priv->rx_coal_frames[queue];
4811 		use_rx_wd |= rx_q->rx_count_frames > 0;
4812 		if (!priv->use_riwt)
4813 			use_rx_wd = false;
4814 
4815 		dma_wmb();
4816 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4817 
4818 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4819 	}
4820 	rx_q->dirty_rx = entry;
4821 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4822 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4823 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4824 }
4825 
4826 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4827 				       struct dma_desc *p,
4828 				       int status, unsigned int len)
4829 {
4830 	unsigned int plen = 0, hlen = 0;
4831 	int coe = priv->hw->rx_csum;
4832 
4833 	/* Not first descriptor, buffer is always zero */
4834 	if (priv->sph && len)
4835 		return 0;
4836 
4837 	/* First descriptor, get split header length */
4838 	stmmac_get_rx_header_len(priv, p, &hlen);
4839 	if (priv->sph && hlen) {
4840 		priv->xstats.rx_split_hdr_pkt_n++;
4841 		return hlen;
4842 	}
4843 
4844 	/* First descriptor, not last descriptor and not split header */
4845 	if (status & rx_not_ls)
4846 		return priv->dma_conf.dma_buf_sz;
4847 
4848 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4849 
4850 	/* First descriptor and last descriptor and not split header */
4851 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4852 }
4853 
4854 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4855 				       struct dma_desc *p,
4856 				       int status, unsigned int len)
4857 {
4858 	int coe = priv->hw->rx_csum;
4859 	unsigned int plen = 0;
4860 
4861 	/* Not split header, buffer is not available */
4862 	if (!priv->sph)
4863 		return 0;
4864 
4865 	/* Not last descriptor */
4866 	if (status & rx_not_ls)
4867 		return priv->dma_conf.dma_buf_sz;
4868 
4869 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4870 
4871 	/* Last descriptor */
4872 	return plen - len;
4873 }
4874 
4875 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4876 				struct xdp_frame *xdpf, bool dma_map)
4877 {
4878 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4879 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4880 	unsigned int entry = tx_q->cur_tx;
4881 	struct dma_desc *tx_desc;
4882 	dma_addr_t dma_addr;
4883 	bool set_ic;
4884 
4885 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4886 		return STMMAC_XDP_CONSUMED;
4887 
4888 	if (priv->est && priv->est->enable &&
4889 	    priv->est->max_sdu[queue] &&
4890 	    xdpf->len > priv->est->max_sdu[queue]) {
4891 		priv->xstats.max_sdu_txq_drop[queue]++;
4892 		return STMMAC_XDP_CONSUMED;
4893 	}
4894 
4895 	if (likely(priv->extend_desc))
4896 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4897 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4898 		tx_desc = &tx_q->dma_entx[entry].basic;
4899 	else
4900 		tx_desc = tx_q->dma_tx + entry;
4901 
4902 	if (dma_map) {
4903 		dma_addr = dma_map_single(priv->device, xdpf->data,
4904 					  xdpf->len, DMA_TO_DEVICE);
4905 		if (dma_mapping_error(priv->device, dma_addr))
4906 			return STMMAC_XDP_CONSUMED;
4907 
4908 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4909 	} else {
4910 		struct page *page = virt_to_page(xdpf->data);
4911 
4912 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4913 			   xdpf->headroom;
4914 		dma_sync_single_for_device(priv->device, dma_addr,
4915 					   xdpf->len, DMA_BIDIRECTIONAL);
4916 
4917 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4918 	}
4919 
4920 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4921 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4922 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4923 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4924 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4925 
4926 	tx_q->xdpf[entry] = xdpf;
4927 
4928 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4929 
4930 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4931 			       true, priv->mode, true, true,
4932 			       xdpf->len);
4933 
4934 	tx_q->tx_count_frames++;
4935 
4936 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4937 		set_ic = true;
4938 	else
4939 		set_ic = false;
4940 
4941 	if (set_ic) {
4942 		tx_q->tx_count_frames = 0;
4943 		stmmac_set_tx_ic(priv, tx_desc);
4944 		u64_stats_update_begin(&txq_stats->q_syncp);
4945 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4946 		u64_stats_update_end(&txq_stats->q_syncp);
4947 	}
4948 
4949 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4950 
4951 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4952 	tx_q->cur_tx = entry;
4953 
4954 	return STMMAC_XDP_TX;
4955 }
4956 
4957 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4958 				   int cpu)
4959 {
4960 	int index = cpu;
4961 
4962 	if (unlikely(index < 0))
4963 		index = 0;
4964 
4965 	while (index >= priv->plat->tx_queues_to_use)
4966 		index -= priv->plat->tx_queues_to_use;
4967 
4968 	return index;
4969 }
4970 
4971 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4972 				struct xdp_buff *xdp)
4973 {
4974 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4975 	int cpu = smp_processor_id();
4976 	struct netdev_queue *nq;
4977 	int queue;
4978 	int res;
4979 
4980 	if (unlikely(!xdpf))
4981 		return STMMAC_XDP_CONSUMED;
4982 
4983 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4984 	nq = netdev_get_tx_queue(priv->dev, queue);
4985 
4986 	__netif_tx_lock(nq, cpu);
4987 	/* Avoids TX time-out as we are sharing with slow path */
4988 	txq_trans_cond_update(nq);
4989 
4990 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4991 	if (res == STMMAC_XDP_TX)
4992 		stmmac_flush_tx_descriptors(priv, queue);
4993 
4994 	__netif_tx_unlock(nq);
4995 
4996 	return res;
4997 }
4998 
4999 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5000 				 struct bpf_prog *prog,
5001 				 struct xdp_buff *xdp)
5002 {
5003 	u32 act;
5004 	int res;
5005 
5006 	act = bpf_prog_run_xdp(prog, xdp);
5007 	switch (act) {
5008 	case XDP_PASS:
5009 		res = STMMAC_XDP_PASS;
5010 		break;
5011 	case XDP_TX:
5012 		res = stmmac_xdp_xmit_back(priv, xdp);
5013 		break;
5014 	case XDP_REDIRECT:
5015 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5016 			res = STMMAC_XDP_CONSUMED;
5017 		else
5018 			res = STMMAC_XDP_REDIRECT;
5019 		break;
5020 	default:
5021 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5022 		fallthrough;
5023 	case XDP_ABORTED:
5024 		trace_xdp_exception(priv->dev, prog, act);
5025 		fallthrough;
5026 	case XDP_DROP:
5027 		res = STMMAC_XDP_CONSUMED;
5028 		break;
5029 	}
5030 
5031 	return res;
5032 }
5033 
5034 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5035 					   struct xdp_buff *xdp)
5036 {
5037 	struct bpf_prog *prog;
5038 	int res;
5039 
5040 	prog = READ_ONCE(priv->xdp_prog);
5041 	if (!prog) {
5042 		res = STMMAC_XDP_PASS;
5043 		goto out;
5044 	}
5045 
5046 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5047 out:
5048 	return ERR_PTR(-res);
5049 }
5050 
5051 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5052 				   int xdp_status)
5053 {
5054 	int cpu = smp_processor_id();
5055 	int queue;
5056 
5057 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5058 
5059 	if (xdp_status & STMMAC_XDP_TX)
5060 		stmmac_tx_timer_arm(priv, queue);
5061 
5062 	if (xdp_status & STMMAC_XDP_REDIRECT)
5063 		xdp_do_flush();
5064 }
5065 
5066 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5067 					       struct xdp_buff *xdp)
5068 {
5069 	unsigned int metasize = xdp->data - xdp->data_meta;
5070 	unsigned int datasize = xdp->data_end - xdp->data;
5071 	struct sk_buff *skb;
5072 
5073 	skb = napi_alloc_skb(&ch->rxtx_napi,
5074 			     xdp->data_end - xdp->data_hard_start);
5075 	if (unlikely(!skb))
5076 		return NULL;
5077 
5078 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5079 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5080 	if (metasize)
5081 		skb_metadata_set(skb, metasize);
5082 
5083 	return skb;
5084 }
5085 
5086 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5087 				   struct dma_desc *p, struct dma_desc *np,
5088 				   struct xdp_buff *xdp)
5089 {
5090 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5091 	struct stmmac_channel *ch = &priv->channel[queue];
5092 	unsigned int len = xdp->data_end - xdp->data;
5093 	enum pkt_hash_types hash_type;
5094 	int coe = priv->hw->rx_csum;
5095 	struct sk_buff *skb;
5096 	u32 hash;
5097 
5098 	skb = stmmac_construct_skb_zc(ch, xdp);
5099 	if (!skb) {
5100 		priv->xstats.rx_dropped++;
5101 		return;
5102 	}
5103 
5104 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5105 	if (priv->hw->hw_vlan_en)
5106 		/* MAC level stripping. */
5107 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5108 	else
5109 		/* Driver level stripping. */
5110 		stmmac_rx_vlan(priv->dev, skb);
5111 	skb->protocol = eth_type_trans(skb, priv->dev);
5112 
5113 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5114 		skb_checksum_none_assert(skb);
5115 	else
5116 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5117 
5118 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5119 		skb_set_hash(skb, hash, hash_type);
5120 
5121 	skb_record_rx_queue(skb, queue);
5122 	napi_gro_receive(&ch->rxtx_napi, skb);
5123 
5124 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5125 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5126 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5127 	u64_stats_update_end(&rxq_stats->napi_syncp);
5128 }
5129 
5130 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5131 {
5132 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5133 	unsigned int entry = rx_q->dirty_rx;
5134 	struct dma_desc *rx_desc = NULL;
5135 	bool ret = true;
5136 
5137 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5138 
5139 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5140 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5141 		dma_addr_t dma_addr;
5142 		bool use_rx_wd;
5143 
5144 		if (!buf->xdp) {
5145 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5146 			if (!buf->xdp) {
5147 				ret = false;
5148 				break;
5149 			}
5150 		}
5151 
5152 		if (priv->extend_desc)
5153 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5154 		else
5155 			rx_desc = rx_q->dma_rx + entry;
5156 
5157 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5158 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5159 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5160 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5161 
5162 		rx_q->rx_count_frames++;
5163 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5164 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5165 			rx_q->rx_count_frames = 0;
5166 
5167 		use_rx_wd = !priv->rx_coal_frames[queue];
5168 		use_rx_wd |= rx_q->rx_count_frames > 0;
5169 		if (!priv->use_riwt)
5170 			use_rx_wd = false;
5171 
5172 		dma_wmb();
5173 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5174 
5175 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5176 	}
5177 
5178 	if (rx_desc) {
5179 		rx_q->dirty_rx = entry;
5180 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5181 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5182 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5183 	}
5184 
5185 	return ret;
5186 }
5187 
5188 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5189 {
5190 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5191 	 * to represent incoming packet, whereas cb field in the same structure
5192 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5193 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5194 	 */
5195 	return (struct stmmac_xdp_buff *)xdp;
5196 }
5197 
5198 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5199 {
5200 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5201 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5202 	unsigned int count = 0, error = 0, len = 0;
5203 	int dirty = stmmac_rx_dirty(priv, queue);
5204 	unsigned int next_entry = rx_q->cur_rx;
5205 	u32 rx_errors = 0, rx_dropped = 0;
5206 	unsigned int desc_size;
5207 	struct bpf_prog *prog;
5208 	bool failure = false;
5209 	int xdp_status = 0;
5210 	int status = 0;
5211 
5212 	if (netif_msg_rx_status(priv)) {
5213 		void *rx_head;
5214 
5215 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5216 		if (priv->extend_desc) {
5217 			rx_head = (void *)rx_q->dma_erx;
5218 			desc_size = sizeof(struct dma_extended_desc);
5219 		} else {
5220 			rx_head = (void *)rx_q->dma_rx;
5221 			desc_size = sizeof(struct dma_desc);
5222 		}
5223 
5224 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5225 				    rx_q->dma_rx_phy, desc_size);
5226 	}
5227 	while (count < limit) {
5228 		struct stmmac_rx_buffer *buf;
5229 		struct stmmac_xdp_buff *ctx;
5230 		unsigned int buf1_len = 0;
5231 		struct dma_desc *np, *p;
5232 		int entry;
5233 		int res;
5234 
5235 		if (!count && rx_q->state_saved) {
5236 			error = rx_q->state.error;
5237 			len = rx_q->state.len;
5238 		} else {
5239 			rx_q->state_saved = false;
5240 			error = 0;
5241 			len = 0;
5242 		}
5243 
5244 		if (count >= limit)
5245 			break;
5246 
5247 read_again:
5248 		buf1_len = 0;
5249 		entry = next_entry;
5250 		buf = &rx_q->buf_pool[entry];
5251 
5252 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5253 			failure = failure ||
5254 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5255 			dirty = 0;
5256 		}
5257 
5258 		if (priv->extend_desc)
5259 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5260 		else
5261 			p = rx_q->dma_rx + entry;
5262 
5263 		/* read the status of the incoming frame */
5264 		status = stmmac_rx_status(priv, &priv->xstats, p);
5265 		/* check if managed by the DMA otherwise go ahead */
5266 		if (unlikely(status & dma_own))
5267 			break;
5268 
5269 		/* Prefetch the next RX descriptor */
5270 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5271 						priv->dma_conf.dma_rx_size);
5272 		next_entry = rx_q->cur_rx;
5273 
5274 		if (priv->extend_desc)
5275 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5276 		else
5277 			np = rx_q->dma_rx + next_entry;
5278 
5279 		prefetch(np);
5280 
5281 		/* Ensure a valid XSK buffer before proceed */
5282 		if (!buf->xdp)
5283 			break;
5284 
5285 		if (priv->extend_desc)
5286 			stmmac_rx_extended_status(priv, &priv->xstats,
5287 						  rx_q->dma_erx + entry);
5288 		if (unlikely(status == discard_frame)) {
5289 			xsk_buff_free(buf->xdp);
5290 			buf->xdp = NULL;
5291 			dirty++;
5292 			error = 1;
5293 			if (!priv->hwts_rx_en)
5294 				rx_errors++;
5295 		}
5296 
5297 		if (unlikely(error && (status & rx_not_ls)))
5298 			goto read_again;
5299 		if (unlikely(error)) {
5300 			count++;
5301 			continue;
5302 		}
5303 
5304 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5305 		if (likely(status & rx_not_ls)) {
5306 			xsk_buff_free(buf->xdp);
5307 			buf->xdp = NULL;
5308 			dirty++;
5309 			count++;
5310 			goto read_again;
5311 		}
5312 
5313 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5314 		ctx->priv = priv;
5315 		ctx->desc = p;
5316 		ctx->ndesc = np;
5317 
5318 		/* XDP ZC Frame only support primary buffers for now */
5319 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5320 		len += buf1_len;
5321 
5322 		/* ACS is disabled; strip manually. */
5323 		if (likely(!(status & rx_not_ls))) {
5324 			buf1_len -= ETH_FCS_LEN;
5325 			len -= ETH_FCS_LEN;
5326 		}
5327 
5328 		/* RX buffer is good and fit into a XSK pool buffer */
5329 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5330 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5331 
5332 		prog = READ_ONCE(priv->xdp_prog);
5333 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5334 
5335 		switch (res) {
5336 		case STMMAC_XDP_PASS:
5337 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5338 			xsk_buff_free(buf->xdp);
5339 			break;
5340 		case STMMAC_XDP_CONSUMED:
5341 			xsk_buff_free(buf->xdp);
5342 			rx_dropped++;
5343 			break;
5344 		case STMMAC_XDP_TX:
5345 		case STMMAC_XDP_REDIRECT:
5346 			xdp_status |= res;
5347 			break;
5348 		}
5349 
5350 		buf->xdp = NULL;
5351 		dirty++;
5352 		count++;
5353 	}
5354 
5355 	if (status & rx_not_ls) {
5356 		rx_q->state_saved = true;
5357 		rx_q->state.error = error;
5358 		rx_q->state.len = len;
5359 	}
5360 
5361 	stmmac_finalize_xdp_rx(priv, xdp_status);
5362 
5363 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5364 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5365 	u64_stats_update_end(&rxq_stats->napi_syncp);
5366 
5367 	priv->xstats.rx_dropped += rx_dropped;
5368 	priv->xstats.rx_errors += rx_errors;
5369 
5370 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5371 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5372 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5373 		else
5374 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5375 
5376 		return (int)count;
5377 	}
5378 
5379 	return failure ? limit : (int)count;
5380 }
5381 
5382 /**
5383  * stmmac_rx - manage the receive process
5384  * @priv: driver private structure
5385  * @limit: napi bugget
5386  * @queue: RX queue index.
5387  * Description :  this the function called by the napi poll method.
5388  * It gets all the frames inside the ring.
5389  */
5390 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5391 {
5392 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5393 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5394 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5395 	struct stmmac_channel *ch = &priv->channel[queue];
5396 	unsigned int count = 0, error = 0, len = 0;
5397 	int status = 0, coe = priv->hw->rx_csum;
5398 	unsigned int next_entry = rx_q->cur_rx;
5399 	enum dma_data_direction dma_dir;
5400 	unsigned int desc_size;
5401 	struct sk_buff *skb = NULL;
5402 	struct stmmac_xdp_buff ctx;
5403 	int xdp_status = 0;
5404 	int buf_sz;
5405 
5406 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5407 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5408 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5409 
5410 	if (netif_msg_rx_status(priv)) {
5411 		void *rx_head;
5412 
5413 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5414 		if (priv->extend_desc) {
5415 			rx_head = (void *)rx_q->dma_erx;
5416 			desc_size = sizeof(struct dma_extended_desc);
5417 		} else {
5418 			rx_head = (void *)rx_q->dma_rx;
5419 			desc_size = sizeof(struct dma_desc);
5420 		}
5421 
5422 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5423 				    rx_q->dma_rx_phy, desc_size);
5424 	}
5425 	while (count < limit) {
5426 		unsigned int buf1_len = 0, buf2_len = 0;
5427 		enum pkt_hash_types hash_type;
5428 		struct stmmac_rx_buffer *buf;
5429 		struct dma_desc *np, *p;
5430 		int entry;
5431 		u32 hash;
5432 
5433 		if (!count && rx_q->state_saved) {
5434 			skb = rx_q->state.skb;
5435 			error = rx_q->state.error;
5436 			len = rx_q->state.len;
5437 		} else {
5438 			rx_q->state_saved = false;
5439 			skb = NULL;
5440 			error = 0;
5441 			len = 0;
5442 		}
5443 
5444 read_again:
5445 		if (count >= limit)
5446 			break;
5447 
5448 		buf1_len = 0;
5449 		buf2_len = 0;
5450 		entry = next_entry;
5451 		buf = &rx_q->buf_pool[entry];
5452 
5453 		if (priv->extend_desc)
5454 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5455 		else
5456 			p = rx_q->dma_rx + entry;
5457 
5458 		/* read the status of the incoming frame */
5459 		status = stmmac_rx_status(priv, &priv->xstats, p);
5460 		/* check if managed by the DMA otherwise go ahead */
5461 		if (unlikely(status & dma_own))
5462 			break;
5463 
5464 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5465 						priv->dma_conf.dma_rx_size);
5466 		next_entry = rx_q->cur_rx;
5467 
5468 		if (priv->extend_desc)
5469 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5470 		else
5471 			np = rx_q->dma_rx + next_entry;
5472 
5473 		prefetch(np);
5474 
5475 		if (priv->extend_desc)
5476 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5477 		if (unlikely(status == discard_frame)) {
5478 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5479 			buf->page = NULL;
5480 			error = 1;
5481 			if (!priv->hwts_rx_en)
5482 				rx_errors++;
5483 		}
5484 
5485 		if (unlikely(error && (status & rx_not_ls)))
5486 			goto read_again;
5487 		if (unlikely(error)) {
5488 			dev_kfree_skb(skb);
5489 			skb = NULL;
5490 			count++;
5491 			continue;
5492 		}
5493 
5494 		/* Buffer is good. Go on. */
5495 
5496 		prefetch(page_address(buf->page) + buf->page_offset);
5497 		if (buf->sec_page)
5498 			prefetch(page_address(buf->sec_page));
5499 
5500 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5501 		len += buf1_len;
5502 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5503 		len += buf2_len;
5504 
5505 		/* ACS is disabled; strip manually. */
5506 		if (likely(!(status & rx_not_ls))) {
5507 			if (buf2_len) {
5508 				buf2_len -= ETH_FCS_LEN;
5509 				len -= ETH_FCS_LEN;
5510 			} else if (buf1_len) {
5511 				buf1_len -= ETH_FCS_LEN;
5512 				len -= ETH_FCS_LEN;
5513 			}
5514 		}
5515 
5516 		if (!skb) {
5517 			unsigned int pre_len, sync_len;
5518 
5519 			dma_sync_single_for_cpu(priv->device, buf->addr,
5520 						buf1_len, dma_dir);
5521 
5522 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5523 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5524 					 buf->page_offset, buf1_len, true);
5525 
5526 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5527 				  buf->page_offset;
5528 
5529 			ctx.priv = priv;
5530 			ctx.desc = p;
5531 			ctx.ndesc = np;
5532 
5533 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5534 			/* Due xdp_adjust_tail: DMA sync for_device
5535 			 * cover max len CPU touch
5536 			 */
5537 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5538 				   buf->page_offset;
5539 			sync_len = max(sync_len, pre_len);
5540 
5541 			/* For Not XDP_PASS verdict */
5542 			if (IS_ERR(skb)) {
5543 				unsigned int xdp_res = -PTR_ERR(skb);
5544 
5545 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5546 					page_pool_put_page(rx_q->page_pool,
5547 							   virt_to_head_page(ctx.xdp.data),
5548 							   sync_len, true);
5549 					buf->page = NULL;
5550 					rx_dropped++;
5551 
5552 					/* Clear skb as it was set as
5553 					 * status by XDP program.
5554 					 */
5555 					skb = NULL;
5556 
5557 					if (unlikely((status & rx_not_ls)))
5558 						goto read_again;
5559 
5560 					count++;
5561 					continue;
5562 				} else if (xdp_res & (STMMAC_XDP_TX |
5563 						      STMMAC_XDP_REDIRECT)) {
5564 					xdp_status |= xdp_res;
5565 					buf->page = NULL;
5566 					skb = NULL;
5567 					count++;
5568 					continue;
5569 				}
5570 			}
5571 		}
5572 
5573 		if (!skb) {
5574 			/* XDP program may expand or reduce tail */
5575 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5576 
5577 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5578 			if (!skb) {
5579 				rx_dropped++;
5580 				count++;
5581 				goto drain_data;
5582 			}
5583 
5584 			/* XDP program may adjust header */
5585 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5586 			skb_put(skb, buf1_len);
5587 
5588 			/* Data payload copied into SKB, page ready for recycle */
5589 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5590 			buf->page = NULL;
5591 		} else if (buf1_len) {
5592 			dma_sync_single_for_cpu(priv->device, buf->addr,
5593 						buf1_len, dma_dir);
5594 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5595 					buf->page, buf->page_offset, buf1_len,
5596 					priv->dma_conf.dma_buf_sz);
5597 
5598 			/* Data payload appended into SKB */
5599 			skb_mark_for_recycle(skb);
5600 			buf->page = NULL;
5601 		}
5602 
5603 		if (buf2_len) {
5604 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5605 						buf2_len, dma_dir);
5606 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5607 					buf->sec_page, 0, buf2_len,
5608 					priv->dma_conf.dma_buf_sz);
5609 
5610 			/* Data payload appended into SKB */
5611 			skb_mark_for_recycle(skb);
5612 			buf->sec_page = NULL;
5613 		}
5614 
5615 drain_data:
5616 		if (likely(status & rx_not_ls))
5617 			goto read_again;
5618 		if (!skb)
5619 			continue;
5620 
5621 		/* Got entire packet into SKB. Finish it. */
5622 
5623 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5624 
5625 		if (priv->hw->hw_vlan_en)
5626 			/* MAC level stripping. */
5627 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5628 		else
5629 			/* Driver level stripping. */
5630 			stmmac_rx_vlan(priv->dev, skb);
5631 
5632 		skb->protocol = eth_type_trans(skb, priv->dev);
5633 
5634 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5635 			skb_checksum_none_assert(skb);
5636 		else
5637 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5638 
5639 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5640 			skb_set_hash(skb, hash, hash_type);
5641 
5642 		skb_record_rx_queue(skb, queue);
5643 		napi_gro_receive(&ch->rx_napi, skb);
5644 		skb = NULL;
5645 
5646 		rx_packets++;
5647 		rx_bytes += len;
5648 		count++;
5649 	}
5650 
5651 	if (status & rx_not_ls || skb) {
5652 		rx_q->state_saved = true;
5653 		rx_q->state.skb = skb;
5654 		rx_q->state.error = error;
5655 		rx_q->state.len = len;
5656 	}
5657 
5658 	stmmac_finalize_xdp_rx(priv, xdp_status);
5659 
5660 	stmmac_rx_refill(priv, queue);
5661 
5662 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5663 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5664 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5665 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5666 	u64_stats_update_end(&rxq_stats->napi_syncp);
5667 
5668 	priv->xstats.rx_dropped += rx_dropped;
5669 	priv->xstats.rx_errors += rx_errors;
5670 
5671 	return count;
5672 }
5673 
5674 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5675 {
5676 	struct stmmac_channel *ch =
5677 		container_of(napi, struct stmmac_channel, rx_napi);
5678 	struct stmmac_priv *priv = ch->priv_data;
5679 	struct stmmac_rxq_stats *rxq_stats;
5680 	u32 chan = ch->index;
5681 	int work_done;
5682 
5683 	rxq_stats = &priv->xstats.rxq_stats[chan];
5684 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5685 	u64_stats_inc(&rxq_stats->napi.poll);
5686 	u64_stats_update_end(&rxq_stats->napi_syncp);
5687 
5688 	work_done = stmmac_rx(priv, budget, chan);
5689 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5690 		unsigned long flags;
5691 
5692 		spin_lock_irqsave(&ch->lock, flags);
5693 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5694 		spin_unlock_irqrestore(&ch->lock, flags);
5695 	}
5696 
5697 	return work_done;
5698 }
5699 
5700 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5701 {
5702 	struct stmmac_channel *ch =
5703 		container_of(napi, struct stmmac_channel, tx_napi);
5704 	struct stmmac_priv *priv = ch->priv_data;
5705 	struct stmmac_txq_stats *txq_stats;
5706 	bool pending_packets = false;
5707 	u32 chan = ch->index;
5708 	int work_done;
5709 
5710 	txq_stats = &priv->xstats.txq_stats[chan];
5711 	u64_stats_update_begin(&txq_stats->napi_syncp);
5712 	u64_stats_inc(&txq_stats->napi.poll);
5713 	u64_stats_update_end(&txq_stats->napi_syncp);
5714 
5715 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5716 	work_done = min(work_done, budget);
5717 
5718 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5719 		unsigned long flags;
5720 
5721 		spin_lock_irqsave(&ch->lock, flags);
5722 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5723 		spin_unlock_irqrestore(&ch->lock, flags);
5724 	}
5725 
5726 	/* TX still have packet to handle, check if we need to arm tx timer */
5727 	if (pending_packets)
5728 		stmmac_tx_timer_arm(priv, chan);
5729 
5730 	return work_done;
5731 }
5732 
5733 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5734 {
5735 	struct stmmac_channel *ch =
5736 		container_of(napi, struct stmmac_channel, rxtx_napi);
5737 	struct stmmac_priv *priv = ch->priv_data;
5738 	bool tx_pending_packets = false;
5739 	int rx_done, tx_done, rxtx_done;
5740 	struct stmmac_rxq_stats *rxq_stats;
5741 	struct stmmac_txq_stats *txq_stats;
5742 	u32 chan = ch->index;
5743 
5744 	rxq_stats = &priv->xstats.rxq_stats[chan];
5745 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5746 	u64_stats_inc(&rxq_stats->napi.poll);
5747 	u64_stats_update_end(&rxq_stats->napi_syncp);
5748 
5749 	txq_stats = &priv->xstats.txq_stats[chan];
5750 	u64_stats_update_begin(&txq_stats->napi_syncp);
5751 	u64_stats_inc(&txq_stats->napi.poll);
5752 	u64_stats_update_end(&txq_stats->napi_syncp);
5753 
5754 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5755 	tx_done = min(tx_done, budget);
5756 
5757 	rx_done = stmmac_rx_zc(priv, budget, chan);
5758 
5759 	rxtx_done = max(tx_done, rx_done);
5760 
5761 	/* If either TX or RX work is not complete, return budget
5762 	 * and keep pooling
5763 	 */
5764 	if (rxtx_done >= budget)
5765 		return budget;
5766 
5767 	/* all work done, exit the polling mode */
5768 	if (napi_complete_done(napi, rxtx_done)) {
5769 		unsigned long flags;
5770 
5771 		spin_lock_irqsave(&ch->lock, flags);
5772 		/* Both RX and TX work done are compelte,
5773 		 * so enable both RX & TX IRQs.
5774 		 */
5775 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5776 		spin_unlock_irqrestore(&ch->lock, flags);
5777 	}
5778 
5779 	/* TX still have packet to handle, check if we need to arm tx timer */
5780 	if (tx_pending_packets)
5781 		stmmac_tx_timer_arm(priv, chan);
5782 
5783 	return min(rxtx_done, budget - 1);
5784 }
5785 
5786 /**
5787  *  stmmac_tx_timeout
5788  *  @dev : Pointer to net device structure
5789  *  @txqueue: the index of the hanging transmit queue
5790  *  Description: this function is called when a packet transmission fails to
5791  *   complete within a reasonable time. The driver will mark the error in the
5792  *   netdev structure and arrange for the device to be reset to a sane state
5793  *   in order to transmit a new packet.
5794  */
5795 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5796 {
5797 	struct stmmac_priv *priv = netdev_priv(dev);
5798 
5799 	stmmac_global_err(priv);
5800 }
5801 
5802 /**
5803  *  stmmac_set_rx_mode - entry point for multicast addressing
5804  *  @dev : pointer to the device structure
5805  *  Description:
5806  *  This function is a driver entry point which gets called by the kernel
5807  *  whenever multicast addresses must be enabled/disabled.
5808  *  Return value:
5809  *  void.
5810  */
5811 static void stmmac_set_rx_mode(struct net_device *dev)
5812 {
5813 	struct stmmac_priv *priv = netdev_priv(dev);
5814 
5815 	stmmac_set_filter(priv, priv->hw, dev);
5816 }
5817 
5818 /**
5819  *  stmmac_change_mtu - entry point to change MTU size for the device.
5820  *  @dev : device pointer.
5821  *  @new_mtu : the new MTU size for the device.
5822  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5823  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5824  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5825  *  Return value:
5826  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5827  *  file on failure.
5828  */
5829 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5830 {
5831 	struct stmmac_priv *priv = netdev_priv(dev);
5832 	int txfifosz = priv->plat->tx_fifo_size;
5833 	struct stmmac_dma_conf *dma_conf;
5834 	const int mtu = new_mtu;
5835 	int ret;
5836 
5837 	if (txfifosz == 0)
5838 		txfifosz = priv->dma_cap.tx_fifo_size;
5839 
5840 	txfifosz /= priv->plat->tx_queues_to_use;
5841 
5842 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5843 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5844 		return -EINVAL;
5845 	}
5846 
5847 	new_mtu = STMMAC_ALIGN(new_mtu);
5848 
5849 	/* If condition true, FIFO is too small or MTU too large */
5850 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5851 		return -EINVAL;
5852 
5853 	if (netif_running(dev)) {
5854 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5855 		/* Try to allocate the new DMA conf with the new mtu */
5856 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5857 		if (IS_ERR(dma_conf)) {
5858 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5859 				   mtu);
5860 			return PTR_ERR(dma_conf);
5861 		}
5862 
5863 		stmmac_release(dev);
5864 
5865 		ret = __stmmac_open(dev, dma_conf);
5866 		if (ret) {
5867 			free_dma_desc_resources(priv, dma_conf);
5868 			kfree(dma_conf);
5869 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5870 			return ret;
5871 		}
5872 
5873 		kfree(dma_conf);
5874 
5875 		stmmac_set_rx_mode(dev);
5876 	}
5877 
5878 	WRITE_ONCE(dev->mtu, mtu);
5879 	netdev_update_features(dev);
5880 
5881 	return 0;
5882 }
5883 
5884 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5885 					     netdev_features_t features)
5886 {
5887 	struct stmmac_priv *priv = netdev_priv(dev);
5888 
5889 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5890 		features &= ~NETIF_F_RXCSUM;
5891 
5892 	if (!priv->plat->tx_coe)
5893 		features &= ~NETIF_F_CSUM_MASK;
5894 
5895 	/* Some GMAC devices have a bugged Jumbo frame support that
5896 	 * needs to have the Tx COE disabled for oversized frames
5897 	 * (due to limited buffer sizes). In this case we disable
5898 	 * the TX csum insertion in the TDES and not use SF.
5899 	 */
5900 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5901 		features &= ~NETIF_F_CSUM_MASK;
5902 
5903 	/* Disable tso if asked by ethtool */
5904 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5905 		if (features & NETIF_F_TSO)
5906 			priv->tso = true;
5907 		else
5908 			priv->tso = false;
5909 	}
5910 
5911 	return features;
5912 }
5913 
5914 static int stmmac_set_features(struct net_device *netdev,
5915 			       netdev_features_t features)
5916 {
5917 	struct stmmac_priv *priv = netdev_priv(netdev);
5918 
5919 	/* Keep the COE Type in case of csum is supporting */
5920 	if (features & NETIF_F_RXCSUM)
5921 		priv->hw->rx_csum = priv->plat->rx_coe;
5922 	else
5923 		priv->hw->rx_csum = 0;
5924 	/* No check needed because rx_coe has been set before and it will be
5925 	 * fixed in case of issue.
5926 	 */
5927 	stmmac_rx_ipc(priv, priv->hw);
5928 
5929 	if (priv->sph_cap) {
5930 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5931 		u32 chan;
5932 
5933 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5934 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5935 	}
5936 
5937 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5938 		priv->hw->hw_vlan_en = true;
5939 	else
5940 		priv->hw->hw_vlan_en = false;
5941 
5942 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5943 
5944 	return 0;
5945 }
5946 
5947 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5948 {
5949 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5950 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5951 	u32 queues_count;
5952 	u32 queue;
5953 	bool xmac;
5954 
5955 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5956 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5957 
5958 	if (priv->irq_wake)
5959 		pm_wakeup_event(priv->device, 0);
5960 
5961 	if (priv->dma_cap.estsel)
5962 		stmmac_est_irq_status(priv, priv, priv->dev,
5963 				      &priv->xstats, tx_cnt);
5964 
5965 	if (stmmac_fpe_supported(priv))
5966 		stmmac_fpe_irq_status(priv);
5967 
5968 	/* To handle GMAC own interrupts */
5969 	if ((priv->plat->has_gmac) || xmac) {
5970 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5971 
5972 		if (unlikely(status)) {
5973 			/* For LPI we need to save the tx status */
5974 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5975 				priv->tx_path_in_lpi_mode = true;
5976 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5977 				priv->tx_path_in_lpi_mode = false;
5978 		}
5979 
5980 		for (queue = 0; queue < queues_count; queue++)
5981 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
5982 
5983 		/* PCS link status */
5984 		if (priv->hw->pcs &&
5985 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5986 			if (priv->xstats.pcs_link)
5987 				netif_carrier_on(priv->dev);
5988 			else
5989 				netif_carrier_off(priv->dev);
5990 		}
5991 
5992 		stmmac_timestamp_interrupt(priv, priv);
5993 	}
5994 }
5995 
5996 /**
5997  *  stmmac_interrupt - main ISR
5998  *  @irq: interrupt number.
5999  *  @dev_id: to pass the net device pointer.
6000  *  Description: this is the main driver interrupt service routine.
6001  *  It can call:
6002  *  o DMA service routine (to manage incoming frame reception and transmission
6003  *    status)
6004  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6005  *    interrupts.
6006  */
6007 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6008 {
6009 	struct net_device *dev = (struct net_device *)dev_id;
6010 	struct stmmac_priv *priv = netdev_priv(dev);
6011 
6012 	/* Check if adapter is up */
6013 	if (test_bit(STMMAC_DOWN, &priv->state))
6014 		return IRQ_HANDLED;
6015 
6016 	/* Check ASP error if it isn't delivered via an individual IRQ */
6017 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6018 		return IRQ_HANDLED;
6019 
6020 	/* To handle Common interrupts */
6021 	stmmac_common_interrupt(priv);
6022 
6023 	/* To handle DMA interrupts */
6024 	stmmac_dma_interrupt(priv);
6025 
6026 	return IRQ_HANDLED;
6027 }
6028 
6029 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6030 {
6031 	struct net_device *dev = (struct net_device *)dev_id;
6032 	struct stmmac_priv *priv = netdev_priv(dev);
6033 
6034 	/* Check if adapter is up */
6035 	if (test_bit(STMMAC_DOWN, &priv->state))
6036 		return IRQ_HANDLED;
6037 
6038 	/* To handle Common interrupts */
6039 	stmmac_common_interrupt(priv);
6040 
6041 	return IRQ_HANDLED;
6042 }
6043 
6044 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6045 {
6046 	struct net_device *dev = (struct net_device *)dev_id;
6047 	struct stmmac_priv *priv = netdev_priv(dev);
6048 
6049 	/* Check if adapter is up */
6050 	if (test_bit(STMMAC_DOWN, &priv->state))
6051 		return IRQ_HANDLED;
6052 
6053 	/* Check if a fatal error happened */
6054 	stmmac_safety_feat_interrupt(priv);
6055 
6056 	return IRQ_HANDLED;
6057 }
6058 
6059 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6060 {
6061 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6062 	struct stmmac_dma_conf *dma_conf;
6063 	int chan = tx_q->queue_index;
6064 	struct stmmac_priv *priv;
6065 	int status;
6066 
6067 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6068 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6069 
6070 	/* Check if adapter is up */
6071 	if (test_bit(STMMAC_DOWN, &priv->state))
6072 		return IRQ_HANDLED;
6073 
6074 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6075 
6076 	if (unlikely(status & tx_hard_error_bump_tc)) {
6077 		/* Try to bump up the dma threshold on this failure */
6078 		stmmac_bump_dma_threshold(priv, chan);
6079 	} else if (unlikely(status == tx_hard_error)) {
6080 		stmmac_tx_err(priv, chan);
6081 	}
6082 
6083 	return IRQ_HANDLED;
6084 }
6085 
6086 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6087 {
6088 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6089 	struct stmmac_dma_conf *dma_conf;
6090 	int chan = rx_q->queue_index;
6091 	struct stmmac_priv *priv;
6092 
6093 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6094 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6095 
6096 	/* Check if adapter is up */
6097 	if (test_bit(STMMAC_DOWN, &priv->state))
6098 		return IRQ_HANDLED;
6099 
6100 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6101 
6102 	return IRQ_HANDLED;
6103 }
6104 
6105 /**
6106  *  stmmac_ioctl - Entry point for the Ioctl
6107  *  @dev: Device pointer.
6108  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6109  *  a proprietary structure used to pass information to the driver.
6110  *  @cmd: IOCTL command
6111  *  Description:
6112  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6113  */
6114 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6115 {
6116 	struct stmmac_priv *priv = netdev_priv (dev);
6117 	int ret = -EOPNOTSUPP;
6118 
6119 	if (!netif_running(dev))
6120 		return -EINVAL;
6121 
6122 	switch (cmd) {
6123 	case SIOCGMIIPHY:
6124 	case SIOCGMIIREG:
6125 	case SIOCSMIIREG:
6126 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6127 		break;
6128 	case SIOCSHWTSTAMP:
6129 		ret = stmmac_hwtstamp_set(dev, rq);
6130 		break;
6131 	case SIOCGHWTSTAMP:
6132 		ret = stmmac_hwtstamp_get(dev, rq);
6133 		break;
6134 	default:
6135 		break;
6136 	}
6137 
6138 	return ret;
6139 }
6140 
6141 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6142 				    void *cb_priv)
6143 {
6144 	struct stmmac_priv *priv = cb_priv;
6145 	int ret = -EOPNOTSUPP;
6146 
6147 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6148 		return ret;
6149 
6150 	__stmmac_disable_all_queues(priv);
6151 
6152 	switch (type) {
6153 	case TC_SETUP_CLSU32:
6154 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6155 		break;
6156 	case TC_SETUP_CLSFLOWER:
6157 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6158 		break;
6159 	default:
6160 		break;
6161 	}
6162 
6163 	stmmac_enable_all_queues(priv);
6164 	return ret;
6165 }
6166 
6167 static LIST_HEAD(stmmac_block_cb_list);
6168 
6169 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6170 			   void *type_data)
6171 {
6172 	struct stmmac_priv *priv = netdev_priv(ndev);
6173 
6174 	switch (type) {
6175 	case TC_QUERY_CAPS:
6176 		return stmmac_tc_query_caps(priv, priv, type_data);
6177 	case TC_SETUP_QDISC_MQPRIO:
6178 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6179 	case TC_SETUP_BLOCK:
6180 		return flow_block_cb_setup_simple(type_data,
6181 						  &stmmac_block_cb_list,
6182 						  stmmac_setup_tc_block_cb,
6183 						  priv, priv, true);
6184 	case TC_SETUP_QDISC_CBS:
6185 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6186 	case TC_SETUP_QDISC_TAPRIO:
6187 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6188 	case TC_SETUP_QDISC_ETF:
6189 		return stmmac_tc_setup_etf(priv, priv, type_data);
6190 	default:
6191 		return -EOPNOTSUPP;
6192 	}
6193 }
6194 
6195 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6196 			       struct net_device *sb_dev)
6197 {
6198 	int gso = skb_shinfo(skb)->gso_type;
6199 
6200 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6201 		/*
6202 		 * There is no way to determine the number of TSO/USO
6203 		 * capable Queues. Let's use always the Queue 0
6204 		 * because if TSO/USO is supported then at least this
6205 		 * one will be capable.
6206 		 */
6207 		return 0;
6208 	}
6209 
6210 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6211 }
6212 
6213 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6214 {
6215 	struct stmmac_priv *priv = netdev_priv(ndev);
6216 	int ret = 0;
6217 
6218 	ret = pm_runtime_resume_and_get(priv->device);
6219 	if (ret < 0)
6220 		return ret;
6221 
6222 	ret = eth_mac_addr(ndev, addr);
6223 	if (ret)
6224 		goto set_mac_error;
6225 
6226 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6227 
6228 set_mac_error:
6229 	pm_runtime_put(priv->device);
6230 
6231 	return ret;
6232 }
6233 
6234 #ifdef CONFIG_DEBUG_FS
6235 static struct dentry *stmmac_fs_dir;
6236 
6237 static void sysfs_display_ring(void *head, int size, int extend_desc,
6238 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6239 {
6240 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6241 	struct dma_desc *p = (struct dma_desc *)head;
6242 	unsigned int desc_size;
6243 	dma_addr_t dma_addr;
6244 	int i;
6245 
6246 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6247 	for (i = 0; i < size; i++) {
6248 		dma_addr = dma_phy_addr + i * desc_size;
6249 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6250 				i, &dma_addr,
6251 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6252 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6253 		if (extend_desc)
6254 			p = &(++ep)->basic;
6255 		else
6256 			p++;
6257 	}
6258 }
6259 
6260 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6261 {
6262 	struct net_device *dev = seq->private;
6263 	struct stmmac_priv *priv = netdev_priv(dev);
6264 	u32 rx_count = priv->plat->rx_queues_to_use;
6265 	u32 tx_count = priv->plat->tx_queues_to_use;
6266 	u32 queue;
6267 
6268 	if ((dev->flags & IFF_UP) == 0)
6269 		return 0;
6270 
6271 	for (queue = 0; queue < rx_count; queue++) {
6272 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6273 
6274 		seq_printf(seq, "RX Queue %d:\n", queue);
6275 
6276 		if (priv->extend_desc) {
6277 			seq_printf(seq, "Extended descriptor ring:\n");
6278 			sysfs_display_ring((void *)rx_q->dma_erx,
6279 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6280 		} else {
6281 			seq_printf(seq, "Descriptor ring:\n");
6282 			sysfs_display_ring((void *)rx_q->dma_rx,
6283 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6284 		}
6285 	}
6286 
6287 	for (queue = 0; queue < tx_count; queue++) {
6288 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6289 
6290 		seq_printf(seq, "TX Queue %d:\n", queue);
6291 
6292 		if (priv->extend_desc) {
6293 			seq_printf(seq, "Extended descriptor ring:\n");
6294 			sysfs_display_ring((void *)tx_q->dma_etx,
6295 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6296 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6297 			seq_printf(seq, "Descriptor ring:\n");
6298 			sysfs_display_ring((void *)tx_q->dma_tx,
6299 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6300 		}
6301 	}
6302 
6303 	return 0;
6304 }
6305 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6306 
6307 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6308 {
6309 	static const char * const dwxgmac_timestamp_source[] = {
6310 		"None",
6311 		"Internal",
6312 		"External",
6313 		"Both",
6314 	};
6315 	static const char * const dwxgmac_safety_feature_desc[] = {
6316 		"No",
6317 		"All Safety Features with ECC and Parity",
6318 		"All Safety Features without ECC or Parity",
6319 		"All Safety Features with Parity Only",
6320 		"ECC Only",
6321 		"UNDEFINED",
6322 		"UNDEFINED",
6323 		"UNDEFINED",
6324 	};
6325 	struct net_device *dev = seq->private;
6326 	struct stmmac_priv *priv = netdev_priv(dev);
6327 
6328 	if (!priv->hw_cap_support) {
6329 		seq_printf(seq, "DMA HW features not supported\n");
6330 		return 0;
6331 	}
6332 
6333 	seq_printf(seq, "==============================\n");
6334 	seq_printf(seq, "\tDMA HW features\n");
6335 	seq_printf(seq, "==============================\n");
6336 
6337 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6338 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6339 	seq_printf(seq, "\t1000 Mbps: %s\n",
6340 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6341 	seq_printf(seq, "\tHalf duplex: %s\n",
6342 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6343 	if (priv->plat->has_xgmac) {
6344 		seq_printf(seq,
6345 			   "\tNumber of Additional MAC address registers: %d\n",
6346 			   priv->dma_cap.multi_addr);
6347 	} else {
6348 		seq_printf(seq, "\tHash Filter: %s\n",
6349 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6350 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6351 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6352 	}
6353 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6354 		   (priv->dma_cap.pcs) ? "Y" : "N");
6355 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6356 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6357 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6358 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6359 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6360 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6361 	seq_printf(seq, "\tRMON module: %s\n",
6362 		   (priv->dma_cap.rmon) ? "Y" : "N");
6363 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6364 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6365 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6366 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6367 	if (priv->plat->has_xgmac)
6368 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6369 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6370 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6371 		   (priv->dma_cap.eee) ? "Y" : "N");
6372 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6373 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6374 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6375 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6376 	    priv->plat->has_xgmac) {
6377 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6378 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6379 	} else {
6380 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6381 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6382 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6383 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6384 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6385 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6386 	}
6387 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6388 		   priv->dma_cap.number_rx_channel);
6389 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6390 		   priv->dma_cap.number_tx_channel);
6391 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6392 		   priv->dma_cap.number_rx_queues);
6393 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6394 		   priv->dma_cap.number_tx_queues);
6395 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6396 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6397 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6398 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6399 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6400 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6401 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6402 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6403 		   priv->dma_cap.pps_out_num);
6404 	seq_printf(seq, "\tSafety Features: %s\n",
6405 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6406 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6407 		   priv->dma_cap.frpsel ? "Y" : "N");
6408 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6409 		   priv->dma_cap.host_dma_width);
6410 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6411 		   priv->dma_cap.rssen ? "Y" : "N");
6412 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6413 		   priv->dma_cap.vlhash ? "Y" : "N");
6414 	seq_printf(seq, "\tSplit Header: %s\n",
6415 		   priv->dma_cap.sphen ? "Y" : "N");
6416 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6417 		   priv->dma_cap.vlins ? "Y" : "N");
6418 	seq_printf(seq, "\tDouble VLAN: %s\n",
6419 		   priv->dma_cap.dvlan ? "Y" : "N");
6420 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6421 		   priv->dma_cap.l3l4fnum);
6422 	seq_printf(seq, "\tARP Offloading: %s\n",
6423 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6424 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6425 		   priv->dma_cap.estsel ? "Y" : "N");
6426 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6427 		   priv->dma_cap.fpesel ? "Y" : "N");
6428 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6429 		   priv->dma_cap.tbssel ? "Y" : "N");
6430 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6431 		   priv->dma_cap.tbs_ch_num);
6432 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6433 		   priv->dma_cap.sgfsel ? "Y" : "N");
6434 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6435 		   BIT(priv->dma_cap.ttsfd) >> 1);
6436 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6437 		   priv->dma_cap.numtc);
6438 	seq_printf(seq, "\tDCB Feature: %s\n",
6439 		   priv->dma_cap.dcben ? "Y" : "N");
6440 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6441 		   priv->dma_cap.advthword ? "Y" : "N");
6442 	seq_printf(seq, "\tPTP Offload: %s\n",
6443 		   priv->dma_cap.ptoen ? "Y" : "N");
6444 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6445 		   priv->dma_cap.osten ? "Y" : "N");
6446 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6447 		   priv->dma_cap.pfcen ? "Y" : "N");
6448 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6449 		   BIT(priv->dma_cap.frpes) << 6);
6450 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6451 		   BIT(priv->dma_cap.frpbs) << 6);
6452 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6453 		   priv->dma_cap.frppipe_num);
6454 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6455 		   priv->dma_cap.nrvf_num ?
6456 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6457 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6458 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6459 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6460 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6461 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6462 		   priv->dma_cap.cbtisel ? "Y" : "N");
6463 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6464 		   priv->dma_cap.aux_snapshot_n);
6465 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6466 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6467 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6468 		   priv->dma_cap.edma ? "Y" : "N");
6469 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6470 		   priv->dma_cap.ediffc ? "Y" : "N");
6471 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6472 		   priv->dma_cap.vxn ? "Y" : "N");
6473 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6474 		   priv->dma_cap.dbgmem ? "Y" : "N");
6475 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6476 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6477 	return 0;
6478 }
6479 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6480 
6481 /* Use network device events to rename debugfs file entries.
6482  */
6483 static int stmmac_device_event(struct notifier_block *unused,
6484 			       unsigned long event, void *ptr)
6485 {
6486 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6487 	struct stmmac_priv *priv = netdev_priv(dev);
6488 
6489 	if (dev->netdev_ops != &stmmac_netdev_ops)
6490 		goto done;
6491 
6492 	switch (event) {
6493 	case NETDEV_CHANGENAME:
6494 		if (priv->dbgfs_dir)
6495 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6496 							 priv->dbgfs_dir,
6497 							 stmmac_fs_dir,
6498 							 dev->name);
6499 		break;
6500 	}
6501 done:
6502 	return NOTIFY_DONE;
6503 }
6504 
6505 static struct notifier_block stmmac_notifier = {
6506 	.notifier_call = stmmac_device_event,
6507 };
6508 
6509 static void stmmac_init_fs(struct net_device *dev)
6510 {
6511 	struct stmmac_priv *priv = netdev_priv(dev);
6512 
6513 	rtnl_lock();
6514 
6515 	/* Create per netdev entries */
6516 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6517 
6518 	/* Entry to report DMA RX/TX rings */
6519 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6520 			    &stmmac_rings_status_fops);
6521 
6522 	/* Entry to report the DMA HW features */
6523 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6524 			    &stmmac_dma_cap_fops);
6525 
6526 	rtnl_unlock();
6527 }
6528 
6529 static void stmmac_exit_fs(struct net_device *dev)
6530 {
6531 	struct stmmac_priv *priv = netdev_priv(dev);
6532 
6533 	debugfs_remove_recursive(priv->dbgfs_dir);
6534 }
6535 #endif /* CONFIG_DEBUG_FS */
6536 
6537 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6538 {
6539 	unsigned char *data = (unsigned char *)&vid_le;
6540 	unsigned char data_byte = 0;
6541 	u32 crc = ~0x0;
6542 	u32 temp = 0;
6543 	int i, bits;
6544 
6545 	bits = get_bitmask_order(VLAN_VID_MASK);
6546 	for (i = 0; i < bits; i++) {
6547 		if ((i % 8) == 0)
6548 			data_byte = data[i / 8];
6549 
6550 		temp = ((crc & 1) ^ data_byte) & 1;
6551 		crc >>= 1;
6552 		data_byte >>= 1;
6553 
6554 		if (temp)
6555 			crc ^= 0xedb88320;
6556 	}
6557 
6558 	return crc;
6559 }
6560 
6561 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6562 {
6563 	u32 crc, hash = 0;
6564 	u16 pmatch = 0;
6565 	int count = 0;
6566 	u16 vid = 0;
6567 
6568 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6569 		__le16 vid_le = cpu_to_le16(vid);
6570 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6571 		hash |= (1 << crc);
6572 		count++;
6573 	}
6574 
6575 	if (!priv->dma_cap.vlhash) {
6576 		if (count > 2) /* VID = 0 always passes filter */
6577 			return -EOPNOTSUPP;
6578 
6579 		pmatch = vid;
6580 		hash = 0;
6581 	}
6582 
6583 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6584 }
6585 
6586 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6587 {
6588 	struct stmmac_priv *priv = netdev_priv(ndev);
6589 	bool is_double = false;
6590 	int ret;
6591 
6592 	ret = pm_runtime_resume_and_get(priv->device);
6593 	if (ret < 0)
6594 		return ret;
6595 
6596 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6597 		is_double = true;
6598 
6599 	set_bit(vid, priv->active_vlans);
6600 	ret = stmmac_vlan_update(priv, is_double);
6601 	if (ret) {
6602 		clear_bit(vid, priv->active_vlans);
6603 		goto err_pm_put;
6604 	}
6605 
6606 	if (priv->hw->num_vlan) {
6607 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6608 		if (ret)
6609 			goto err_pm_put;
6610 	}
6611 err_pm_put:
6612 	pm_runtime_put(priv->device);
6613 
6614 	return ret;
6615 }
6616 
6617 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6618 {
6619 	struct stmmac_priv *priv = netdev_priv(ndev);
6620 	bool is_double = false;
6621 	int ret;
6622 
6623 	ret = pm_runtime_resume_and_get(priv->device);
6624 	if (ret < 0)
6625 		return ret;
6626 
6627 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6628 		is_double = true;
6629 
6630 	clear_bit(vid, priv->active_vlans);
6631 
6632 	if (priv->hw->num_vlan) {
6633 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6634 		if (ret)
6635 			goto del_vlan_error;
6636 	}
6637 
6638 	ret = stmmac_vlan_update(priv, is_double);
6639 
6640 del_vlan_error:
6641 	pm_runtime_put(priv->device);
6642 
6643 	return ret;
6644 }
6645 
6646 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6647 {
6648 	struct stmmac_priv *priv = netdev_priv(dev);
6649 
6650 	switch (bpf->command) {
6651 	case XDP_SETUP_PROG:
6652 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6653 	case XDP_SETUP_XSK_POOL:
6654 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6655 					     bpf->xsk.queue_id);
6656 	default:
6657 		return -EOPNOTSUPP;
6658 	}
6659 }
6660 
6661 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6662 			   struct xdp_frame **frames, u32 flags)
6663 {
6664 	struct stmmac_priv *priv = netdev_priv(dev);
6665 	int cpu = smp_processor_id();
6666 	struct netdev_queue *nq;
6667 	int i, nxmit = 0;
6668 	int queue;
6669 
6670 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6671 		return -ENETDOWN;
6672 
6673 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6674 		return -EINVAL;
6675 
6676 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6677 	nq = netdev_get_tx_queue(priv->dev, queue);
6678 
6679 	__netif_tx_lock(nq, cpu);
6680 	/* Avoids TX time-out as we are sharing with slow path */
6681 	txq_trans_cond_update(nq);
6682 
6683 	for (i = 0; i < num_frames; i++) {
6684 		int res;
6685 
6686 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6687 		if (res == STMMAC_XDP_CONSUMED)
6688 			break;
6689 
6690 		nxmit++;
6691 	}
6692 
6693 	if (flags & XDP_XMIT_FLUSH) {
6694 		stmmac_flush_tx_descriptors(priv, queue);
6695 		stmmac_tx_timer_arm(priv, queue);
6696 	}
6697 
6698 	__netif_tx_unlock(nq);
6699 
6700 	return nxmit;
6701 }
6702 
6703 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6704 {
6705 	struct stmmac_channel *ch = &priv->channel[queue];
6706 	unsigned long flags;
6707 
6708 	spin_lock_irqsave(&ch->lock, flags);
6709 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6710 	spin_unlock_irqrestore(&ch->lock, flags);
6711 
6712 	stmmac_stop_rx_dma(priv, queue);
6713 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6714 }
6715 
6716 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6717 {
6718 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6719 	struct stmmac_channel *ch = &priv->channel[queue];
6720 	unsigned long flags;
6721 	u32 buf_size;
6722 	int ret;
6723 
6724 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6725 	if (ret) {
6726 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6727 		return;
6728 	}
6729 
6730 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6731 	if (ret) {
6732 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6733 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6734 		return;
6735 	}
6736 
6737 	stmmac_reset_rx_queue(priv, queue);
6738 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6739 
6740 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6741 			    rx_q->dma_rx_phy, rx_q->queue_index);
6742 
6743 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6744 			     sizeof(struct dma_desc));
6745 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6746 			       rx_q->rx_tail_addr, rx_q->queue_index);
6747 
6748 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6749 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6750 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6751 				      buf_size,
6752 				      rx_q->queue_index);
6753 	} else {
6754 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6755 				      priv->dma_conf.dma_buf_sz,
6756 				      rx_q->queue_index);
6757 	}
6758 
6759 	stmmac_start_rx_dma(priv, queue);
6760 
6761 	spin_lock_irqsave(&ch->lock, flags);
6762 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6763 	spin_unlock_irqrestore(&ch->lock, flags);
6764 }
6765 
6766 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6767 {
6768 	struct stmmac_channel *ch = &priv->channel[queue];
6769 	unsigned long flags;
6770 
6771 	spin_lock_irqsave(&ch->lock, flags);
6772 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6773 	spin_unlock_irqrestore(&ch->lock, flags);
6774 
6775 	stmmac_stop_tx_dma(priv, queue);
6776 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6777 }
6778 
6779 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6780 {
6781 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6782 	struct stmmac_channel *ch = &priv->channel[queue];
6783 	unsigned long flags;
6784 	int ret;
6785 
6786 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6787 	if (ret) {
6788 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6789 		return;
6790 	}
6791 
6792 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6793 	if (ret) {
6794 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6795 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6796 		return;
6797 	}
6798 
6799 	stmmac_reset_tx_queue(priv, queue);
6800 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6801 
6802 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6803 			    tx_q->dma_tx_phy, tx_q->queue_index);
6804 
6805 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6806 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6807 
6808 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6809 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6810 			       tx_q->tx_tail_addr, tx_q->queue_index);
6811 
6812 	stmmac_start_tx_dma(priv, queue);
6813 
6814 	spin_lock_irqsave(&ch->lock, flags);
6815 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6816 	spin_unlock_irqrestore(&ch->lock, flags);
6817 }
6818 
6819 void stmmac_xdp_release(struct net_device *dev)
6820 {
6821 	struct stmmac_priv *priv = netdev_priv(dev);
6822 	u32 chan;
6823 
6824 	/* Ensure tx function is not running */
6825 	netif_tx_disable(dev);
6826 
6827 	/* Disable NAPI process */
6828 	stmmac_disable_all_queues(priv);
6829 
6830 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6831 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6832 
6833 	/* Free the IRQ lines */
6834 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6835 
6836 	/* Stop TX/RX DMA channels */
6837 	stmmac_stop_all_dma(priv);
6838 
6839 	/* Release and free the Rx/Tx resources */
6840 	free_dma_desc_resources(priv, &priv->dma_conf);
6841 
6842 	/* Disable the MAC Rx/Tx */
6843 	stmmac_mac_set(priv, priv->ioaddr, false);
6844 
6845 	/* set trans_start so we don't get spurious
6846 	 * watchdogs during reset
6847 	 */
6848 	netif_trans_update(dev);
6849 	netif_carrier_off(dev);
6850 }
6851 
6852 int stmmac_xdp_open(struct net_device *dev)
6853 {
6854 	struct stmmac_priv *priv = netdev_priv(dev);
6855 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6856 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6857 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6858 	struct stmmac_rx_queue *rx_q;
6859 	struct stmmac_tx_queue *tx_q;
6860 	u32 buf_size;
6861 	bool sph_en;
6862 	u32 chan;
6863 	int ret;
6864 
6865 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6866 	if (ret < 0) {
6867 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6868 			   __func__);
6869 		goto dma_desc_error;
6870 	}
6871 
6872 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6873 	if (ret < 0) {
6874 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6875 			   __func__);
6876 		goto init_error;
6877 	}
6878 
6879 	stmmac_reset_queues_param(priv);
6880 
6881 	/* DMA CSR Channel configuration */
6882 	for (chan = 0; chan < dma_csr_ch; chan++) {
6883 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6884 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6885 	}
6886 
6887 	/* Adjust Split header */
6888 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6889 
6890 	/* DMA RX Channel Configuration */
6891 	for (chan = 0; chan < rx_cnt; chan++) {
6892 		rx_q = &priv->dma_conf.rx_queue[chan];
6893 
6894 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6895 				    rx_q->dma_rx_phy, chan);
6896 
6897 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6898 				     (rx_q->buf_alloc_num *
6899 				      sizeof(struct dma_desc));
6900 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6901 				       rx_q->rx_tail_addr, chan);
6902 
6903 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6904 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6905 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6906 					      buf_size,
6907 					      rx_q->queue_index);
6908 		} else {
6909 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6910 					      priv->dma_conf.dma_buf_sz,
6911 					      rx_q->queue_index);
6912 		}
6913 
6914 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6915 	}
6916 
6917 	/* DMA TX Channel Configuration */
6918 	for (chan = 0; chan < tx_cnt; chan++) {
6919 		tx_q = &priv->dma_conf.tx_queue[chan];
6920 
6921 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6922 				    tx_q->dma_tx_phy, chan);
6923 
6924 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6925 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6926 				       tx_q->tx_tail_addr, chan);
6927 
6928 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6929 		tx_q->txtimer.function = stmmac_tx_timer;
6930 	}
6931 
6932 	/* Enable the MAC Rx/Tx */
6933 	stmmac_mac_set(priv, priv->ioaddr, true);
6934 
6935 	/* Start Rx & Tx DMA Channels */
6936 	stmmac_start_all_dma(priv);
6937 
6938 	ret = stmmac_request_irq(dev);
6939 	if (ret)
6940 		goto irq_error;
6941 
6942 	/* Enable NAPI process*/
6943 	stmmac_enable_all_queues(priv);
6944 	netif_carrier_on(dev);
6945 	netif_tx_start_all_queues(dev);
6946 	stmmac_enable_all_dma_irq(priv);
6947 
6948 	return 0;
6949 
6950 irq_error:
6951 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6952 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6953 
6954 	stmmac_hw_teardown(dev);
6955 init_error:
6956 	free_dma_desc_resources(priv, &priv->dma_conf);
6957 dma_desc_error:
6958 	return ret;
6959 }
6960 
6961 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6962 {
6963 	struct stmmac_priv *priv = netdev_priv(dev);
6964 	struct stmmac_rx_queue *rx_q;
6965 	struct stmmac_tx_queue *tx_q;
6966 	struct stmmac_channel *ch;
6967 
6968 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6969 	    !netif_carrier_ok(priv->dev))
6970 		return -ENETDOWN;
6971 
6972 	if (!stmmac_xdp_is_enabled(priv))
6973 		return -EINVAL;
6974 
6975 	if (queue >= priv->plat->rx_queues_to_use ||
6976 	    queue >= priv->plat->tx_queues_to_use)
6977 		return -EINVAL;
6978 
6979 	rx_q = &priv->dma_conf.rx_queue[queue];
6980 	tx_q = &priv->dma_conf.tx_queue[queue];
6981 	ch = &priv->channel[queue];
6982 
6983 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6984 		return -EINVAL;
6985 
6986 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6987 		/* EQoS does not have per-DMA channel SW interrupt,
6988 		 * so we schedule RX Napi straight-away.
6989 		 */
6990 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6991 			__napi_schedule(&ch->rxtx_napi);
6992 	}
6993 
6994 	return 0;
6995 }
6996 
6997 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6998 {
6999 	struct stmmac_priv *priv = netdev_priv(dev);
7000 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7001 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7002 	unsigned int start;
7003 	int q;
7004 
7005 	for (q = 0; q < tx_cnt; q++) {
7006 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7007 		u64 tx_packets;
7008 		u64 tx_bytes;
7009 
7010 		do {
7011 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7012 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7013 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7014 		do {
7015 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7016 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7017 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7018 
7019 		stats->tx_packets += tx_packets;
7020 		stats->tx_bytes += tx_bytes;
7021 	}
7022 
7023 	for (q = 0; q < rx_cnt; q++) {
7024 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7025 		u64 rx_packets;
7026 		u64 rx_bytes;
7027 
7028 		do {
7029 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7030 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7031 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7032 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7033 
7034 		stats->rx_packets += rx_packets;
7035 		stats->rx_bytes += rx_bytes;
7036 	}
7037 
7038 	stats->rx_dropped = priv->xstats.rx_dropped;
7039 	stats->rx_errors = priv->xstats.rx_errors;
7040 	stats->tx_dropped = priv->xstats.tx_dropped;
7041 	stats->tx_errors = priv->xstats.tx_errors;
7042 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7043 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7044 	stats->rx_length_errors = priv->xstats.rx_length;
7045 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7046 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7047 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7048 }
7049 
7050 static const struct net_device_ops stmmac_netdev_ops = {
7051 	.ndo_open = stmmac_open,
7052 	.ndo_start_xmit = stmmac_xmit,
7053 	.ndo_stop = stmmac_release,
7054 	.ndo_change_mtu = stmmac_change_mtu,
7055 	.ndo_fix_features = stmmac_fix_features,
7056 	.ndo_set_features = stmmac_set_features,
7057 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7058 	.ndo_tx_timeout = stmmac_tx_timeout,
7059 	.ndo_eth_ioctl = stmmac_ioctl,
7060 	.ndo_get_stats64 = stmmac_get_stats64,
7061 	.ndo_setup_tc = stmmac_setup_tc,
7062 	.ndo_select_queue = stmmac_select_queue,
7063 	.ndo_set_mac_address = stmmac_set_mac_address,
7064 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7065 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7066 	.ndo_bpf = stmmac_bpf,
7067 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7068 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7069 };
7070 
7071 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7072 {
7073 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7074 		return;
7075 	if (test_bit(STMMAC_DOWN, &priv->state))
7076 		return;
7077 
7078 	netdev_err(priv->dev, "Reset adapter.\n");
7079 
7080 	rtnl_lock();
7081 	netif_trans_update(priv->dev);
7082 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7083 		usleep_range(1000, 2000);
7084 
7085 	set_bit(STMMAC_DOWN, &priv->state);
7086 	dev_close(priv->dev);
7087 	dev_open(priv->dev, NULL);
7088 	clear_bit(STMMAC_DOWN, &priv->state);
7089 	clear_bit(STMMAC_RESETING, &priv->state);
7090 	rtnl_unlock();
7091 }
7092 
7093 static void stmmac_service_task(struct work_struct *work)
7094 {
7095 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7096 			service_task);
7097 
7098 	stmmac_reset_subtask(priv);
7099 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7100 }
7101 
7102 /**
7103  *  stmmac_hw_init - Init the MAC device
7104  *  @priv: driver private structure
7105  *  Description: this function is to configure the MAC device according to
7106  *  some platform parameters or the HW capability register. It prepares the
7107  *  driver to use either ring or chain modes and to setup either enhanced or
7108  *  normal descriptors.
7109  */
7110 static int stmmac_hw_init(struct stmmac_priv *priv)
7111 {
7112 	int ret;
7113 
7114 	/* dwmac-sun8i only work in chain mode */
7115 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7116 		chain_mode = 1;
7117 	priv->chain_mode = chain_mode;
7118 
7119 	/* Initialize HW Interface */
7120 	ret = stmmac_hwif_init(priv);
7121 	if (ret)
7122 		return ret;
7123 
7124 	/* Get the HW capability (new GMAC newer than 3.50a) */
7125 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7126 	if (priv->hw_cap_support) {
7127 		dev_info(priv->device, "DMA HW capability register supported\n");
7128 
7129 		/* We can override some gmac/dma configuration fields: e.g.
7130 		 * enh_desc, tx_coe (e.g. that are passed through the
7131 		 * platform) with the values from the HW capability
7132 		 * register (if supported).
7133 		 */
7134 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7135 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7136 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7137 		priv->hw->pmt = priv->plat->pmt;
7138 		if (priv->dma_cap.hash_tb_sz) {
7139 			priv->hw->multicast_filter_bins =
7140 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7141 			priv->hw->mcast_bits_log2 =
7142 					ilog2(priv->hw->multicast_filter_bins);
7143 		}
7144 
7145 		/* TXCOE doesn't work in thresh DMA mode */
7146 		if (priv->plat->force_thresh_dma_mode)
7147 			priv->plat->tx_coe = 0;
7148 		else
7149 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7150 
7151 		/* In case of GMAC4 rx_coe is from HW cap register. */
7152 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7153 
7154 		if (priv->dma_cap.rx_coe_type2)
7155 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7156 		else if (priv->dma_cap.rx_coe_type1)
7157 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7158 
7159 	} else {
7160 		dev_info(priv->device, "No HW DMA feature register supported\n");
7161 	}
7162 
7163 	if (priv->plat->rx_coe) {
7164 		priv->hw->rx_csum = priv->plat->rx_coe;
7165 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7166 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7167 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7168 	}
7169 	if (priv->plat->tx_coe)
7170 		dev_info(priv->device, "TX Checksum insertion supported\n");
7171 
7172 	if (priv->plat->pmt) {
7173 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7174 		device_set_wakeup_capable(priv->device, 1);
7175 	}
7176 
7177 	if (priv->dma_cap.tsoen)
7178 		dev_info(priv->device, "TSO supported\n");
7179 
7180 	priv->hw->vlan_fail_q_en =
7181 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7182 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7183 
7184 	/* Run HW quirks, if any */
7185 	if (priv->hwif_quirks) {
7186 		ret = priv->hwif_quirks(priv);
7187 		if (ret)
7188 			return ret;
7189 	}
7190 
7191 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7192 	 * In some case, for example on bugged HW this feature
7193 	 * has to be disable and this can be done by passing the
7194 	 * riwt_off field from the platform.
7195 	 */
7196 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7197 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7198 		priv->use_riwt = 1;
7199 		dev_info(priv->device,
7200 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7201 	}
7202 
7203 	return 0;
7204 }
7205 
7206 static void stmmac_napi_add(struct net_device *dev)
7207 {
7208 	struct stmmac_priv *priv = netdev_priv(dev);
7209 	u32 queue, maxq;
7210 
7211 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7212 
7213 	for (queue = 0; queue < maxq; queue++) {
7214 		struct stmmac_channel *ch = &priv->channel[queue];
7215 
7216 		ch->priv_data = priv;
7217 		ch->index = queue;
7218 		spin_lock_init(&ch->lock);
7219 
7220 		if (queue < priv->plat->rx_queues_to_use) {
7221 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7222 		}
7223 		if (queue < priv->plat->tx_queues_to_use) {
7224 			netif_napi_add_tx(dev, &ch->tx_napi,
7225 					  stmmac_napi_poll_tx);
7226 		}
7227 		if (queue < priv->plat->rx_queues_to_use &&
7228 		    queue < priv->plat->tx_queues_to_use) {
7229 			netif_napi_add(dev, &ch->rxtx_napi,
7230 				       stmmac_napi_poll_rxtx);
7231 		}
7232 	}
7233 }
7234 
7235 static void stmmac_napi_del(struct net_device *dev)
7236 {
7237 	struct stmmac_priv *priv = netdev_priv(dev);
7238 	u32 queue, maxq;
7239 
7240 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7241 
7242 	for (queue = 0; queue < maxq; queue++) {
7243 		struct stmmac_channel *ch = &priv->channel[queue];
7244 
7245 		if (queue < priv->plat->rx_queues_to_use)
7246 			netif_napi_del(&ch->rx_napi);
7247 		if (queue < priv->plat->tx_queues_to_use)
7248 			netif_napi_del(&ch->tx_napi);
7249 		if (queue < priv->plat->rx_queues_to_use &&
7250 		    queue < priv->plat->tx_queues_to_use) {
7251 			netif_napi_del(&ch->rxtx_napi);
7252 		}
7253 	}
7254 }
7255 
7256 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7257 {
7258 	struct stmmac_priv *priv = netdev_priv(dev);
7259 	int ret = 0, i;
7260 
7261 	if (netif_running(dev))
7262 		stmmac_release(dev);
7263 
7264 	stmmac_napi_del(dev);
7265 
7266 	priv->plat->rx_queues_to_use = rx_cnt;
7267 	priv->plat->tx_queues_to_use = tx_cnt;
7268 	if (!netif_is_rxfh_configured(dev))
7269 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7270 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7271 									rx_cnt);
7272 
7273 	stmmac_napi_add(dev);
7274 
7275 	if (netif_running(dev))
7276 		ret = stmmac_open(dev);
7277 
7278 	return ret;
7279 }
7280 
7281 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7282 {
7283 	struct stmmac_priv *priv = netdev_priv(dev);
7284 	int ret = 0;
7285 
7286 	if (netif_running(dev))
7287 		stmmac_release(dev);
7288 
7289 	priv->dma_conf.dma_rx_size = rx_size;
7290 	priv->dma_conf.dma_tx_size = tx_size;
7291 
7292 	if (netif_running(dev))
7293 		ret = stmmac_open(dev);
7294 
7295 	return ret;
7296 }
7297 
7298 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7299 {
7300 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7301 	struct dma_desc *desc_contains_ts = ctx->desc;
7302 	struct stmmac_priv *priv = ctx->priv;
7303 	struct dma_desc *ndesc = ctx->ndesc;
7304 	struct dma_desc *desc = ctx->desc;
7305 	u64 ns = 0;
7306 
7307 	if (!priv->hwts_rx_en)
7308 		return -ENODATA;
7309 
7310 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7311 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7312 		desc_contains_ts = ndesc;
7313 
7314 	/* Check if timestamp is available */
7315 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7316 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7317 		ns -= priv->plat->cdc_error_adj;
7318 		*timestamp = ns_to_ktime(ns);
7319 		return 0;
7320 	}
7321 
7322 	return -ENODATA;
7323 }
7324 
7325 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7326 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7327 };
7328 
7329 /**
7330  * stmmac_dvr_probe
7331  * @device: device pointer
7332  * @plat_dat: platform data pointer
7333  * @res: stmmac resource pointer
7334  * Description: this is the main probe function used to
7335  * call the alloc_etherdev, allocate the priv structure.
7336  * Return:
7337  * returns 0 on success, otherwise errno.
7338  */
7339 int stmmac_dvr_probe(struct device *device,
7340 		     struct plat_stmmacenet_data *plat_dat,
7341 		     struct stmmac_resources *res)
7342 {
7343 	struct net_device *ndev = NULL;
7344 	struct stmmac_priv *priv;
7345 	u32 rxq;
7346 	int i, ret = 0;
7347 
7348 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7349 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7350 	if (!ndev)
7351 		return -ENOMEM;
7352 
7353 	SET_NETDEV_DEV(ndev, device);
7354 
7355 	priv = netdev_priv(ndev);
7356 	priv->device = device;
7357 	priv->dev = ndev;
7358 
7359 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7360 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7361 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7362 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7363 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7364 	}
7365 
7366 	priv->xstats.pcpu_stats =
7367 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7368 	if (!priv->xstats.pcpu_stats)
7369 		return -ENOMEM;
7370 
7371 	stmmac_set_ethtool_ops(ndev);
7372 	priv->pause = pause;
7373 	priv->plat = plat_dat;
7374 	priv->ioaddr = res->addr;
7375 	priv->dev->base_addr = (unsigned long)res->addr;
7376 	priv->plat->dma_cfg->multi_msi_en =
7377 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7378 
7379 	priv->dev->irq = res->irq;
7380 	priv->wol_irq = res->wol_irq;
7381 	priv->lpi_irq = res->lpi_irq;
7382 	priv->sfty_irq = res->sfty_irq;
7383 	priv->sfty_ce_irq = res->sfty_ce_irq;
7384 	priv->sfty_ue_irq = res->sfty_ue_irq;
7385 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7386 		priv->rx_irq[i] = res->rx_irq[i];
7387 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7388 		priv->tx_irq[i] = res->tx_irq[i];
7389 
7390 	if (!is_zero_ether_addr(res->mac))
7391 		eth_hw_addr_set(priv->dev, res->mac);
7392 
7393 	dev_set_drvdata(device, priv->dev);
7394 
7395 	/* Verify driver arguments */
7396 	stmmac_verify_args();
7397 
7398 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7399 	if (!priv->af_xdp_zc_qps)
7400 		return -ENOMEM;
7401 
7402 	/* Allocate workqueue */
7403 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7404 	if (!priv->wq) {
7405 		dev_err(priv->device, "failed to create workqueue\n");
7406 		ret = -ENOMEM;
7407 		goto error_wq_init;
7408 	}
7409 
7410 	INIT_WORK(&priv->service_task, stmmac_service_task);
7411 
7412 	/* Override with kernel parameters if supplied XXX CRS XXX
7413 	 * this needs to have multiple instances
7414 	 */
7415 	if ((phyaddr >= 0) && (phyaddr <= 31))
7416 		priv->plat->phy_addr = phyaddr;
7417 
7418 	if (priv->plat->stmmac_rst) {
7419 		ret = reset_control_assert(priv->plat->stmmac_rst);
7420 		reset_control_deassert(priv->plat->stmmac_rst);
7421 		/* Some reset controllers have only reset callback instead of
7422 		 * assert + deassert callbacks pair.
7423 		 */
7424 		if (ret == -ENOTSUPP)
7425 			reset_control_reset(priv->plat->stmmac_rst);
7426 	}
7427 
7428 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7429 	if (ret == -ENOTSUPP)
7430 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7431 			ERR_PTR(ret));
7432 
7433 	/* Wait a bit for the reset to take effect */
7434 	udelay(10);
7435 
7436 	/* Init MAC and get the capabilities */
7437 	ret = stmmac_hw_init(priv);
7438 	if (ret)
7439 		goto error_hw_init;
7440 
7441 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7442 	 */
7443 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7444 		priv->plat->dma_cfg->dche = false;
7445 
7446 	stmmac_check_ether_addr(priv);
7447 
7448 	ndev->netdev_ops = &stmmac_netdev_ops;
7449 
7450 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7451 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7452 
7453 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7454 			    NETIF_F_RXCSUM;
7455 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7456 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7457 
7458 	ret = stmmac_tc_init(priv, priv);
7459 	if (!ret) {
7460 		ndev->hw_features |= NETIF_F_HW_TC;
7461 	}
7462 
7463 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7464 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7465 		if (priv->plat->has_gmac4)
7466 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7467 		priv->tso = true;
7468 		dev_info(priv->device, "TSO feature enabled\n");
7469 	}
7470 
7471 	if (priv->dma_cap.sphen &&
7472 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7473 		ndev->hw_features |= NETIF_F_GRO;
7474 		priv->sph_cap = true;
7475 		priv->sph = priv->sph_cap;
7476 		dev_info(priv->device, "SPH feature enabled\n");
7477 	}
7478 
7479 	/* Ideally our host DMA address width is the same as for the
7480 	 * device. However, it may differ and then we have to use our
7481 	 * host DMA width for allocation and the device DMA width for
7482 	 * register handling.
7483 	 */
7484 	if (priv->plat->host_dma_width)
7485 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7486 	else
7487 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7488 
7489 	if (priv->dma_cap.host_dma_width) {
7490 		ret = dma_set_mask_and_coherent(device,
7491 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7492 		if (!ret) {
7493 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7494 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7495 
7496 			/*
7497 			 * If more than 32 bits can be addressed, make sure to
7498 			 * enable enhanced addressing mode.
7499 			 */
7500 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7501 				priv->plat->dma_cfg->eame = true;
7502 		} else {
7503 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7504 			if (ret) {
7505 				dev_err(priv->device, "Failed to set DMA Mask\n");
7506 				goto error_hw_init;
7507 			}
7508 
7509 			priv->dma_cap.host_dma_width = 32;
7510 		}
7511 	}
7512 
7513 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7514 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7515 #ifdef STMMAC_VLAN_TAG_USED
7516 	/* Both mac100 and gmac support receive VLAN tag detection */
7517 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7518 	if (priv->plat->has_gmac4) {
7519 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7520 		priv->hw->hw_vlan_en = true;
7521 	}
7522 	if (priv->dma_cap.vlhash) {
7523 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7524 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7525 	}
7526 	if (priv->dma_cap.vlins) {
7527 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7528 		if (priv->dma_cap.dvlan)
7529 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7530 	}
7531 #endif
7532 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7533 
7534 	priv->xstats.threshold = tc;
7535 
7536 	/* Initialize RSS */
7537 	rxq = priv->plat->rx_queues_to_use;
7538 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7539 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7540 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7541 
7542 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7543 		ndev->features |= NETIF_F_RXHASH;
7544 
7545 	ndev->vlan_features |= ndev->features;
7546 
7547 	/* MTU range: 46 - hw-specific max */
7548 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7549 	if (priv->plat->has_xgmac)
7550 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7551 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7552 		ndev->max_mtu = JUMBO_LEN;
7553 	else
7554 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7555 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7556 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7557 	 */
7558 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7559 	    (priv->plat->maxmtu >= ndev->min_mtu))
7560 		ndev->max_mtu = priv->plat->maxmtu;
7561 	else if (priv->plat->maxmtu < ndev->min_mtu)
7562 		dev_warn(priv->device,
7563 			 "%s: warning: maxmtu having invalid value (%d)\n",
7564 			 __func__, priv->plat->maxmtu);
7565 
7566 	if (flow_ctrl)
7567 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7568 
7569 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7570 
7571 	/* Setup channels NAPI */
7572 	stmmac_napi_add(ndev);
7573 
7574 	mutex_init(&priv->lock);
7575 
7576 	stmmac_fpe_init(priv);
7577 
7578 	/* If a specific clk_csr value is passed from the platform
7579 	 * this means that the CSR Clock Range selection cannot be
7580 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7581 	 * set the MDC clock dynamically according to the csr actual
7582 	 * clock input.
7583 	 */
7584 	if (priv->plat->clk_csr >= 0)
7585 		priv->clk_csr = priv->plat->clk_csr;
7586 	else
7587 		stmmac_clk_csr_set(priv);
7588 
7589 	stmmac_check_pcs_mode(priv);
7590 
7591 	pm_runtime_get_noresume(device);
7592 	pm_runtime_set_active(device);
7593 	if (!pm_runtime_enabled(device))
7594 		pm_runtime_enable(device);
7595 
7596 	ret = stmmac_mdio_register(ndev);
7597 	if (ret < 0) {
7598 		dev_err_probe(priv->device, ret,
7599 			      "MDIO bus (id: %d) registration failed\n",
7600 			      priv->plat->bus_id);
7601 		goto error_mdio_register;
7602 	}
7603 
7604 	if (priv->plat->speed_mode_2500)
7605 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7606 
7607 	ret = stmmac_pcs_setup(ndev);
7608 	if (ret)
7609 		goto error_pcs_setup;
7610 
7611 	ret = stmmac_phy_setup(priv);
7612 	if (ret) {
7613 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7614 		goto error_phy_setup;
7615 	}
7616 
7617 	ret = register_netdev(ndev);
7618 	if (ret) {
7619 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7620 			__func__, ret);
7621 		goto error_netdev_register;
7622 	}
7623 
7624 #ifdef CONFIG_DEBUG_FS
7625 	stmmac_init_fs(ndev);
7626 #endif
7627 
7628 	if (priv->plat->dump_debug_regs)
7629 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7630 
7631 	/* Let pm_runtime_put() disable the clocks.
7632 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7633 	 */
7634 	pm_runtime_put(device);
7635 
7636 	return ret;
7637 
7638 error_netdev_register:
7639 	phylink_destroy(priv->phylink);
7640 error_phy_setup:
7641 	stmmac_pcs_clean(ndev);
7642 error_pcs_setup:
7643 	stmmac_mdio_unregister(ndev);
7644 error_mdio_register:
7645 	stmmac_napi_del(ndev);
7646 error_hw_init:
7647 	destroy_workqueue(priv->wq);
7648 error_wq_init:
7649 	bitmap_free(priv->af_xdp_zc_qps);
7650 
7651 	return ret;
7652 }
7653 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7654 
7655 /**
7656  * stmmac_dvr_remove
7657  * @dev: device pointer
7658  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7659  * changes the link status, releases the DMA descriptor rings.
7660  */
7661 void stmmac_dvr_remove(struct device *dev)
7662 {
7663 	struct net_device *ndev = dev_get_drvdata(dev);
7664 	struct stmmac_priv *priv = netdev_priv(ndev);
7665 
7666 	netdev_info(priv->dev, "%s: removing driver", __func__);
7667 
7668 	pm_runtime_get_sync(dev);
7669 
7670 	stmmac_stop_all_dma(priv);
7671 	stmmac_mac_set(priv, priv->ioaddr, false);
7672 	unregister_netdev(ndev);
7673 
7674 #ifdef CONFIG_DEBUG_FS
7675 	stmmac_exit_fs(ndev);
7676 #endif
7677 	phylink_destroy(priv->phylink);
7678 	if (priv->plat->stmmac_rst)
7679 		reset_control_assert(priv->plat->stmmac_rst);
7680 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7681 
7682 	stmmac_pcs_clean(ndev);
7683 	stmmac_mdio_unregister(ndev);
7684 
7685 	destroy_workqueue(priv->wq);
7686 	mutex_destroy(&priv->lock);
7687 	bitmap_free(priv->af_xdp_zc_qps);
7688 
7689 	pm_runtime_disable(dev);
7690 	pm_runtime_put_noidle(dev);
7691 }
7692 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7693 
7694 /**
7695  * stmmac_suspend - suspend callback
7696  * @dev: device pointer
7697  * Description: this is the function to suspend the device and it is called
7698  * by the platform driver to stop the network queue, release the resources,
7699  * program the PMT register (for WoL), clean and release driver resources.
7700  */
7701 int stmmac_suspend(struct device *dev)
7702 {
7703 	struct net_device *ndev = dev_get_drvdata(dev);
7704 	struct stmmac_priv *priv = netdev_priv(ndev);
7705 	u32 chan;
7706 
7707 	if (!ndev || !netif_running(ndev))
7708 		return 0;
7709 
7710 	mutex_lock(&priv->lock);
7711 
7712 	netif_device_detach(ndev);
7713 
7714 	stmmac_disable_all_queues(priv);
7715 
7716 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7717 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7718 
7719 	if (priv->eee_enabled) {
7720 		priv->tx_path_in_lpi_mode = false;
7721 		del_timer_sync(&priv->eee_ctrl_timer);
7722 	}
7723 
7724 	/* Stop TX/RX DMA */
7725 	stmmac_stop_all_dma(priv);
7726 
7727 	if (priv->plat->serdes_powerdown)
7728 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7729 
7730 	/* Enable Power down mode by programming the PMT regs */
7731 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7732 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7733 		priv->irq_wake = 1;
7734 	} else {
7735 		stmmac_mac_set(priv, priv->ioaddr, false);
7736 		pinctrl_pm_select_sleep_state(priv->device);
7737 	}
7738 
7739 	mutex_unlock(&priv->lock);
7740 
7741 	rtnl_lock();
7742 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7743 		phylink_suspend(priv->phylink, true);
7744 	} else {
7745 		if (device_may_wakeup(priv->device))
7746 			phylink_speed_down(priv->phylink, false);
7747 		phylink_suspend(priv->phylink, false);
7748 	}
7749 	rtnl_unlock();
7750 
7751 	if (stmmac_fpe_supported(priv))
7752 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7753 
7754 	priv->speed = SPEED_UNKNOWN;
7755 	return 0;
7756 }
7757 EXPORT_SYMBOL_GPL(stmmac_suspend);
7758 
7759 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7760 {
7761 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7762 
7763 	rx_q->cur_rx = 0;
7764 	rx_q->dirty_rx = 0;
7765 }
7766 
7767 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7768 {
7769 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7770 
7771 	tx_q->cur_tx = 0;
7772 	tx_q->dirty_tx = 0;
7773 	tx_q->mss = 0;
7774 
7775 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7776 }
7777 
7778 /**
7779  * stmmac_reset_queues_param - reset queue parameters
7780  * @priv: device pointer
7781  */
7782 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7783 {
7784 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7785 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7786 	u32 queue;
7787 
7788 	for (queue = 0; queue < rx_cnt; queue++)
7789 		stmmac_reset_rx_queue(priv, queue);
7790 
7791 	for (queue = 0; queue < tx_cnt; queue++)
7792 		stmmac_reset_tx_queue(priv, queue);
7793 }
7794 
7795 /**
7796  * stmmac_resume - resume callback
7797  * @dev: device pointer
7798  * Description: when resume this function is invoked to setup the DMA and CORE
7799  * in a usable state.
7800  */
7801 int stmmac_resume(struct device *dev)
7802 {
7803 	struct net_device *ndev = dev_get_drvdata(dev);
7804 	struct stmmac_priv *priv = netdev_priv(ndev);
7805 	int ret;
7806 
7807 	if (!netif_running(ndev))
7808 		return 0;
7809 
7810 	/* Power Down bit, into the PM register, is cleared
7811 	 * automatically as soon as a magic packet or a Wake-up frame
7812 	 * is received. Anyway, it's better to manually clear
7813 	 * this bit because it can generate problems while resuming
7814 	 * from another devices (e.g. serial console).
7815 	 */
7816 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7817 		mutex_lock(&priv->lock);
7818 		stmmac_pmt(priv, priv->hw, 0);
7819 		mutex_unlock(&priv->lock);
7820 		priv->irq_wake = 0;
7821 	} else {
7822 		pinctrl_pm_select_default_state(priv->device);
7823 		/* reset the phy so that it's ready */
7824 		if (priv->mii)
7825 			stmmac_mdio_reset(priv->mii);
7826 	}
7827 
7828 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7829 	    priv->plat->serdes_powerup) {
7830 		ret = priv->plat->serdes_powerup(ndev,
7831 						 priv->plat->bsp_priv);
7832 
7833 		if (ret < 0)
7834 			return ret;
7835 	}
7836 
7837 	rtnl_lock();
7838 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7839 		phylink_resume(priv->phylink);
7840 	} else {
7841 		phylink_resume(priv->phylink);
7842 		if (device_may_wakeup(priv->device))
7843 			phylink_speed_up(priv->phylink);
7844 	}
7845 	rtnl_unlock();
7846 
7847 	rtnl_lock();
7848 	mutex_lock(&priv->lock);
7849 
7850 	stmmac_reset_queues_param(priv);
7851 
7852 	stmmac_free_tx_skbufs(priv);
7853 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7854 
7855 	stmmac_hw_setup(ndev, false);
7856 	stmmac_init_coalesce(priv);
7857 	stmmac_set_rx_mode(ndev);
7858 
7859 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7860 
7861 	stmmac_enable_all_queues(priv);
7862 	stmmac_enable_all_dma_irq(priv);
7863 
7864 	mutex_unlock(&priv->lock);
7865 	rtnl_unlock();
7866 
7867 	netif_device_attach(ndev);
7868 
7869 	return 0;
7870 }
7871 EXPORT_SYMBOL_GPL(stmmac_resume);
7872 
7873 #ifndef MODULE
7874 static int __init stmmac_cmdline_opt(char *str)
7875 {
7876 	char *opt;
7877 
7878 	if (!str || !*str)
7879 		return 1;
7880 	while ((opt = strsep(&str, ",")) != NULL) {
7881 		if (!strncmp(opt, "debug:", 6)) {
7882 			if (kstrtoint(opt + 6, 0, &debug))
7883 				goto err;
7884 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7885 			if (kstrtoint(opt + 8, 0, &phyaddr))
7886 				goto err;
7887 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7888 			if (kstrtoint(opt + 7, 0, &buf_sz))
7889 				goto err;
7890 		} else if (!strncmp(opt, "tc:", 3)) {
7891 			if (kstrtoint(opt + 3, 0, &tc))
7892 				goto err;
7893 		} else if (!strncmp(opt, "watchdog:", 9)) {
7894 			if (kstrtoint(opt + 9, 0, &watchdog))
7895 				goto err;
7896 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7897 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7898 				goto err;
7899 		} else if (!strncmp(opt, "pause:", 6)) {
7900 			if (kstrtoint(opt + 6, 0, &pause))
7901 				goto err;
7902 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7903 			if (kstrtoint(opt + 10, 0, &eee_timer))
7904 				goto err;
7905 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7906 			if (kstrtoint(opt + 11, 0, &chain_mode))
7907 				goto err;
7908 		}
7909 	}
7910 	return 1;
7911 
7912 err:
7913 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7914 	return 1;
7915 }
7916 
7917 __setup("stmmaceth=", stmmac_cmdline_opt);
7918 #endif /* MODULE */
7919 
7920 static int __init stmmac_init(void)
7921 {
7922 #ifdef CONFIG_DEBUG_FS
7923 	/* Create debugfs main directory if it doesn't exist yet */
7924 	if (!stmmac_fs_dir)
7925 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7926 	register_netdevice_notifier(&stmmac_notifier);
7927 #endif
7928 
7929 	return 0;
7930 }
7931 
7932 static void __exit stmmac_exit(void)
7933 {
7934 #ifdef CONFIG_DEBUG_FS
7935 	unregister_netdevice_notifier(&stmmac_notifier);
7936 	debugfs_remove_recursive(stmmac_fs_dir);
7937 #endif
7938 }
7939 
7940 module_init(stmmac_init)
7941 module_exit(stmmac_exit)
7942 
7943 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7944 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7945 MODULE_LICENSE("GPL");
7946