1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54 
55 /* As long as the interface is active, we keep the timestamping counter enabled
56  * with fine resolution and binary rollover. This avoids non-monotonic behavior
57  * (clock jumps) when changing timestamping settings at runtime.
58  */
59 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 				 PTP_TCR_TSCTRLSSR)
61 
62 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
64 
65 /* Module parameters */
66 #define TX_TIMEO	5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70 
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74 
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78 
79 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
110 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
111 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
112 
113 #define STMMAC_DEFAULT_LPI_TIMER	1000
114 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
115 module_param(eee_timer, uint, 0644);
116 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
117 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
118 
119 /* By default the driver will use the ring mode to manage tx and rx descriptors,
120  * but the user can force the use of the chain mode instead of the ring
121  */
122 static unsigned int chain_mode;
123 module_param(chain_mode, int, 0444);
124 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
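/* A usage sketch, not taken from the driver documentation: when stmmac is
 * built as a module, chain mode can be requested at load time with something
 * like "modprobe stmmac chain_mode=1"; for a built-in driver the equivalent
 * kernel command line option would be "stmmac.chain_mode=1".
 */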
125 
126 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
127 /* For MSI interrupts handling */
128 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
129 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
131 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
132 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
133 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
135 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
137 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
138 					  u32 rxmode, u32 chan);
139 
140 #ifdef CONFIG_DEBUG_FS
141 static const struct net_device_ops stmmac_netdev_ops;
142 static void stmmac_init_fs(struct net_device *dev);
143 static void stmmac_exit_fs(struct net_device *dev);
144 #endif
145 
146 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
147 
148 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
149 {
150 	int ret = 0;
151 
152 	if (enabled) {
153 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
154 		if (ret)
155 			return ret;
156 		ret = clk_prepare_enable(priv->plat->pclk);
157 		if (ret) {
158 			clk_disable_unprepare(priv->plat->stmmac_clk);
159 			return ret;
160 		}
161 		if (priv->plat->clks_config) {
162 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
163 			if (ret) {
164 				clk_disable_unprepare(priv->plat->stmmac_clk);
165 				clk_disable_unprepare(priv->plat->pclk);
166 				return ret;
167 			}
168 		}
169 	} else {
170 		clk_disable_unprepare(priv->plat->stmmac_clk);
171 		clk_disable_unprepare(priv->plat->pclk);
172 		if (priv->plat->clks_config)
173 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
174 	}
175 
176 	return ret;
177 }
178 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
179 
180 /**
181  * stmmac_verify_args - verify the driver parameters.
182  * Description: it checks the driver parameters and sets a default in case of
183  * errors.
184  */
185 static void stmmac_verify_args(void)
186 {
187 	if (unlikely(watchdog < 0))
188 		watchdog = TX_TIMEO;
189 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
190 		buf_sz = DEFAULT_BUFSIZE;
191 	if (unlikely(flow_ctrl > 1))
192 		flow_ctrl = FLOW_AUTO;
193 	else if (likely(flow_ctrl < 0))
194 		flow_ctrl = FLOW_OFF;
195 	if (unlikely((pause < 0) || (pause > 0xffff)))
196 		pause = PAUSE_TIME;
197 }
198 
199 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
200 {
201 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
202 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
203 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
204 	u32 queue;
205 
206 	for (queue = 0; queue < maxq; queue++) {
207 		struct stmmac_channel *ch = &priv->channel[queue];
208 
209 		if (stmmac_xdp_is_enabled(priv) &&
210 		    test_bit(queue, priv->af_xdp_zc_qps)) {
211 			napi_disable(&ch->rxtx_napi);
212 			continue;
213 		}
214 
215 		if (queue < rx_queues_cnt)
216 			napi_disable(&ch->rx_napi);
217 		if (queue < tx_queues_cnt)
218 			napi_disable(&ch->tx_napi);
219 	}
220 }
221 
222 /**
223  * stmmac_disable_all_queues - Disable all queues
224  * @priv: driver private structure
225  */
226 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
227 {
228 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
229 	struct stmmac_rx_queue *rx_q;
230 	u32 queue;
231 
232 	/* synchronize_rcu() needed for pending XDP buffers to drain */
233 	for (queue = 0; queue < rx_queues_cnt; queue++) {
234 		rx_q = &priv->dma_conf.rx_queue[queue];
235 		if (rx_q->xsk_pool) {
236 			synchronize_rcu();
237 			break;
238 		}
239 	}
240 
241 	__stmmac_disable_all_queues(priv);
242 }
243 
244 /**
245  * stmmac_enable_all_queues - Enable all queues
246  * @priv: driver private structure
247  */
248 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
249 {
250 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
251 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
252 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
253 	u32 queue;
254 
255 	for (queue = 0; queue < maxq; queue++) {
256 		struct stmmac_channel *ch = &priv->channel[queue];
257 
258 		if (stmmac_xdp_is_enabled(priv) &&
259 		    test_bit(queue, priv->af_xdp_zc_qps)) {
260 			napi_enable(&ch->rxtx_napi);
261 			continue;
262 		}
263 
264 		if (queue < rx_queues_cnt)
265 			napi_enable(&ch->rx_napi);
266 		if (queue < tx_queues_cnt)
267 			napi_enable(&ch->tx_napi);
268 	}
269 }
270 
271 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
272 {
273 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
274 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
275 		queue_work(priv->wq, &priv->service_task);
276 }
277 
278 static void stmmac_global_err(struct stmmac_priv *priv)
279 {
280 	netif_carrier_off(priv->dev);
281 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
282 	stmmac_service_event_schedule(priv);
283 }
284 
285 /**
286  * stmmac_clk_csr_set - dynamically set the MDC clock
287  * @priv: driver private structure
288  * Description: this is to dynamically set the MDC clock according to the csr
289  * clock input.
290  * Note:
291  *	If a specific clk_csr value is passed from the platform
292  *	this means that the CSR Clock Range selection cannot be
293  *	changed at run-time and it is fixed (as reported in the driver
294  *	documentation). Otherwise, the driver will try to set the MDC
295  *	clock dynamically according to the actual clock input.
296  */
297 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
298 {
299 	unsigned long clk_rate;
300 
301 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
302 
303 	/* Platform provided default clk_csr would be assumed valid
304 	 * for all other cases except for the below mentioned ones.
305 	 * For values higher than the IEEE 802.3 specified frequency
306 	 * we cannot estimate the proper divider as the frequency of
307 	 * clk_csr_i is not known. So we do not change the default
308 	 * divider.
309 	 */
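	/* Illustrative example (assumed clock figure): when the platform has not
	 * fixed a high-frequency clk_csr value, a 75 MHz csr clock falls in the
	 * 60-100 MHz bracket below and maps to STMMAC_CSR_60_100M.
	 */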
310 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
311 		if (clk_rate < CSR_F_35M)
312 			priv->clk_csr = STMMAC_CSR_20_35M;
313 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
314 			priv->clk_csr = STMMAC_CSR_35_60M;
315 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
316 			priv->clk_csr = STMMAC_CSR_60_100M;
317 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
318 			priv->clk_csr = STMMAC_CSR_100_150M;
319 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
320 			priv->clk_csr = STMMAC_CSR_150_250M;
321 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
322 			priv->clk_csr = STMMAC_CSR_250_300M;
323 		else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
324 			priv->clk_csr = STMMAC_CSR_300_500M;
325 		else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
326 			priv->clk_csr = STMMAC_CSR_500_800M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
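/* Ring accounting sketch with illustrative numbers (not taken from hardware):
 * with dma_tx_size = 512, dirty_tx = 5 and cur_tx = 10, stmmac_tx_avail()
 * below returns 512 - 10 + 5 - 1 = 506 free descriptors; once cur_tx wraps so
 * that dirty_tx = 500 > cur_tx = 3, it returns 500 - 3 - 1 = 496. One slot is
 * always left unused so that a completely full ring is never confused with an
 * empty one.
 */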
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv)
394 {
395 	stmmac_set_eee_lpi_timer(priv, priv->hw, 0);
396 }
397 
398 static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv)
399 {
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function is to verify and enter in LPI mode in case of
407  * EEE.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have finished their work */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_sw_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function exits and disables EEE when the TX path is in
433  * the LPI state. It is called from the xmit path.
434  */
435 static void stmmac_disable_sw_eee_mode(struct stmmac_priv *priv)
436 {
437 	stmmac_reset_eee_mode(priv, priv->hw);
438 	del_timer_sync(&priv->eee_ctrl_timer);
439 	priv->tx_path_in_lpi_mode = false;
440 }
441 
442 /**
443  * stmmac_eee_ctrl_timer - EEE TX SW timer.
444  * @t:  timer_list struct containing private info
445  * Description:
446  *  if there is no data transfer and if we are not in LPI state,
447  *  then the MAC transmitter can be moved to the LPI state.
448  */
449 static void stmmac_eee_ctrl_timer(struct timer_list *t)
450 {
451 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
452 
453 	if (stmmac_enable_eee_mode(priv))
454 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
455 }
456 
457 /**
458  * stmmac_eee_init - init EEE
459  * @priv: driver private structure
460  * @active: indicates whether EEE should be enabled.
461  * Description:
462  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
463  *  can also manage EEE, this function enables the LPI state and starts the
464  *  related timer.
465  */
466 static void stmmac_eee_init(struct stmmac_priv *priv, bool active)
467 {
468 	priv->eee_active = active;
469 
470 	/* Check if MAC core supports the EEE feature. */
471 	if (!priv->dma_cap.eee) {
472 		priv->eee_enabled = false;
473 		return;
474 	}
475 
476 	mutex_lock(&priv->lock);
477 
478 	/* Check if it needs to be deactivated */
479 	if (!priv->eee_active) {
480 		if (priv->eee_enabled) {
481 			netdev_dbg(priv->dev, "disable EEE\n");
482 			priv->eee_sw_timer_en = true;
483 			stmmac_disable_hw_lpi_timer(priv);
484 			del_timer_sync(&priv->eee_ctrl_timer);
485 			stmmac_set_eee_timer(priv, priv->hw, 0,
486 					     STMMAC_DEFAULT_TWT_LS);
487 			if (priv->hw->xpcs)
488 				xpcs_config_eee(priv->hw->xpcs,
489 						priv->plat->mult_fact_100ns,
490 						false);
491 		}
492 		priv->eee_enabled = false;
493 		mutex_unlock(&priv->lock);
494 		return;
495 	}
496 
497 	if (priv->eee_active && !priv->eee_enabled) {
498 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
499 				     STMMAC_DEFAULT_TWT_LS);
500 		if (priv->hw->xpcs)
501 			xpcs_config_eee(priv->hw->xpcs,
502 					priv->plat->mult_fact_100ns,
503 					true);
504 	}
505 
506 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
507 		/* Use hardware LPI mode */
508 		del_timer_sync(&priv->eee_ctrl_timer);
509 		priv->tx_path_in_lpi_mode = false;
510 		priv->eee_sw_timer_en = false;
511 		stmmac_enable_hw_lpi_timer(priv);
512 	} else {
513 		/* Use software LPI mode */
514 		priv->eee_sw_timer_en = true;
515 		stmmac_disable_hw_lpi_timer(priv);
516 		mod_timer(&priv->eee_ctrl_timer,
517 			  STMMAC_LPI_T(priv->tx_lpi_timer));
518 	}
519 
520 	priv->eee_enabled = true;
521 
522 	mutex_unlock(&priv->lock);
523 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
524 }
525 
526 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
527  * @priv: driver private structure
528  * @p : descriptor pointer
529  * @skb : the socket buffer
530  * Description :
531  * This function will read the timestamp from the descriptor and pass it to
532  * the stack. It also performs some sanity checks.
533  */
534 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
535 				   struct dma_desc *p, struct sk_buff *skb)
536 {
537 	struct skb_shared_hwtstamps shhwtstamp;
538 	bool found = false;
539 	u64 ns = 0;
540 
541 	if (!priv->hwts_tx_en)
542 		return;
543 
544 	/* exit if skb doesn't support hw tstamp */
545 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
546 		return;
547 
548 	/* check tx tstamp status */
549 	if (stmmac_get_tx_timestamp_status(priv, p)) {
550 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
551 		found = true;
552 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
553 		found = true;
554 	}
555 
556 	if (found) {
557 		ns -= priv->plat->cdc_error_adj;
558 
559 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
560 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
561 
562 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
563 		/* pass tstamp to stack */
564 		skb_tstamp_tx(skb, &shhwtstamp);
565 	}
566 }
567 
568 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
569  * @priv: driver private structure
570  * @p : descriptor pointer
571  * @np : next descriptor pointer
572  * @skb : the socket buffer
573  * Description :
574  * This function will read the received packet's timestamp from the descriptor
575  * and pass it to the stack. It also performs some sanity checks.
576  */
577 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
578 				   struct dma_desc *np, struct sk_buff *skb)
579 {
580 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
581 	struct dma_desc *desc = p;
582 	u64 ns = 0;
583 
584 	if (!priv->hwts_rx_en)
585 		return;
586 	/* For GMAC4, the valid timestamp is from CTX next desc. */
587 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
588 		desc = np;
589 
590 	/* Check if timestamp is available */
591 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
592 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
593 
594 		ns -= priv->plat->cdc_error_adj;
595 
596 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
597 		shhwtstamp = skb_hwtstamps(skb);
598 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
599 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
600 	} else  {
601 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
602 	}
603 }
604 
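/* Userspace sketch (illustrative only, not part of the driver): the handler
 * below is typically reached through the SIOCSHWTSTAMP ioctl, e.g.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * where "eth0" and sock_fd (an already open socket) are assumptions made for
 * the example.
 */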
605 /**
606  *  stmmac_hwtstamp_set - control hardware timestamping.
607  *  @dev: device pointer.
608  *  @ifr: An IOCTL specific structure, that can contain a pointer to
609  *  a proprietary structure used to pass information to the driver.
610  *  Description:
611  *  This function configures the MAC to enable/disable both outgoing (TX)
612  *  and incoming (RX) packet timestamping based on user input.
613  *  Return Value:
614  *  0 on success and an appropriate -ve integer on failure.
615  */
616 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
617 {
618 	struct stmmac_priv *priv = netdev_priv(dev);
619 	struct hwtstamp_config config;
620 	u32 ptp_v2 = 0;
621 	u32 tstamp_all = 0;
622 	u32 ptp_over_ipv4_udp = 0;
623 	u32 ptp_over_ipv6_udp = 0;
624 	u32 ptp_over_ethernet = 0;
625 	u32 snap_type_sel = 0;
626 	u32 ts_master_en = 0;
627 	u32 ts_event_en = 0;
628 
629 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
630 		netdev_alert(priv->dev, "No support for HW time stamping\n");
631 		priv->hwts_tx_en = 0;
632 		priv->hwts_rx_en = 0;
633 
634 		return -EOPNOTSUPP;
635 	}
636 
637 	if (copy_from_user(&config, ifr->ifr_data,
638 			   sizeof(config)))
639 		return -EFAULT;
640 
641 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
642 		   __func__, config.flags, config.tx_type, config.rx_filter);
643 
644 	if (config.tx_type != HWTSTAMP_TX_OFF &&
645 	    config.tx_type != HWTSTAMP_TX_ON)
646 		return -ERANGE;
647 
648 	if (priv->adv_ts) {
649 		switch (config.rx_filter) {
650 		case HWTSTAMP_FILTER_NONE:
651 			/* do not timestamp any incoming packet */
652 			config.rx_filter = HWTSTAMP_FILTER_NONE;
653 			break;
654 
655 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
656 			/* PTP v1, UDP, any kind of event packet */
657 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
658 			/* 'xmac' hardware can support Sync, Pdelay_Req and
659 			 * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
660 			 * This leaves Delay_Req timestamps out.
661 			 * Enable all events *and* general purpose message
662 			 * timestamping
663 			 */
664 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
665 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
666 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
667 			break;
668 
669 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
670 			/* PTP v1, UDP, Sync packet */
671 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
672 			/* take time stamp for SYNC messages only */
673 			ts_event_en = PTP_TCR_TSEVNTENA;
674 
675 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
676 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
677 			break;
678 
679 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
680 			/* PTP v1, UDP, Delay_req packet */
681 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
682 			/* take time stamp for Delay_Req messages only */
683 			ts_master_en = PTP_TCR_TSMSTRENA;
684 			ts_event_en = PTP_TCR_TSEVNTENA;
685 
686 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
687 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
688 			break;
689 
690 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
691 			/* PTP v2, UDP, any kind of event packet */
692 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
693 			ptp_v2 = PTP_TCR_TSVER2ENA;
694 			/* take time stamp for all event messages */
695 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
696 
697 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
698 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
699 			break;
700 
701 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
702 			/* PTP v2, UDP, Sync packet */
703 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
704 			ptp_v2 = PTP_TCR_TSVER2ENA;
705 			/* take time stamp for SYNC messages only */
706 			ts_event_en = PTP_TCR_TSEVNTENA;
707 
708 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
709 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
710 			break;
711 
712 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
713 			/* PTP v2, UDP, Delay_req packet */
714 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
715 			ptp_v2 = PTP_TCR_TSVER2ENA;
716 			/* take time stamp for Delay_Req messages only */
717 			ts_master_en = PTP_TCR_TSMSTRENA;
718 			ts_event_en = PTP_TCR_TSEVNTENA;
719 
720 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
721 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
722 			break;
723 
724 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
725 			/* PTP v2/802.1AS, any layer, any kind of event packet */
726 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
727 			ptp_v2 = PTP_TCR_TSVER2ENA;
728 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
729 			if (priv->synopsys_id < DWMAC_CORE_4_10)
730 				ts_event_en = PTP_TCR_TSEVNTENA;
731 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
732 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
733 			ptp_over_ethernet = PTP_TCR_TSIPENA;
734 			break;
735 
736 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
737 			/* PTP v2/802.1AS, any layer, Sync packet */
738 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
739 			ptp_v2 = PTP_TCR_TSVER2ENA;
740 			/* take time stamp for SYNC messages only */
741 			ts_event_en = PTP_TCR_TSEVNTENA;
742 
743 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
744 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
745 			ptp_over_ethernet = PTP_TCR_TSIPENA;
746 			break;
747 
748 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
749 			/* PTP v2/802.1AS, any layer, Delay_req packet */
750 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
751 			ptp_v2 = PTP_TCR_TSVER2ENA;
752 			/* take time stamp for Delay_Req messages only */
753 			ts_master_en = PTP_TCR_TSMSTRENA;
754 			ts_event_en = PTP_TCR_TSEVNTENA;
755 
756 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
757 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
758 			ptp_over_ethernet = PTP_TCR_TSIPENA;
759 			break;
760 
761 		case HWTSTAMP_FILTER_NTP_ALL:
762 		case HWTSTAMP_FILTER_ALL:
763 			/* time stamp any incoming packet */
764 			config.rx_filter = HWTSTAMP_FILTER_ALL;
765 			tstamp_all = PTP_TCR_TSENALL;
766 			break;
767 
768 		default:
769 			return -ERANGE;
770 		}
771 	} else {
772 		switch (config.rx_filter) {
773 		case HWTSTAMP_FILTER_NONE:
774 			config.rx_filter = HWTSTAMP_FILTER_NONE;
775 			break;
776 		default:
777 			/* PTP v1, UDP, any kind of event packet */
778 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
779 			break;
780 		}
781 	}
782 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
783 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
784 
785 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
786 
787 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
788 		priv->systime_flags |= tstamp_all | ptp_v2 |
789 				       ptp_over_ethernet | ptp_over_ipv6_udp |
790 				       ptp_over_ipv4_udp | ts_event_en |
791 				       ts_master_en | snap_type_sel;
792 	}
793 
794 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
795 
796 	memcpy(&priv->tstamp_config, &config, sizeof(config));
797 
798 	return copy_to_user(ifr->ifr_data, &config,
799 			    sizeof(config)) ? -EFAULT : 0;
800 }
801 
802 /**
803  *  stmmac_hwtstamp_get - read hardware timestamping.
804  *  @dev: device pointer.
805  *  @ifr: An IOCTL specific structure, that can contain a pointer to
806  *  a proprietary structure used to pass information to the driver.
807  *  Description:
808  *  This function obtains the current hardware timestamping settings
809  *  as requested.
810  */
811 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
812 {
813 	struct stmmac_priv *priv = netdev_priv(dev);
814 	struct hwtstamp_config *config = &priv->tstamp_config;
815 
816 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
817 		return -EOPNOTSUPP;
818 
819 	return copy_to_user(ifr->ifr_data, config,
820 			    sizeof(*config)) ? -EFAULT : 0;
821 }
822 
823 /**
824  * stmmac_init_tstamp_counter - init hardware timestamping counter
825  * @priv: driver private structure
826  * @systime_flags: timestamping flags
827  * Description:
828  * Initialize hardware counter for packet timestamping.
829  * This is valid as long as the interface is open and not suspended.
830  * Will be rerun after resuming from suspend, in which case the timestamping
831  * flags updated by stmmac_hwtstamp_set() also need to be restored.
832  */
833 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
834 {
835 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
836 	struct timespec64 now;
837 	u32 sec_inc = 0;
838 	u64 temp = 0;
839 
840 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
841 		return -EOPNOTSUPP;
842 
843 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
844 	priv->systime_flags = systime_flags;
845 
846 	/* program Sub Second Increment reg */
847 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
848 					   priv->plat->clk_ptp_rate,
849 					   xmac, &sec_inc);
850 	temp = div_u64(1000000000ULL, sec_inc);
851 
852 	/* Store sub second increment for later use */
853 	priv->sub_second_inc = sec_inc;
854 
855 	/* calculate default added value:
856 	 * formula is :
857 	 * addend = (2^32)/freq_div_ratio;
858 	 * where, freq_div_ratio = 1e9ns/sec_inc
859 	 */
860 	temp = (u64)(temp << 32);
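	/* Worked example with assumed figures, for illustration only: with a
	 * 50 MHz clk_ptp_rate and sec_inc = 40 ns, temp = 10^9 / 40 = 25000000,
	 * so addend = (25000000 << 32) / 50000000 = 2^31 = 0x80000000, i.e. the
	 * 32-bit accumulator overflows every other PTP clock cycle.
	 */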
861 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
862 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
863 
864 	/* initialize system time */
865 	ktime_get_real_ts64(&now);
866 
867 	/* lower 32 bits of tv_sec are safe until y2106 */
868 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
869 
870 	return 0;
871 }
872 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
873 
874 /**
875  * stmmac_init_ptp - init PTP
876  * @priv: driver private structure
877  * Description: this is to verify whether the HW supports PTPv1 or PTPv2.
878  * This is done by looking at the HW cap. register.
879  * This function also registers the ptp driver.
880  */
881 static int stmmac_init_ptp(struct stmmac_priv *priv)
882 {
883 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
884 	int ret;
885 
886 	if (priv->plat->ptp_clk_freq_config)
887 		priv->plat->ptp_clk_freq_config(priv);
888 
889 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
890 	if (ret)
891 		return ret;
892 
893 	priv->adv_ts = 0;
894 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
895 	if (xmac && priv->dma_cap.atime_stamp)
896 		priv->adv_ts = 1;
897 	/* Dwmac 3.x core with extend_desc can support adv_ts */
898 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
899 		priv->adv_ts = 1;
900 
901 	if (priv->dma_cap.time_stamp)
902 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
903 
904 	if (priv->adv_ts)
905 		netdev_info(priv->dev,
906 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
907 
908 	priv->hwts_tx_en = 0;
909 	priv->hwts_rx_en = 0;
910 
911 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
912 		stmmac_hwtstamp_correct_latency(priv, priv);
913 
914 	return 0;
915 }
916 
917 static void stmmac_release_ptp(struct stmmac_priv *priv)
918 {
919 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
920 	stmmac_ptp_unregister(priv);
921 }
922 
923 /**
924  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
925  *  @priv: driver private structure
926  *  @duplex: duplex passed to the next function
927  *  Description: It is used for configuring the flow control in all queues
928  */
929 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
930 {
931 	u32 tx_cnt = priv->plat->tx_queues_to_use;
932 
933 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
934 			priv->pause, tx_cnt);
935 }
936 
937 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
938 					 phy_interface_t interface)
939 {
940 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
941 
942 	/* Refresh the MAC-specific capabilities */
943 	stmmac_mac_update_caps(priv);
944 
945 	config->mac_capabilities = priv->hw->link.caps;
946 
947 	if (priv->plat->max_speed)
948 		phylink_limit_mac_speed(config, priv->plat->max_speed);
949 
950 	return config->mac_capabilities;
951 }
952 
953 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
954 						 phy_interface_t interface)
955 {
956 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
957 	struct phylink_pcs *pcs;
958 
959 	if (priv->plat->select_pcs) {
960 		pcs = priv->plat->select_pcs(priv, interface);
961 		if (!IS_ERR(pcs))
962 			return pcs;
963 	}
964 
965 	return NULL;
966 }
967 
968 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
969 			      const struct phylink_link_state *state)
970 {
971 	/* Nothing to do, xpcs_config() handles everything */
972 }
973 
974 static void stmmac_mac_link_down(struct phylink_config *config,
975 				 unsigned int mode, phy_interface_t interface)
976 {
977 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
978 
979 	stmmac_mac_set(priv, priv->ioaddr, false);
980 	stmmac_eee_init(priv, false);
981 	stmmac_set_eee_pls(priv, priv->hw, false);
982 
983 	if (stmmac_fpe_supported(priv))
984 		stmmac_fpe_link_state_handle(priv, false);
985 }
986 
987 static void stmmac_mac_link_up(struct phylink_config *config,
988 			       struct phy_device *phy,
989 			       unsigned int mode, phy_interface_t interface,
990 			       int speed, int duplex,
991 			       bool tx_pause, bool rx_pause)
992 {
993 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
994 	u32 old_ctrl, ctrl;
995 
996 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
997 	    priv->plat->serdes_powerup)
998 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
999 
1000 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1001 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1002 
1003 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1004 		switch (speed) {
1005 		case SPEED_10000:
1006 			ctrl |= priv->hw->link.xgmii.speed10000;
1007 			break;
1008 		case SPEED_5000:
1009 			ctrl |= priv->hw->link.xgmii.speed5000;
1010 			break;
1011 		case SPEED_2500:
1012 			ctrl |= priv->hw->link.xgmii.speed2500;
1013 			break;
1014 		default:
1015 			return;
1016 		}
1017 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1018 		switch (speed) {
1019 		case SPEED_100000:
1020 			ctrl |= priv->hw->link.xlgmii.speed100000;
1021 			break;
1022 		case SPEED_50000:
1023 			ctrl |= priv->hw->link.xlgmii.speed50000;
1024 			break;
1025 		case SPEED_40000:
1026 			ctrl |= priv->hw->link.xlgmii.speed40000;
1027 			break;
1028 		case SPEED_25000:
1029 			ctrl |= priv->hw->link.xlgmii.speed25000;
1030 			break;
1031 		case SPEED_10000:
1032 			ctrl |= priv->hw->link.xgmii.speed10000;
1033 			break;
1034 		case SPEED_2500:
1035 			ctrl |= priv->hw->link.speed2500;
1036 			break;
1037 		case SPEED_1000:
1038 			ctrl |= priv->hw->link.speed1000;
1039 			break;
1040 		default:
1041 			return;
1042 		}
1043 	} else {
1044 		switch (speed) {
1045 		case SPEED_2500:
1046 			ctrl |= priv->hw->link.speed2500;
1047 			break;
1048 		case SPEED_1000:
1049 			ctrl |= priv->hw->link.speed1000;
1050 			break;
1051 		case SPEED_100:
1052 			ctrl |= priv->hw->link.speed100;
1053 			break;
1054 		case SPEED_10:
1055 			ctrl |= priv->hw->link.speed10;
1056 			break;
1057 		default:
1058 			return;
1059 		}
1060 	}
1061 
1062 	priv->speed = speed;
1063 
1064 	if (priv->plat->fix_mac_speed)
1065 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1066 
1067 	if (!duplex)
1068 		ctrl &= ~priv->hw->link.duplex;
1069 	else
1070 		ctrl |= priv->hw->link.duplex;
1071 
1072 	/* Flow Control operation */
1073 	if (rx_pause && tx_pause)
1074 		priv->flow_ctrl = FLOW_AUTO;
1075 	else if (rx_pause && !tx_pause)
1076 		priv->flow_ctrl = FLOW_RX;
1077 	else if (!rx_pause && tx_pause)
1078 		priv->flow_ctrl = FLOW_TX;
1079 	else
1080 		priv->flow_ctrl = FLOW_OFF;
1081 
1082 	stmmac_mac_flow_ctrl(priv, duplex);
1083 
1084 	if (ctrl != old_ctrl)
1085 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1086 
1087 	stmmac_mac_set(priv, priv->ioaddr, true);
1088 	if (phy && priv->dma_cap.eee) {
1089 		phy_eee_rx_clock_stop(phy, !(priv->plat->flags &
1090 					     STMMAC_FLAG_RX_CLK_RUNS_IN_LPI));
1091 		priv->tx_lpi_timer = phy->eee_cfg.tx_lpi_timer;
1092 		stmmac_eee_init(priv, phy->enable_tx_lpi);
1093 		stmmac_set_eee_pls(priv, priv->hw, true);
1094 	}
1095 
1096 	if (stmmac_fpe_supported(priv))
1097 		stmmac_fpe_link_state_handle(priv, true);
1098 
1099 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1100 		stmmac_hwtstamp_correct_latency(priv, priv);
1101 }
1102 
1103 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1104 	.mac_get_caps = stmmac_mac_get_caps,
1105 	.mac_select_pcs = stmmac_mac_select_pcs,
1106 	.mac_config = stmmac_mac_config,
1107 	.mac_link_down = stmmac_mac_link_down,
1108 	.mac_link_up = stmmac_mac_link_up,
1109 };
1110 
1111 /**
1112  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1113  * @priv: driver private structure
1114  * Description: this is to verify if the HW supports the PCS.
1115  * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1116  * configured for the TBI, RTBI, or SGMII PHY interface.
1117  */
1118 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1119 {
1120 	int interface = priv->plat->mac_interface;
1121 
1122 	if (priv->dma_cap.pcs) {
1123 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1124 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1125 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1127 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1128 			priv->hw->pcs = STMMAC_PCS_RGMII;
1129 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1130 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_SGMII;
1132 		}
1133 	}
1134 }
1135 
1136 /**
1137  * stmmac_init_phy - PHY initialization
1138  * @dev: net device structure
1139  * Description: it initializes the driver's PHY state, and attaches the PHY
1140  * to the MAC driver.
1141  *  Return value:
1142  *  0 on success
1143  */
1144 static int stmmac_init_phy(struct net_device *dev)
1145 {
1146 	struct stmmac_priv *priv = netdev_priv(dev);
1147 	struct fwnode_handle *phy_fwnode;
1148 	struct fwnode_handle *fwnode;
1149 	int ret;
1150 
1151 	if (!phylink_expects_phy(priv->phylink))
1152 		return 0;
1153 
1154 	fwnode = priv->plat->port_node;
1155 	if (!fwnode)
1156 		fwnode = dev_fwnode(priv->device);
1157 
1158 	if (fwnode)
1159 		phy_fwnode = fwnode_get_phy_node(fwnode);
1160 	else
1161 		phy_fwnode = NULL;
1162 
1163 	/* Some DT bindings do not set up the PHY handle. Let's try to
1164 	 * parse it manually.
1165 	 */
1166 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1167 		int addr = priv->plat->phy_addr;
1168 		struct phy_device *phydev;
1169 
1170 		if (addr < 0) {
1171 			netdev_err(priv->dev, "no phy found\n");
1172 			return -ENODEV;
1173 		}
1174 
1175 		phydev = mdiobus_get_phy(priv->mii, addr);
1176 		if (!phydev) {
1177 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1178 			return -ENODEV;
1179 		}
1180 
1181 		if (priv->dma_cap.eee)
1182 			phy_support_eee(phydev);
1183 
1184 		ret = phylink_connect_phy(priv->phylink, phydev);
1185 	} else {
1186 		fwnode_handle_put(phy_fwnode);
1187 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 	}
1189 
1190 	if (ret == 0) {
1191 		struct ethtool_keee eee;
1192 
1193 		/* Configure phylib's copy of the LPI timer */
1194 		if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1195 			eee.tx_lpi_timer = priv->tx_lpi_timer;
1196 			phylink_ethtool_set_eee(priv->phylink, &eee);
1197 		}
1198 	}
1199 
1200 	if (!priv->plat->pmt) {
1201 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1202 
1203 		phylink_ethtool_get_wol(priv->phylink, &wol);
1204 		device_set_wakeup_capable(priv->device, !!wol.supported);
1205 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1206 	}
1207 
1208 	return ret;
1209 }
1210 
1211 static int stmmac_phy_setup(struct stmmac_priv *priv)
1212 {
1213 	struct stmmac_mdio_bus_data *mdio_bus_data;
1214 	int mode = priv->plat->phy_interface;
1215 	struct fwnode_handle *fwnode;
1216 	struct phylink_pcs *pcs;
1217 	struct phylink *phylink;
1218 
1219 	priv->phylink_config.dev = &priv->dev->dev;
1220 	priv->phylink_config.type = PHYLINK_NETDEV;
1221 	priv->phylink_config.mac_managed_pm = true;
1222 
1223 	/* Stmmac always requires an RX clock for hardware initialization */
1224 	priv->phylink_config.mac_requires_rxc = true;
1225 
1226 	mdio_bus_data = priv->plat->mdio_bus_data;
1227 	if (mdio_bus_data)
1228 		priv->phylink_config.default_an_inband =
1229 			mdio_bus_data->default_an_inband;
1230 
1231 	/* Set the platform/firmware specified interface mode. Note, phylink
1232 	 * deals with the PHY interface mode, not the MAC interface mode.
1233 	 */
1234 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1235 
1236 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1237 	if (priv->hw->xpcs)
1238 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1239 	else
1240 		pcs = priv->hw->phylink_pcs;
1241 
1242 	if (pcs)
1243 		phy_interface_or(priv->phylink_config.supported_interfaces,
1244 				 priv->phylink_config.supported_interfaces,
1245 				 pcs->supported_interfaces);
1246 
1247 	fwnode = priv->plat->port_node;
1248 	if (!fwnode)
1249 		fwnode = dev_fwnode(priv->device);
1250 
1251 	phylink = phylink_create(&priv->phylink_config, fwnode,
1252 				 mode, &stmmac_phylink_mac_ops);
1253 	if (IS_ERR(phylink))
1254 		return PTR_ERR(phylink);
1255 
1256 	priv->phylink = phylink;
1257 	return 0;
1258 }
1259 
1260 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1261 				    struct stmmac_dma_conf *dma_conf)
1262 {
1263 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1264 	unsigned int desc_size;
1265 	void *head_rx;
1266 	u32 queue;
1267 
1268 	/* Display RX rings */
1269 	for (queue = 0; queue < rx_cnt; queue++) {
1270 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1271 
1272 		pr_info("\tRX Queue %u rings\n", queue);
1273 
1274 		if (priv->extend_desc) {
1275 			head_rx = (void *)rx_q->dma_erx;
1276 			desc_size = sizeof(struct dma_extended_desc);
1277 		} else {
1278 			head_rx = (void *)rx_q->dma_rx;
1279 			desc_size = sizeof(struct dma_desc);
1280 		}
1281 
1282 		/* Display RX ring */
1283 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1284 				    rx_q->dma_rx_phy, desc_size);
1285 	}
1286 }
1287 
1288 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1289 				    struct stmmac_dma_conf *dma_conf)
1290 {
1291 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1292 	unsigned int desc_size;
1293 	void *head_tx;
1294 	u32 queue;
1295 
1296 	/* Display TX rings */
1297 	for (queue = 0; queue < tx_cnt; queue++) {
1298 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1299 
1300 		pr_info("\tTX Queue %d rings\n", queue);
1301 
1302 		if (priv->extend_desc) {
1303 			head_tx = (void *)tx_q->dma_etx;
1304 			desc_size = sizeof(struct dma_extended_desc);
1305 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1306 			head_tx = (void *)tx_q->dma_entx;
1307 			desc_size = sizeof(struct dma_edesc);
1308 		} else {
1309 			head_tx = (void *)tx_q->dma_tx;
1310 			desc_size = sizeof(struct dma_desc);
1311 		}
1312 
1313 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1314 				    tx_q->dma_tx_phy, desc_size);
1315 	}
1316 }
1317 
1318 static void stmmac_display_rings(struct stmmac_priv *priv,
1319 				 struct stmmac_dma_conf *dma_conf)
1320 {
1321 	/* Display RX ring */
1322 	stmmac_display_rx_rings(priv, dma_conf);
1323 
1324 	/* Display TX ring */
1325 	stmmac_display_tx_rings(priv, dma_conf);
1326 }
1327 
1328 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1329 {
1330 	if (stmmac_xdp_is_enabled(priv))
1331 		return XDP_PACKET_HEADROOM;
1332 
1333 	return 0;
1334 }
1335 
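/* For instance, a hypothetical MTU of 3000 bytes is at least BUF_SIZE_2KiB but
 * below BUF_SIZE_4KiB, so stmmac_set_bfsize() below returns BUF_SIZE_4KiB.
 */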
1336 static int stmmac_set_bfsize(int mtu, int bufsize)
1337 {
1338 	int ret = bufsize;
1339 
1340 	if (mtu >= BUF_SIZE_8KiB)
1341 		ret = BUF_SIZE_16KiB;
1342 	else if (mtu >= BUF_SIZE_4KiB)
1343 		ret = BUF_SIZE_8KiB;
1344 	else if (mtu >= BUF_SIZE_2KiB)
1345 		ret = BUF_SIZE_4KiB;
1346 	else if (mtu > DEFAULT_BUFSIZE)
1347 		ret = BUF_SIZE_2KiB;
1348 	else
1349 		ret = DEFAULT_BUFSIZE;
1350 
1351 	return ret;
1352 }
1353 
1354 /**
1355  * stmmac_clear_rx_descriptors - clear RX descriptors
1356  * @priv: driver private structure
1357  * @dma_conf: structure to take the dma data
1358  * @queue: RX queue index
1359  * Description: this function is called to clear the RX descriptors
1360  * whether basic or extended descriptors are in use.
1361  */
1362 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1363 					struct stmmac_dma_conf *dma_conf,
1364 					u32 queue)
1365 {
1366 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1367 	int i;
1368 
1369 	/* Clear the RX descriptors */
1370 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1371 		if (priv->extend_desc)
1372 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1373 					priv->use_riwt, priv->mode,
1374 					(i == dma_conf->dma_rx_size - 1),
1375 					dma_conf->dma_buf_sz);
1376 		else
1377 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1378 					priv->use_riwt, priv->mode,
1379 					(i == dma_conf->dma_rx_size - 1),
1380 					dma_conf->dma_buf_sz);
1381 }
1382 
1383 /**
1384  * stmmac_clear_tx_descriptors - clear tx descriptors
1385  * @priv: driver private structure
1386  * @dma_conf: structure to take the dma data
1387  * @queue: TX queue index.
1388  * Description: this function is called to clear the TX descriptors
1389  * whether basic or extended descriptors are in use.
1390  */
1391 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1392 					struct stmmac_dma_conf *dma_conf,
1393 					u32 queue)
1394 {
1395 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1396 	int i;
1397 
1398 	/* Clear the TX descriptors */
1399 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1400 		int last = (i == (dma_conf->dma_tx_size - 1));
1401 		struct dma_desc *p;
1402 
1403 		if (priv->extend_desc)
1404 			p = &tx_q->dma_etx[i].basic;
1405 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1406 			p = &tx_q->dma_entx[i].basic;
1407 		else
1408 			p = &tx_q->dma_tx[i];
1409 
1410 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1411 	}
1412 }
1413 
1414 /**
1415  * stmmac_clear_descriptors - clear descriptors
1416  * @priv: driver private structure
1417  * @dma_conf: structure to take the dma data
1418  * Description: this function is called to clear the TX and RX descriptors
1419  * whether basic or extended descriptors are in use.
1420  */
1421 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1422 				     struct stmmac_dma_conf *dma_conf)
1423 {
1424 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1425 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1426 	u32 queue;
1427 
1428 	/* Clear the RX descriptors */
1429 	for (queue = 0; queue < rx_queue_cnt; queue++)
1430 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1431 
1432 	/* Clear the TX descriptors */
1433 	for (queue = 0; queue < tx_queue_cnt; queue++)
1434 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1435 }
1436 
1437 /**
1438  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1439  * @priv: driver private structure
1440  * @dma_conf: structure to take the dma data
1441  * @p: descriptor pointer
1442  * @i: descriptor index
1443  * @flags: gfp flag
1444  * @queue: RX queue index
1445  * Description: this function is called to allocate a receive buffer, perform
1446  * the DMA mapping and init the descriptor.
1447  */
1448 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1449 				  struct stmmac_dma_conf *dma_conf,
1450 				  struct dma_desc *p,
1451 				  int i, gfp_t flags, u32 queue)
1452 {
1453 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1454 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1455 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1456 
1457 	if (priv->dma_cap.host_dma_width <= 32)
1458 		gfp |= GFP_DMA32;
1459 
1460 	if (!buf->page) {
1461 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1462 		if (!buf->page)
1463 			return -ENOMEM;
1464 		buf->page_offset = stmmac_rx_offset(priv);
1465 	}
1466 
1467 	if (priv->sph && !buf->sec_page) {
1468 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1469 		if (!buf->sec_page)
1470 			return -ENOMEM;
1471 
1472 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1473 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1474 	} else {
1475 		buf->sec_page = NULL;
1476 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1477 	}
1478 
1479 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1480 
1481 	stmmac_set_desc_addr(priv, p, buf->addr);
1482 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1483 		stmmac_init_desc3(priv, p);
1484 
1485 	return 0;
1486 }
1487 
1488 /**
1489  * stmmac_free_rx_buffer - free RX dma buffers
1490  * @priv: private structure
1491  * @rx_q: RX queue
1492  * @i: buffer index.
1493  */
1494 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1495 				  struct stmmac_rx_queue *rx_q,
1496 				  int i)
1497 {
1498 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1499 
1500 	if (buf->page)
1501 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1502 	buf->page = NULL;
1503 
1504 	if (buf->sec_page)
1505 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1506 	buf->sec_page = NULL;
1507 }
1508 
1509 /**
1510  * stmmac_free_tx_buffer - free TX dma buffers
1511  * @priv: private structure
1512  * @dma_conf: structure to take the dma data
1513  * @queue: TX queue index
1514  * @i: buffer index.
1515  */
1516 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1517 				  struct stmmac_dma_conf *dma_conf,
1518 				  u32 queue, int i)
1519 {
1520 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1521 
1522 	if (tx_q->tx_skbuff_dma[i].buf &&
1523 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1524 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1525 			dma_unmap_page(priv->device,
1526 				       tx_q->tx_skbuff_dma[i].buf,
1527 				       tx_q->tx_skbuff_dma[i].len,
1528 				       DMA_TO_DEVICE);
1529 		else
1530 			dma_unmap_single(priv->device,
1531 					 tx_q->tx_skbuff_dma[i].buf,
1532 					 tx_q->tx_skbuff_dma[i].len,
1533 					 DMA_TO_DEVICE);
1534 	}
1535 
1536 	if (tx_q->xdpf[i] &&
1537 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1538 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1539 		xdp_return_frame(tx_q->xdpf[i]);
1540 		tx_q->xdpf[i] = NULL;
1541 	}
1542 
1543 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1544 		tx_q->xsk_frames_done++;
1545 
1546 	if (tx_q->tx_skbuff[i] &&
1547 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1548 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1549 		tx_q->tx_skbuff[i] = NULL;
1550 	}
1551 
1552 	tx_q->tx_skbuff_dma[i].buf = 0;
1553 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1554 }
1555 
1556 /**
1557  * dma_free_rx_skbufs - free RX dma buffers
1558  * @priv: private structure
1559  * @dma_conf: structure to take the dma data
1560  * @queue: RX queue index
1561  */
1562 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1563 			       struct stmmac_dma_conf *dma_conf,
1564 			       u32 queue)
1565 {
1566 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1567 	int i;
1568 
1569 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1570 		stmmac_free_rx_buffer(priv, rx_q, i);
1571 }
1572 
1573 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1574 				   struct stmmac_dma_conf *dma_conf,
1575 				   u32 queue, gfp_t flags)
1576 {
1577 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1578 	int i;
1579 
1580 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1581 		struct dma_desc *p;
1582 		int ret;
1583 
1584 		if (priv->extend_desc)
1585 			p = &((rx_q->dma_erx + i)->basic);
1586 		else
1587 			p = rx_q->dma_rx + i;
1588 
1589 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1590 					     queue);
1591 		if (ret)
1592 			return ret;
1593 
1594 		rx_q->buf_alloc_num++;
1595 	}
1596 
1597 	return 0;
1598 }
1599 
1600 /**
1601  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1602  * @priv: private structure
1603  * @dma_conf: structure to take the dma data
1604  * @queue: RX queue index
1605  */
1606 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1607 				struct stmmac_dma_conf *dma_conf,
1608 				u32 queue)
1609 {
1610 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1611 	int i;
1612 
1613 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1614 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1615 
1616 		if (!buf->xdp)
1617 			continue;
1618 
1619 		xsk_buff_free(buf->xdp);
1620 		buf->xdp = NULL;
1621 	}
1622 }
1623 
1624 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1625 				      struct stmmac_dma_conf *dma_conf,
1626 				      u32 queue)
1627 {
1628 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1629 	int i;
1630 
1631 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1632 	 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1633 	 * use this macro to make sure there are no size violations.
1634 	 */
1635 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1636 
1637 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1638 		struct stmmac_rx_buffer *buf;
1639 		dma_addr_t dma_addr;
1640 		struct dma_desc *p;
1641 
1642 		if (priv->extend_desc)
1643 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1644 		else
1645 			p = rx_q->dma_rx + i;
1646 
1647 		buf = &rx_q->buf_pool[i];
1648 
1649 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1650 		if (!buf->xdp)
1651 			return -ENOMEM;
1652 
1653 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1654 		stmmac_set_desc_addr(priv, p, dma_addr);
1655 		rx_q->buf_alloc_num++;
1656 	}
1657 
1658 	return 0;
1659 }
1660 
1661 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1662 {
1663 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1664 		return NULL;
1665 
1666 	return xsk_get_pool_from_qid(priv->dev, queue);
1667 }
1668 
1669 /**
1670  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1671  * @priv: driver private structure
1672  * @dma_conf: structure to take the dma data
1673  * @queue: RX queue index
1674  * @flags: gfp flag.
1675  * Description: this function initializes the DMA RX descriptors
1676  * and allocates the socket buffers. It supports the chained and ring
1677  * modes.
1678  */
1679 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1680 				    struct stmmac_dma_conf *dma_conf,
1681 				    u32 queue, gfp_t flags)
1682 {
1683 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1684 	int ret;
1685 
1686 	netif_dbg(priv, probe, priv->dev,
1687 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1688 		  (u32)rx_q->dma_rx_phy);
1689 
1690 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1691 
1692 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1693 
1694 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1695 
1696 	if (rx_q->xsk_pool) {
1697 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1698 						   MEM_TYPE_XSK_BUFF_POOL,
1699 						   NULL));
1700 		netdev_info(priv->dev,
1701 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1702 			    rx_q->queue_index);
1703 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1704 	} else {
1705 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1706 						   MEM_TYPE_PAGE_POOL,
1707 						   rx_q->page_pool));
1708 		netdev_info(priv->dev,
1709 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1710 			    rx_q->queue_index);
1711 	}
1712 
1713 	if (rx_q->xsk_pool) {
1714 		/* RX XDP ZC buffer pool may not be populated, e.g.
1715 		 * xdpsock TX-only.
1716 		 */
1717 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1718 	} else {
1719 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1720 		if (ret < 0)
1721 			return -ENOMEM;
1722 	}
1723 
1724 	/* Setup the chained descriptor addresses */
1725 	if (priv->mode == STMMAC_CHAIN_MODE) {
1726 		if (priv->extend_desc)
1727 			stmmac_mode_init(priv, rx_q->dma_erx,
1728 					 rx_q->dma_rx_phy,
1729 					 dma_conf->dma_rx_size, 1);
1730 		else
1731 			stmmac_mode_init(priv, rx_q->dma_rx,
1732 					 rx_q->dma_rx_phy,
1733 					 dma_conf->dma_rx_size, 0);
1734 	}
1735 
1736 	return 0;
1737 }
1738 
1739 static int init_dma_rx_desc_rings(struct net_device *dev,
1740 				  struct stmmac_dma_conf *dma_conf,
1741 				  gfp_t flags)
1742 {
1743 	struct stmmac_priv *priv = netdev_priv(dev);
1744 	u32 rx_count = priv->plat->rx_queues_to_use;
1745 	int queue;
1746 	int ret;
1747 
1748 	/* RX INITIALIZATION */
1749 	netif_dbg(priv, probe, priv->dev,
1750 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1751 
1752 	for (queue = 0; queue < rx_count; queue++) {
1753 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1754 		if (ret)
1755 			goto err_init_rx_buffers;
1756 	}
1757 
1758 	return 0;
1759 
1760 err_init_rx_buffers:
1761 	while (queue >= 0) {
1762 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1763 
1764 		if (rx_q->xsk_pool)
1765 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1766 		else
1767 			dma_free_rx_skbufs(priv, dma_conf, queue);
1768 
1769 		rx_q->buf_alloc_num = 0;
1770 		rx_q->xsk_pool = NULL;
1771 
1772 		queue--;
1773 	}
1774 
1775 	return ret;
1776 }
1777 
1778 /**
1779  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1780  * @priv: driver private structure
1781  * @dma_conf: structure to take the dma data
1782  * @queue: TX queue index
1783  * Description: this function initializes the DMA TX descriptors
1784  * and allocates the socket buffers. It supports the chained and ring
1785  * modes.
1786  */
1787 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1788 				    struct stmmac_dma_conf *dma_conf,
1789 				    u32 queue)
1790 {
1791 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1792 	int i;
1793 
1794 	netif_dbg(priv, probe, priv->dev,
1795 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1796 		  (u32)tx_q->dma_tx_phy);
1797 
1798 	/* Setup the chained descriptor addresses */
1799 	if (priv->mode == STMMAC_CHAIN_MODE) {
1800 		if (priv->extend_desc)
1801 			stmmac_mode_init(priv, tx_q->dma_etx,
1802 					 tx_q->dma_tx_phy,
1803 					 dma_conf->dma_tx_size, 1);
1804 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1805 			stmmac_mode_init(priv, tx_q->dma_tx,
1806 					 tx_q->dma_tx_phy,
1807 					 dma_conf->dma_tx_size, 0);
1808 	}
1809 
1810 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1811 
1812 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1813 		struct dma_desc *p;
1814 
1815 		if (priv->extend_desc)
1816 			p = &((tx_q->dma_etx + i)->basic);
1817 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1818 			p = &((tx_q->dma_entx + i)->basic);
1819 		else
1820 			p = tx_q->dma_tx + i;
1821 
1822 		stmmac_clear_desc(priv, p);
1823 
1824 		tx_q->tx_skbuff_dma[i].buf = 0;
1825 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1826 		tx_q->tx_skbuff_dma[i].len = 0;
1827 		tx_q->tx_skbuff_dma[i].last_segment = false;
1828 		tx_q->tx_skbuff[i] = NULL;
1829 	}
1830 
1831 	return 0;
1832 }
1833 
1834 static int init_dma_tx_desc_rings(struct net_device *dev,
1835 				  struct stmmac_dma_conf *dma_conf)
1836 {
1837 	struct stmmac_priv *priv = netdev_priv(dev);
1838 	u32 tx_queue_cnt;
1839 	u32 queue;
1840 
1841 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1842 
1843 	for (queue = 0; queue < tx_queue_cnt; queue++)
1844 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1845 
1846 	return 0;
1847 }
1848 
1849 /**
1850  * init_dma_desc_rings - init the RX/TX descriptor rings
1851  * @dev: net device structure
1852  * @dma_conf: structure to take the dma data
1853  * @flags: gfp flag.
1854  * Description: this function initializes the DMA RX/TX descriptors
1855  * and allocates the socket buffers. It supports the chained and ring
1856  * modes.
1857  */
1858 static int init_dma_desc_rings(struct net_device *dev,
1859 			       struct stmmac_dma_conf *dma_conf,
1860 			       gfp_t flags)
1861 {
1862 	struct stmmac_priv *priv = netdev_priv(dev);
1863 	int ret;
1864 
1865 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1866 	if (ret)
1867 		return ret;
1868 
1869 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1870 
1871 	stmmac_clear_descriptors(priv, dma_conf);
1872 
1873 	if (netif_msg_hw(priv))
1874 		stmmac_display_rings(priv, dma_conf);
1875 
1876 	return ret;
1877 }
1878 
1879 /**
1880  * dma_free_tx_skbufs - free TX dma buffers
1881  * @priv: private structure
1882  * @dma_conf: structure to take the dma data
1883  * @queue: TX queue index
1884  */
1885 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1886 			       struct stmmac_dma_conf *dma_conf,
1887 			       u32 queue)
1888 {
1889 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1890 	int i;
1891 
1892 	tx_q->xsk_frames_done = 0;
1893 
1894 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1895 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1896 
1897 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1898 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1899 		tx_q->xsk_frames_done = 0;
1900 		tx_q->xsk_pool = NULL;
1901 	}
1902 }
1903 
1904 /**
1905  * stmmac_free_tx_skbufs - free TX skb buffers
1906  * @priv: private structure
1907  */
1908 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1909 {
1910 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1911 	u32 queue;
1912 
1913 	for (queue = 0; queue < tx_queue_cnt; queue++)
1914 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1915 }
1916 
1917 /**
1918  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1919  * @priv: private structure
1920  * @dma_conf: structure to take the dma data
1921  * @queue: RX queue index
1922  */
1923 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1924 					 struct stmmac_dma_conf *dma_conf,
1925 					 u32 queue)
1926 {
1927 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1928 
1929 	/* Release the DMA RX socket buffers */
1930 	if (rx_q->xsk_pool)
1931 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1932 	else
1933 		dma_free_rx_skbufs(priv, dma_conf, queue);
1934 
1935 	rx_q->buf_alloc_num = 0;
1936 	rx_q->xsk_pool = NULL;
1937 
1938 	/* Free DMA regions of consistent memory previously allocated */
1939 	if (!priv->extend_desc)
1940 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1941 				  sizeof(struct dma_desc),
1942 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1943 	else
1944 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1945 				  sizeof(struct dma_extended_desc),
1946 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1947 
1948 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1949 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1950 
1951 	kfree(rx_q->buf_pool);
1952 	if (rx_q->page_pool)
1953 		page_pool_destroy(rx_q->page_pool);
1954 }
1955 
1956 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1957 				       struct stmmac_dma_conf *dma_conf)
1958 {
1959 	u32 rx_count = priv->plat->rx_queues_to_use;
1960 	u32 queue;
1961 
1962 	/* Free RX queue resources */
1963 	for (queue = 0; queue < rx_count; queue++)
1964 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1965 }
1966 
1967 /**
1968  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1969  * @priv: private structure
1970  * @dma_conf: structure to take the dma data
1971  * @queue: TX queue index
1972  */
1973 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1974 					 struct stmmac_dma_conf *dma_conf,
1975 					 u32 queue)
1976 {
1977 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1978 	size_t size;
1979 	void *addr;
1980 
1981 	/* Release the DMA TX socket buffers */
1982 	dma_free_tx_skbufs(priv, dma_conf, queue);
1983 
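	/* Free the descriptor ring using the same layout that was chosen at
	 * allocation time: extended, TBS-capable or basic descriptors.
	 */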
1984 	if (priv->extend_desc) {
1985 		size = sizeof(struct dma_extended_desc);
1986 		addr = tx_q->dma_etx;
1987 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1988 		size = sizeof(struct dma_edesc);
1989 		addr = tx_q->dma_entx;
1990 	} else {
1991 		size = sizeof(struct dma_desc);
1992 		addr = tx_q->dma_tx;
1993 	}
1994 
1995 	size *= dma_conf->dma_tx_size;
1996 
1997 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1998 
1999 	kfree(tx_q->tx_skbuff_dma);
2000 	kfree(tx_q->tx_skbuff);
2001 }
2002 
2003 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2004 				       struct stmmac_dma_conf *dma_conf)
2005 {
2006 	u32 tx_count = priv->plat->tx_queues_to_use;
2007 	u32 queue;
2008 
2009 	/* Free TX queue resources */
2010 	for (queue = 0; queue < tx_count; queue++)
2011 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2012 }
2013 
2014 /**
2015  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2016  * @priv: private structure
2017  * @dma_conf: structure to take the dma data
2018  * @queue: RX queue index
2019  * Description: according to which descriptor can be used (extended or basic),
2020  * this function allocates the resources for the RX path of a single queue:
2021  * the page pool, the buffer bookkeeping array and the descriptor ring,
2022  * so that the zero-copy mechanism can be used.
2023  */
2024 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2025 					 struct stmmac_dma_conf *dma_conf,
2026 					 u32 queue)
2027 {
2028 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2029 	struct stmmac_channel *ch = &priv->channel[queue];
2030 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2031 	struct page_pool_params pp_params = { 0 };
2032 	unsigned int num_pages;
2033 	unsigned int napi_id;
2034 	int ret;
2035 
2036 	rx_q->queue_index = queue;
2037 	rx_q->priv_data = priv;
2038 
2039 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2040 	pp_params.pool_size = dma_conf->dma_rx_size;
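	/* Derive the page order for the pool from the configured DMA buffer
	 * size, so that buffers larger than one page are backed by
	 * higher-order allocations.
	 */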
2041 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2042 	pp_params.order = ilog2(num_pages);
2043 	pp_params.nid = dev_to_node(priv->device);
2044 	pp_params.dev = priv->device;
2045 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2046 	pp_params.offset = stmmac_rx_offset(priv);
2047 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2048 
2049 	rx_q->page_pool = page_pool_create(&pp_params);
2050 	if (IS_ERR(rx_q->page_pool)) {
2051 		ret = PTR_ERR(rx_q->page_pool);
2052 		rx_q->page_pool = NULL;
2053 		return ret;
2054 	}
2055 
2056 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2057 				 sizeof(*rx_q->buf_pool),
2058 				 GFP_KERNEL);
2059 	if (!rx_q->buf_pool)
2060 		return -ENOMEM;
2061 
2062 	if (priv->extend_desc) {
2063 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2064 						   dma_conf->dma_rx_size *
2065 						   sizeof(struct dma_extended_desc),
2066 						   &rx_q->dma_rx_phy,
2067 						   GFP_KERNEL);
2068 		if (!rx_q->dma_erx)
2069 			return -ENOMEM;
2070 
2071 	} else {
2072 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2073 						  dma_conf->dma_rx_size *
2074 						  sizeof(struct dma_desc),
2075 						  &rx_q->dma_rx_phy,
2076 						  GFP_KERNEL);
2077 		if (!rx_q->dma_rx)
2078 			return -ENOMEM;
2079 	}
2080 
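	/* Register the XDP rxq info against the NAPI context that will
	 * service this queue: the combined rx/tx NAPI when AF_XDP zero-copy
	 * is enabled on it, the plain RX NAPI otherwise.
	 */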
2081 	if (stmmac_xdp_is_enabled(priv) &&
2082 	    test_bit(queue, priv->af_xdp_zc_qps))
2083 		napi_id = ch->rxtx_napi.napi_id;
2084 	else
2085 		napi_id = ch->rx_napi.napi_id;
2086 
2087 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2088 			       rx_q->queue_index,
2089 			       napi_id);
2090 	if (ret) {
2091 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2092 		return -EINVAL;
2093 	}
2094 
2095 	return 0;
2096 }
2097 
2098 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2099 				       struct stmmac_dma_conf *dma_conf)
2100 {
2101 	u32 rx_count = priv->plat->rx_queues_to_use;
2102 	u32 queue;
2103 	int ret;
2104 
2105 	/* RX queues buffers and DMA */
2106 	for (queue = 0; queue < rx_count; queue++) {
2107 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2108 		if (ret)
2109 			goto err_dma;
2110 	}
2111 
2112 	return 0;
2113 
2114 err_dma:
2115 	free_dma_rx_desc_resources(priv, dma_conf);
2116 
2117 	return ret;
2118 }
2119 
2120 /**
2121  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2122  * @priv: private structure
2123  * @dma_conf: structure to take the dma data
2124  * @queue: TX queue index
2125  * Description: according to which descriptor can be used (extended or basic),
2126  * this function allocates the resources for the TX path of a single queue:
2127  * the descriptor ring and the per-descriptor bookkeeping arrays
2128  * (tx_skbuff and tx_skbuff_dma).
2129  */
2130 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2131 					 struct stmmac_dma_conf *dma_conf,
2132 					 u32 queue)
2133 {
2134 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2135 	size_t size;
2136 	void *addr;
2137 
2138 	tx_q->queue_index = queue;
2139 	tx_q->priv_data = priv;
2140 
2141 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2142 				      sizeof(*tx_q->tx_skbuff_dma),
2143 				      GFP_KERNEL);
2144 	if (!tx_q->tx_skbuff_dma)
2145 		return -ENOMEM;
2146 
2147 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2148 				  sizeof(struct sk_buff *),
2149 				  GFP_KERNEL);
2150 	if (!tx_q->tx_skbuff)
2151 		return -ENOMEM;
2152 
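	/* Pick the descriptor layout used by this queue: extended,
	 * TBS-capable or basic descriptors.
	 */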
2153 	if (priv->extend_desc)
2154 		size = sizeof(struct dma_extended_desc);
2155 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2156 		size = sizeof(struct dma_edesc);
2157 	else
2158 		size = sizeof(struct dma_desc);
2159 
2160 	size *= dma_conf->dma_tx_size;
2161 
2162 	addr = dma_alloc_coherent(priv->device, size,
2163 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2164 	if (!addr)
2165 		return -ENOMEM;
2166 
2167 	if (priv->extend_desc)
2168 		tx_q->dma_etx = addr;
2169 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2170 		tx_q->dma_entx = addr;
2171 	else
2172 		tx_q->dma_tx = addr;
2173 
2174 	return 0;
2175 }
2176 
2177 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2178 				       struct stmmac_dma_conf *dma_conf)
2179 {
2180 	u32 tx_count = priv->plat->tx_queues_to_use;
2181 	u32 queue;
2182 	int ret;
2183 
2184 	/* TX queues buffers and DMA */
2185 	for (queue = 0; queue < tx_count; queue++) {
2186 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2187 		if (ret)
2188 			goto err_dma;
2189 	}
2190 
2191 	return 0;
2192 
2193 err_dma:
2194 	free_dma_tx_desc_resources(priv, dma_conf);
2195 	return ret;
2196 }
2197 
2198 /**
2199  * alloc_dma_desc_resources - alloc TX/RX resources.
2200  * @priv: private structure
2201  * @dma_conf: structure to take the dma data
2202  * Description: according to which descriptor can be used (extended or basic),
2203  * this function allocates the resources for the TX and RX paths. In case of
2204  * reception, for example, it pre-allocates the RX buffers in order to
2205  * allow the zero-copy mechanism.
2206  */
2207 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2208 				    struct stmmac_dma_conf *dma_conf)
2209 {
2210 	/* RX Allocation */
2211 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2212 
2213 	if (ret)
2214 		return ret;
2215 
2216 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2217 
2218 	return ret;
2219 }
2220 
2221 /**
2222  * free_dma_desc_resources - free dma desc resources
2223  * @priv: private structure
2224  * @dma_conf: structure to take the dma data
2225  */
2226 static void free_dma_desc_resources(struct stmmac_priv *priv,
2227 				    struct stmmac_dma_conf *dma_conf)
2228 {
2229 	/* Release the DMA TX socket buffers */
2230 	free_dma_tx_desc_resources(priv, dma_conf);
2231 
2232 	/* Release the DMA RX socket buffers later
2233 	 * to ensure all pending XDP_TX buffers are returned.
2234 	 */
2235 	free_dma_rx_desc_resources(priv, dma_conf);
2236 }
2237 
2238 /**
2239  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2240  *  @priv: driver private structure
2241  *  Description: It is used for enabling the rx queues in the MAC
2242  */
2243 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2244 {
2245 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2246 	int queue;
2247 	u8 mode;
2248 
2249 	for (queue = 0; queue < rx_queues_count; queue++) {
2250 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2251 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2252 	}
2253 }
2254 
2255 /**
2256  * stmmac_start_rx_dma - start RX DMA channel
2257  * @priv: driver private structure
2258  * @chan: RX channel index
2259  * Description:
2260  * This starts an RX DMA channel
2261  */
2262 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2263 {
2264 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2265 	stmmac_start_rx(priv, priv->ioaddr, chan);
2266 }
2267 
2268 /**
2269  * stmmac_start_tx_dma - start TX DMA channel
2270  * @priv: driver private structure
2271  * @chan: TX channel index
2272  * Description:
2273  * This starts a TX DMA channel
2274  */
2275 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2276 {
2277 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2278 	stmmac_start_tx(priv, priv->ioaddr, chan);
2279 }
2280 
2281 /**
2282  * stmmac_stop_rx_dma - stop RX DMA channel
2283  * @priv: driver private structure
2284  * @chan: RX channel index
2285  * Description:
2286  * This stops an RX DMA channel
2287  */
2288 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2289 {
2290 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2291 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2292 }
2293 
2294 /**
2295  * stmmac_stop_tx_dma - stop TX DMA channel
2296  * @priv: driver private structure
2297  * @chan: TX channel index
2298  * Description:
2299  * This stops a TX DMA channel
2300  */
2301 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2302 {
2303 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2304 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2305 }
2306 
2307 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2308 {
2309 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2310 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2311 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2312 	u32 chan;
2313 
2314 	for (chan = 0; chan < dma_csr_ch; chan++) {
2315 		struct stmmac_channel *ch = &priv->channel[chan];
2316 		unsigned long flags;
2317 
2318 		spin_lock_irqsave(&ch->lock, flags);
2319 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2320 		spin_unlock_irqrestore(&ch->lock, flags);
2321 	}
2322 }
2323 
2324 /**
2325  * stmmac_start_all_dma - start all RX and TX DMA channels
2326  * @priv: driver private structure
2327  * Description:
2328  * This starts all the RX and TX DMA channels
2329  */
2330 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2331 {
2332 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2333 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2334 	u32 chan = 0;
2335 
2336 	for (chan = 0; chan < rx_channels_count; chan++)
2337 		stmmac_start_rx_dma(priv, chan);
2338 
2339 	for (chan = 0; chan < tx_channels_count; chan++)
2340 		stmmac_start_tx_dma(priv, chan);
2341 }
2342 
2343 /**
2344  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2345  * @priv: driver private structure
2346  * Description:
2347  * This stops the RX and TX DMA channels
2348  */
2349 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2350 {
2351 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2352 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2353 	u32 chan = 0;
2354 
2355 	for (chan = 0; chan < rx_channels_count; chan++)
2356 		stmmac_stop_rx_dma(priv, chan);
2357 
2358 	for (chan = 0; chan < tx_channels_count; chan++)
2359 		stmmac_stop_tx_dma(priv, chan);
2360 }
2361 
2362 /**
2363  *  stmmac_dma_operation_mode - HW DMA operation mode
2364  *  @priv: driver private structure
2365  *  Description: it is used for configuring the DMA operation mode register in
2366  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2367  */
2368 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2369 {
2370 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2371 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2372 	int rxfifosz = priv->plat->rx_fifo_size;
2373 	int txfifosz = priv->plat->tx_fifo_size;
2374 	u32 txmode = 0;
2375 	u32 rxmode = 0;
2376 	u32 chan = 0;
2377 	u8 qmode = 0;
2378 
2379 	if (rxfifosz == 0)
2380 		rxfifosz = priv->dma_cap.rx_fifo_size;
2381 	if (txfifosz == 0)
2382 		txfifosz = priv->dma_cap.tx_fifo_size;
2383 
2384 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2385 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2386 		rxfifosz /= rx_channels_count;
2387 		txfifosz /= tx_channels_count;
2388 	}
2389 
2390 	if (priv->plat->force_thresh_dma_mode) {
2391 		txmode = tc;
2392 		rxmode = tc;
2393 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2394 		/*
2395 		 * In case of GMAC, SF mode can be enabled
2396 		 * to perform the TX COE in HW. This depends on:
2397 		 * 1) TX COE being actually supported;
2398 		 * 2) there being no buggy Jumbo frame support
2399 		 *    that requires not inserting the csum in the TDES.
2400 		 */
2401 		txmode = SF_DMA_MODE;
2402 		rxmode = SF_DMA_MODE;
2403 		priv->xstats.threshold = SF_DMA_MODE;
2404 	} else {
2405 		txmode = tc;
2406 		rxmode = SF_DMA_MODE;
2407 	}
2408 
2409 	/* configure all channels */
2410 	for (chan = 0; chan < rx_channels_count; chan++) {
2411 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2412 		u32 buf_size;
2413 
2414 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2415 
2416 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2417 				rxfifosz, qmode);
2418 
2419 		if (rx_q->xsk_pool) {
2420 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2421 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2422 					      buf_size,
2423 					      chan);
2424 		} else {
2425 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2426 					      priv->dma_conf.dma_buf_sz,
2427 					      chan);
2428 		}
2429 	}
2430 
2431 	for (chan = 0; chan < tx_channels_count; chan++) {
2432 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2433 
2434 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2435 				txfifosz, qmode);
2436 	}
2437 }
2438 
2439 static void stmmac_xsk_request_timestamp(void *_priv)
2440 {
2441 	struct stmmac_metadata_request *meta_req = _priv;
2442 
2443 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2444 	*meta_req->set_ic = true;
2445 }
2446 
2447 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2448 {
2449 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2450 	struct stmmac_priv *priv = tx_compl->priv;
2451 	struct dma_desc *desc = tx_compl->desc;
2452 	bool found = false;
2453 	u64 ns = 0;
2454 
2455 	if (!priv->hwts_tx_en)
2456 		return 0;
2457 
2458 	/* check tx tstamp status */
2459 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2460 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2461 		found = true;
2462 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2463 		found = true;
2464 	}
2465 
2466 	if (found) {
2467 		ns -= priv->plat->cdc_error_adj;
2468 		return ns_to_ktime(ns);
2469 	}
2470 
2471 	return 0;
2472 }
2473 
2474 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2475 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2476 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2477 };
2478 
2479 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2480 {
2481 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2482 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2483 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2484 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2485 	unsigned int entry = tx_q->cur_tx;
2486 	struct dma_desc *tx_desc = NULL;
2487 	struct xdp_desc xdp_desc;
2488 	bool work_done = true;
2489 	u32 tx_set_ic_bit = 0;
2490 
2491 	/* Avoids TX time-out as we are sharing with slow path */
2492 	txq_trans_cond_update(nq);
2493 
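	/* Never submit more descriptors than are currently free in the
	 * TX ring.
	 */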
2494 	budget = min(budget, stmmac_tx_avail(priv, queue));
2495 
2496 	while (budget-- > 0) {
2497 		struct stmmac_metadata_request meta_req;
2498 		struct xsk_tx_metadata *meta = NULL;
2499 		dma_addr_t dma_addr;
2500 		bool set_ic;
2501 
2502 		/* We share the ring with the slow path, so stop XSK TX desc
2503 		 * submission when the available TX ring space drops below the threshold.
2504 		 */
2505 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2506 		    !netif_carrier_ok(priv->dev)) {
2507 			work_done = false;
2508 			break;
2509 		}
2510 
2511 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2512 			break;
2513 
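		/* When EST is enabled, skip frames that exceed the per-queue
		 * max SDU and account them as drops.
		 */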
2514 		if (priv->est && priv->est->enable &&
2515 		    priv->est->max_sdu[queue] &&
2516 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2517 			priv->xstats.max_sdu_txq_drop[queue]++;
2518 			continue;
2519 		}
2520 
2521 		if (likely(priv->extend_desc))
2522 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2523 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2524 			tx_desc = &tx_q->dma_entx[entry].basic;
2525 		else
2526 			tx_desc = tx_q->dma_tx + entry;
2527 
2528 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2529 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2530 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2531 
2532 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2533 
2534 		/* To return the XDP buffer to the XSK pool, we simply call
2535 		 * xsk_tx_completed(), so we don't need to fill up
2536 		 * 'buf' and 'xdpf'.
2537 		 */
2538 		tx_q->tx_skbuff_dma[entry].buf = 0;
2539 		tx_q->xdpf[entry] = NULL;
2540 
2541 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2542 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2543 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2544 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2545 
2546 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2547 
2548 		tx_q->tx_count_frames++;
2549 
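		/* Request an interrupt on completion once every
		 * tx_coal_frames descriptors by default, to limit the
		 * completion IRQ rate.
		 */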
2550 		if (!priv->tx_coal_frames[queue])
2551 			set_ic = false;
2552 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2553 			set_ic = true;
2554 		else
2555 			set_ic = false;
2556 
2557 		meta_req.priv = priv;
2558 		meta_req.tx_desc = tx_desc;
2559 		meta_req.set_ic = &set_ic;
2560 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2561 					&meta_req);
2562 		if (set_ic) {
2563 			tx_q->tx_count_frames = 0;
2564 			stmmac_set_tx_ic(priv, tx_desc);
2565 			tx_set_ic_bit++;
2566 		}
2567 
2568 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2569 				       true, priv->mode, true, true,
2570 				       xdp_desc.len);
2571 
2572 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2573 
2574 		xsk_tx_metadata_to_compl(meta,
2575 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2576 
2577 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2578 		entry = tx_q->cur_tx;
2579 	}
2580 	u64_stats_update_begin(&txq_stats->napi_syncp);
2581 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2582 	u64_stats_update_end(&txq_stats->napi_syncp);
2583 
2584 	if (tx_desc) {
2585 		stmmac_flush_tx_descriptors(priv, queue);
2586 		xsk_tx_release(pool);
2587 	}
2588 
2589 	/* Return true if both of the following conditions are met:
2590 	 *  a) TX budget is still available;
2591 	 *  b) work_done is true, i.e. the XSK TX desc peek came up empty (no
2592 	 *     more pending XSK TX for transmission).
2593 	 */
2594 	return !!budget && work_done;
2595 }
2596 
2597 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2598 {
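	/* Raise the DMA threshold in steps of 64 (while it is at most 256) so
	 * that the error condition that triggered the bump becomes less
	 * likely; skipped when already running in Store-And-Forward mode.
	 */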
2599 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2600 		tc += 64;
2601 
2602 		if (priv->plat->force_thresh_dma_mode)
2603 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2604 		else
2605 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2606 						      chan);
2607 
2608 		priv->xstats.threshold = tc;
2609 	}
2610 }
2611 
2612 /**
2613  * stmmac_tx_clean - to manage the transmission completion
2614  * @priv: driver private structure
2615  * @budget: napi budget limiting this functions packet handling
2616  * @queue: TX queue index
2617  * @pending_packets: signal to arm the TX coal timer
2618  * Description: it reclaims the transmit resources after transmission completes.
2619  * If some packets still need to be handled due to TX coalescing, set
2620  * pending_packets to true to make NAPI arm the TX coal timer.
2621  */
2622 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2623 			   bool *pending_packets)
2624 {
2625 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2626 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2627 	unsigned int bytes_compl = 0, pkts_compl = 0;
2628 	unsigned int entry, xmits = 0, count = 0;
2629 	u32 tx_packets = 0, tx_errors = 0;
2630 
2631 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2632 
2633 	tx_q->xsk_frames_done = 0;
2634 
2635 	entry = tx_q->dirty_tx;
2636 
2637 	/* Try to clean all TX completed frames in one shot */
2638 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2639 		struct xdp_frame *xdpf;
2640 		struct sk_buff *skb;
2641 		struct dma_desc *p;
2642 		int status;
2643 
2644 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2645 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2646 			xdpf = tx_q->xdpf[entry];
2647 			skb = NULL;
2648 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2649 			xdpf = NULL;
2650 			skb = tx_q->tx_skbuff[entry];
2651 		} else {
2652 			xdpf = NULL;
2653 			skb = NULL;
2654 		}
2655 
2656 		if (priv->extend_desc)
2657 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2658 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2659 			p = &tx_q->dma_entx[entry].basic;
2660 		else
2661 			p = tx_q->dma_tx + entry;
2662 
2663 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2664 		/* Check if the descriptor is owned by the DMA */
2665 		if (unlikely(status & tx_dma_own))
2666 			break;
2667 
2668 		count++;
2669 
2670 		/* Make sure descriptor fields are read after reading
2671 		 * the own bit.
2672 		 */
2673 		dma_rmb();
2674 
2675 		/* Just consider the last segment and ...*/
2676 		if (likely(!(status & tx_not_ls))) {
2677 			/* ... verify the status error condition */
2678 			if (unlikely(status & tx_err)) {
2679 				tx_errors++;
2680 				if (unlikely(status & tx_err_bump_tc))
2681 					stmmac_bump_dma_threshold(priv, queue);
2682 			} else {
2683 				tx_packets++;
2684 			}
2685 			if (skb) {
2686 				stmmac_get_tx_hwtstamp(priv, p, skb);
2687 			} else if (tx_q->xsk_pool &&
2688 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2689 				struct stmmac_xsk_tx_complete tx_compl = {
2690 					.priv = priv,
2691 					.desc = p,
2692 				};
2693 
2694 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2695 							 &stmmac_xsk_tx_metadata_ops,
2696 							 &tx_compl);
2697 			}
2698 		}
2699 
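		/* Unmap buffers that were DMA-mapped by the driver; XDP_TX
		 * buffers come from the RX page pool and must not be
		 * unmapped here.
		 */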
2700 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2701 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2702 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2703 				dma_unmap_page(priv->device,
2704 					       tx_q->tx_skbuff_dma[entry].buf,
2705 					       tx_q->tx_skbuff_dma[entry].len,
2706 					       DMA_TO_DEVICE);
2707 			else
2708 				dma_unmap_single(priv->device,
2709 						 tx_q->tx_skbuff_dma[entry].buf,
2710 						 tx_q->tx_skbuff_dma[entry].len,
2711 						 DMA_TO_DEVICE);
2712 			tx_q->tx_skbuff_dma[entry].buf = 0;
2713 			tx_q->tx_skbuff_dma[entry].len = 0;
2714 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2715 		}
2716 
2717 		stmmac_clean_desc3(priv, tx_q, p);
2718 
2719 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2720 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2721 
2722 		if (xdpf &&
2723 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2724 			xdp_return_frame_rx_napi(xdpf);
2725 			tx_q->xdpf[entry] = NULL;
2726 		}
2727 
2728 		if (xdpf &&
2729 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2730 			xdp_return_frame(xdpf);
2731 			tx_q->xdpf[entry] = NULL;
2732 		}
2733 
2734 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2735 			tx_q->xsk_frames_done++;
2736 
2737 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2738 			if (likely(skb)) {
2739 				pkts_compl++;
2740 				bytes_compl += skb->len;
2741 				dev_consume_skb_any(skb);
2742 				tx_q->tx_skbuff[entry] = NULL;
2743 			}
2744 		}
2745 
2746 		stmmac_release_tx_desc(priv, p, priv->mode);
2747 
2748 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2749 	}
2750 	tx_q->dirty_tx = entry;
2751 
2752 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2753 				  pkts_compl, bytes_compl);
2754 
2755 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2756 								queue))) &&
2757 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2758 
2759 		netif_dbg(priv, tx_done, priv->dev,
2760 			  "%s: restart transmit\n", __func__);
2761 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2762 	}
2763 
2764 	if (tx_q->xsk_pool) {
2765 		bool work_done;
2766 
2767 		if (tx_q->xsk_frames_done)
2768 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2769 
2770 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2771 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2772 
2773 		/* For XSK TX, we try to send as many as possible.
2774 		 * If the XSK work is done (XSK TX desc empty and budget still
2775 		 * available), return "budget - 1" to reenable TX IRQ.
2776 		 * Else, return "budget" to make NAPI continue polling.
2777 		 */
2778 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2779 					       STMMAC_XSK_TX_BUDGET_MAX);
2780 		if (work_done)
2781 			xmits = budget - 1;
2782 		else
2783 			xmits = budget;
2784 	}
2785 
2786 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2787 	    priv->eee_sw_timer_en) {
2788 		if (stmmac_enable_eee_mode(priv))
2789 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2790 	}
2791 
2792 	/* We still have pending packets, let's call for a new scheduling */
2793 	if (tx_q->dirty_tx != tx_q->cur_tx)
2794 		*pending_packets = true;
2795 
2796 	u64_stats_update_begin(&txq_stats->napi_syncp);
2797 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2798 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2799 	u64_stats_inc(&txq_stats->napi.tx_clean);
2800 	u64_stats_update_end(&txq_stats->napi_syncp);
2801 
2802 	priv->xstats.tx_errors += tx_errors;
2803 
2804 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2805 
2806 	/* Combine decisions from TX clean and XSK TX */
2807 	return max(count, xmits);
2808 }
2809 
2810 /**
2811  * stmmac_tx_err - to manage the tx error
2812  * @priv: driver private structure
2813  * @chan: channel index
2814  * Description: it cleans the descriptors and restarts the transmission
2815  * in case of transmission errors.
2816  */
2817 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2818 {
2819 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2820 
2821 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2822 
2823 	stmmac_stop_tx_dma(priv, chan);
2824 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2825 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2826 	stmmac_reset_tx_queue(priv, chan);
2827 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2828 			    tx_q->dma_tx_phy, chan);
2829 	stmmac_start_tx_dma(priv, chan);
2830 
2831 	priv->xstats.tx_errors++;
2832 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2833 }
2834 
2835 /**
2836  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2837  *  @priv: driver private structure
2838  *  @txmode: TX operating mode
2839  *  @rxmode: RX operating mode
2840  *  @chan: channel index
2841  *  Description: it is used for configuring the DMA operation mode at
2842  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2843  *  mode.
2844  */
2845 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2846 					  u32 rxmode, u32 chan)
2847 {
2848 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2849 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2850 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2851 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2852 	int rxfifosz = priv->plat->rx_fifo_size;
2853 	int txfifosz = priv->plat->tx_fifo_size;
2854 
2855 	if (rxfifosz == 0)
2856 		rxfifosz = priv->dma_cap.rx_fifo_size;
2857 	if (txfifosz == 0)
2858 		txfifosz = priv->dma_cap.tx_fifo_size;
2859 
2860 	/* Adjust for real per queue fifo size */
2861 	rxfifosz /= rx_channels_count;
2862 	txfifosz /= tx_channels_count;
2863 
2864 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2865 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2866 }
2867 
2868 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2869 {
2870 	int ret;
2871 
2872 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2873 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2874 	if (ret && (ret != -EINVAL)) {
2875 		stmmac_global_err(priv);
2876 		return true;
2877 	}
2878 
2879 	return false;
2880 }
2881 
2882 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2883 {
2884 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2885 						 &priv->xstats, chan, dir);
2886 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2887 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2888 	struct stmmac_channel *ch = &priv->channel[chan];
2889 	struct napi_struct *rx_napi;
2890 	struct napi_struct *tx_napi;
2891 	unsigned long flags;
2892 
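	/* When an XSK pool is attached, both RX and TX of this channel are
	 * serviced by the combined rxtx NAPI instance.
	 */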
2893 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2894 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2895 
2896 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2897 		if (napi_schedule_prep(rx_napi)) {
2898 			spin_lock_irqsave(&ch->lock, flags);
2899 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2900 			spin_unlock_irqrestore(&ch->lock, flags);
2901 			__napi_schedule(rx_napi);
2902 		}
2903 	}
2904 
2905 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2906 		if (napi_schedule_prep(tx_napi)) {
2907 			spin_lock_irqsave(&ch->lock, flags);
2908 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2909 			spin_unlock_irqrestore(&ch->lock, flags);
2910 			__napi_schedule(tx_napi);
2911 		}
2912 	}
2913 
2914 	return status;
2915 }
2916 
2917 /**
2918  * stmmac_dma_interrupt - DMA ISR
2919  * @priv: driver private structure
2920  * Description: this is the DMA ISR. It is called by the main ISR.
2921  * It calls the dwmac dma routine and schedules the poll method in case some
2922  * work can be done.
2923  */
2924 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2925 {
2926 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2927 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2928 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2929 				tx_channel_count : rx_channel_count;
2930 	u32 chan;
2931 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2932 
2933 	/* Make sure we never check beyond our status buffer. */
2934 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2935 		channels_to_check = ARRAY_SIZE(status);
2936 
2937 	for (chan = 0; chan < channels_to_check; chan++)
2938 		status[chan] = stmmac_napi_check(priv, chan,
2939 						 DMA_DIR_RXTX);
2940 
2941 	for (chan = 0; chan < tx_channel_count; chan++) {
2942 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2943 			/* Try to bump up the dma threshold on this failure */
2944 			stmmac_bump_dma_threshold(priv, chan);
2945 		} else if (unlikely(status[chan] == tx_hard_error)) {
2946 			stmmac_tx_err(priv, chan);
2947 		}
2948 	}
2949 }
2950 
2951 /**
2952  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2953  * @priv: driver private structure
2954  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2955  */
2956 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2957 {
2958 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2959 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2960 
2961 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2962 
2963 	if (priv->dma_cap.rmon) {
2964 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2965 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2966 	} else
2967 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2968 }
2969 
2970 /**
2971  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2972  * @priv: driver private structure
2973  * Description:
2974  *  new GMAC chip generations have a register to indicate the
2975  *  presence of the optional features/functions.
2976  *  This can also be used to override the values passed through the
2977  *  platform and is necessary for old MAC10/100 and GMAC chips.
2978  */
2979 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2980 {
2981 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2982 }
2983 
2984 /**
2985  * stmmac_check_ether_addr - check if the MAC addr is valid
2986  * @priv: driver private structure
2987  * Description:
2988  * it verifies whether the MAC address is valid; if it is not, a random
2989  * MAC address is generated.
2990  */
2991 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2992 {
2993 	u8 addr[ETH_ALEN];
2994 
2995 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2996 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2997 		if (is_valid_ether_addr(addr))
2998 			eth_hw_addr_set(priv->dev, addr);
2999 		else
3000 			eth_hw_addr_random(priv->dev);
3001 		dev_info(priv->device, "device MAC address %pM\n",
3002 			 priv->dev->dev_addr);
3003 	}
3004 }
3005 
3006 /**
3007  * stmmac_init_dma_engine - DMA init.
3008  * @priv: driver private structure
3009  * Description:
3010  * It initializes the DMA by invoking the specific MAC/GMAC callback.
3011  * Some DMA parameters can be passed from the platform;
3012  * if they are not passed, a default is kept for the MAC or GMAC.
3013  */
3014 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3015 {
3016 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3017 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3018 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3019 	struct stmmac_rx_queue *rx_q;
3020 	struct stmmac_tx_queue *tx_q;
3021 	u32 chan = 0;
3022 	int ret = 0;
3023 
3024 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3025 		dev_err(priv->device, "Invalid DMA configuration\n");
3026 		return -EINVAL;
3027 	}
3028 
3029 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3030 		priv->plat->dma_cfg->atds = 1;
3031 
3032 	ret = stmmac_reset(priv, priv->ioaddr);
3033 	if (ret) {
3034 		dev_err(priv->device, "Failed to reset the dma\n");
3035 		return ret;
3036 	}
3037 
3038 	/* DMA Configuration */
3039 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3040 
3041 	if (priv->plat->axi)
3042 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3043 
3044 	/* DMA CSR Channel configuration */
3045 	for (chan = 0; chan < dma_csr_ch; chan++) {
3046 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3047 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3048 	}
3049 
3050 	/* DMA RX Channel Configuration */
3051 	for (chan = 0; chan < rx_channels_count; chan++) {
3052 		rx_q = &priv->dma_conf.rx_queue[chan];
3053 
3054 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3055 				    rx_q->dma_rx_phy, chan);
3056 
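		/* Point the RX tail pointer just past the last descriptor
		 * that has a buffer attached, so the DMA only fetches
		 * populated entries.
		 */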
3057 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3058 				     (rx_q->buf_alloc_num *
3059 				      sizeof(struct dma_desc));
3060 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3061 				       rx_q->rx_tail_addr, chan);
3062 	}
3063 
3064 	/* DMA TX Channel Configuration */
3065 	for (chan = 0; chan < tx_channels_count; chan++) {
3066 		tx_q = &priv->dma_conf.tx_queue[chan];
3067 
3068 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3069 				    tx_q->dma_tx_phy, chan);
3070 
3071 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3072 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3073 				       tx_q->tx_tail_addr, chan);
3074 	}
3075 
3076 	return ret;
3077 }
3078 
3079 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3080 {
3081 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3082 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3083 	struct stmmac_channel *ch;
3084 	struct napi_struct *napi;
3085 
3086 	if (!tx_coal_timer)
3087 		return;
3088 
3089 	ch = &priv->channel[tx_q->queue_index];
3090 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3091 
3092 	/* Arm the timer only if napi is not already scheduled.
3093 	 * If napi is scheduled, try to cancel any pending timer; it will be
3094 	 * armed again on the next scheduled napi.
3095 	 */
3096 	if (unlikely(!napi_is_scheduled(napi)))
3097 		hrtimer_start(&tx_q->txtimer,
3098 			      STMMAC_COAL_TIMER(tx_coal_timer),
3099 			      HRTIMER_MODE_REL);
3100 	else
3101 		hrtimer_try_to_cancel(&tx_q->txtimer);
3102 }
3103 
3104 /**
3105  * stmmac_tx_timer - mitigation sw timer for tx.
3106  * @t: data pointer
3107  * Description:
3108  * This is the timer handler that schedules the TX NAPI to run stmmac_tx_clean.
3109  */
3110 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3111 {
3112 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3113 	struct stmmac_priv *priv = tx_q->priv_data;
3114 	struct stmmac_channel *ch;
3115 	struct napi_struct *napi;
3116 
3117 	ch = &priv->channel[tx_q->queue_index];
3118 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3119 
3120 	if (likely(napi_schedule_prep(napi))) {
3121 		unsigned long flags;
3122 
3123 		spin_lock_irqsave(&ch->lock, flags);
3124 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3125 		spin_unlock_irqrestore(&ch->lock, flags);
3126 		__napi_schedule(napi);
3127 	}
3128 
3129 	return HRTIMER_NORESTART;
3130 }
3131 
3132 /**
3133  * stmmac_init_coalesce - init mitigation options.
3134  * @priv: driver private structure
3135  * Description:
3136  * This inits the coalesce parameters: i.e. timer rate,
3137  * timer handler and default threshold used for enabling the
3138  * interrupt on completion bit.
3139  */
3140 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3141 {
3142 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3143 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3144 	u32 chan;
3145 
3146 	for (chan = 0; chan < tx_channel_count; chan++) {
3147 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3148 
3149 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3150 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3151 
3152 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3153 		tx_q->txtimer.function = stmmac_tx_timer;
3154 	}
3155 
3156 	for (chan = 0; chan < rx_channel_count; chan++)
3157 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3158 }
3159 
3160 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3161 {
3162 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3163 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3164 	u32 chan;
3165 
3166 	/* set TX ring length */
3167 	for (chan = 0; chan < tx_channels_count; chan++)
3168 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3169 				       (priv->dma_conf.dma_tx_size - 1), chan);
3170 
3171 	/* set RX ring length */
3172 	for (chan = 0; chan < rx_channels_count; chan++)
3173 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3174 				       (priv->dma_conf.dma_rx_size - 1), chan);
3175 }
3176 
3177 /**
3178  *  stmmac_set_tx_queue_weight - Set TX queue weight
3179  *  @priv: driver private structure
3180  *  Description: It is used for setting the TX queue weights
3181  */
3182 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3183 {
3184 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3185 	u32 weight;
3186 	u32 queue;
3187 
3188 	for (queue = 0; queue < tx_queues_count; queue++) {
3189 		weight = priv->plat->tx_queues_cfg[queue].weight;
3190 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3191 	}
3192 }
3193 
3194 /**
3195  *  stmmac_configure_cbs - Configure CBS in TX queue
3196  *  @priv: driver private structure
3197  *  Description: It is used for configuring CBS in AVB TX queues
3198  */
3199 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3200 {
3201 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3202 	u32 mode_to_use;
3203 	u32 queue;
3204 
3205 	/* queue 0 is reserved for legacy traffic */
3206 	for (queue = 1; queue < tx_queues_count; queue++) {
3207 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3208 		if (mode_to_use == MTL_QUEUE_DCB)
3209 			continue;
3210 
3211 		stmmac_config_cbs(priv, priv->hw,
3212 				priv->plat->tx_queues_cfg[queue].send_slope,
3213 				priv->plat->tx_queues_cfg[queue].idle_slope,
3214 				priv->plat->tx_queues_cfg[queue].high_credit,
3215 				priv->plat->tx_queues_cfg[queue].low_credit,
3216 				queue);
3217 	}
3218 }
3219 
3220 /**
3221  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3222  *  @priv: driver private structure
3223  *  Description: It is used for mapping RX queues to RX dma channels
3224  */
3225 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3226 {
3227 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3228 	u32 queue;
3229 	u32 chan;
3230 
3231 	for (queue = 0; queue < rx_queues_count; queue++) {
3232 		chan = priv->plat->rx_queues_cfg[queue].chan;
3233 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3234 	}
3235 }
3236 
3237 /**
3238  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3239  *  @priv: driver private structure
3240  *  Description: It is used for configuring the RX Queue Priority
3241  */
3242 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3243 {
3244 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3245 	u32 queue;
3246 	u32 prio;
3247 
3248 	for (queue = 0; queue < rx_queues_count; queue++) {
3249 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3250 			continue;
3251 
3252 		prio = priv->plat->rx_queues_cfg[queue].prio;
3253 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3254 	}
3255 }
3256 
3257 /**
3258  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3259  *  @priv: driver private structure
3260  *  Description: It is used for configuring the TX Queue Priority
3261  */
3262 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3263 {
3264 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3265 	u32 queue;
3266 	u32 prio;
3267 
3268 	for (queue = 0; queue < tx_queues_count; queue++) {
3269 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3270 			continue;
3271 
3272 		prio = priv->plat->tx_queues_cfg[queue].prio;
3273 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3274 	}
3275 }
3276 
3277 /**
3278  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3279  *  @priv: driver private structure
3280  *  Description: It is used for configuring the RX queue routing
3281  */
3282 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3283 {
3284 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3285 	u32 queue;
3286 	u8 packet;
3287 
3288 	for (queue = 0; queue < rx_queues_count; queue++) {
3289 		/* no specific packet type routing specified for the queue */
3290 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3291 			continue;
3292 
3293 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3294 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3295 	}
3296 }
3297 
3298 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3299 {
3300 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3301 		priv->rss.enable = false;
3302 		return;
3303 	}
3304 
3305 	if (priv->dev->features & NETIF_F_RXHASH)
3306 		priv->rss.enable = true;
3307 	else
3308 		priv->rss.enable = false;
3309 
3310 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3311 			     priv->plat->rx_queues_to_use);
3312 }
3313 
3314 /**
3315  *  stmmac_mtl_configuration - Configure MTL
3316  *  @priv: driver private structure
3317  *  Description: It is used for configuring the MTL
3318  */
3319 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3320 {
3321 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3322 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3323 
3324 	if (tx_queues_count > 1)
3325 		stmmac_set_tx_queue_weight(priv);
3326 
3327 	/* Configure MTL RX algorithms */
3328 	if (rx_queues_count > 1)
3329 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3330 				priv->plat->rx_sched_algorithm);
3331 
3332 	/* Configure MTL TX algorithms */
3333 	if (tx_queues_count > 1)
3334 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3335 				priv->plat->tx_sched_algorithm);
3336 
3337 	/* Configure CBS in AVB TX queues */
3338 	if (tx_queues_count > 1)
3339 		stmmac_configure_cbs(priv);
3340 
3341 	/* Map RX MTL to DMA channels */
3342 	stmmac_rx_queue_dma_chan_map(priv);
3343 
3344 	/* Enable MAC RX Queues */
3345 	stmmac_mac_enable_rx_queues(priv);
3346 
3347 	/* Set RX priorities */
3348 	if (rx_queues_count > 1)
3349 		stmmac_mac_config_rx_queues_prio(priv);
3350 
3351 	/* Set TX priorities */
3352 	if (tx_queues_count > 1)
3353 		stmmac_mac_config_tx_queues_prio(priv);
3354 
3355 	/* Set RX routing */
3356 	if (rx_queues_count > 1)
3357 		stmmac_mac_config_rx_queues_routing(priv);
3358 
3359 	/* Receive Side Scaling */
3360 	if (rx_queues_count > 1)
3361 		stmmac_mac_config_rss(priv);
3362 }
3363 
3364 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3365 {
3366 	if (priv->dma_cap.asp) {
3367 		netdev_info(priv->dev, "Enabling Safety Features\n");
3368 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3369 					  priv->plat->safety_feat_cfg);
3370 	} else {
3371 		netdev_info(priv->dev, "No Safety Features support found\n");
3372 	}
3373 }
3374 
3375 /**
3376  * stmmac_hw_setup - setup mac in a usable state.
3377  *  @dev : pointer to the device structure.
3378  *  @ptp_register: register PTP if set
3379  *  Description:
3380  *  this is the main function to set up the HW in a usable state: the
3381  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3382  *  checksum features, timers) and the DMA is ready to start receiving and
3383  *  transmitting.
3384  *  Return value:
3385  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3386  *  file on failure.
3387  */
3388 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3389 {
3390 	struct stmmac_priv *priv = netdev_priv(dev);
3391 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3392 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3393 	bool sph_en;
3394 	u32 chan;
3395 	int ret;
3396 
3397 	/* Make sure RX clock is enabled */
3398 	if (priv->hw->phylink_pcs)
3399 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3400 
3401 	/* DMA initialization and SW reset */
3402 	ret = stmmac_init_dma_engine(priv);
3403 	if (ret < 0) {
3404 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3405 			   __func__);
3406 		return ret;
3407 	}
3408 
3409 	/* Copy the MAC addr into the HW */
3410 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3411 
3412 	/* PS and related bits will be programmed according to the speed */
3413 	if (priv->hw->pcs) {
3414 		int speed = priv->plat->mac_port_sel_speed;
3415 
3416 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3417 		    (speed == SPEED_1000)) {
3418 			priv->hw->ps = speed;
3419 		} else {
3420 			dev_warn(priv->device, "invalid port speed\n");
3421 			priv->hw->ps = 0;
3422 		}
3423 	}
3424 
3425 	/* Initialize the MAC Core */
3426 	stmmac_core_init(priv, priv->hw, dev);
3427 
3428 	/* Initialize MTL */
3429 	stmmac_mtl_configuration(priv);
3430 
3431 	/* Initialize Safety Features */
3432 	stmmac_safety_feat_configuration(priv);
3433 
3434 	ret = stmmac_rx_ipc(priv, priv->hw);
3435 	if (!ret) {
3436 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3437 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3438 		priv->hw->rx_csum = 0;
3439 	}
3440 
3441 	/* Enable the MAC Rx/Tx */
3442 	stmmac_mac_set(priv, priv->ioaddr, true);
3443 
3444 	/* Set the HW DMA mode and the COE */
3445 	stmmac_dma_operation_mode(priv);
3446 
3447 	stmmac_mmc_setup(priv);
3448 
3449 	if (ptp_register) {
3450 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3451 		if (ret < 0)
3452 			netdev_warn(priv->dev,
3453 				    "failed to enable PTP reference clock: %pe\n",
3454 				    ERR_PTR(ret));
3455 	}
3456 
3457 	ret = stmmac_init_ptp(priv);
3458 	if (ret == -EOPNOTSUPP)
3459 		netdev_info(priv->dev, "PTP not supported by HW\n");
3460 	else if (ret)
3461 		netdev_warn(priv->dev, "PTP init failed\n");
3462 	else if (ptp_register)
3463 		stmmac_ptp_register(priv);
3464 
3465 	if (priv->use_riwt) {
3466 		u32 queue;
3467 
3468 		for (queue = 0; queue < rx_cnt; queue++) {
3469 			if (!priv->rx_riwt[queue])
3470 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3471 
3472 			stmmac_rx_watchdog(priv, priv->ioaddr,
3473 					   priv->rx_riwt[queue], queue);
3474 		}
3475 	}
3476 
3477 	if (priv->hw->pcs)
3478 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3479 
3480 	/* set TX and RX rings length */
3481 	stmmac_set_rings_length(priv);
3482 
3483 	/* Enable TSO */
3484 	if (priv->tso) {
3485 		for (chan = 0; chan < tx_cnt; chan++) {
3486 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3487 
3488 			/* TSO and TBS cannot co-exist */
3489 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3490 				continue;
3491 
3492 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3493 		}
3494 	}
3495 
3496 	/* Enable Split Header */
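	/* Split Header places the packet headers in buffer 1 and the payload
	 * in buffer 2 of the RX descriptor; it is only enabled here when RX
	 * checksum offload is active, as reflected by the rx_csum check below.
	 */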
3497 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3498 	for (chan = 0; chan < rx_cnt; chan++)
3499 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3500 
3501 
3502 	/* VLAN Tag Insertion */
3503 	if (priv->dma_cap.vlins)
3504 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3505 
3506 	/* TBS */
3507 	for (chan = 0; chan < tx_cnt; chan++) {
3508 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3509 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3510 
3511 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3512 	}
3513 
3514 	/* Configure real RX and TX queues */
3515 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3516 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3517 
3518 	/* Start the ball rolling... */
3519 	stmmac_start_all_dma(priv);
3520 
3521 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3522 
3523 	return 0;
3524 }
3525 
3526 static void stmmac_hw_teardown(struct net_device *dev)
3527 {
3528 	struct stmmac_priv *priv = netdev_priv(dev);
3529 
3530 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3531 }
3532 
3533 static void stmmac_free_irq(struct net_device *dev,
3534 			    enum request_irq_err irq_err, int irq_idx)
3535 {
3536 	struct stmmac_priv *priv = netdev_priv(dev);
3537 	int j;
3538 
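	/* Tear down in reverse order of the request path: each case falls
	 * through so that every IRQ requested before the failure point is
	 * freed, and REQ_IRQ_ERR_ALL frees everything.
	 */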
3539 	switch (irq_err) {
3540 	case REQ_IRQ_ERR_ALL:
3541 		irq_idx = priv->plat->tx_queues_to_use;
3542 		fallthrough;
3543 	case REQ_IRQ_ERR_TX:
3544 		for (j = irq_idx - 1; j >= 0; j--) {
3545 			if (priv->tx_irq[j] > 0) {
3546 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3547 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3548 			}
3549 		}
3550 		irq_idx = priv->plat->rx_queues_to_use;
3551 		fallthrough;
3552 	case REQ_IRQ_ERR_RX:
3553 		for (j = irq_idx - 1; j >= 0; j--) {
3554 			if (priv->rx_irq[j] > 0) {
3555 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3556 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3557 			}
3558 		}
3559 
3560 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3561 			free_irq(priv->sfty_ue_irq, dev);
3562 		fallthrough;
3563 	case REQ_IRQ_ERR_SFTY_UE:
3564 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3565 			free_irq(priv->sfty_ce_irq, dev);
3566 		fallthrough;
3567 	case REQ_IRQ_ERR_SFTY_CE:
3568 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3569 			free_irq(priv->lpi_irq, dev);
3570 		fallthrough;
3571 	case REQ_IRQ_ERR_LPI:
3572 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3573 			free_irq(priv->wol_irq, dev);
3574 		fallthrough;
3575 	case REQ_IRQ_ERR_SFTY:
3576 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3577 			free_irq(priv->sfty_irq, dev);
3578 		fallthrough;
3579 	case REQ_IRQ_ERR_WOL:
3580 		free_irq(dev->irq, dev);
3581 		fallthrough;
3582 	case REQ_IRQ_ERR_MAC:
3583 	case REQ_IRQ_ERR_NO:
3584 		/* If MAC IRQ request error, no more IRQ to free */
3585 		break;
3586 	}
3587 }
3588 
3589 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3590 {
3591 	struct stmmac_priv *priv = netdev_priv(dev);
3592 	enum request_irq_err irq_err;
3593 	cpumask_t cpu_mask;
3594 	int irq_idx = 0;
3595 	char *int_name;
3596 	int ret;
3597 	int i;
3598 
3599 	/* For common interrupt */
3600 	int_name = priv->int_name_mac;
3601 	sprintf(int_name, "%s:%s", dev->name, "mac");
3602 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3603 			  0, int_name, dev);
3604 	if (unlikely(ret < 0)) {
3605 		netdev_err(priv->dev,
3606 			   "%s: alloc mac MSI %d (error: %d)\n",
3607 			   __func__, dev->irq, ret);
3608 		irq_err = REQ_IRQ_ERR_MAC;
3609 		goto irq_error;
3610 	}
3611 
3612 	/* Request the Wake IRQ in case a separate line
3613 	 * is used for WoL
3614 	 */
3615 	priv->wol_irq_disabled = true;
3616 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3617 		int_name = priv->int_name_wol;
3618 		sprintf(int_name, "%s:%s", dev->name, "wol");
3619 		ret = request_irq(priv->wol_irq,
3620 				  stmmac_mac_interrupt,
3621 				  0, int_name, dev);
3622 		if (unlikely(ret < 0)) {
3623 			netdev_err(priv->dev,
3624 				   "%s: alloc wol MSI %d (error: %d)\n",
3625 				   __func__, priv->wol_irq, ret);
3626 			irq_err = REQ_IRQ_ERR_WOL;
3627 			goto irq_error;
3628 		}
3629 	}
3630 
3631 	/* Request the LPI IRQ in case a separate line
3632 	 * is used for LPI
3633 	 */
3634 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3635 		int_name = priv->int_name_lpi;
3636 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3637 		ret = request_irq(priv->lpi_irq,
3638 				  stmmac_mac_interrupt,
3639 				  0, int_name, dev);
3640 		if (unlikely(ret < 0)) {
3641 			netdev_err(priv->dev,
3642 				   "%s: alloc lpi MSI %d (error: %d)\n",
3643 				   __func__, priv->lpi_irq, ret);
3644 			irq_err = REQ_IRQ_ERR_LPI;
3645 			goto irq_error;
3646 		}
3647 	}
3648 
3649 	/* Request the common Safety Feature Correctible/Uncorrectible
3650 	 * Error line in case a separate line is used
3651 	 */
3652 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3653 		int_name = priv->int_name_sfty;
3654 		sprintf(int_name, "%s:%s", dev->name, "safety");
3655 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3656 				  0, int_name, dev);
3657 		if (unlikely(ret < 0)) {
3658 			netdev_err(priv->dev,
3659 				   "%s: alloc sfty MSI %d (error: %d)\n",
3660 				   __func__, priv->sfty_irq, ret);
3661 			irq_err = REQ_IRQ_ERR_SFTY;
3662 			goto irq_error;
3663 		}
3664 	}
3665 
3666 	/* Request the Safety Feature Correctible Error line in
3667 	 * case a separate line is used
3668 	 */
3669 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3670 		int_name = priv->int_name_sfty_ce;
3671 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3672 		ret = request_irq(priv->sfty_ce_irq,
3673 				  stmmac_safety_interrupt,
3674 				  0, int_name, dev);
3675 		if (unlikely(ret < 0)) {
3676 			netdev_err(priv->dev,
3677 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3678 				   __func__, priv->sfty_ce_irq, ret);
3679 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3680 			goto irq_error;
3681 		}
3682 	}
3683 
3684 	/* Request the Safety Feature Uncorrectible Error line in
3685 	 * case a separate line is used
3686 	 */
3687 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3688 		int_name = priv->int_name_sfty_ue;
3689 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3690 		ret = request_irq(priv->sfty_ue_irq,
3691 				  stmmac_safety_interrupt,
3692 				  0, int_name, dev);
3693 		if (unlikely(ret < 0)) {
3694 			netdev_err(priv->dev,
3695 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3696 				   __func__, priv->sfty_ue_irq, ret);
3697 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3698 			goto irq_error;
3699 		}
3700 	}
3701 
3702 	/* Request Rx MSI irq */
3703 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3704 		if (i >= MTL_MAX_RX_QUEUES)
3705 			break;
3706 		if (priv->rx_irq[i] == 0)
3707 			continue;
3708 
3709 		int_name = priv->int_name_rx_irq[i];
3710 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3711 		ret = request_irq(priv->rx_irq[i],
3712 				  stmmac_msi_intr_rx,
3713 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3714 		if (unlikely(ret < 0)) {
3715 			netdev_err(priv->dev,
3716 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3717 				   __func__, i, priv->rx_irq[i], ret);
3718 			irq_err = REQ_IRQ_ERR_RX;
3719 			irq_idx = i;
3720 			goto irq_error;
3721 		}
3722 		cpumask_clear(&cpu_mask);
3723 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3724 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3725 	}
3726 
3727 	/* Request Tx MSI irq */
3728 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3729 		if (i >= MTL_MAX_TX_QUEUES)
3730 			break;
3731 		if (priv->tx_irq[i] == 0)
3732 			continue;
3733 
3734 		int_name = priv->int_name_tx_irq[i];
3735 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3736 		ret = request_irq(priv->tx_irq[i],
3737 				  stmmac_msi_intr_tx,
3738 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3739 		if (unlikely(ret < 0)) {
3740 			netdev_err(priv->dev,
3741 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3742 				   __func__, i, priv->tx_irq[i], ret);
3743 			irq_err = REQ_IRQ_ERR_TX;
3744 			irq_idx = i;
3745 			goto irq_error;
3746 		}
3747 		cpumask_clear(&cpu_mask);
3748 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3749 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3750 	}
3751 
3752 	return 0;
3753 
3754 irq_error:
3755 	stmmac_free_irq(dev, irq_err, irq_idx);
3756 	return ret;
3757 }
3758 
3759 static int stmmac_request_irq_single(struct net_device *dev)
3760 {
3761 	struct stmmac_priv *priv = netdev_priv(dev);
3762 	enum request_irq_err irq_err;
3763 	int ret;
3764 
3765 	ret = request_irq(dev->irq, stmmac_interrupt,
3766 			  IRQF_SHARED, dev->name, dev);
3767 	if (unlikely(ret < 0)) {
3768 		netdev_err(priv->dev,
3769 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3770 			   __func__, dev->irq, ret);
3771 		irq_err = REQ_IRQ_ERR_MAC;
3772 		goto irq_error;
3773 	}
3774 
3775 	/* Request the Wake IRQ in case a separate line
3776 	 * is used for WoL
3777 	 */
3778 	priv->wol_irq_disabled = true;
3779 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3780 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3781 				  IRQF_SHARED, dev->name, dev);
3782 		if (unlikely(ret < 0)) {
3783 			netdev_err(priv->dev,
3784 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3785 				   __func__, priv->wol_irq, ret);
3786 			irq_err = REQ_IRQ_ERR_WOL;
3787 			goto irq_error;
3788 		}
3789 	}
3790 
3791 	/* Request the LPI IRQ in case a separate line is used */
3792 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3793 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3794 				  IRQF_SHARED, dev->name, dev);
3795 		if (unlikely(ret < 0)) {
3796 			netdev_err(priv->dev,
3797 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3798 				   __func__, priv->lpi_irq, ret);
3799 			irq_err = REQ_IRQ_ERR_LPI;
3800 			goto irq_error;
3801 		}
3802 	}
3803 
3804 	/* Request the common Safety Feature Correctible/Uncorrectible
3805 	 * Error line in case a separate line is used
3806 	 */
3807 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3808 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3809 				  IRQF_SHARED, dev->name, dev);
3810 		if (unlikely(ret < 0)) {
3811 			netdev_err(priv->dev,
3812 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3813 				   __func__, priv->sfty_irq, ret);
3814 			irq_err = REQ_IRQ_ERR_SFTY;
3815 			goto irq_error;
3816 		}
3817 	}
3818 
3819 	return 0;
3820 
3821 irq_error:
3822 	stmmac_free_irq(dev, irq_err, 0);
3823 	return ret;
3824 }
3825 
3826 static int stmmac_request_irq(struct net_device *dev)
3827 {
3828 	struct stmmac_priv *priv = netdev_priv(dev);
3829 	int ret;
3830 
3831 	/* Request the IRQ lines */
3832 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3833 		ret = stmmac_request_irq_multi_msi(dev);
3834 	else
3835 		ret = stmmac_request_irq_single(dev);
3836 
3837 	return ret;
3838 }
3839 
3840 /**
3841  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3842  *  @priv: driver private structure
3843  *  @mtu: MTU to setup the dma queue and buf with
3844  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3845  *  Allocate the Tx/Rx DMA queues and initialize them.
3846  *  Return value:
3847  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3848  */
3849 static struct stmmac_dma_conf *
3850 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3851 {
3852 	struct stmmac_dma_conf *dma_conf;
3853 	int chan, bfsize, ret;
3854 
3855 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3856 	if (!dma_conf) {
3857 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3858 			   __func__);
3859 		return ERR_PTR(-ENOMEM);
3860 	}
3861 
3862 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3863 	if (bfsize < 0)
3864 		bfsize = 0;
3865 
3866 	if (bfsize < BUF_SIZE_16KiB)
3867 		bfsize = stmmac_set_bfsize(mtu, 0);
3868 
3869 	dma_conf->dma_buf_sz = bfsize;
3870 	/* Choose the tx/rx ring sizes from the ones already defined in the
3871 	 * priv struct, if any.
3872 	 */
3873 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3874 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3875 
3876 	if (!dma_conf->dma_tx_size)
3877 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3878 	if (!dma_conf->dma_rx_size)
3879 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3880 
3881 	/* Decide TBS availability before the TX descriptors are allocated */
3882 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3883 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3884 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3885 
3886 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3887 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3888 	}
3889 
3890 	ret = alloc_dma_desc_resources(priv, dma_conf);
3891 	if (ret < 0) {
3892 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3893 			   __func__);
3894 		goto alloc_error;
3895 	}
3896 
3897 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3898 	if (ret < 0) {
3899 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3900 			   __func__);
3901 		goto init_error;
3902 	}
3903 
3904 	return dma_conf;
3905 
3906 init_error:
3907 	free_dma_desc_resources(priv, dma_conf);
3908 alloc_error:
3909 	kfree(dma_conf);
3910 	return ERR_PTR(ret);
3911 }
3912 
3913 /**
3914  *  __stmmac_open - open entry point of the driver
3915  *  @dev : pointer to the device structure.
3916  *  @dma_conf: structure holding the DMA configuration to install
3917  *  Description:
3918  *  This function is the open entry point of the driver.
3919  *  Return value:
3920  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3921  *  file on failure.
3922  */
3923 static int __stmmac_open(struct net_device *dev,
3924 			 struct stmmac_dma_conf *dma_conf)
3925 {
3926 	struct stmmac_priv *priv = netdev_priv(dev);
3927 	int mode = priv->plat->phy_interface;
3928 	u32 chan;
3929 	int ret;
3930 
3931 	/* Initialise the tx lpi timer, converting from msec to usec */
3932 	if (!priv->tx_lpi_timer)
3933 		priv->tx_lpi_timer = eee_timer * 1000;
3934 
3935 	ret = pm_runtime_resume_and_get(priv->device);
3936 	if (ret < 0)
3937 		return ret;
3938 
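	/* Skip attaching a PHY when the XPCS handles the link through
	 * Clause 73 auto-negotiation.
	 */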
3939 	if ((!priv->hw->xpcs ||
3940 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3941 		ret = stmmac_init_phy(dev);
3942 		if (ret) {
3943 			netdev_err(priv->dev,
3944 				   "%s: Cannot attach to PHY (error: %d)\n",
3945 				   __func__, ret);
3946 			goto init_phy_error;
3947 		}
3948 	}
3949 
3950 	buf_sz = dma_conf->dma_buf_sz;
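	/* Carry the per-queue TBS enable state over into the new dma_conf so
	 * it is preserved when priv->dma_conf is overwritten below.
	 */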
3951 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3952 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3953 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3954 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3955 
3956 	stmmac_reset_queues_param(priv);
3957 
3958 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3959 	    priv->plat->serdes_powerup) {
3960 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3961 		if (ret < 0) {
3962 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3963 				   __func__);
3964 			goto init_error;
3965 		}
3966 	}
3967 
3968 	ret = stmmac_hw_setup(dev, true);
3969 	if (ret < 0) {
3970 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3971 		goto init_error;
3972 	}
3973 
3974 	stmmac_init_coalesce(priv);
3975 
3976 	phylink_start(priv->phylink);
3977 	/* We may have called phylink_speed_down before */
3978 	phylink_speed_up(priv->phylink);
3979 
3980 	ret = stmmac_request_irq(dev);
3981 	if (ret)
3982 		goto irq_error;
3983 
3984 	stmmac_enable_all_queues(priv);
3985 	netif_tx_start_all_queues(priv->dev);
3986 	stmmac_enable_all_dma_irq(priv);
3987 
3988 	return 0;
3989 
3990 irq_error:
3991 	phylink_stop(priv->phylink);
3992 
3993 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3994 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3995 
3996 	stmmac_hw_teardown(dev);
3997 init_error:
3998 	phylink_disconnect_phy(priv->phylink);
3999 init_phy_error:
4000 	pm_runtime_put(priv->device);
4001 	return ret;
4002 }
4003 
4004 static int stmmac_open(struct net_device *dev)
4005 {
4006 	struct stmmac_priv *priv = netdev_priv(dev);
4007 	struct stmmac_dma_conf *dma_conf;
4008 	int ret;
4009 
4010 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4011 	if (IS_ERR(dma_conf))
4012 		return PTR_ERR(dma_conf);
4013 
4014 	ret = __stmmac_open(dev, dma_conf);
4015 	if (ret)
4016 		free_dma_desc_resources(priv, dma_conf);
4017 
4018 	kfree(dma_conf);
4019 	return ret;
4020 }
4021 
4022 /**
4023  *  stmmac_release - close entry point of the driver
4024  *  @dev : device pointer.
4025  *  Description:
4026  *  This is the stop entry point of the driver.
4027  */
4028 static int stmmac_release(struct net_device *dev)
4029 {
4030 	struct stmmac_priv *priv = netdev_priv(dev);
4031 	u32 chan;
4032 
4033 	if (device_may_wakeup(priv->device))
4034 		phylink_speed_down(priv->phylink, false);
4035 	/* Stop and disconnect the PHY */
4036 	phylink_stop(priv->phylink);
4037 	phylink_disconnect_phy(priv->phylink);
4038 
4039 	stmmac_disable_all_queues(priv);
4040 
4041 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4042 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4043 
4044 	netif_tx_disable(dev);
4045 
4046 	/* Free the IRQ lines */
4047 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4048 
4049 	/* Stop TX/RX DMA and clear the descriptors */
4050 	stmmac_stop_all_dma(priv);
4051 
4052 	/* Release and free the Rx/Tx resources */
4053 	free_dma_desc_resources(priv, &priv->dma_conf);
4054 
4055 	/* Disable the MAC Rx/Tx */
4056 	stmmac_mac_set(priv, priv->ioaddr, false);
4057 
4058 	/* Power down the SerDes if present */
4059 	if (priv->plat->serdes_powerdown)
4060 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4061 
4062 	stmmac_release_ptp(priv);
4063 
4064 	if (stmmac_fpe_supported(priv))
4065 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4066 
4067 	pm_runtime_put(priv->device);
4068 
4069 	return 0;
4070 }
4071 
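/* Queue a context descriptor for HW VLAN tag insertion when the skb carries a
 * VLAN tag and the core supports it. Returns true when a descriptor was
 * consumed for the tag, false when the tag must be handled another way.
 */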
4072 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4073 			       struct stmmac_tx_queue *tx_q)
4074 {
4075 	u16 tag = 0x0, inner_tag = 0x0;
4076 	u32 inner_type = 0x0;
4077 	struct dma_desc *p;
4078 
4079 	if (!priv->dma_cap.vlins)
4080 		return false;
4081 	if (!skb_vlan_tag_present(skb))
4082 		return false;
4083 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4084 		inner_tag = skb_vlan_tag_get(skb);
4085 		inner_type = STMMAC_VLAN_INSERT;
4086 	}
4087 
4088 	tag = skb_vlan_tag_get(skb);
4089 
4090 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4091 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4092 	else
4093 		p = &tx_q->dma_tx[tx_q->cur_tx];
4094 
4095 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4096 		return false;
4097 
4098 	stmmac_set_tx_owner(priv, p);
4099 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4100 	return true;
4101 }
4102 
4103 /**
4104  *  stmmac_tso_allocator - Allocate TX descriptors for a TSO buffer
4105  *  @priv: driver private structure
4106  *  @des: buffer start address
4107  *  @total_len: total length to fill in descriptors
4108  *  @last_segment: condition for the last descriptor
4109  *  @queue: TX queue index
4110  *  Description:
4111  *  This function fills as many descriptors as needed to cover the buffer,
4112  *  limiting each one to TSO_MAX_BUFF_SIZE bytes.
4113  */
4114 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4115 				 int total_len, bool last_segment, u32 queue)
4116 {
4117 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4118 	struct dma_desc *desc;
4119 	u32 buff_size;
4120 	int tmp_len;
4121 
4122 	tmp_len = total_len;
4123 
4124 	while (tmp_len > 0) {
4125 		dma_addr_t curr_addr;
4126 
4127 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4128 						priv->dma_conf.dma_tx_size);
4129 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4130 
4131 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4132 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4133 		else
4134 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4135 
4136 		curr_addr = des + (total_len - tmp_len);
4137 		stmmac_set_desc_addr(priv, desc, curr_addr);
4138 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4139 			    TSO_MAX_BUFF_SIZE : tmp_len;
4140 
4141 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4142 				0, 1,
4143 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4144 				0, 0);
4145 
4146 		tmp_len -= TSO_MAX_BUFF_SIZE;
4147 	}
4148 }
4149 
4150 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4151 {
4152 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4153 	int desc_size;
4154 
4155 	if (likely(priv->extend_desc))
4156 		desc_size = sizeof(struct dma_extended_desc);
4157 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4158 		desc_size = sizeof(struct dma_edesc);
4159 	else
4160 		desc_size = sizeof(struct dma_desc);
4161 
4162 	/* The own bit must be the last setting done when preparing the
4163 	 * descriptor, and a barrier is needed to make sure everything
4164 	 * is coherent before granting ownership to the DMA engine.
4165 	 */
4166 	wmb();
4167 
4168 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4169 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4170 }
4171 
4172 /**
4173  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4174  *  @skb : the socket buffer
4175  *  @dev : device pointer
4176  *  Description: this is the transmit function that is called on TSO frames
4177  *  (support available on GMAC4 and newer chips).
4178  *  The diagram below shows the ring programming in the case of TSO frames:
4179  *
4180  *  First Descriptor
4181  *   --------
4182  *   | DES0 |---> buffer1 = L2/L3/L4 header
4183  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4184  *   |      |     width is 32-bit, but we never use it.
4185  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4186  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4187  *   |      |     or 48-bit, and we always use it.
4188  *   | DES2 |---> buffer1 len
4189  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4190  *   --------
4191  *   --------
4192  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4193  *   | DES1 |---> same as the First Descriptor
4194  *   | DES2 |---> buffer1 len
4195  *   | DES3 |
4196  *   --------
4197  *	|
4198  *     ...
4199  *	|
4200  *   --------
4201  *   | DES0 |---> buffer1 = Split TCP Payload
4202  *   | DES1 |---> same as the First Descriptor
4203  *   | DES2 |---> buffer1 len
4204  *   | DES3 |
4205  *   --------
4206  *
4207  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when the MSS changes.
4208  */
4209 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4210 {
4211 	struct dma_desc *desc, *first, *mss_desc = NULL;
4212 	struct stmmac_priv *priv = netdev_priv(dev);
4213 	unsigned int first_entry, tx_packets;
4214 	struct stmmac_txq_stats *txq_stats;
4215 	struct stmmac_tx_queue *tx_q;
4216 	u32 pay_len, mss, queue;
4217 	int i, first_tx, nfrags;
4218 	u8 proto_hdr_len, hdr;
4219 	dma_addr_t des;
4220 	bool set_ic;
4221 
4222 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4223 	 *
4224 	 * Never let the HW insert the VLAN tag, since segments split by
4225 	 * the TSO engine would be un-tagged by mistake.
4226 	 */
4227 	if (skb_vlan_tag_present(skb)) {
4228 		skb = __vlan_hwaccel_push_inside(skb);
4229 		if (unlikely(!skb)) {
4230 			priv->xstats.tx_dropped++;
4231 			return NETDEV_TX_OK;
4232 		}
4233 	}
4234 
4235 	nfrags = skb_shinfo(skb)->nr_frags;
4236 	queue = skb_get_queue_mapping(skb);
4237 
4238 	tx_q = &priv->dma_conf.tx_queue[queue];
4239 	txq_stats = &priv->xstats.txq_stats[queue];
4240 	first_tx = tx_q->cur_tx;
4241 
4242 	/* Compute header lengths */
4243 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4244 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4245 		hdr = sizeof(struct udphdr);
4246 	} else {
4247 		proto_hdr_len = skb_tcp_all_headers(skb);
4248 		hdr = tcp_hdrlen(skb);
4249 	}
4250 
4251 	/* Checking descriptor availability against this threshold should be safe enough */
4252 	if (unlikely(stmmac_tx_avail(priv, queue) <
4253 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4254 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4255 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4256 								queue));
4257 			/* This is a hard error, log it. */
4258 			netdev_err(priv->dev,
4259 				   "%s: Tx Ring full when queue awake\n",
4260 				   __func__);
4261 		}
4262 		return NETDEV_TX_BUSY;
4263 	}
4264 
4265 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4266 
4267 	mss = skb_shinfo(skb)->gso_size;
4268 
4269 	/* set new MSS value if needed */
4270 	if (mss != tx_q->mss) {
4271 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4272 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4273 		else
4274 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4275 
4276 		stmmac_set_mss(priv, mss_desc, mss);
4277 		tx_q->mss = mss;
4278 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4279 						priv->dma_conf.dma_tx_size);
4280 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4281 	}
4282 
4283 	if (netif_msg_tx_queued(priv)) {
4284 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4285 			__func__, hdr, proto_hdr_len, pay_len, mss);
4286 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4287 			skb->data_len);
4288 	}
4289 
4290 	first_entry = tx_q->cur_tx;
4291 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4292 
4293 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4294 		desc = &tx_q->dma_entx[first_entry].basic;
4295 	else
4296 		desc = &tx_q->dma_tx[first_entry];
4297 	first = desc;
4298 
4299 	/* first descriptor: fill Headers on Buf1 */
4300 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4301 			     DMA_TO_DEVICE);
4302 	if (dma_mapping_error(priv->device, des))
4303 		goto dma_map_err;
4304 
4305 	stmmac_set_desc_addr(priv, first, des);
4306 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4307 			     (nfrags == 0), queue);
4308 
4309 	/* In case two or more DMA transmit descriptors are allocated for this
4310 	 * non-paged SKB data, the DMA buffer address should be saved to
4311 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4312 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4313 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4314 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4315 	 * sooner or later.
4316 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4317 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4318 	 * this DMA buffer right after the DMA engine completely finishes the
4319 	 * full buffer transmission.
4320 	 */
4321 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4322 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4323 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4324 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4325 
4326 	/* Prepare fragments */
4327 	for (i = 0; i < nfrags; i++) {
4328 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4329 
4330 		des = skb_frag_dma_map(priv->device, frag, 0,
4331 				       skb_frag_size(frag),
4332 				       DMA_TO_DEVICE);
4333 		if (dma_mapping_error(priv->device, des))
4334 			goto dma_map_err;
4335 
4336 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4337 				     (i == nfrags - 1), queue);
4338 
4339 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4340 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4341 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4342 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4343 	}
4344 
4345 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4346 
4347 	/* Only the last descriptor gets to point to the skb. */
4348 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4349 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4350 
4351 	/* Manage tx mitigation */
4352 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4353 	tx_q->tx_count_frames += tx_packets;
4354 
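	/* Request a TX completion interrupt (IC bit) when the frame is being
	 * HW timestamped or when the per-queue frame coalescing threshold is
	 * crossed; otherwise rely on the TX coalescing timer.
	 */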
4355 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4356 		set_ic = true;
4357 	else if (!priv->tx_coal_frames[queue])
4358 		set_ic = false;
4359 	else if (tx_packets > priv->tx_coal_frames[queue])
4360 		set_ic = true;
4361 	else if ((tx_q->tx_count_frames %
4362 		  priv->tx_coal_frames[queue]) < tx_packets)
4363 		set_ic = true;
4364 	else
4365 		set_ic = false;
4366 
4367 	if (set_ic) {
4368 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4369 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4370 		else
4371 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4372 
4373 		tx_q->tx_count_frames = 0;
4374 		stmmac_set_tx_ic(priv, desc);
4375 	}
4376 
4377 	/* We've used all descriptors we need for this skb, however,
4378 	 * advance cur_tx so that it references a fresh descriptor.
4379 	 * ndo_start_xmit will fill this descriptor the next time it's
4380 	 * called and stmmac_tx_clean may clean up to this descriptor.
4381 	 */
4382 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4383 
4384 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4385 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4386 			  __func__);
4387 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4388 	}
4389 
4390 	u64_stats_update_begin(&txq_stats->q_syncp);
4391 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4392 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4393 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4394 	if (set_ic)
4395 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4396 	u64_stats_update_end(&txq_stats->q_syncp);
4397 
4398 	if (priv->sarc_type)
4399 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4400 
4401 	skb_tx_timestamp(skb);
4402 
4403 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4404 		     priv->hwts_tx_en)) {
4405 		/* declare that device is doing timestamping */
4406 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4407 		stmmac_enable_tx_timestamp(priv, first);
4408 	}
4409 
4410 	/* Complete the first descriptor before granting the DMA */
4411 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4412 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4413 				   hdr / 4, (skb->len - proto_hdr_len));
4414 
4415 	/* If context desc is used to change MSS */
4416 	if (mss_desc) {
4417 		/* Make sure that first descriptor has been completely
4418 		 * written, including its own bit. This is because MSS is
4419 		 * actually before first descriptor, so we need to make
4420 		 * sure that MSS's own bit is the last thing written.
4421 		 */
4422 		dma_wmb();
4423 		stmmac_set_tx_owner(priv, mss_desc);
4424 	}
4425 
4426 	if (netif_msg_pktdata(priv)) {
4427 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4428 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4429 			tx_q->cur_tx, first, nfrags);
4430 		pr_info(">>> frame to be transmitted: ");
4431 		print_pkt(skb->data, skb_headlen(skb));
4432 	}
4433 
4434 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4435 
4436 	stmmac_flush_tx_descriptors(priv, queue);
4437 	stmmac_tx_timer_arm(priv, queue);
4438 
4439 	return NETDEV_TX_OK;
4440 
4441 dma_map_err:
4442 	dev_err(priv->device, "Tx dma map failed\n");
4443 	dev_kfree_skb(skb);
4444 	priv->xstats.tx_dropped++;
4445 	return NETDEV_TX_OK;
4446 }
4447 
4448 /**
4449  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4450  * @skb: socket buffer to check
4451  *
4452  * Check if a packet has an ethertype that will trigger the IP header checks
4453  * and IP/TCP checksum engine of the stmmac core.
4454  *
4455  * Return: true if the ethertype can trigger the checksum engine, false
4456  * otherwise
4457  */
4458 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4459 {
4460 	int depth = 0;
4461 	__be16 proto;
4462 
4463 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4464 				    &depth);
4465 
4466 	return (depth <= ETH_HLEN) &&
4467 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4468 }
4469 
4470 /**
4471  *  stmmac_xmit - Tx entry point of the driver
4472  *  @skb : the socket buffer
4473  *  @dev : device pointer
4474  *  Description : this is the tx entry point of the driver.
4475  *  It programs the chain or the ring and supports oversized frames
4476  *  and SG feature.
4477  */
4478 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4479 {
4480 	unsigned int first_entry, tx_packets, enh_desc;
4481 	struct stmmac_priv *priv = netdev_priv(dev);
4482 	unsigned int nopaged_len = skb_headlen(skb);
4483 	int i, csum_insertion = 0, is_jumbo = 0;
4484 	u32 queue = skb_get_queue_mapping(skb);
4485 	int nfrags = skb_shinfo(skb)->nr_frags;
4486 	int gso = skb_shinfo(skb)->gso_type;
4487 	struct stmmac_txq_stats *txq_stats;
4488 	struct dma_edesc *tbs_desc = NULL;
4489 	struct dma_desc *desc, *first;
4490 	struct stmmac_tx_queue *tx_q;
4491 	bool has_vlan, set_ic;
4492 	int entry, first_tx;
4493 	dma_addr_t des;
4494 
4495 	tx_q = &priv->dma_conf.tx_queue[queue];
4496 	txq_stats = &priv->xstats.txq_stats[queue];
4497 	first_tx = tx_q->cur_tx;
4498 
4499 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4500 		stmmac_disable_sw_eee_mode(priv);
4501 
4502 	/* Manage oversized TCP frames for GMAC4 device */
4503 	if (skb_is_gso(skb) && priv->tso) {
4504 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4505 			return stmmac_tso_xmit(skb, dev);
4506 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4507 			return stmmac_tso_xmit(skb, dev);
4508 	}
4509 
4510 	if (priv->est && priv->est->enable &&
4511 	    priv->est->max_sdu[queue] &&
4512 	    skb->len > priv->est->max_sdu[queue]){
4513 		priv->xstats.max_sdu_txq_drop[queue]++;
4514 		goto max_sdu_err;
4515 	}
4516 
4517 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4518 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4519 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4520 								queue));
4521 			/* This is a hard error, log it. */
4522 			netdev_err(priv->dev,
4523 				   "%s: Tx Ring full when queue awake\n",
4524 				   __func__);
4525 		}
4526 		return NETDEV_TX_BUSY;
4527 	}
4528 
4529 	/* Check if VLAN can be inserted by HW */
4530 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4531 
4532 	entry = tx_q->cur_tx;
4533 	first_entry = entry;
4534 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4535 
4536 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4537 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4538 	 * queues. In that case, checksum offloading for the queues that don't
4539 	 * support tx coe needs to fall back to software checksum calculation.
4540 	 *
4541 	 * Packets that won't trigger the COE, e.g. most DSA-tagged packets,
4542 	 * will also have to be checksummed in software.
4543 	 */
4544 	if (csum_insertion &&
4545 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4546 	     !stmmac_has_ip_ethertype(skb))) {
4547 		if (unlikely(skb_checksum_help(skb)))
4548 			goto dma_map_err;
4549 		csum_insertion = !csum_insertion;
4550 	}
4551 
4552 	if (likely(priv->extend_desc))
4553 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4554 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4555 		desc = &tx_q->dma_entx[entry].basic;
4556 	else
4557 		desc = tx_q->dma_tx + entry;
4558 
4559 	first = desc;
4560 
4561 	if (has_vlan)
4562 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4563 
4564 	enh_desc = priv->plat->enh_desc;
4565 	/* To program the descriptors according to the size of the frame */
4566 	if (enh_desc)
4567 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4568 
4569 	if (unlikely(is_jumbo)) {
4570 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4571 		if (unlikely(entry < 0) && (entry != -EINVAL))
4572 			goto dma_map_err;
4573 	}
4574 
4575 	for (i = 0; i < nfrags; i++) {
4576 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4577 		int len = skb_frag_size(frag);
4578 		bool last_segment = (i == (nfrags - 1));
4579 
4580 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4581 		WARN_ON(tx_q->tx_skbuff[entry]);
4582 
4583 		if (likely(priv->extend_desc))
4584 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4585 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4586 			desc = &tx_q->dma_entx[entry].basic;
4587 		else
4588 			desc = tx_q->dma_tx + entry;
4589 
4590 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4591 				       DMA_TO_DEVICE);
4592 		if (dma_mapping_error(priv->device, des))
4593 			goto dma_map_err; /* should reuse desc w/o issues */
4594 
4595 		tx_q->tx_skbuff_dma[entry].buf = des;
4596 
4597 		stmmac_set_desc_addr(priv, desc, des);
4598 
4599 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4600 		tx_q->tx_skbuff_dma[entry].len = len;
4601 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4602 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4603 
4604 		/* Prepare the descriptor and set the own bit too */
4605 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4606 				priv->mode, 1, last_segment, skb->len);
4607 	}
4608 
4609 	/* Only the last descriptor gets to point to the skb. */
4610 	tx_q->tx_skbuff[entry] = skb;
4611 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4612 
4613 	/* According to the coalesce parameter the IC bit for the latest
4614 	 * segment is reset and the timer re-started to clean the tx status.
4615 	 * This approach takes care of the fragments: desc is the first
4616 	 * element in the case of no SG.
4617 	 */
4618 	tx_packets = (entry + 1) - first_tx;
4619 	tx_q->tx_count_frames += tx_packets;
4620 
4621 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4622 		set_ic = true;
4623 	else if (!priv->tx_coal_frames[queue])
4624 		set_ic = false;
4625 	else if (tx_packets > priv->tx_coal_frames[queue])
4626 		set_ic = true;
4627 	else if ((tx_q->tx_count_frames %
4628 		  priv->tx_coal_frames[queue]) < tx_packets)
4629 		set_ic = true;
4630 	else
4631 		set_ic = false;
4632 
4633 	if (set_ic) {
4634 		if (likely(priv->extend_desc))
4635 			desc = &tx_q->dma_etx[entry].basic;
4636 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4637 			desc = &tx_q->dma_entx[entry].basic;
4638 		else
4639 			desc = &tx_q->dma_tx[entry];
4640 
4641 		tx_q->tx_count_frames = 0;
4642 		stmmac_set_tx_ic(priv, desc);
4643 	}
4644 
4645 	/* We've used all descriptors we need for this skb, however,
4646 	 * advance cur_tx so that it references a fresh descriptor.
4647 	 * ndo_start_xmit will fill this descriptor the next time it's
4648 	 * called and stmmac_tx_clean may clean up to this descriptor.
4649 	 */
4650 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4651 	tx_q->cur_tx = entry;
4652 
4653 	if (netif_msg_pktdata(priv)) {
4654 		netdev_dbg(priv->dev,
4655 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4656 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4657 			   entry, first, nfrags);
4658 
4659 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4660 		print_pkt(skb->data, skb->len);
4661 	}
4662 
4663 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4664 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4665 			  __func__);
4666 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4667 	}
4668 
4669 	u64_stats_update_begin(&txq_stats->q_syncp);
4670 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4671 	if (set_ic)
4672 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4673 	u64_stats_update_end(&txq_stats->q_syncp);
4674 
4675 	if (priv->sarc_type)
4676 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4677 
4678 	skb_tx_timestamp(skb);
4679 
4680 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4681 	 * problems because all the descriptors are actually ready to be
4682 	 * passed to the DMA engine.
4683 	 */
4684 	if (likely(!is_jumbo)) {
4685 		bool last_segment = (nfrags == 0);
4686 
4687 		des = dma_map_single(priv->device, skb->data,
4688 				     nopaged_len, DMA_TO_DEVICE);
4689 		if (dma_mapping_error(priv->device, des))
4690 			goto dma_map_err;
4691 
4692 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4693 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4694 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4695 
4696 		stmmac_set_desc_addr(priv, first, des);
4697 
4698 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4699 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4700 
4701 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4702 			     priv->hwts_tx_en)) {
4703 			/* declare that device is doing timestamping */
4704 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4705 			stmmac_enable_tx_timestamp(priv, first);
4706 		}
4707 
4708 		/* Prepare the first descriptor setting the OWN bit too */
4709 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4710 				csum_insertion, priv->mode, 0, last_segment,
4711 				skb->len);
4712 	}
4713 
4714 	if (tx_q->tbs & STMMAC_TBS_EN) {
4715 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4716 
4717 		tbs_desc = &tx_q->dma_entx[first_entry];
4718 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4719 	}
4720 
4721 	stmmac_set_tx_owner(priv, first);
4722 
4723 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4724 
4725 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4726 
4727 	stmmac_flush_tx_descriptors(priv, queue);
4728 	stmmac_tx_timer_arm(priv, queue);
4729 
4730 	return NETDEV_TX_OK;
4731 
4732 dma_map_err:
4733 	netdev_err(priv->dev, "Tx DMA map failed\n");
4734 max_sdu_err:
4735 	dev_kfree_skb(skb);
4736 	priv->xstats.tx_dropped++;
4737 	return NETDEV_TX_OK;
4738 }
4739 
4740 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4741 {
4742 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4743 	__be16 vlan_proto = veth->h_vlan_proto;
4744 	u16 vlanid;
4745 
4746 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4747 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4748 	    (vlan_proto == htons(ETH_P_8021AD) &&
4749 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4750 		/* pop the vlan tag */
4751 		vlanid = ntohs(veth->h_vlan_TCI);
4752 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4753 		skb_pull(skb, VLAN_HLEN);
4754 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4755 	}
4756 }
4757 
4758 /**
4759  * stmmac_rx_refill - refill the RX ring with fresh preallocated buffers
4760  * @priv: driver private structure
4761  * @queue: RX queue index
4762  * Description: this reallocates the page-pool buffers used by the
4763  * reception process that is based on zero-copy.
4764  */
4765 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4766 {
4767 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4768 	int dirty = stmmac_rx_dirty(priv, queue);
4769 	unsigned int entry = rx_q->dirty_rx;
4770 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4771 
4772 	if (priv->dma_cap.host_dma_width <= 32)
4773 		gfp |= GFP_DMA32;
4774 
4775 	while (dirty-- > 0) {
4776 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4777 		struct dma_desc *p;
4778 		bool use_rx_wd;
4779 
4780 		if (priv->extend_desc)
4781 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4782 		else
4783 			p = rx_q->dma_rx + entry;
4784 
4785 		if (!buf->page) {
4786 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4787 			if (!buf->page)
4788 				break;
4789 		}
4790 
4791 		if (priv->sph && !buf->sec_page) {
4792 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4793 			if (!buf->sec_page)
4794 				break;
4795 
4796 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4797 		}
4798 
4799 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4800 
4801 		stmmac_set_desc_addr(priv, p, buf->addr);
4802 		if (priv->sph)
4803 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4804 		else
4805 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4806 		stmmac_refill_desc3(priv, rx_q, p);
4807 
4808 		rx_q->rx_count_frames++;
4809 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4810 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4811 			rx_q->rx_count_frames = 0;
4812 
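		/* When set, use_rx_wd suppresses the per-descriptor completion
		 * interrupt so that the RX watchdog (RIWT) coalesces
		 * interrupts; it is only honoured when RIWT is in use.
		 */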
4813 		use_rx_wd = !priv->rx_coal_frames[queue];
4814 		use_rx_wd |= rx_q->rx_count_frames > 0;
4815 		if (!priv->use_riwt)
4816 			use_rx_wd = false;
4817 
4818 		dma_wmb();
4819 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4820 
4821 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4822 	}
4823 	rx_q->dirty_rx = entry;
4824 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4825 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4826 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4827 }
4828 
4829 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4830 				       struct dma_desc *p,
4831 				       int status, unsigned int len)
4832 {
4833 	unsigned int plen = 0, hlen = 0;
4834 	int coe = priv->hw->rx_csum;
4835 
4836 	/* Not first descriptor, buffer is always zero */
4837 	if (priv->sph && len)
4838 		return 0;
4839 
4840 	/* First descriptor, get split header length */
4841 	stmmac_get_rx_header_len(priv, p, &hlen);
4842 	if (priv->sph && hlen) {
4843 		priv->xstats.rx_split_hdr_pkt_n++;
4844 		return hlen;
4845 	}
4846 
4847 	/* First descriptor, not last descriptor and not split header */
4848 	if (status & rx_not_ls)
4849 		return priv->dma_conf.dma_buf_sz;
4850 
4851 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4852 
4853 	/* First descriptor and last descriptor and not split header */
4854 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4855 }
4856 
4857 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4858 				       struct dma_desc *p,
4859 				       int status, unsigned int len)
4860 {
4861 	int coe = priv->hw->rx_csum;
4862 	unsigned int plen = 0;
4863 
4864 	/* Not split header, buffer is not available */
4865 	if (!priv->sph)
4866 		return 0;
4867 
4868 	/* Not last descriptor */
4869 	if (status & rx_not_ls)
4870 		return priv->dma_conf.dma_buf_sz;
4871 
4872 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4873 
4874 	/* Last descriptor */
4875 	return plen - len;
4876 }
4877 
4878 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4879 				struct xdp_frame *xdpf, bool dma_map)
4880 {
4881 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4882 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4883 	unsigned int entry = tx_q->cur_tx;
4884 	struct dma_desc *tx_desc;
4885 	dma_addr_t dma_addr;
4886 	bool set_ic;
4887 
4888 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4889 		return STMMAC_XDP_CONSUMED;
4890 
4891 	if (priv->est && priv->est->enable &&
4892 	    priv->est->max_sdu[queue] &&
4893 	    xdpf->len > priv->est->max_sdu[queue]) {
4894 		priv->xstats.max_sdu_txq_drop[queue]++;
4895 		return STMMAC_XDP_CONSUMED;
4896 	}
4897 
4898 	if (likely(priv->extend_desc))
4899 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4900 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4901 		tx_desc = &tx_q->dma_entx[entry].basic;
4902 	else
4903 		tx_desc = tx_q->dma_tx + entry;
4904 
4905 	if (dma_map) {
4906 		dma_addr = dma_map_single(priv->device, xdpf->data,
4907 					  xdpf->len, DMA_TO_DEVICE);
4908 		if (dma_mapping_error(priv->device, dma_addr))
4909 			return STMMAC_XDP_CONSUMED;
4910 
4911 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4912 	} else {
4913 		struct page *page = virt_to_page(xdpf->data);
4914 
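		/* For XDP_TX the frame still lives in its page-pool page: the
		 * xdp_frame metadata sits at the start of the page, so the
		 * payload DMA address is the page DMA address plus the
		 * xdp_frame size and headroom.
		 */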
4915 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4916 			   xdpf->headroom;
4917 		dma_sync_single_for_device(priv->device, dma_addr,
4918 					   xdpf->len, DMA_BIDIRECTIONAL);
4919 
4920 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4921 	}
4922 
4923 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4924 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4925 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4926 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4927 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4928 
4929 	tx_q->xdpf[entry] = xdpf;
4930 
4931 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4932 
4933 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4934 			       true, priv->mode, true, true,
4935 			       xdpf->len);
4936 
4937 	tx_q->tx_count_frames++;
4938 
4939 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4940 		set_ic = true;
4941 	else
4942 		set_ic = false;
4943 
4944 	if (set_ic) {
4945 		tx_q->tx_count_frames = 0;
4946 		stmmac_set_tx_ic(priv, tx_desc);
4947 		u64_stats_update_begin(&txq_stats->q_syncp);
4948 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4949 		u64_stats_update_end(&txq_stats->q_syncp);
4950 	}
4951 
4952 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4953 
4954 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4955 	tx_q->cur_tx = entry;
4956 
4957 	return STMMAC_XDP_TX;
4958 }
4959 
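/* Map the current CPU to a TX queue index, wrapping around the number of TX
 * queues in use so XDP transmissions share queues with the slow path.
 */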
4960 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4961 				   int cpu)
4962 {
4963 	int index = cpu;
4964 
4965 	if (unlikely(index < 0))
4966 		index = 0;
4967 
4968 	while (index >= priv->plat->tx_queues_to_use)
4969 		index -= priv->plat->tx_queues_to_use;
4970 
4971 	return index;
4972 }
4973 
4974 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4975 				struct xdp_buff *xdp)
4976 {
4977 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4978 	int cpu = smp_processor_id();
4979 	struct netdev_queue *nq;
4980 	int queue;
4981 	int res;
4982 
4983 	if (unlikely(!xdpf))
4984 		return STMMAC_XDP_CONSUMED;
4985 
4986 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4987 	nq = netdev_get_tx_queue(priv->dev, queue);
4988 
4989 	__netif_tx_lock(nq, cpu);
4990 	/* Avoids TX time-out as we are sharing with slow path */
4991 	txq_trans_cond_update(nq);
4992 
4993 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4994 	if (res == STMMAC_XDP_TX)
4995 		stmmac_flush_tx_descriptors(priv, queue);
4996 
4997 	__netif_tx_unlock(nq);
4998 
4999 	return res;
5000 }
5001 
5002 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5003 				 struct bpf_prog *prog,
5004 				 struct xdp_buff *xdp)
5005 {
5006 	u32 act;
5007 	int res;
5008 
5009 	act = bpf_prog_run_xdp(prog, xdp);
5010 	switch (act) {
5011 	case XDP_PASS:
5012 		res = STMMAC_XDP_PASS;
5013 		break;
5014 	case XDP_TX:
5015 		res = stmmac_xdp_xmit_back(priv, xdp);
5016 		break;
5017 	case XDP_REDIRECT:
5018 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5019 			res = STMMAC_XDP_CONSUMED;
5020 		else
5021 			res = STMMAC_XDP_REDIRECT;
5022 		break;
5023 	default:
5024 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5025 		fallthrough;
5026 	case XDP_ABORTED:
5027 		trace_xdp_exception(priv->dev, prog, act);
5028 		fallthrough;
5029 	case XDP_DROP:
5030 		res = STMMAC_XDP_CONSUMED;
5031 		break;
5032 	}
5033 
5034 	return res;
5035 }
5036 
5037 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5038 					   struct xdp_buff *xdp)
5039 {
5040 	struct bpf_prog *prog;
5041 	int res;
5042 
5043 	prog = READ_ONCE(priv->xdp_prog);
5044 	if (!prog) {
5045 		res = STMMAC_XDP_PASS;
5046 		goto out;
5047 	}
5048 
5049 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5050 out:
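	/* The XDP verdict is encoded as an ERR_PTR so the caller can tell it
	 * apart from a real sk_buff pointer and decode it with PTR_ERR().
	 */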
5051 	return ERR_PTR(-res);
5052 }
5053 
5054 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5055 				   int xdp_status)
5056 {
5057 	int cpu = smp_processor_id();
5058 	int queue;
5059 
5060 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5061 
5062 	if (xdp_status & STMMAC_XDP_TX)
5063 		stmmac_tx_timer_arm(priv, queue);
5064 
5065 	if (xdp_status & STMMAC_XDP_REDIRECT)
5066 		xdp_do_flush();
5067 }
5068 
5069 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5070 					       struct xdp_buff *xdp)
5071 {
5072 	unsigned int metasize = xdp->data - xdp->data_meta;
5073 	unsigned int datasize = xdp->data_end - xdp->data;
5074 	struct sk_buff *skb;
5075 
5076 	skb = napi_alloc_skb(&ch->rxtx_napi,
5077 			     xdp->data_end - xdp->data_hard_start);
5078 	if (unlikely(!skb))
5079 		return NULL;
5080 
5081 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5082 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5083 	if (metasize)
5084 		skb_metadata_set(skb, metasize);
5085 
5086 	return skb;
5087 }
5088 
5089 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5090 				   struct dma_desc *p, struct dma_desc *np,
5091 				   struct xdp_buff *xdp)
5092 {
5093 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5094 	struct stmmac_channel *ch = &priv->channel[queue];
5095 	unsigned int len = xdp->data_end - xdp->data;
5096 	enum pkt_hash_types hash_type;
5097 	int coe = priv->hw->rx_csum;
5098 	struct sk_buff *skb;
5099 	u32 hash;
5100 
5101 	skb = stmmac_construct_skb_zc(ch, xdp);
5102 	if (!skb) {
5103 		priv->xstats.rx_dropped++;
5104 		return;
5105 	}
5106 
5107 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5108 	if (priv->hw->hw_vlan_en)
5109 		/* MAC level stripping. */
5110 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5111 	else
5112 		/* Driver level stripping. */
5113 		stmmac_rx_vlan(priv->dev, skb);
5114 	skb->protocol = eth_type_trans(skb, priv->dev);
5115 
5116 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5117 		skb_checksum_none_assert(skb);
5118 	else
5119 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5120 
5121 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5122 		skb_set_hash(skb, hash, hash_type);
5123 
5124 	skb_record_rx_queue(skb, queue);
5125 	napi_gro_receive(&ch->rxtx_napi, skb);
5126 
5127 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5128 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5129 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5130 	u64_stats_update_end(&rxq_stats->napi_syncp);
5131 }
5132 
5133 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5134 {
5135 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5136 	unsigned int entry = rx_q->dirty_rx;
5137 	struct dma_desc *rx_desc = NULL;
5138 	bool ret = true;
5139 
5140 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5141 
5142 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5143 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5144 		dma_addr_t dma_addr;
5145 		bool use_rx_wd;
5146 
5147 		if (!buf->xdp) {
5148 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5149 			if (!buf->xdp) {
5150 				ret = false;
5151 				break;
5152 			}
5153 		}
5154 
5155 		if (priv->extend_desc)
5156 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5157 		else
5158 			rx_desc = rx_q->dma_rx + entry;
5159 
5160 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5161 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5162 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5163 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5164 
5165 		rx_q->rx_count_frames++;
5166 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5167 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5168 			rx_q->rx_count_frames = 0;
5169 
5170 		use_rx_wd = !priv->rx_coal_frames[queue];
5171 		use_rx_wd |= rx_q->rx_count_frames > 0;
5172 		if (!priv->use_riwt)
5173 			use_rx_wd = false;
5174 
5175 		dma_wmb();
5176 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5177 
5178 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5179 	}
5180 
5181 	if (rx_desc) {
5182 		rx_q->dirty_rx = entry;
5183 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5184 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5185 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5186 	}
5187 
5188 	return ret;
5189 }
5190 
5191 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5192 {
5193 	/* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
5194 	 * represents the incoming packet, whereas the cb field in the same
5195 	 * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
5196 	 * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
5197 	 */
5198 	return (struct stmmac_xdp_buff *)xdp;
5199 }
5200 
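/**
 * stmmac_rx_zc - zero-copy (XSK) receive path
 * @priv: driver private structure
 * @limit: NAPI budget
 * @queue: RX queue index
 * Description: called from the rxtx NAPI poll method when an XSK pool is
 * in use on the queue. Frames are run through the XDP program directly on
 * the XSK buffers: XDP_PASS frames are copied into skbs via
 * stmmac_dispatch_skb_zc(), while TX and REDIRECT verdicts are flushed
 * through stmmac_finalize_xdp_rx().
 */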
5201 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5202 {
5203 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5204 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5205 	unsigned int count = 0, error = 0, len = 0;
5206 	int dirty = stmmac_rx_dirty(priv, queue);
5207 	unsigned int next_entry = rx_q->cur_rx;
5208 	u32 rx_errors = 0, rx_dropped = 0;
5209 	unsigned int desc_size;
5210 	struct bpf_prog *prog;
5211 	bool failure = false;
5212 	int xdp_status = 0;
5213 	int status = 0;
5214 
5215 	if (netif_msg_rx_status(priv)) {
5216 		void *rx_head;
5217 
5218 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5219 		if (priv->extend_desc) {
5220 			rx_head = (void *)rx_q->dma_erx;
5221 			desc_size = sizeof(struct dma_extended_desc);
5222 		} else {
5223 			rx_head = (void *)rx_q->dma_rx;
5224 			desc_size = sizeof(struct dma_desc);
5225 		}
5226 
5227 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5228 				    rx_q->dma_rx_phy, desc_size);
5229 	}
5230 	while (count < limit) {
5231 		struct stmmac_rx_buffer *buf;
5232 		struct stmmac_xdp_buff *ctx;
5233 		unsigned int buf1_len = 0;
5234 		struct dma_desc *np, *p;
5235 		int entry;
5236 		int res;
5237 
5238 		if (!count && rx_q->state_saved) {
5239 			error = rx_q->state.error;
5240 			len = rx_q->state.len;
5241 		} else {
5242 			rx_q->state_saved = false;
5243 			error = 0;
5244 			len = 0;
5245 		}
5246 
5247 		if (count >= limit)
5248 			break;
5249 
5250 read_again:
5251 		buf1_len = 0;
5252 		entry = next_entry;
5253 		buf = &rx_q->buf_pool[entry];
5254 
5255 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5256 			failure = failure ||
5257 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5258 			dirty = 0;
5259 		}
5260 
5261 		if (priv->extend_desc)
5262 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5263 		else
5264 			p = rx_q->dma_rx + entry;
5265 
5266 		/* read the status of the incoming frame */
5267 		status = stmmac_rx_status(priv, &priv->xstats, p);
5268 		/* check if managed by the DMA otherwise go ahead */
5269 		if (unlikely(status & dma_own))
5270 			break;
5271 
5272 		/* Prefetch the next RX descriptor */
5273 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5274 						priv->dma_conf.dma_rx_size);
5275 		next_entry = rx_q->cur_rx;
5276 
5277 		if (priv->extend_desc)
5278 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5279 		else
5280 			np = rx_q->dma_rx + next_entry;
5281 
5282 		prefetch(np);
5283 
5284 		/* Ensure a valid XSK buffer before proceeding */
5285 		if (!buf->xdp)
5286 			break;
5287 
5288 		if (priv->extend_desc)
5289 			stmmac_rx_extended_status(priv, &priv->xstats,
5290 						  rx_q->dma_erx + entry);
5291 		if (unlikely(status == discard_frame)) {
5292 			xsk_buff_free(buf->xdp);
5293 			buf->xdp = NULL;
5294 			dirty++;
5295 			error = 1;
5296 			if (!priv->hwts_rx_en)
5297 				rx_errors++;
5298 		}
5299 
5300 		if (unlikely(error && (status & rx_not_ls)))
5301 			goto read_again;
5302 		if (unlikely(error)) {
5303 			count++;
5304 			continue;
5305 		}
5306 
5307 		/* XSK pool expects RX frames to be mapped 1:1 to XSK buffers */
5308 		if (likely(status & rx_not_ls)) {
5309 			xsk_buff_free(buf->xdp);
5310 			buf->xdp = NULL;
5311 			dirty++;
5312 			count++;
5313 			goto read_again;
5314 		}
5315 
5316 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5317 		ctx->priv = priv;
5318 		ctx->desc = p;
5319 		ctx->ndesc = np;
5320 
5321 		/* XDP ZC frames only support the primary buffer for now */
5322 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5323 		len += buf1_len;
5324 
5325 		/* ACS is disabled; strip manually. */
5326 		if (likely(!(status & rx_not_ls))) {
5327 			buf1_len -= ETH_FCS_LEN;
5328 			len -= ETH_FCS_LEN;
5329 		}
5330 
5331 		/* RX buffer is good and fits into an XSK pool buffer */
5332 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5333 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5334 
5335 		prog = READ_ONCE(priv->xdp_prog);
5336 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5337 
5338 		switch (res) {
5339 		case STMMAC_XDP_PASS:
5340 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5341 			xsk_buff_free(buf->xdp);
5342 			break;
5343 		case STMMAC_XDP_CONSUMED:
5344 			xsk_buff_free(buf->xdp);
5345 			rx_dropped++;
5346 			break;
5347 		case STMMAC_XDP_TX:
5348 		case STMMAC_XDP_REDIRECT:
5349 			xdp_status |= res;
5350 			break;
5351 		}
5352 
5353 		buf->xdp = NULL;
5354 		dirty++;
5355 		count++;
5356 	}
5357 
5358 	if (status & rx_not_ls) {
5359 		rx_q->state_saved = true;
5360 		rx_q->state.error = error;
5361 		rx_q->state.len = len;
5362 	}
5363 
5364 	stmmac_finalize_xdp_rx(priv, xdp_status);
5365 
5366 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5367 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5368 	u64_stats_update_end(&rxq_stats->napi_syncp);
5369 
5370 	priv->xstats.rx_dropped += rx_dropped;
5371 	priv->xstats.rx_errors += rx_errors;
5372 
5373 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5374 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5375 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5376 		else
5377 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5378 
5379 		return (int)count;
5380 	}
5381 
5382 	return failure ? limit : (int)count;
5383 }
5384 
5385 /**
5386  * stmmac_rx - manage the receive process
5387  * @priv: driver private structure
5388  * @limit: NAPI budget
5389  * @queue: RX queue index.
5390  * Description: this is the function called by the NAPI poll method.
5391  * It gets all the frames inside the ring.
5392  */
5393 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5394 {
5395 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5396 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5397 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5398 	struct stmmac_channel *ch = &priv->channel[queue];
5399 	unsigned int count = 0, error = 0, len = 0;
5400 	int status = 0, coe = priv->hw->rx_csum;
5401 	unsigned int next_entry = rx_q->cur_rx;
5402 	enum dma_data_direction dma_dir;
5403 	unsigned int desc_size;
5404 	struct sk_buff *skb = NULL;
5405 	struct stmmac_xdp_buff ctx;
5406 	int xdp_status = 0;
5407 	int buf_sz;
5408 
5409 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5410 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5411 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5412 
5413 	if (netif_msg_rx_status(priv)) {
5414 		void *rx_head;
5415 
5416 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5417 		if (priv->extend_desc) {
5418 			rx_head = (void *)rx_q->dma_erx;
5419 			desc_size = sizeof(struct dma_extended_desc);
5420 		} else {
5421 			rx_head = (void *)rx_q->dma_rx;
5422 			desc_size = sizeof(struct dma_desc);
5423 		}
5424 
5425 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5426 				    rx_q->dma_rx_phy, desc_size);
5427 	}
5428 	while (count < limit) {
5429 		unsigned int buf1_len = 0, buf2_len = 0;
5430 		enum pkt_hash_types hash_type;
5431 		struct stmmac_rx_buffer *buf;
5432 		struct dma_desc *np, *p;
5433 		int entry;
5434 		u32 hash;
5435 
5436 		if (!count && rx_q->state_saved) {
5437 			skb = rx_q->state.skb;
5438 			error = rx_q->state.error;
5439 			len = rx_q->state.len;
5440 		} else {
5441 			rx_q->state_saved = false;
5442 			skb = NULL;
5443 			error = 0;
5444 			len = 0;
5445 		}
5446 
5447 read_again:
5448 		if (count >= limit)
5449 			break;
5450 
5451 		buf1_len = 0;
5452 		buf2_len = 0;
5453 		entry = next_entry;
5454 		buf = &rx_q->buf_pool[entry];
5455 
5456 		if (priv->extend_desc)
5457 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5458 		else
5459 			p = rx_q->dma_rx + entry;
5460 
5461 		/* read the status of the incoming frame */
5462 		status = stmmac_rx_status(priv, &priv->xstats, p);
5463 		/* check if managed by the DMA otherwise go ahead */
5464 		if (unlikely(status & dma_own))
5465 			break;
5466 
5467 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5468 						priv->dma_conf.dma_rx_size);
5469 		next_entry = rx_q->cur_rx;
5470 
5471 		if (priv->extend_desc)
5472 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5473 		else
5474 			np = rx_q->dma_rx + next_entry;
5475 
5476 		prefetch(np);
5477 
5478 		if (priv->extend_desc)
5479 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5480 		if (unlikely(status == discard_frame)) {
5481 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5482 			buf->page = NULL;
5483 			error = 1;
5484 			if (!priv->hwts_rx_en)
5485 				rx_errors++;
5486 		}
5487 
5488 		if (unlikely(error && (status & rx_not_ls)))
5489 			goto read_again;
5490 		if (unlikely(error)) {
5491 			dev_kfree_skb(skb);
5492 			skb = NULL;
5493 			count++;
5494 			continue;
5495 		}
5496 
5497 		/* Buffer is good. Go on. */
5498 
5499 		prefetch(page_address(buf->page) + buf->page_offset);
5500 		if (buf->sec_page)
5501 			prefetch(page_address(buf->sec_page));
5502 
5503 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5504 		len += buf1_len;
5505 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5506 		len += buf2_len;
5507 
5508 		/* ACS is disabled; strip manually. */
5509 		if (likely(!(status & rx_not_ls))) {
5510 			if (buf2_len) {
5511 				buf2_len -= ETH_FCS_LEN;
5512 				len -= ETH_FCS_LEN;
5513 			} else if (buf1_len) {
5514 				buf1_len -= ETH_FCS_LEN;
5515 				len -= ETH_FCS_LEN;
5516 			}
5517 		}
5518 
5519 		if (!skb) {
5520 			unsigned int pre_len, sync_len;
5521 
5522 			dma_sync_single_for_cpu(priv->device, buf->addr,
5523 						buf1_len, dma_dir);
5524 
5525 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5526 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5527 					 buf->page_offset, buf1_len, true);
5528 
5529 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5530 				  buf->page_offset;
5531 
5532 			ctx.priv = priv;
5533 			ctx.desc = p;
5534 			ctx.ndesc = np;
5535 
5536 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5537 			/* Due to xdp_adjust_tail: the DMA sync for_device
5538 			 * must cover the maximum length the CPU touched.
5539 			 */
5540 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5541 				   buf->page_offset;
5542 			sync_len = max(sync_len, pre_len);
5543 
5544 			/* Handle verdicts other than XDP_PASS */
5545 			if (IS_ERR(skb)) {
5546 				unsigned int xdp_res = -PTR_ERR(skb);
5547 
5548 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5549 					page_pool_put_page(rx_q->page_pool,
5550 							   virt_to_head_page(ctx.xdp.data),
5551 							   sync_len, true);
5552 					buf->page = NULL;
5553 					rx_dropped++;
5554 
5555 					/* Clear skb, which was used to carry
5556 					 * the XDP verdict status.
5557 					 */
5558 					skb = NULL;
5559 
5560 					if (unlikely((status & rx_not_ls)))
5561 						goto read_again;
5562 
5563 					count++;
5564 					continue;
5565 				} else if (xdp_res & (STMMAC_XDP_TX |
5566 						      STMMAC_XDP_REDIRECT)) {
5567 					xdp_status |= xdp_res;
5568 					buf->page = NULL;
5569 					skb = NULL;
5570 					count++;
5571 					continue;
5572 				}
5573 			}
5574 		}
5575 
5576 		if (!skb) {
5577 			/* XDP program may expand or reduce tail */
5578 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5579 
5580 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5581 			if (!skb) {
5582 				rx_dropped++;
5583 				count++;
5584 				goto drain_data;
5585 			}
5586 
5587 			/* XDP program may adjust header */
5588 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5589 			skb_put(skb, buf1_len);
5590 
5591 			/* Data payload copied into SKB, page ready for recycle */
5592 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5593 			buf->page = NULL;
5594 		} else if (buf1_len) {
5595 			dma_sync_single_for_cpu(priv->device, buf->addr,
5596 						buf1_len, dma_dir);
5597 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5598 					buf->page, buf->page_offset, buf1_len,
5599 					priv->dma_conf.dma_buf_sz);
5600 
5601 			/* Data payload appended into SKB */
5602 			skb_mark_for_recycle(skb);
5603 			buf->page = NULL;
5604 		}
5605 
5606 		if (buf2_len) {
5607 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5608 						buf2_len, dma_dir);
5609 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5610 					buf->sec_page, 0, buf2_len,
5611 					priv->dma_conf.dma_buf_sz);
5612 
5613 			/* Data payload appended into SKB */
5614 			skb_mark_for_recycle(skb);
5615 			buf->sec_page = NULL;
5616 		}
5617 
5618 drain_data:
5619 		if (likely(status & rx_not_ls))
5620 			goto read_again;
5621 		if (!skb)
5622 			continue;
5623 
5624 		/* Got entire packet into SKB. Finish it. */
5625 
5626 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5627 
5628 		if (priv->hw->hw_vlan_en)
5629 			/* MAC level stripping. */
5630 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5631 		else
5632 			/* Driver level stripping. */
5633 			stmmac_rx_vlan(priv->dev, skb);
5634 
5635 		skb->protocol = eth_type_trans(skb, priv->dev);
5636 
5637 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5638 			skb_checksum_none_assert(skb);
5639 		else
5640 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5641 
5642 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5643 			skb_set_hash(skb, hash, hash_type);
5644 
5645 		skb_record_rx_queue(skb, queue);
5646 		napi_gro_receive(&ch->rx_napi, skb);
5647 		skb = NULL;
5648 
5649 		rx_packets++;
5650 		rx_bytes += len;
5651 		count++;
5652 	}
5653 
5654 	if (status & rx_not_ls || skb) {
5655 		rx_q->state_saved = true;
5656 		rx_q->state.skb = skb;
5657 		rx_q->state.error = error;
5658 		rx_q->state.len = len;
5659 	}
5660 
5661 	stmmac_finalize_xdp_rx(priv, xdp_status);
5662 
5663 	stmmac_rx_refill(priv, queue);
5664 
5665 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5666 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5667 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5668 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5669 	u64_stats_update_end(&rxq_stats->napi_syncp);
5670 
5671 	priv->xstats.rx_dropped += rx_dropped;
5672 	priv->xstats.rx_errors += rx_errors;
5673 
5674 	return count;
5675 }
5676 
5677 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5678 {
5679 	struct stmmac_channel *ch =
5680 		container_of(napi, struct stmmac_channel, rx_napi);
5681 	struct stmmac_priv *priv = ch->priv_data;
5682 	struct stmmac_rxq_stats *rxq_stats;
5683 	u32 chan = ch->index;
5684 	int work_done;
5685 
5686 	rxq_stats = &priv->xstats.rxq_stats[chan];
5687 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5688 	u64_stats_inc(&rxq_stats->napi.poll);
5689 	u64_stats_update_end(&rxq_stats->napi_syncp);
5690 
5691 	work_done = stmmac_rx(priv, budget, chan);
5692 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5693 		unsigned long flags;
5694 
5695 		spin_lock_irqsave(&ch->lock, flags);
5696 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5697 		spin_unlock_irqrestore(&ch->lock, flags);
5698 	}
5699 
5700 	return work_done;
5701 }
5702 
5703 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5704 {
5705 	struct stmmac_channel *ch =
5706 		container_of(napi, struct stmmac_channel, tx_napi);
5707 	struct stmmac_priv *priv = ch->priv_data;
5708 	struct stmmac_txq_stats *txq_stats;
5709 	bool pending_packets = false;
5710 	u32 chan = ch->index;
5711 	int work_done;
5712 
5713 	txq_stats = &priv->xstats.txq_stats[chan];
5714 	u64_stats_update_begin(&txq_stats->napi_syncp);
5715 	u64_stats_inc(&txq_stats->napi.poll);
5716 	u64_stats_update_end(&txq_stats->napi_syncp);
5717 
5718 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5719 	work_done = min(work_done, budget);
5720 
5721 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5722 		unsigned long flags;
5723 
5724 		spin_lock_irqsave(&ch->lock, flags);
5725 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5726 		spin_unlock_irqrestore(&ch->lock, flags);
5727 	}
5728 
5729 	/* TX still has packets to handle; check if we need to arm the TX timer */
5730 	if (pending_packets)
5731 		stmmac_tx_timer_arm(priv, chan);
5732 
5733 	return work_done;
5734 }
5735 
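/**
 * stmmac_napi_poll_rxtx - NAPI poll method for XDP zero-copy channels
 * @napi: NAPI instance
 * @budget: NAPI budget
 * Description: service TX completions and the zero-copy RX path on the
 * same channel. The channel DMA interrupts are only re-enabled once both
 * directions have completed their work within the budget.
 */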
5736 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5737 {
5738 	struct stmmac_channel *ch =
5739 		container_of(napi, struct stmmac_channel, rxtx_napi);
5740 	struct stmmac_priv *priv = ch->priv_data;
5741 	bool tx_pending_packets = false;
5742 	int rx_done, tx_done, rxtx_done;
5743 	struct stmmac_rxq_stats *rxq_stats;
5744 	struct stmmac_txq_stats *txq_stats;
5745 	u32 chan = ch->index;
5746 
5747 	rxq_stats = &priv->xstats.rxq_stats[chan];
5748 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5749 	u64_stats_inc(&rxq_stats->napi.poll);
5750 	u64_stats_update_end(&rxq_stats->napi_syncp);
5751 
5752 	txq_stats = &priv->xstats.txq_stats[chan];
5753 	u64_stats_update_begin(&txq_stats->napi_syncp);
5754 	u64_stats_inc(&txq_stats->napi.poll);
5755 	u64_stats_update_end(&txq_stats->napi_syncp);
5756 
5757 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5758 	tx_done = min(tx_done, budget);
5759 
5760 	rx_done = stmmac_rx_zc(priv, budget, chan);
5761 
5762 	rxtx_done = max(tx_done, rx_done);
5763 
5764 	/* If either TX or RX work is not complete, return budget
5765 	 * and keep polling
5766 	 */
5767 	if (rxtx_done >= budget)
5768 		return budget;
5769 
5770 	/* all work done, exit the polling mode */
5771 	if (napi_complete_done(napi, rxtx_done)) {
5772 		unsigned long flags;
5773 
5774 		spin_lock_irqsave(&ch->lock, flags);
5775 		/* Both RX and TX work are complete,
5776 		 * so enable both RX & TX IRQs.
5777 		 */
5778 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5779 		spin_unlock_irqrestore(&ch->lock, flags);
5780 	}
5781 
5782 	/* TX still has packets to handle; check if we need to arm the TX timer */
5783 	if (tx_pending_packets)
5784 		stmmac_tx_timer_arm(priv, chan);
5785 
5786 	return min(rxtx_done, budget - 1);
5787 }
5788 
5789 /**
5790  *  stmmac_tx_timeout
5791  *  @dev : Pointer to net device structure
5792  *  @txqueue: the index of the hanging transmit queue
5793  *  Description: this function is called when a packet transmission fails to
5794  *   complete within a reasonable time. The driver will mark the error in the
5795  *   netdev structure and arrange for the device to be reset to a sane state
5796  *   in order to transmit a new packet.
5797  */
5798 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5799 {
5800 	struct stmmac_priv *priv = netdev_priv(dev);
5801 
5802 	stmmac_global_err(priv);
5803 }
5804 
5805 /**
5806  *  stmmac_set_rx_mode - entry point for multicast addressing
5807  *  @dev : pointer to the device structure
5808  *  Description:
5809  *  This function is a driver entry point which gets called by the kernel
5810  *  whenever multicast addresses must be enabled/disabled.
5811  *  Return value:
5812  *  void.
5813  */
5814 static void stmmac_set_rx_mode(struct net_device *dev)
5815 {
5816 	struct stmmac_priv *priv = netdev_priv(dev);
5817 
5818 	stmmac_set_filter(priv, priv->hw, dev);
5819 }
5820 
5821 /**
5822  *  stmmac_change_mtu - entry point to change MTU size for the device.
5823  *  @dev : device pointer.
5824  *  @new_mtu : the new MTU size for the device.
5825  *  Description: the Maximum Transmission Unit (MTU) is used by the network
5826  *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
5827  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5828  *  Return value:
5829  *  0 on success and an appropriate negative error code as defined in
5830  *  errno.h on failure.
5831  */
5832 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5833 {
5834 	struct stmmac_priv *priv = netdev_priv(dev);
5835 	int txfifosz = priv->plat->tx_fifo_size;
5836 	struct stmmac_dma_conf *dma_conf;
5837 	const int mtu = new_mtu;
5838 	int ret;
5839 
5840 	if (txfifosz == 0)
5841 		txfifosz = priv->dma_cap.tx_fifo_size;
5842 
5843 	txfifosz /= priv->plat->tx_queues_to_use;
5844 
5845 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5846 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5847 		return -EINVAL;
5848 	}
5849 
5850 	new_mtu = STMMAC_ALIGN(new_mtu);
5851 
5852 	/* If this condition is true, the FIFO is too small or the MTU too large */
5853 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5854 		return -EINVAL;
5855 
5856 	if (netif_running(dev)) {
5857 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5858 		/* Try to allocate the new DMA conf with the new mtu */
5859 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5860 		if (IS_ERR(dma_conf)) {
5861 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5862 				   mtu);
5863 			return PTR_ERR(dma_conf);
5864 		}
5865 
5866 		stmmac_release(dev);
5867 
5868 		ret = __stmmac_open(dev, dma_conf);
5869 		if (ret) {
5870 			free_dma_desc_resources(priv, dma_conf);
5871 			kfree(dma_conf);
5872 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5873 			return ret;
5874 		}
5875 
5876 		kfree(dma_conf);
5877 
5878 		stmmac_set_rx_mode(dev);
5879 	}
5880 
5881 	WRITE_ONCE(dev->mtu, mtu);
5882 	netdev_update_features(dev);
5883 
5884 	return 0;
5885 }
5886 
5887 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5888 					     netdev_features_t features)
5889 {
5890 	struct stmmac_priv *priv = netdev_priv(dev);
5891 
5892 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5893 		features &= ~NETIF_F_RXCSUM;
5894 
5895 	if (!priv->plat->tx_coe)
5896 		features &= ~NETIF_F_CSUM_MASK;
5897 
5898 	/* Some GMAC devices have buggy Jumbo frame support that
5899 	 * requires the TX COE to be disabled for oversized frames
5900 	 * (due to limited buffer sizes). In this case we disable
5901 	 * the TX checksum insertion in the TDES and do not use SF.
5902 	 */
5903 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5904 		features &= ~NETIF_F_CSUM_MASK;
5905 
5906 	/* Disable TSO if requested by ethtool */
5907 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5908 		if (features & NETIF_F_TSO)
5909 			priv->tso = true;
5910 		else
5911 			priv->tso = false;
5912 	}
5913 
5914 	return features;
5915 }
5916 
5917 static int stmmac_set_features(struct net_device *netdev,
5918 			       netdev_features_t features)
5919 {
5920 	struct stmmac_priv *priv = netdev_priv(netdev);
5921 
5922 	/* Keep the COE type when checksum offload is supported */
5923 	if (features & NETIF_F_RXCSUM)
5924 		priv->hw->rx_csum = priv->plat->rx_coe;
5925 	else
5926 		priv->hw->rx_csum = 0;
5927 	/* No check needed because rx_coe has already been set and will be
5928 	 * fixed up in case of an issue.
5929 	 */
5930 	stmmac_rx_ipc(priv, priv->hw);
5931 
5932 	if (priv->sph_cap) {
5933 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5934 		u32 chan;
5935 
5936 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5937 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5938 	}
5939 
5940 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5941 		priv->hw->hw_vlan_en = true;
5942 	else
5943 		priv->hw->hw_vlan_en = false;
5944 
5945 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5946 
5947 	return 0;
5948 }
5949 
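/**
 * stmmac_common_interrupt - handle MAC-level (non-DMA) interrupt sources
 * @priv: driver private structure
 * Description: service the interrupt sources shared by all queues:
 * wake-up events, EST and FPE status, LPI entry/exit, per-queue MTL
 * status, PCS link changes and timestamp events.
 */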
5950 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5951 {
5952 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5953 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5954 	u32 queues_count;
5955 	u32 queue;
5956 	bool xmac;
5957 
5958 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5959 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5960 
5961 	if (priv->irq_wake)
5962 		pm_wakeup_event(priv->device, 0);
5963 
5964 	if (priv->dma_cap.estsel)
5965 		stmmac_est_irq_status(priv, priv, priv->dev,
5966 				      &priv->xstats, tx_cnt);
5967 
5968 	if (stmmac_fpe_supported(priv))
5969 		stmmac_fpe_irq_status(priv);
5970 
5971 	/* To handle the GMAC's own interrupts */
5972 	if ((priv->plat->has_gmac) || xmac) {
5973 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5974 
5975 		if (unlikely(status)) {
5976 			/* For LPI we need to save the tx status */
5977 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5978 				priv->tx_path_in_lpi_mode = true;
5979 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5980 				priv->tx_path_in_lpi_mode = false;
5981 		}
5982 
5983 		for (queue = 0; queue < queues_count; queue++)
5984 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
5985 
5986 		/* PCS link status */
5987 		if (priv->hw->pcs &&
5988 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5989 			if (priv->xstats.pcs_link)
5990 				netif_carrier_on(priv->dev);
5991 			else
5992 				netif_carrier_off(priv->dev);
5993 		}
5994 
5995 		stmmac_timestamp_interrupt(priv, priv);
5996 	}
5997 }
5998 
5999 /**
6000  *  stmmac_interrupt - main ISR
6001  *  @irq: interrupt number.
6002  *  @dev_id: to pass the net device pointer.
6003  *  Description: this is the main driver interrupt service routine.
6004  *  It can call:
6005  *  o DMA service routine (to manage incoming frame reception and transmission
6006  *    status)
6007  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6008  *    interrupts.
6009  */
6010 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6011 {
6012 	struct net_device *dev = (struct net_device *)dev_id;
6013 	struct stmmac_priv *priv = netdev_priv(dev);
6014 
6015 	/* Check if adapter is up */
6016 	if (test_bit(STMMAC_DOWN, &priv->state))
6017 		return IRQ_HANDLED;
6018 
6019 	/* Check ASP error if it isn't delivered via an individual IRQ */
6020 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6021 		return IRQ_HANDLED;
6022 
6023 	/* To handle Common interrupts */
6024 	stmmac_common_interrupt(priv);
6025 
6026 	/* To handle DMA interrupts */
6027 	stmmac_dma_interrupt(priv);
6028 
6029 	return IRQ_HANDLED;
6030 }
6031 
6032 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6033 {
6034 	struct net_device *dev = (struct net_device *)dev_id;
6035 	struct stmmac_priv *priv = netdev_priv(dev);
6036 
6037 	/* Check if adapter is up */
6038 	if (test_bit(STMMAC_DOWN, &priv->state))
6039 		return IRQ_HANDLED;
6040 
6041 	/* To handle Common interrupts */
6042 	stmmac_common_interrupt(priv);
6043 
6044 	return IRQ_HANDLED;
6045 }
6046 
6047 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6048 {
6049 	struct net_device *dev = (struct net_device *)dev_id;
6050 	struct stmmac_priv *priv = netdev_priv(dev);
6051 
6052 	/* Check if adapter is up */
6053 	if (test_bit(STMMAC_DOWN, &priv->state))
6054 		return IRQ_HANDLED;
6055 
6056 	/* Check if a fatal error happened */
6057 	stmmac_safety_feat_interrupt(priv);
6058 
6059 	return IRQ_HANDLED;
6060 }
6061 
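/**
 * stmmac_msi_intr_tx - per-channel TX interrupt handler (MSI vectors)
 * @irq: interrupt number
 * @data: pointer to the TX queue structure
 * Description: check the DMA status and schedule the TX NAPI for this
 * channel, bumping the DMA threshold or resetting the channel on hard
 * TX errors.
 */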
6062 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6063 {
6064 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6065 	struct stmmac_dma_conf *dma_conf;
6066 	int chan = tx_q->queue_index;
6067 	struct stmmac_priv *priv;
6068 	int status;
6069 
6070 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6071 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6072 
6073 	/* Check if adapter is up */
6074 	if (test_bit(STMMAC_DOWN, &priv->state))
6075 		return IRQ_HANDLED;
6076 
6077 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6078 
6079 	if (unlikely(status & tx_hard_error_bump_tc)) {
6080 		/* Try to bump up the dma threshold on this failure */
6081 		stmmac_bump_dma_threshold(priv, chan);
6082 	} else if (unlikely(status == tx_hard_error)) {
6083 		stmmac_tx_err(priv, chan);
6084 	}
6085 
6086 	return IRQ_HANDLED;
6087 }
6088 
6089 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6090 {
6091 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6092 	struct stmmac_dma_conf *dma_conf;
6093 	int chan = rx_q->queue_index;
6094 	struct stmmac_priv *priv;
6095 
6096 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6097 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6098 
6099 	/* Check if adapter is up */
6100 	if (test_bit(STMMAC_DOWN, &priv->state))
6101 		return IRQ_HANDLED;
6102 
6103 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6104 
6105 	return IRQ_HANDLED;
6106 }
6107 
6108 /**
6109  *  stmmac_ioctl - Entry point for the Ioctl
6110  *  @dev: Device pointer.
6111  *  @rq: An IOCTL-specific structure that can contain a pointer to
6112  *  a proprietary structure used to pass information to the driver.
6113  *  @cmd: IOCTL command
6114  *  Description:
6115  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6116  */
6117 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6118 {
6119 	struct stmmac_priv *priv = netdev_priv(dev);
6120 	int ret = -EOPNOTSUPP;
6121 
6122 	if (!netif_running(dev))
6123 		return -EINVAL;
6124 
6125 	switch (cmd) {
6126 	case SIOCGMIIPHY:
6127 	case SIOCGMIIREG:
6128 	case SIOCSMIIREG:
6129 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6130 		break;
6131 	case SIOCSHWTSTAMP:
6132 		ret = stmmac_hwtstamp_set(dev, rq);
6133 		break;
6134 	case SIOCGHWTSTAMP:
6135 		ret = stmmac_hwtstamp_get(dev, rq);
6136 		break;
6137 	default:
6138 		break;
6139 	}
6140 
6141 	return ret;
6142 }
6143 
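/**
 * stmmac_setup_tc_block_cb - flow block callback for TC classifier offload
 * @type: classifier type
 * @type_data: classifier data
 * @cb_priv: driver private structure
 * Description: offload cls_u32 and flower rules to the hardware. All
 * queues are temporarily disabled while the filter configuration is
 * updated.
 */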
6144 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6145 				    void *cb_priv)
6146 {
6147 	struct stmmac_priv *priv = cb_priv;
6148 	int ret = -EOPNOTSUPP;
6149 
6150 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6151 		return ret;
6152 
6153 	__stmmac_disable_all_queues(priv);
6154 
6155 	switch (type) {
6156 	case TC_SETUP_CLSU32:
6157 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6158 		break;
6159 	case TC_SETUP_CLSFLOWER:
6160 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6161 		break;
6162 	default:
6163 		break;
6164 	}
6165 
6166 	stmmac_enable_all_queues(priv);
6167 	return ret;
6168 }
6169 
6170 static LIST_HEAD(stmmac_block_cb_list);
6171 
6172 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6173 			   void *type_data)
6174 {
6175 	struct stmmac_priv *priv = netdev_priv(ndev);
6176 
6177 	switch (type) {
6178 	case TC_QUERY_CAPS:
6179 		return stmmac_tc_query_caps(priv, priv, type_data);
6180 	case TC_SETUP_QDISC_MQPRIO:
6181 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6182 	case TC_SETUP_BLOCK:
6183 		return flow_block_cb_setup_simple(type_data,
6184 						  &stmmac_block_cb_list,
6185 						  stmmac_setup_tc_block_cb,
6186 						  priv, priv, true);
6187 	case TC_SETUP_QDISC_CBS:
6188 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6189 	case TC_SETUP_QDISC_TAPRIO:
6190 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6191 	case TC_SETUP_QDISC_ETF:
6192 		return stmmac_tc_setup_etf(priv, priv, type_data);
6193 	default:
6194 		return -EOPNOTSUPP;
6195 	}
6196 }
6197 
6198 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6199 			       struct net_device *sb_dev)
6200 {
6201 	int gso = skb_shinfo(skb)->gso_type;
6202 
6203 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6204 		/*
6205 		 * There is no way to determine the number of TSO/USO
6206 		 * capable queues. Let's always use queue 0, because if
6207 		 * TSO/USO is supported then at least this one will be
6208 		 * capable.
6209 		 */
6210 		return 0;
6211 	}
6212 
6213 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6214 }
6215 
6216 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6217 {
6218 	struct stmmac_priv *priv = netdev_priv(ndev);
6219 	int ret = 0;
6220 
6221 	ret = pm_runtime_resume_and_get(priv->device);
6222 	if (ret < 0)
6223 		return ret;
6224 
6225 	ret = eth_mac_addr(ndev, addr);
6226 	if (ret)
6227 		goto set_mac_error;
6228 
6229 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6230 
6231 set_mac_error:
6232 	pm_runtime_put(priv->device);
6233 
6234 	return ret;
6235 }
6236 
6237 #ifdef CONFIG_DEBUG_FS
6238 static struct dentry *stmmac_fs_dir;
6239 
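/* Dump a descriptor ring (basic or extended) to a debugfs seq_file, one
 * descriptor per line together with its DMA address.
 */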
6240 static void sysfs_display_ring(void *head, int size, int extend_desc,
6241 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6242 {
6243 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6244 	struct dma_desc *p = (struct dma_desc *)head;
6245 	unsigned int desc_size;
6246 	dma_addr_t dma_addr;
6247 	int i;
6248 
6249 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6250 	for (i = 0; i < size; i++) {
6251 		dma_addr = dma_phy_addr + i * desc_size;
6252 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6253 				i, &dma_addr,
6254 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6255 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6256 		if (extend_desc)
6257 			p = &(++ep)->basic;
6258 		else
6259 			p++;
6260 	}
6261 }
6262 
6263 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6264 {
6265 	struct net_device *dev = seq->private;
6266 	struct stmmac_priv *priv = netdev_priv(dev);
6267 	u32 rx_count = priv->plat->rx_queues_to_use;
6268 	u32 tx_count = priv->plat->tx_queues_to_use;
6269 	u32 queue;
6270 
6271 	if ((dev->flags & IFF_UP) == 0)
6272 		return 0;
6273 
6274 	for (queue = 0; queue < rx_count; queue++) {
6275 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6276 
6277 		seq_printf(seq, "RX Queue %d:\n", queue);
6278 
6279 		if (priv->extend_desc) {
6280 			seq_printf(seq, "Extended descriptor ring:\n");
6281 			sysfs_display_ring((void *)rx_q->dma_erx,
6282 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6283 		} else {
6284 			seq_printf(seq, "Descriptor ring:\n");
6285 			sysfs_display_ring((void *)rx_q->dma_rx,
6286 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6287 		}
6288 	}
6289 
6290 	for (queue = 0; queue < tx_count; queue++) {
6291 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6292 
6293 		seq_printf(seq, "TX Queue %d:\n", queue);
6294 
6295 		if (priv->extend_desc) {
6296 			seq_printf(seq, "Extended descriptor ring:\n");
6297 			sysfs_display_ring((void *)tx_q->dma_etx,
6298 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6299 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6300 			seq_printf(seq, "Descriptor ring:\n");
6301 			sysfs_display_ring((void *)tx_q->dma_tx,
6302 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6303 		}
6304 	}
6305 
6306 	return 0;
6307 }
6308 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6309 
6310 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6311 {
6312 	static const char * const dwxgmac_timestamp_source[] = {
6313 		"None",
6314 		"Internal",
6315 		"External",
6316 		"Both",
6317 	};
6318 	static const char * const dwxgmac_safety_feature_desc[] = {
6319 		"No",
6320 		"All Safety Features with ECC and Parity",
6321 		"All Safety Features without ECC or Parity",
6322 		"All Safety Features with Parity Only",
6323 		"ECC Only",
6324 		"UNDEFINED",
6325 		"UNDEFINED",
6326 		"UNDEFINED",
6327 	};
6328 	struct net_device *dev = seq->private;
6329 	struct stmmac_priv *priv = netdev_priv(dev);
6330 
6331 	if (!priv->hw_cap_support) {
6332 		seq_printf(seq, "DMA HW features not supported\n");
6333 		return 0;
6334 	}
6335 
6336 	seq_printf(seq, "==============================\n");
6337 	seq_printf(seq, "\tDMA HW features\n");
6338 	seq_printf(seq, "==============================\n");
6339 
6340 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6341 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6342 	seq_printf(seq, "\t1000 Mbps: %s\n",
6343 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6344 	seq_printf(seq, "\tHalf duplex: %s\n",
6345 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6346 	if (priv->plat->has_xgmac) {
6347 		seq_printf(seq,
6348 			   "\tNumber of Additional MAC address registers: %d\n",
6349 			   priv->dma_cap.multi_addr);
6350 	} else {
6351 		seq_printf(seq, "\tHash Filter: %s\n",
6352 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6353 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6354 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6355 	}
6356 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6357 		   (priv->dma_cap.pcs) ? "Y" : "N");
6358 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6359 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6360 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6361 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6362 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6363 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6364 	seq_printf(seq, "\tRMON module: %s\n",
6365 		   (priv->dma_cap.rmon) ? "Y" : "N");
6366 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6367 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6368 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6369 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6370 	if (priv->plat->has_xgmac)
6371 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6372 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6373 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6374 		   (priv->dma_cap.eee) ? "Y" : "N");
6375 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6376 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6377 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6378 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6379 	    priv->plat->has_xgmac) {
6380 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6381 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6382 	} else {
6383 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6384 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6385 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6386 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6387 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6388 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6389 	}
6390 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6391 		   priv->dma_cap.number_rx_channel);
6392 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6393 		   priv->dma_cap.number_tx_channel);
6394 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6395 		   priv->dma_cap.number_rx_queues);
6396 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6397 		   priv->dma_cap.number_tx_queues);
6398 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6399 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6400 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6401 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6402 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6403 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6404 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6405 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6406 		   priv->dma_cap.pps_out_num);
6407 	seq_printf(seq, "\tSafety Features: %s\n",
6408 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6409 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6410 		   priv->dma_cap.frpsel ? "Y" : "N");
6411 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6412 		   priv->dma_cap.host_dma_width);
6413 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6414 		   priv->dma_cap.rssen ? "Y" : "N");
6415 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6416 		   priv->dma_cap.vlhash ? "Y" : "N");
6417 	seq_printf(seq, "\tSplit Header: %s\n",
6418 		   priv->dma_cap.sphen ? "Y" : "N");
6419 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6420 		   priv->dma_cap.vlins ? "Y" : "N");
6421 	seq_printf(seq, "\tDouble VLAN: %s\n",
6422 		   priv->dma_cap.dvlan ? "Y" : "N");
6423 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6424 		   priv->dma_cap.l3l4fnum);
6425 	seq_printf(seq, "\tARP Offloading: %s\n",
6426 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6427 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6428 		   priv->dma_cap.estsel ? "Y" : "N");
6429 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6430 		   priv->dma_cap.fpesel ? "Y" : "N");
6431 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6432 		   priv->dma_cap.tbssel ? "Y" : "N");
6433 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6434 		   priv->dma_cap.tbs_ch_num);
6435 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6436 		   priv->dma_cap.sgfsel ? "Y" : "N");
6437 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6438 		   BIT(priv->dma_cap.ttsfd) >> 1);
6439 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6440 		   priv->dma_cap.numtc);
6441 	seq_printf(seq, "\tDCB Feature: %s\n",
6442 		   priv->dma_cap.dcben ? "Y" : "N");
6443 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6444 		   priv->dma_cap.advthword ? "Y" : "N");
6445 	seq_printf(seq, "\tPTP Offload: %s\n",
6446 		   priv->dma_cap.ptoen ? "Y" : "N");
6447 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6448 		   priv->dma_cap.osten ? "Y" : "N");
6449 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6450 		   priv->dma_cap.pfcen ? "Y" : "N");
6451 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6452 		   BIT(priv->dma_cap.frpes) << 6);
6453 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6454 		   BIT(priv->dma_cap.frpbs) << 6);
6455 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6456 		   priv->dma_cap.frppipe_num);
6457 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6458 		   priv->dma_cap.nrvf_num ?
6459 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6460 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6461 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6462 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6463 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6464 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6465 		   priv->dma_cap.cbtisel ? "Y" : "N");
6466 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6467 		   priv->dma_cap.aux_snapshot_n);
6468 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6469 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6470 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6471 		   priv->dma_cap.edma ? "Y" : "N");
6472 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6473 		   priv->dma_cap.ediffc ? "Y" : "N");
6474 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6475 		   priv->dma_cap.vxn ? "Y" : "N");
6476 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6477 		   priv->dma_cap.dbgmem ? "Y" : "N");
6478 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6479 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6480 	return 0;
6481 }
6482 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6483 
6484 /* Use network device events to rename debugfs file entries.
6485  */
6486 static int stmmac_device_event(struct notifier_block *unused,
6487 			       unsigned long event, void *ptr)
6488 {
6489 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6490 	struct stmmac_priv *priv = netdev_priv(dev);
6491 
6492 	if (dev->netdev_ops != &stmmac_netdev_ops)
6493 		goto done;
6494 
6495 	switch (event) {
6496 	case NETDEV_CHANGENAME:
6497 		if (priv->dbgfs_dir)
6498 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6499 							 priv->dbgfs_dir,
6500 							 stmmac_fs_dir,
6501 							 dev->name);
6502 		break;
6503 	}
6504 done:
6505 	return NOTIFY_DONE;
6506 }
6507 
6508 static struct notifier_block stmmac_notifier = {
6509 	.notifier_call = stmmac_device_event,
6510 };
6511 
6512 static void stmmac_init_fs(struct net_device *dev)
6513 {
6514 	struct stmmac_priv *priv = netdev_priv(dev);
6515 
6516 	rtnl_lock();
6517 
6518 	/* Create per netdev entries */
6519 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6520 
6521 	/* Entry to report DMA RX/TX rings */
6522 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6523 			    &stmmac_rings_status_fops);
6524 
6525 	/* Entry to report the DMA HW features */
6526 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6527 			    &stmmac_dma_cap_fops);
6528 
6529 	rtnl_unlock();
6530 }
6531 
6532 static void stmmac_exit_fs(struct net_device *dev)
6533 {
6534 	struct stmmac_priv *priv = netdev_priv(dev);
6535 
6536 	debugfs_remove_recursive(priv->dbgfs_dir);
6537 }
6538 #endif /* CONFIG_DEBUG_FS */
6539 
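/* Compute a little-endian CRC32 (polynomial 0xEDB88320) over the 12 VID
 * bits of a VLAN tag; stmmac_vlan_update() folds the result into a 4-bit
 * index into the hardware VLAN hash filter.
 */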
6540 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6541 {
6542 	unsigned char *data = (unsigned char *)&vid_le;
6543 	unsigned char data_byte = 0;
6544 	u32 crc = ~0x0;
6545 	u32 temp = 0;
6546 	int i, bits;
6547 
6548 	bits = get_bitmask_order(VLAN_VID_MASK);
6549 	for (i = 0; i < bits; i++) {
6550 		if ((i % 8) == 0)
6551 			data_byte = data[i / 8];
6552 
6553 		temp = ((crc & 1) ^ data_byte) & 1;
6554 		crc >>= 1;
6555 		data_byte >>= 1;
6556 
6557 		if (temp)
6558 			crc ^= 0xedb88320;
6559 	}
6560 
6561 	return crc;
6562 }
6563 
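/* Rebuild the VLAN filter state from the active_vlans bitmap: a 16-bin
 * hash filter when the hardware supports VLAN hashing, otherwise a single
 * perfect-match entry (VID 0 always passes the filter).
 */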
6564 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6565 {
6566 	u32 crc, hash = 0;
6567 	u16 pmatch = 0;
6568 	int count = 0;
6569 	u16 vid = 0;
6570 
6571 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6572 		__le16 vid_le = cpu_to_le16(vid);
6573 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6574 		hash |= (1 << crc);
6575 		count++;
6576 	}
6577 
6578 	if (!priv->dma_cap.vlhash) {
6579 		if (count > 2) /* VID = 0 always passes filter */
6580 			return -EOPNOTSUPP;
6581 
6582 		pmatch = vid;
6583 		hash = 0;
6584 	}
6585 
6586 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6587 }
6588 
6589 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6590 {
6591 	struct stmmac_priv *priv = netdev_priv(ndev);
6592 	bool is_double = false;
6593 	int ret;
6594 
6595 	ret = pm_runtime_resume_and_get(priv->device);
6596 	if (ret < 0)
6597 		return ret;
6598 
6599 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6600 		is_double = true;
6601 
6602 	set_bit(vid, priv->active_vlans);
6603 	ret = stmmac_vlan_update(priv, is_double);
6604 	if (ret) {
6605 		clear_bit(vid, priv->active_vlans);
6606 		goto err_pm_put;
6607 	}
6608 
6609 	if (priv->hw->num_vlan) {
6610 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6611 		if (ret)
6612 			goto err_pm_put;
6613 	}
6614 err_pm_put:
6615 	pm_runtime_put(priv->device);
6616 
6617 	return ret;
6618 }
6619 
6620 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6621 {
6622 	struct stmmac_priv *priv = netdev_priv(ndev);
6623 	bool is_double = false;
6624 	int ret;
6625 
6626 	ret = pm_runtime_resume_and_get(priv->device);
6627 	if (ret < 0)
6628 		return ret;
6629 
6630 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6631 		is_double = true;
6632 
6633 	clear_bit(vid, priv->active_vlans);
6634 
6635 	if (priv->hw->num_vlan) {
6636 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6637 		if (ret)
6638 			goto del_vlan_error;
6639 	}
6640 
6641 	ret = stmmac_vlan_update(priv, is_double);
6642 
6643 del_vlan_error:
6644 	pm_runtime_put(priv->device);
6645 
6646 	return ret;
6647 }
6648 
6649 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6650 {
6651 	struct stmmac_priv *priv = netdev_priv(dev);
6652 
6653 	switch (bpf->command) {
6654 	case XDP_SETUP_PROG:
6655 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6656 	case XDP_SETUP_XSK_POOL:
6657 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6658 					     bpf->xsk.queue_id);
6659 	default:
6660 		return -EOPNOTSUPP;
6661 	}
6662 }
6663 
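/**
 * stmmac_xdp_xmit - ndo_xdp_xmit callback, transmit redirected XDP frames
 * @dev: net device pointer
 * @num_frames: number of frames to transmit
 * @frames: array of XDP frames
 * @flags: XDP transmit flags
 * Description: map the current CPU to an XDP TX queue and queue the
 * frames on it under the netdev TX lock, flushing the descriptors and
 * arming the TX timer when XDP_XMIT_FLUSH is set. Returns the number of
 * frames actually queued.
 */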
6664 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6665 			   struct xdp_frame **frames, u32 flags)
6666 {
6667 	struct stmmac_priv *priv = netdev_priv(dev);
6668 	int cpu = smp_processor_id();
6669 	struct netdev_queue *nq;
6670 	int i, nxmit = 0;
6671 	int queue;
6672 
6673 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6674 		return -ENETDOWN;
6675 
6676 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6677 		return -EINVAL;
6678 
6679 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6680 	nq = netdev_get_tx_queue(priv->dev, queue);
6681 
6682 	__netif_tx_lock(nq, cpu);
6683 	/* Avoid TX time-outs, as we are sharing the queue with the slow path */
6684 	txq_trans_cond_update(nq);
6685 
6686 	for (i = 0; i < num_frames; i++) {
6687 		int res;
6688 
6689 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6690 		if (res == STMMAC_XDP_CONSUMED)
6691 			break;
6692 
6693 		nxmit++;
6694 	}
6695 
6696 	if (flags & XDP_XMIT_FLUSH) {
6697 		stmmac_flush_tx_descriptors(priv, queue);
6698 		stmmac_tx_timer_arm(priv, queue);
6699 	}
6700 
6701 	__netif_tx_unlock(nq);
6702 
6703 	return nxmit;
6704 }
6705 
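/* The queue enable/disable helpers below stop or restart a single RX or
 * TX queue at runtime: the DMA channel is halted, its descriptor
 * resources are released and reallocated, and the channel is
 * reprogrammed and restarted. They are typically used when rebinding a
 * queue to an XSK buffer pool.
 */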
6706 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6707 {
6708 	struct stmmac_channel *ch = &priv->channel[queue];
6709 	unsigned long flags;
6710 
6711 	spin_lock_irqsave(&ch->lock, flags);
6712 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6713 	spin_unlock_irqrestore(&ch->lock, flags);
6714 
6715 	stmmac_stop_rx_dma(priv, queue);
6716 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6717 }
6718 
6719 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6720 {
6721 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6722 	struct stmmac_channel *ch = &priv->channel[queue];
6723 	unsigned long flags;
6724 	u32 buf_size;
6725 	int ret;
6726 
6727 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6728 	if (ret) {
6729 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6730 		return;
6731 	}
6732 
6733 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6734 	if (ret) {
6735 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6736 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6737 		return;
6738 	}
6739 
6740 	stmmac_reset_rx_queue(priv, queue);
6741 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6742 
6743 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6744 			    rx_q->dma_rx_phy, rx_q->queue_index);
6745 
6746 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6747 			     sizeof(struct dma_desc));
6748 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6749 			       rx_q->rx_tail_addr, rx_q->queue_index);
6750 
6751 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6752 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6753 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6754 				      buf_size,
6755 				      rx_q->queue_index);
6756 	} else {
6757 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6758 				      priv->dma_conf.dma_buf_sz,
6759 				      rx_q->queue_index);
6760 	}
6761 
6762 	stmmac_start_rx_dma(priv, queue);
6763 
6764 	spin_lock_irqsave(&ch->lock, flags);
6765 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6766 	spin_unlock_irqrestore(&ch->lock, flags);
6767 }
6768 
6769 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6770 {
6771 	struct stmmac_channel *ch = &priv->channel[queue];
6772 	unsigned long flags;
6773 
6774 	spin_lock_irqsave(&ch->lock, flags);
6775 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6776 	spin_unlock_irqrestore(&ch->lock, flags);
6777 
6778 	stmmac_stop_tx_dma(priv, queue);
6779 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6780 }
6781 
6782 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6783 {
6784 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6785 	struct stmmac_channel *ch = &priv->channel[queue];
6786 	unsigned long flags;
6787 	int ret;
6788 
6789 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6790 	if (ret) {
6791 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6792 		return;
6793 	}
6794 
6795 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6796 	if (ret) {
6797 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6798 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6799 		return;
6800 	}
6801 
6802 	stmmac_reset_tx_queue(priv, queue);
6803 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6804 
6805 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6806 			    tx_q->dma_tx_phy, tx_q->queue_index);
6807 
6808 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6809 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6810 
6811 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6812 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6813 			       tx_q->tx_tail_addr, tx_q->queue_index);
6814 
6815 	stmmac_start_tx_dma(priv, queue);
6816 
6817 	spin_lock_irqsave(&ch->lock, flags);
6818 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6819 	spin_unlock_irqrestore(&ch->lock, flags);
6820 }
6821 
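/**
 * stmmac_xdp_release - tear down the data path for an XDP reconfiguration
 * @dev: net device pointer
 * Description: quiesce TX, disable NAPI, cancel the TX timers, free the
 * IRQs, stop the DMA channels, release the descriptor resources and turn
 * the MAC off, leaving the interface ready to be brought back up by
 * stmmac_xdp_open().
 */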
6822 void stmmac_xdp_release(struct net_device *dev)
6823 {
6824 	struct stmmac_priv *priv = netdev_priv(dev);
6825 	u32 chan;
6826 
6827 	/* Ensure tx function is not running */
6828 	netif_tx_disable(dev);
6829 
6830 	/* Disable NAPI process */
6831 	stmmac_disable_all_queues(priv);
6832 
6833 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6834 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6835 
6836 	/* Free the IRQ lines */
6837 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6838 
6839 	/* Stop TX/RX DMA channels */
6840 	stmmac_stop_all_dma(priv);
6841 
6842 	/* Release and free the Rx/Tx resources */
6843 	free_dma_desc_resources(priv, &priv->dma_conf);
6844 
6845 	/* Disable the MAC Rx/Tx */
6846 	stmmac_mac_set(priv, priv->ioaddr, false);
6847 
6848 	/* set trans_start so we don't get spurious
6849 	 * watchdogs during reset
6850 	 */
6851 	netif_trans_update(dev);
6852 	netif_carrier_off(dev);
6853 }
6854 
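/**
 * stmmac_xdp_open - bring the data path back up after an XDP change
 * @dev: net device pointer
 * Description: reallocate and reinitialize the DMA descriptor rings
 * (honouring any attached XSK pools when sizing the RX buffers), program
 * the DMA channels, enable the MAC, request the IRQs and restart NAPI
 * and the TX queues.
 */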
6855 int stmmac_xdp_open(struct net_device *dev)
6856 {
6857 	struct stmmac_priv *priv = netdev_priv(dev);
6858 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6859 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6860 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6861 	struct stmmac_rx_queue *rx_q;
6862 	struct stmmac_tx_queue *tx_q;
6863 	u32 buf_size;
6864 	bool sph_en;
6865 	u32 chan;
6866 	int ret;
6867 
6868 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6869 	if (ret < 0) {
6870 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6871 			   __func__);
6872 		goto dma_desc_error;
6873 	}
6874 
6875 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6876 	if (ret < 0) {
6877 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6878 			   __func__);
6879 		goto init_error;
6880 	}
6881 
6882 	stmmac_reset_queues_param(priv);
6883 
6884 	/* DMA CSR Channel configuration */
6885 	for (chan = 0; chan < dma_csr_ch; chan++) {
6886 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6887 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6888 	}
6889 
6890 	/* Adjust Split header */
6891 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6892 
6893 	/* DMA RX Channel Configuration */
6894 	for (chan = 0; chan < rx_cnt; chan++) {
6895 		rx_q = &priv->dma_conf.rx_queue[chan];
6896 
6897 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6898 				    rx_q->dma_rx_phy, chan);
6899 
6900 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6901 				     (rx_q->buf_alloc_num *
6902 				      sizeof(struct dma_desc));
6903 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6904 				       rx_q->rx_tail_addr, chan);
6905 
6906 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6907 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6908 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6909 					      buf_size,
6910 					      rx_q->queue_index);
6911 		} else {
6912 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6913 					      priv->dma_conf.dma_buf_sz,
6914 					      rx_q->queue_index);
6915 		}
6916 
6917 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6918 	}
6919 
6920 	/* DMA TX Channel Configuration */
6921 	for (chan = 0; chan < tx_cnt; chan++) {
6922 		tx_q = &priv->dma_conf.tx_queue[chan];
6923 
6924 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6925 				    tx_q->dma_tx_phy, chan);
6926 
6927 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6928 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6929 				       tx_q->tx_tail_addr, chan);
6930 
6931 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6932 		tx_q->txtimer.function = stmmac_tx_timer;
6933 	}
6934 
6935 	/* Enable the MAC Rx/Tx */
6936 	stmmac_mac_set(priv, priv->ioaddr, true);
6937 
6938 	/* Start Rx & Tx DMA Channels */
6939 	stmmac_start_all_dma(priv);
6940 
6941 	ret = stmmac_request_irq(dev);
6942 	if (ret)
6943 		goto irq_error;
6944 
6945 	/* Enable NAPI process */
6946 	stmmac_enable_all_queues(priv);
6947 	netif_carrier_on(dev);
6948 	netif_tx_start_all_queues(dev);
6949 	stmmac_enable_all_dma_irq(priv);
6950 
6951 	return 0;
6952 
6953 irq_error:
6954 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6955 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6956 
6957 	stmmac_hw_teardown(dev);
6958 init_error:
6959 	free_dma_desc_resources(priv, &priv->dma_conf);
6960 dma_desc_error:
6961 	return ret;
6962 }
6963 
6964 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6965 {
6966 	struct stmmac_priv *priv = netdev_priv(dev);
6967 	struct stmmac_rx_queue *rx_q;
6968 	struct stmmac_tx_queue *tx_q;
6969 	struct stmmac_channel *ch;
6970 
6971 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6972 	    !netif_carrier_ok(priv->dev))
6973 		return -ENETDOWN;
6974 
6975 	if (!stmmac_xdp_is_enabled(priv))
6976 		return -EINVAL;
6977 
6978 	if (queue >= priv->plat->rx_queues_to_use ||
6979 	    queue >= priv->plat->tx_queues_to_use)
6980 		return -EINVAL;
6981 
6982 	rx_q = &priv->dma_conf.rx_queue[queue];
6983 	tx_q = &priv->dma_conf.tx_queue[queue];
6984 	ch = &priv->channel[queue];
6985 
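	/* At least one direction of this queue must have an XSK pool attached */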
6986 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6987 		return -EINVAL;
6988 
6989 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6990 		/* EQoS does not have a per-DMA channel SW interrupt,
6991 		 * so we schedule NAPI straight away.
6992 		 */
6993 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6994 			__napi_schedule(&ch->rxtx_napi);
6995 	}
6996 
6997 	return 0;
6998 }
6999 
7000 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7001 {
7002 	struct stmmac_priv *priv = netdev_priv(dev);
7003 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7004 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7005 	unsigned int start;
7006 	int q;
7007 
7008 	for (q = 0; q < tx_cnt; q++) {
7009 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7010 		u64 tx_packets;
7011 		u64 tx_bytes;
7012 
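		/* tx_bytes and tx_packets are maintained under separate
		 * seqcounts (q_syncp and napi_syncp), so read each counter
		 * under its own sync point.
		 */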
7013 		do {
7014 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7015 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7016 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7017 		do {
7018 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7019 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7020 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7021 
7022 		stats->tx_packets += tx_packets;
7023 		stats->tx_bytes += tx_bytes;
7024 	}
7025 
7026 	for (q = 0; q < rx_cnt; q++) {
7027 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7028 		u64 rx_packets;
7029 		u64 rx_bytes;
7030 
7031 		do {
7032 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7033 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7034 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7035 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7036 
7037 		stats->rx_packets += rx_packets;
7038 		stats->rx_bytes += rx_bytes;
7039 	}
7040 
7041 	stats->rx_dropped = priv->xstats.rx_dropped;
7042 	stats->rx_errors = priv->xstats.rx_errors;
7043 	stats->tx_dropped = priv->xstats.tx_dropped;
7044 	stats->tx_errors = priv->xstats.tx_errors;
7045 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7046 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7047 	stats->rx_length_errors = priv->xstats.rx_length;
7048 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7049 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7050 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7051 }
7052 
7053 static const struct net_device_ops stmmac_netdev_ops = {
7054 	.ndo_open = stmmac_open,
7055 	.ndo_start_xmit = stmmac_xmit,
7056 	.ndo_stop = stmmac_release,
7057 	.ndo_change_mtu = stmmac_change_mtu,
7058 	.ndo_fix_features = stmmac_fix_features,
7059 	.ndo_set_features = stmmac_set_features,
7060 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7061 	.ndo_tx_timeout = stmmac_tx_timeout,
7062 	.ndo_eth_ioctl = stmmac_ioctl,
7063 	.ndo_get_stats64 = stmmac_get_stats64,
7064 	.ndo_setup_tc = stmmac_setup_tc,
7065 	.ndo_select_queue = stmmac_select_queue,
7066 	.ndo_set_mac_address = stmmac_set_mac_address,
7067 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7068 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7069 	.ndo_bpf = stmmac_bpf,
7070 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7071 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7072 };
7073 
7074 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7075 {
7076 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7077 		return;
7078 	if (test_bit(STMMAC_DOWN, &priv->state))
7079 		return;
7080 
7081 	netdev_err(priv->dev, "Reset adapter.\n");
7082 
7083 	rtnl_lock();
7084 	netif_trans_update(priv->dev);
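	/* Wait for any reset already in progress to complete */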
7085 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7086 		usleep_range(1000, 2000);
7087 
7088 	set_bit(STMMAC_DOWN, &priv->state);
7089 	dev_close(priv->dev);
7090 	dev_open(priv->dev, NULL);
7091 	clear_bit(STMMAC_DOWN, &priv->state);
7092 	clear_bit(STMMAC_RESETING, &priv->state);
7093 	rtnl_unlock();
7094 }
7095 
7096 static void stmmac_service_task(struct work_struct *work)
7097 {
7098 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7099 			service_task);
7100 
7101 	stmmac_reset_subtask(priv);
7102 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7103 }
7104 
7105 /**
7106  *  stmmac_hw_init - Init the MAC device
7107  *  @priv: driver private structure
7108  *  Description: this function configures the MAC device according to
7109  *  platform parameters or the HW capability register. It prepares the
7110  *  driver to use either ring or chain mode and to set up either enhanced or
7111  *  normal descriptors.
7112  */
7113 static int stmmac_hw_init(struct stmmac_priv *priv)
7114 {
7115 	int ret;
7116 
7117 	/* dwmac-sun8i only works in chain mode */
7118 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7119 		chain_mode = 1;
7120 	priv->chain_mode = chain_mode;
7121 
7122 	/* Initialize HW Interface */
7123 	ret = stmmac_hwif_init(priv);
7124 	if (ret)
7125 		return ret;
7126 
7127 	/* Get the HW capability register (on GMAC cores newer than 3.50a) */
7128 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7129 	if (priv->hw_cap_support) {
7130 		dev_info(priv->device, "DMA HW capability register supported\n");
7131 
7132 		/* Some GMAC/DMA configuration fields passed in from the
7133 		 * platform (e.g. enh_desc, tx_coe) can be overridden with
7134 		 * the values from the HW capability register, when that
7135 		 * register is supported.
7136 		 */
7137 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7138 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7139 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7140 		priv->hw->pmt = priv->plat->pmt;
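		/* The number of hash filter bins works out to 32 << hash_tb_sz */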
7141 		if (priv->dma_cap.hash_tb_sz) {
7142 			priv->hw->multicast_filter_bins =
7143 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7144 			priv->hw->mcast_bits_log2 =
7145 					ilog2(priv->hw->multicast_filter_bins);
7146 		}
7147 
7148 		/* TXCOE doesn't work in thresh DMA mode */
7149 		if (priv->plat->force_thresh_dma_mode)
7150 			priv->plat->tx_coe = 0;
7151 		else
7152 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7153 
7154 		/* For GMAC4, rx_coe comes from the HW capability register. */
7155 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7156 
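		/* Prefer Type 2 COE when the HW reports both types */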
7157 		if (priv->dma_cap.rx_coe_type2)
7158 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7159 		else if (priv->dma_cap.rx_coe_type1)
7160 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7161 
7162 	} else {
7163 		dev_info(priv->device, "No HW DMA feature register supported\n");
7164 	}
7165 
7166 	if (priv->plat->rx_coe) {
7167 		priv->hw->rx_csum = priv->plat->rx_coe;
7168 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7169 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7170 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7171 	}
7172 	if (priv->plat->tx_coe)
7173 		dev_info(priv->device, "TX Checksum insertion supported\n");
7174 
7175 	if (priv->plat->pmt) {
7176 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7177 		device_set_wakeup_capable(priv->device, 1);
7178 	}
7179 
7180 	if (priv->dma_cap.tsoen)
7181 		dev_info(priv->device, "TSO supported\n");
7182 
7183 	priv->hw->vlan_fail_q_en =
7184 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7185 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7186 
7187 	/* Run HW quirks, if any */
7188 	if (priv->hwif_quirks) {
7189 		ret = priv->hwif_quirks(priv);
7190 		if (ret)
7191 			return ret;
7192 	}
7193 
7194 	/* The RX watchdog is available on cores newer than 3.40.
7195 	 * In some cases, for example on buggy HW, this feature has to be
7196 	 * disabled; this can be done by passing the riwt_off field from
7197 	 * the platform.
7198 	 */
7199 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7200 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7201 		priv->use_riwt = 1;
7202 		dev_info(priv->device,
7203 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7204 	}
7205 
7206 	return 0;
7207 }
7208 
7209 static void stmmac_napi_add(struct net_device *dev)
7210 {
7211 	struct stmmac_priv *priv = netdev_priv(dev);
7212 	u32 queue, maxq;
7213 
7214 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7215 
7216 	for (queue = 0; queue < maxq; queue++) {
7217 		struct stmmac_channel *ch = &priv->channel[queue];
7218 
7219 		ch->priv_data = priv;
7220 		ch->index = queue;
7221 		spin_lock_init(&ch->lock);
7222 
7223 		if (queue < priv->plat->rx_queues_to_use) {
7224 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7225 		}
7226 		if (queue < priv->plat->tx_queues_to_use) {
7227 			netif_napi_add_tx(dev, &ch->tx_napi,
7228 					  stmmac_napi_poll_tx);
7229 		}
7230 		if (queue < priv->plat->rx_queues_to_use &&
7231 		    queue < priv->plat->tx_queues_to_use) {
7232 			netif_napi_add(dev, &ch->rxtx_napi,
7233 				       stmmac_napi_poll_rxtx);
7234 		}
7235 	}
7236 }
7237 
7238 static void stmmac_napi_del(struct net_device *dev)
7239 {
7240 	struct stmmac_priv *priv = netdev_priv(dev);
7241 	u32 queue, maxq;
7242 
7243 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7244 
7245 	for (queue = 0; queue < maxq; queue++) {
7246 		struct stmmac_channel *ch = &priv->channel[queue];
7247 
7248 		if (queue < priv->plat->rx_queues_to_use)
7249 			netif_napi_del(&ch->rx_napi);
7250 		if (queue < priv->plat->tx_queues_to_use)
7251 			netif_napi_del(&ch->tx_napi);
7252 		if (queue < priv->plat->rx_queues_to_use &&
7253 		    queue < priv->plat->tx_queues_to_use) {
7254 			netif_napi_del(&ch->rxtx_napi);
7255 		}
7256 	}
7257 }
7258 
7259 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7260 {
7261 	struct stmmac_priv *priv = netdev_priv(dev);
7262 	int ret = 0, i;
7263 
7264 	if (netif_running(dev))
7265 		stmmac_release(dev);
7266 
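	/* The NAPI instances depend on the queue counts, so recreate them */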
7267 	stmmac_napi_del(dev);
7268 
7269 	priv->plat->rx_queues_to_use = rx_cnt;
7270 	priv->plat->tx_queues_to_use = tx_cnt;
7271 	if (!netif_is_rxfh_configured(dev))
7272 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7273 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7274 									rx_cnt);
7275 
7276 	stmmac_napi_add(dev);
7277 
7278 	if (netif_running(dev))
7279 		ret = stmmac_open(dev);
7280 
7281 	return ret;
7282 }
7283 
7284 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7285 {
7286 	struct stmmac_priv *priv = netdev_priv(dev);
7287 	int ret = 0;
7288 
7289 	if (netif_running(dev))
7290 		stmmac_release(dev);
7291 
7292 	priv->dma_conf.dma_rx_size = rx_size;
7293 	priv->dma_conf.dma_tx_size = tx_size;
7294 
7295 	if (netif_running(dev))
7296 		ret = stmmac_open(dev);
7297 
7298 	return ret;
7299 }
7300 
7301 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7302 {
7303 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7304 	struct dma_desc *desc_contains_ts = ctx->desc;
7305 	struct stmmac_priv *priv = ctx->priv;
7306 	struct dma_desc *ndesc = ctx->ndesc;
7307 	struct dma_desc *desc = ctx->desc;
7308 	u64 ns = 0;
7309 
7310 	if (!priv->hwts_rx_en)
7311 		return -ENODATA;
7312 
7313 	/* For GMAC4, the valid timestamp comes from the context (next) desc. */
7314 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7315 		desc_contains_ts = ndesc;
7316 
7317 	/* Check if timestamp is available */
7318 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7319 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
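		/* Compensate for the clock domain crossing (CDC) error */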
7320 		ns -= priv->plat->cdc_error_adj;
7321 		*timestamp = ns_to_ktime(ns);
7322 		return 0;
7323 	}
7324 
7325 	return -ENODATA;
7326 }
7327 
7328 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7329 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7330 };
7331 
7332 /**
7333  * stmmac_dvr_probe
7334  * @device: device pointer
7335  * @plat_dat: platform data pointer
7336  * @res: stmmac resource pointer
7337  * Description: this is the main probe function used to allocate the
7338  * network device (via alloc_etherdev) and set up the private structure.
7339  * Return:
7340  * 0 on success, a negative errno otherwise.
7341  */
7342 int stmmac_dvr_probe(struct device *device,
7343 		     struct plat_stmmacenet_data *plat_dat,
7344 		     struct stmmac_resources *res)
7345 {
7346 	struct net_device *ndev = NULL;
7347 	struct stmmac_priv *priv;
7348 	u32 rxq;
7349 	int i, ret = 0;
7350 
7351 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7352 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7353 	if (!ndev)
7354 		return -ENOMEM;
7355 
7356 	SET_NETDEV_DEV(ndev, device);
7357 
7358 	priv = netdev_priv(ndev);
7359 	priv->device = device;
7360 	priv->dev = ndev;
7361 
7362 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7363 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7364 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7365 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7366 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7367 	}
7368 
7369 	priv->xstats.pcpu_stats =
7370 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7371 	if (!priv->xstats.pcpu_stats)
7372 		return -ENOMEM;
7373 
7374 	stmmac_set_ethtool_ops(ndev);
7375 	priv->pause = pause;
7376 	priv->plat = plat_dat;
7377 	priv->ioaddr = res->addr;
7378 	priv->dev->base_addr = (unsigned long)res->addr;
7379 	priv->plat->dma_cfg->multi_msi_en =
7380 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7381 
7382 	priv->dev->irq = res->irq;
7383 	priv->wol_irq = res->wol_irq;
7384 	priv->lpi_irq = res->lpi_irq;
7385 	priv->sfty_irq = res->sfty_irq;
7386 	priv->sfty_ce_irq = res->sfty_ce_irq;
7387 	priv->sfty_ue_irq = res->sfty_ue_irq;
7388 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7389 		priv->rx_irq[i] = res->rx_irq[i];
7390 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7391 		priv->tx_irq[i] = res->tx_irq[i];
7392 
7393 	if (!is_zero_ether_addr(res->mac))
7394 		eth_hw_addr_set(priv->dev, res->mac);
7395 
7396 	dev_set_drvdata(device, priv->dev);
7397 
7398 	/* Verify driver arguments */
7399 	stmmac_verify_args();
7400 
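	/* Bitmap of queues that have an AF_XDP zero-copy pool attached */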
7401 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7402 	if (!priv->af_xdp_zc_qps)
7403 		return -ENOMEM;
7404 
7405 	/* Allocate workqueue */
7406 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7407 	if (!priv->wq) {
7408 		dev_err(priv->device, "failed to create workqueue\n");
7409 		ret = -ENOMEM;
7410 		goto error_wq_init;
7411 	}
7412 
7413 	INIT_WORK(&priv->service_task, stmmac_service_task);
7414 
7415 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7416 
7417 	/* Override with kernel parameters if supplied. XXX CRS XXX:
7418 	 * this needs to have multiple instances.
7419 	 */
7420 	if ((phyaddr >= 0) && (phyaddr <= 31))
7421 		priv->plat->phy_addr = phyaddr;
7422 
7423 	if (priv->plat->stmmac_rst) {
7424 		ret = reset_control_assert(priv->plat->stmmac_rst);
7425 		reset_control_deassert(priv->plat->stmmac_rst);
7426 		/* Some reset controllers provide only a reset callback
7427 		 * instead of an assert + deassert callback pair.
7428 		 */
7429 		if (ret == -ENOTSUPP)
7430 			reset_control_reset(priv->plat->stmmac_rst);
7431 	}
7432 
7433 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7434 	if (ret == -ENOTSUPP)
7435 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7436 			ERR_PTR(ret));
7437 
7438 	/* Wait a bit for the reset to take effect */
7439 	udelay(10);
7440 
7441 	/* Init MAC and get the capabilities */
7442 	ret = stmmac_hw_init(priv);
7443 	if (ret)
7444 		goto error_hw_init;
7445 
7446 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7447 	 */
7448 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7449 		priv->plat->dma_cfg->dche = false;
7450 
7451 	stmmac_check_ether_addr(priv);
7452 
7453 	ndev->netdev_ops = &stmmac_netdev_ops;
7454 
7455 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7456 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7457 
7458 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7459 			    NETIF_F_RXCSUM;
7460 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7461 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7462 
7463 	ret = stmmac_tc_init(priv, priv);
7464 	if (!ret)
7465 		ndev->hw_features |= NETIF_F_HW_TC;
7467 
7468 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7469 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7470 		if (priv->plat->has_gmac4)
7471 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7472 		priv->tso = true;
7473 		dev_info(priv->device, "TSO feature enabled\n");
7474 	}
7475 
7476 	if (priv->dma_cap.sphen &&
7477 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7478 		ndev->hw_features |= NETIF_F_GRO;
7479 		priv->sph_cap = true;
7480 		priv->sph = priv->sph_cap;
7481 		dev_info(priv->device, "SPH feature enabled\n");
7482 	}
7483 
7484 	/* Ideally our host DMA address width is the same as for the
7485 	 * device. However, it may differ and then we have to use our
7486 	 * host DMA width for allocation and the device DMA width for
7487 	 * register handling.
7488 	 */
7489 	if (priv->plat->host_dma_width)
7490 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7491 	else
7492 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7493 
7494 	if (priv->dma_cap.host_dma_width) {
7495 		ret = dma_set_mask_and_coherent(device,
7496 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7497 		if (!ret) {
7498 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7499 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7500 
7501 			/*
7502 			 * If more than 32 bits can be addressed, make sure to
7503 			 * enable enhanced addressing mode.
7504 			 */
7505 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7506 				priv->plat->dma_cfg->eame = true;
7507 		} else {
7508 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7509 			if (ret) {
7510 				dev_err(priv->device, "Failed to set DMA Mask\n");
7511 				goto error_hw_init;
7512 			}
7513 
7514 			priv->dma_cap.host_dma_width = 32;
7515 		}
7516 	}
7517 
7518 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7519 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7520 #ifdef STMMAC_VLAN_TAG_USED
7521 	/* Both mac100 and gmac support receive VLAN tag detection */
7522 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7523 	if (priv->plat->has_gmac4) {
7524 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7525 		priv->hw->hw_vlan_en = true;
7526 	}
7527 	if (priv->dma_cap.vlhash) {
7528 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7529 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7530 	}
7531 	if (priv->dma_cap.vlins) {
7532 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7533 		if (priv->dma_cap.dvlan)
7534 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7535 	}
7536 #endif
7537 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7538 
7539 	priv->xstats.threshold = tc;
7540 
7541 	/* Initialize RSS */
7542 	rxq = priv->plat->rx_queues_to_use;
7543 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7544 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7545 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7546 
7547 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7548 		ndev->features |= NETIF_F_RXHASH;
7549 
7550 	ndev->vlan_features |= ndev->features;
7551 
7552 	/* MTU range: 46 - hw-specific max */
7553 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7554 	if (priv->plat->has_xgmac)
7555 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7556 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7557 		ndev->max_mtu = JUMBO_LEN;
7558 	else
7559 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7560 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7561 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7562 	 */
7563 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7564 	    (priv->plat->maxmtu >= ndev->min_mtu))
7565 		ndev->max_mtu = priv->plat->maxmtu;
7566 	else if (priv->plat->maxmtu < ndev->min_mtu)
7567 		dev_warn(priv->device,
7568 			 "%s: warning: maxmtu having invalid value (%d)\n",
7569 			 __func__, priv->plat->maxmtu);
7570 
7571 	if (flow_ctrl)
7572 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7573 
7574 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7575 
7576 	/* Setup channels NAPI */
7577 	stmmac_napi_add(ndev);
7578 
7579 	mutex_init(&priv->lock);
7580 
7581 	stmmac_fpe_init(priv);
7582 
7583 	/* If a specific clk_csr value is passed from the platform, the
7584 	 * CSR Clock Range selection cannot be changed at run-time and is
7585 	 * fixed. Otherwise, the driver will try to set the MDC clock
7586 	 * dynamically according to the actual CSR clock input.
7588 	 */
7589 	if (priv->plat->clk_csr >= 0)
7590 		priv->clk_csr = priv->plat->clk_csr;
7591 	else
7592 		stmmac_clk_csr_set(priv);
7593 
7594 	stmmac_check_pcs_mode(priv);
7595 
7596 	pm_runtime_get_noresume(device);
7597 	pm_runtime_set_active(device);
7598 	if (!pm_runtime_enabled(device))
7599 		pm_runtime_enable(device);
7600 
7601 	ret = stmmac_mdio_register(ndev);
7602 	if (ret < 0) {
7603 		dev_err_probe(priv->device, ret,
7604 			      "MDIO bus (id: %d) registration failed\n",
7605 			      priv->plat->bus_id);
7606 		goto error_mdio_register;
7607 	}
7608 
7609 	if (priv->plat->speed_mode_2500)
7610 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7611 
7612 	ret = stmmac_pcs_setup(ndev);
7613 	if (ret)
7614 		goto error_pcs_setup;
7615 
7616 	ret = stmmac_phy_setup(priv);
7617 	if (ret) {
7618 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7619 		goto error_phy_setup;
7620 	}
7621 
7622 	ret = register_netdev(ndev);
7623 	if (ret) {
7624 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7625 			__func__, ret);
7626 		goto error_netdev_register;
7627 	}
7628 
7629 #ifdef CONFIG_DEBUG_FS
7630 	stmmac_init_fs(ndev);
7631 #endif
7632 
7633 	if (priv->plat->dump_debug_regs)
7634 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7635 
7636 	/* Let pm_runtime_put() disable the clocks.
7637 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7638 	 */
7639 	pm_runtime_put(device);
7640 
7641 	return ret;
7642 
7643 error_netdev_register:
7644 	phylink_destroy(priv->phylink);
7645 error_phy_setup:
7646 	stmmac_pcs_clean(ndev);
7647 error_pcs_setup:
7648 	stmmac_mdio_unregister(ndev);
7649 error_mdio_register:
7650 	stmmac_napi_del(ndev);
7651 error_hw_init:
7652 	destroy_workqueue(priv->wq);
7653 error_wq_init:
7654 	bitmap_free(priv->af_xdp_zc_qps);
7655 
7656 	return ret;
7657 }
7658 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7659 
7660 /**
7661  * stmmac_dvr_remove
7662  * @dev: device pointer
7663  * Description: this function resets the TX/RX processes, disables the MAC
7664  * RX/TX, changes the link status and releases the DMA descriptor rings.
7665  */
7666 void stmmac_dvr_remove(struct device *dev)
7667 {
7668 	struct net_device *ndev = dev_get_drvdata(dev);
7669 	struct stmmac_priv *priv = netdev_priv(ndev);
7670 
7671 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7672 
7673 	pm_runtime_get_sync(dev);
7674 
7675 	stmmac_stop_all_dma(priv);
7676 	stmmac_mac_set(priv, priv->ioaddr, false);
7677 	unregister_netdev(ndev);
7678 
7679 #ifdef CONFIG_DEBUG_FS
7680 	stmmac_exit_fs(ndev);
7681 #endif
7682 	phylink_destroy(priv->phylink);
7683 	if (priv->plat->stmmac_rst)
7684 		reset_control_assert(priv->plat->stmmac_rst);
7685 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7686 
7687 	stmmac_pcs_clean(ndev);
7688 	stmmac_mdio_unregister(ndev);
7689 
7690 	destroy_workqueue(priv->wq);
7691 	mutex_destroy(&priv->lock);
7692 	bitmap_free(priv->af_xdp_zc_qps);
7693 
7694 	pm_runtime_disable(dev);
7695 	pm_runtime_put_noidle(dev);
7696 }
7697 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7698 
7699 /**
7700  * stmmac_suspend - suspend callback
7701  * @dev: device pointer
7702  * Description: this is the function to suspend the device; it is called
7703  * by the platform driver to stop the network queues, program the PMT
7704  * register (for WoL) and clean up and release the driver resources.
7705  */
7706 int stmmac_suspend(struct device *dev)
7707 {
7708 	struct net_device *ndev = dev_get_drvdata(dev);
7709 	struct stmmac_priv *priv = netdev_priv(ndev);
7710 	u32 chan;
7711 
7712 	if (!ndev || !netif_running(ndev))
7713 		return 0;
7714 
7715 	mutex_lock(&priv->lock);
7716 
7717 	netif_device_detach(ndev);
7718 
7719 	stmmac_disable_all_queues(priv);
7720 
7721 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7722 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7723 
7724 	if (priv->eee_enabled) {
7725 		priv->tx_path_in_lpi_mode = false;
7726 		del_timer_sync(&priv->eee_ctrl_timer);
7727 	}
7728 
7729 	/* Stop TX/RX DMA */
7730 	stmmac_stop_all_dma(priv);
7731 
7732 	if (priv->plat->serdes_powerdown)
7733 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7734 
7735 	/* Enable Power down mode by programming the PMT regs */
7736 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7737 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7738 		priv->irq_wake = 1;
7739 	} else {
7740 		stmmac_mac_set(priv, priv->ioaddr, false);
7741 		pinctrl_pm_select_sleep_state(priv->device);
7742 	}
7743 
7744 	mutex_unlock(&priv->lock);
7745 
7746 	rtnl_lock();
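	/* Let phylink know whether the MAC itself handles Wake-on-LAN */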
7747 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7748 		phylink_suspend(priv->phylink, true);
7749 	} else {
7750 		if (device_may_wakeup(priv->device))
7751 			phylink_speed_down(priv->phylink, false);
7752 		phylink_suspend(priv->phylink, false);
7753 	}
7754 	rtnl_unlock();
7755 
7756 	if (stmmac_fpe_supported(priv))
7757 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7758 
7759 	priv->speed = SPEED_UNKNOWN;
7760 	return 0;
7761 }
7762 EXPORT_SYMBOL_GPL(stmmac_suspend);
7763 
7764 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7765 {
7766 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7767 
7768 	rx_q->cur_rx = 0;
7769 	rx_q->dirty_rx = 0;
7770 }
7771 
7772 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7773 {
7774 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7775 
7776 	tx_q->cur_tx = 0;
7777 	tx_q->dirty_tx = 0;
7778 	tx_q->mss = 0;
7779 
7780 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7781 }
7782 
7783 /**
7784  * stmmac_reset_queues_param - reset queue parameters
7785  * @priv: device pointer
7786  */
7787 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7788 {
7789 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7790 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7791 	u32 queue;
7792 
7793 	for (queue = 0; queue < rx_cnt; queue++)
7794 		stmmac_reset_rx_queue(priv, queue);
7795 
7796 	for (queue = 0; queue < tx_cnt; queue++)
7797 		stmmac_reset_tx_queue(priv, queue);
7798 }
7799 
7800 /**
7801  * stmmac_resume - resume callback
7802  * @dev: device pointer
7803  * Description: on resume, this function is invoked to set up the DMA and the
7804  * core in a usable state.
7805  */
7806 int stmmac_resume(struct device *dev)
7807 {
7808 	struct net_device *ndev = dev_get_drvdata(dev);
7809 	struct stmmac_priv *priv = netdev_priv(ndev);
7810 	int ret;
7811 
7812 	if (!netif_running(ndev))
7813 		return 0;
7814 
7815 	/* The Power Down bit in the PMT register is cleared automatically
7816 	 * as soon as a magic packet or a Wake-up frame is received.
7817 	 * Nevertheless, it is better to clear this bit manually because
7818 	 * it can cause problems when resuming from another device
7819 	 * (e.g. a serial console).
7820 	 */
7821 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7822 		mutex_lock(&priv->lock);
7823 		stmmac_pmt(priv, priv->hw, 0);
7824 		mutex_unlock(&priv->lock);
7825 		priv->irq_wake = 0;
7826 	} else {
7827 		pinctrl_pm_select_default_state(priv->device);
7828 		/* reset the phy so that it's ready */
7829 		if (priv->mii)
7830 			stmmac_mdio_reset(priv->mii);
7831 	}
7832 
7833 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7834 	    priv->plat->serdes_powerup) {
7835 		ret = priv->plat->serdes_powerup(ndev,
7836 						 priv->plat->bsp_priv);
7837 
7838 		if (ret < 0)
7839 			return ret;
7840 	}
7841 
7842 	rtnl_lock();
7843 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7844 		phylink_resume(priv->phylink);
7845 	} else {
7846 		phylink_resume(priv->phylink);
7847 		if (device_may_wakeup(priv->device))
7848 			phylink_speed_up(priv->phylink);
7849 	}
7850 	rtnl_unlock();
7851 
7852 	rtnl_lock();
7853 	mutex_lock(&priv->lock);
7854 
7855 	stmmac_reset_queues_param(priv);
7856 
7857 	stmmac_free_tx_skbufs(priv);
7858 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7859 
7860 	stmmac_hw_setup(ndev, false);
7861 	stmmac_init_coalesce(priv);
7862 	stmmac_set_rx_mode(ndev);
7863 
7864 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7865 
7866 	stmmac_enable_all_queues(priv);
7867 	stmmac_enable_all_dma_irq(priv);
7868 
7869 	mutex_unlock(&priv->lock);
7870 	rtnl_unlock();
7871 
7872 	netif_device_attach(ndev);
7873 
7874 	return 0;
7875 }
7876 EXPORT_SYMBOL_GPL(stmmac_resume);
7877 
7878 #ifndef MODULE
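/* Parse the built-in "stmmaceth=" command line, e.g.:
 *   stmmaceth=debug:16,phyaddr:1,watchdog:4000
 * Unrecognised options are ignored.
 */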
7879 static int __init stmmac_cmdline_opt(char *str)
7880 {
7881 	char *opt;
7882 
7883 	if (!str || !*str)
7884 		return 1;
7885 	while ((opt = strsep(&str, ",")) != NULL) {
7886 		if (!strncmp(opt, "debug:", 6)) {
7887 			if (kstrtoint(opt + 6, 0, &debug))
7888 				goto err;
7889 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7890 			if (kstrtoint(opt + 8, 0, &phyaddr))
7891 				goto err;
7892 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7893 			if (kstrtoint(opt + 7, 0, &buf_sz))
7894 				goto err;
7895 		} else if (!strncmp(opt, "tc:", 3)) {
7896 			if (kstrtoint(opt + 3, 0, &tc))
7897 				goto err;
7898 		} else if (!strncmp(opt, "watchdog:", 9)) {
7899 			if (kstrtoint(opt + 9, 0, &watchdog))
7900 				goto err;
7901 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7902 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7903 				goto err;
7904 		} else if (!strncmp(opt, "pause:", 6)) {
7905 			if (kstrtoint(opt + 6, 0, &pause))
7906 				goto err;
7907 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7908 			if (kstrtoint(opt + 10, 0, &eee_timer))
7909 				goto err;
7910 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7911 			if (kstrtoint(opt + 11, 0, &chain_mode))
7912 				goto err;
7913 		}
7914 	}
7915 	return 1;
7916 
7917 err:
7918 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7919 	return 1;
7920 }
7921 
7922 __setup("stmmaceth=", stmmac_cmdline_opt);
7923 #endif /* MODULE */
7924 
7925 static int __init stmmac_init(void)
7926 {
7927 #ifdef CONFIG_DEBUG_FS
7928 	/* Create debugfs main directory if it doesn't exist yet */
7929 	if (!stmmac_fs_dir)
7930 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7931 	register_netdevice_notifier(&stmmac_notifier);
7932 #endif
7933 
7934 	return 0;
7935 }
7936 
7937 static void __exit stmmac_exit(void)
7938 {
7939 #ifdef CONFIG_DEBUG_FS
7940 	unregister_netdevice_notifier(&stmmac_notifier);
7941 	debugfs_remove_recursive(stmmac_fs_dir);
7942 #endif
7943 }
7944 
7945 module_init(stmmac_init)
7946 module_exit(stmmac_exit)
7947 
7948 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7949 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7950 MODULE_LICENSE("GPL");
7951