1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
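/* Verdict flags for XDP frame handling in the RX path */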
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allow the user to force the use of the chain mode instead of the ring
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
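/**
 * stmmac_bus_clks_config - enable or disable the bus clocks
 * @priv: driver private structure
 * @enabled: true to enable the clocks, false to disable them
 * Description: enable/disable the stmmac and pclk bus clocks and, when
 * provided, call the platform specific clks_config() callback as well.
 * Return value: 0 on success, a negative error code otherwise.
 */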
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
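/**
 * __stmmac_disable_all_queues - Disable NAPI on all queues
 * @priv: driver private structure
 * Description: disable the RX, TX and, for XDP zero-copy queues, the
 * combined rxtx NAPI instances of every channel.
 */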
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
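/* Take the link down and schedule the service task to reset the device */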
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Otherwise, the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* Platform provided default clk_csr would be assumed valid
308 	 * for all other cases except for the below mentioned ones.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we cannot estimate the proper divider as the frequency of
311 	 * clk_csr_i is not known. So we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
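/**
 * stmmac_tx_avail - Get the number of available TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 */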
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
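/**
 * stmmac_lpi_entry_timer_config - configure the HW LPI entry timer
 * @priv: driver private structure
 * @en: enable/disable the hardware LPI entry timer
 * Description: when the HW timer is enabled the SW EEE timer is not used.
 */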
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function checks that all TX queues are idle and, if so,
407  * enters LPI mode. It is used when EEE is active.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function exits and disables EEE when the LPI state
433  * is active. It is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and we are not already in the LPI state,
452  *  then the MAC transmitter can be moved to the LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts the
468  *  related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Using PCS we cannot deal with the phy registers at this stage
475 	 * so we do not support extra features like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function will read the timestamp from the descriptor & pass it to the
534  * stack. It also performs some sanity checks.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function will read received packet's timestamp from the descriptor
577  * and pass it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL-specific structure that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing (TX)
614  *  and incoming (RX) packet timestamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate -ve integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.1AS, any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.1AS, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.1AS, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL-specific structure that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * It will be rerun after resuming from suspend, in which case the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* calculate the default addend value:
858 	 * the formula is:
859 	 * addend = (2^32)/freq_div_ratio;
860 	 * where, freq_div_ratio = 1e9ns/sec_inc
861 	 */
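	/* For example, if sec_inc is 40ns with a 50MHz clk_ptp_rate, then
	 * freq_div_ratio is 25MHz and the default addend becomes
	 * (25e6 << 32) / 50e6 = 0x80000000.
	 */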
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
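/* phylink callback: return the PCS instance (XPCS or Lynx) to use, if any */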
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
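/* When the link comes up and the FPE handshake is enabled, send a verify
 * mPacket; otherwise reset the local and link partner FPE states.
 */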
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
968 	} else {
969 		*lo_state = FPE_STATE_OFF;
970 		*lp_state = FPE_STATE_OFF;
971 	}
972 }
973 
974 static void stmmac_mac_link_down(struct phylink_config *config,
975 				 unsigned int mode, phy_interface_t interface)
976 {
977 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
978 
979 	stmmac_mac_set(priv, priv->ioaddr, false);
980 	priv->eee_active = false;
981 	priv->tx_lpi_enabled = false;
982 	priv->eee_enabled = stmmac_eee_init(priv);
983 	stmmac_set_eee_pls(priv, priv->hw, false);
984 
985 	if (priv->dma_cap.fpesel)
986 		stmmac_fpe_link_state_handle(priv, false);
987 }
988 
989 static void stmmac_mac_link_up(struct phylink_config *config,
990 			       struct phy_device *phy,
991 			       unsigned int mode, phy_interface_t interface,
992 			       int speed, int duplex,
993 			       bool tx_pause, bool rx_pause)
994 {
995 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
996 	u32 old_ctrl, ctrl;
997 
998 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
999 	    priv->plat->serdes_powerup)
1000 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1001 
1002 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1003 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1004 
1005 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1006 		switch (speed) {
1007 		case SPEED_10000:
1008 			ctrl |= priv->hw->link.xgmii.speed10000;
1009 			break;
1010 		case SPEED_5000:
1011 			ctrl |= priv->hw->link.xgmii.speed5000;
1012 			break;
1013 		case SPEED_2500:
1014 			ctrl |= priv->hw->link.xgmii.speed2500;
1015 			break;
1016 		default:
1017 			return;
1018 		}
1019 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1020 		switch (speed) {
1021 		case SPEED_100000:
1022 			ctrl |= priv->hw->link.xlgmii.speed100000;
1023 			break;
1024 		case SPEED_50000:
1025 			ctrl |= priv->hw->link.xlgmii.speed50000;
1026 			break;
1027 		case SPEED_40000:
1028 			ctrl |= priv->hw->link.xlgmii.speed40000;
1029 			break;
1030 		case SPEED_25000:
1031 			ctrl |= priv->hw->link.xlgmii.speed25000;
1032 			break;
1033 		case SPEED_10000:
1034 			ctrl |= priv->hw->link.xgmii.speed10000;
1035 			break;
1036 		case SPEED_2500:
1037 			ctrl |= priv->hw->link.speed2500;
1038 			break;
1039 		case SPEED_1000:
1040 			ctrl |= priv->hw->link.speed1000;
1041 			break;
1042 		default:
1043 			return;
1044 		}
1045 	} else {
1046 		switch (speed) {
1047 		case SPEED_2500:
1048 			ctrl |= priv->hw->link.speed2500;
1049 			break;
1050 		case SPEED_1000:
1051 			ctrl |= priv->hw->link.speed1000;
1052 			break;
1053 		case SPEED_100:
1054 			ctrl |= priv->hw->link.speed100;
1055 			break;
1056 		case SPEED_10:
1057 			ctrl |= priv->hw->link.speed10;
1058 			break;
1059 		default:
1060 			return;
1061 		}
1062 	}
1063 
1064 	priv->speed = speed;
1065 
1066 	if (priv->plat->fix_mac_speed)
1067 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1068 
1069 	if (!duplex)
1070 		ctrl &= ~priv->hw->link.duplex;
1071 	else
1072 		ctrl |= priv->hw->link.duplex;
1073 
1074 	/* Flow Control operation */
1075 	if (rx_pause && tx_pause)
1076 		priv->flow_ctrl = FLOW_AUTO;
1077 	else if (rx_pause && !tx_pause)
1078 		priv->flow_ctrl = FLOW_RX;
1079 	else if (!rx_pause && tx_pause)
1080 		priv->flow_ctrl = FLOW_TX;
1081 	else
1082 		priv->flow_ctrl = FLOW_OFF;
1083 
1084 	stmmac_mac_flow_ctrl(priv, duplex);
1085 
1086 	if (ctrl != old_ctrl)
1087 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1088 
1089 	stmmac_mac_set(priv, priv->ioaddr, true);
1090 	if (phy && priv->dma_cap.eee) {
1091 		priv->eee_active =
1092 			phy_init_eee(phy, !(priv->plat->flags &
1093 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1094 		priv->eee_enabled = stmmac_eee_init(priv);
1095 		priv->tx_lpi_enabled = priv->eee_enabled;
1096 		stmmac_set_eee_pls(priv, priv->hw, true);
1097 	}
1098 
1099 	if (priv->dma_cap.fpesel)
1100 		stmmac_fpe_link_state_handle(priv, true);
1101 
1102 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1103 		stmmac_hwtstamp_correct_latency(priv, priv);
1104 }
1105 
1106 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1107 	.mac_select_pcs = stmmac_mac_select_pcs,
1108 	.mac_config = stmmac_mac_config,
1109 	.mac_link_down = stmmac_mac_link_down,
1110 	.mac_link_up = stmmac_mac_link_up,
1111 };
1112 
1113 /**
1114  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1115  * @priv: driver private structure
1116  * Description: this is to verify if the HW supports the Physical Coding
1117  * Sublayer (PCS) interface, which can be used when the MAC is configured
1118  * for the TBI, RTBI, or SGMII PHY interface.
1119  */
1120 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1121 {
1122 	int interface = priv->plat->mac_interface;
1123 
1124 	if (priv->dma_cap.pcs) {
1125 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1129 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1130 			priv->hw->pcs = STMMAC_PCS_RGMII;
1131 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1132 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1133 			priv->hw->pcs = STMMAC_PCS_SGMII;
1134 		}
1135 	}
1136 }
1137 
1138 /**
1139  * stmmac_init_phy - PHY initialization
1140  * @dev: net device structure
1141  * Description: it initializes the driver's PHY state, and attaches the PHY
1142  * to the mac driver.
1143  *  Return value:
1144  *  0 on success
1145  */
1146 static int stmmac_init_phy(struct net_device *dev)
1147 {
1148 	struct stmmac_priv *priv = netdev_priv(dev);
1149 	struct fwnode_handle *phy_fwnode;
1150 	struct fwnode_handle *fwnode;
1151 	int ret;
1152 
1153 	if (!phylink_expects_phy(priv->phylink))
1154 		return 0;
1155 
1156 	fwnode = priv->plat->port_node;
1157 	if (!fwnode)
1158 		fwnode = dev_fwnode(priv->device);
1159 
1160 	if (fwnode)
1161 		phy_fwnode = fwnode_get_phy_node(fwnode);
1162 	else
1163 		phy_fwnode = NULL;
1164 
1165 	/* Some DT bindings do not set up the PHY handle. Let's try to
1166 	 * manually parse it.
1167 	 */
1168 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1169 		int addr = priv->plat->phy_addr;
1170 		struct phy_device *phydev;
1171 
1172 		if (addr < 0) {
1173 			netdev_err(priv->dev, "no phy found\n");
1174 			return -ENODEV;
1175 		}
1176 
1177 		phydev = mdiobus_get_phy(priv->mii, addr);
1178 		if (!phydev) {
1179 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1180 			return -ENODEV;
1181 		}
1182 
1183 		ret = phylink_connect_phy(priv->phylink, phydev);
1184 	} else {
1185 		fwnode_handle_put(phy_fwnode);
1186 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1187 	}
1188 
1189 	if (!priv->plat->pmt) {
1190 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1191 
1192 		phylink_ethtool_get_wol(priv->phylink, &wol);
1193 		device_set_wakeup_capable(priv->device, !!wol.supported);
1194 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1195 	}
1196 
1197 	return ret;
1198 }
1199 
1200 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1201 {
1202 	/* Half-Duplex can only work with single tx queue */
1203 	if (priv->plat->tx_queues_to_use > 1)
1204 		priv->phylink_config.mac_capabilities &=
1205 			~(MAC_10HD | MAC_100HD | MAC_1000HD);
1206 	else
1207 		priv->phylink_config.mac_capabilities |=
1208 			(MAC_10HD | MAC_100HD | MAC_1000HD);
1209 }
1210 
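/**
 * stmmac_phy_setup - create and configure the phylink instance
 * @priv: driver private structure
 * Description: fill in the phylink configuration (supported interfaces and
 * MAC capabilities) and create the phylink instance.
 * Return value: 0 on success, a negative error code otherwise.
 */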
1211 static int stmmac_phy_setup(struct stmmac_priv *priv)
1212 {
1213 	struct stmmac_mdio_bus_data *mdio_bus_data;
1214 	int mode = priv->plat->phy_interface;
1215 	struct fwnode_handle *fwnode;
1216 	struct phylink *phylink;
1217 	int max_speed;
1218 
1219 	priv->phylink_config.dev = &priv->dev->dev;
1220 	priv->phylink_config.type = PHYLINK_NETDEV;
1221 	priv->phylink_config.mac_managed_pm = true;
1222 
1223 	mdio_bus_data = priv->plat->mdio_bus_data;
1224 	if (mdio_bus_data)
1225 		priv->phylink_config.ovr_an_inband =
1226 			mdio_bus_data->xpcs_an_inband;
1227 
1228 	/* Set the platform/firmware specified interface mode. Note, phylink
1229 	 * deals with the PHY interface mode, not the MAC interface mode.
1230 	 */
1231 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1232 
1233 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1234 	if (priv->hw->xpcs)
1235 		xpcs_get_interfaces(priv->hw->xpcs,
1236 				    priv->phylink_config.supported_interfaces);
1237 
1238 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1239 						MAC_10FD | MAC_100FD |
1240 						MAC_1000FD;
1241 
1242 	stmmac_set_half_duplex(priv);
1243 
1244 	/* Get the MAC specific capabilities */
1245 	stmmac_mac_phylink_get_caps(priv);
1246 
1247 	max_speed = priv->plat->max_speed;
1248 	if (max_speed)
1249 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1250 
1251 	fwnode = priv->plat->port_node;
1252 	if (!fwnode)
1253 		fwnode = dev_fwnode(priv->device);
1254 
1255 	phylink = phylink_create(&priv->phylink_config, fwnode,
1256 				 mode, &stmmac_phylink_mac_ops);
1257 	if (IS_ERR(phylink))
1258 		return PTR_ERR(phylink);
1259 
1260 	priv->phylink = phylink;
1261 	return 0;
1262 }
1263 
1264 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1265 				    struct stmmac_dma_conf *dma_conf)
1266 {
1267 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1268 	unsigned int desc_size;
1269 	void *head_rx;
1270 	u32 queue;
1271 
1272 	/* Display RX rings */
1273 	for (queue = 0; queue < rx_cnt; queue++) {
1274 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1275 
1276 		pr_info("\tRX Queue %u rings\n", queue);
1277 
1278 		if (priv->extend_desc) {
1279 			head_rx = (void *)rx_q->dma_erx;
1280 			desc_size = sizeof(struct dma_extended_desc);
1281 		} else {
1282 			head_rx = (void *)rx_q->dma_rx;
1283 			desc_size = sizeof(struct dma_desc);
1284 		}
1285 
1286 		/* Display RX ring */
1287 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1288 				    rx_q->dma_rx_phy, desc_size);
1289 	}
1290 }
1291 
1292 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1293 				    struct stmmac_dma_conf *dma_conf)
1294 {
1295 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1296 	unsigned int desc_size;
1297 	void *head_tx;
1298 	u32 queue;
1299 
1300 	/* Display TX rings */
1301 	for (queue = 0; queue < tx_cnt; queue++) {
1302 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1303 
1304 		pr_info("\tTX Queue %d rings\n", queue);
1305 
1306 		if (priv->extend_desc) {
1307 			head_tx = (void *)tx_q->dma_etx;
1308 			desc_size = sizeof(struct dma_extended_desc);
1309 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1310 			head_tx = (void *)tx_q->dma_entx;
1311 			desc_size = sizeof(struct dma_edesc);
1312 		} else {
1313 			head_tx = (void *)tx_q->dma_tx;
1314 			desc_size = sizeof(struct dma_desc);
1315 		}
1316 
1317 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1318 				    tx_q->dma_tx_phy, desc_size);
1319 	}
1320 }
1321 
1322 static void stmmac_display_rings(struct stmmac_priv *priv,
1323 				 struct stmmac_dma_conf *dma_conf)
1324 {
1325 	/* Display RX ring */
1326 	stmmac_display_rx_rings(priv, dma_conf);
1327 
1328 	/* Display TX ring */
1329 	stmmac_display_tx_rings(priv, dma_conf);
1330 }
1331 
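/* Map the MTU to the DMA buffer size to be programmed into the RX ring */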
1332 static int stmmac_set_bfsize(int mtu, int bufsize)
1333 {
1334 	int ret = bufsize;
1335 
1336 	if (mtu >= BUF_SIZE_8KiB)
1337 		ret = BUF_SIZE_16KiB;
1338 	else if (mtu >= BUF_SIZE_4KiB)
1339 		ret = BUF_SIZE_8KiB;
1340 	else if (mtu >= BUF_SIZE_2KiB)
1341 		ret = BUF_SIZE_4KiB;
1342 	else if (mtu > DEFAULT_BUFSIZE)
1343 		ret = BUF_SIZE_2KiB;
1344 	else
1345 		ret = DEFAULT_BUFSIZE;
1346 
1347 	return ret;
1348 }
1349 
1350 /**
1351  * stmmac_clear_rx_descriptors - clear RX descriptors
1352  * @priv: driver private structure
1353  * @dma_conf: structure to take the dma data
1354  * @queue: RX queue index
1355  * Description: this function is called to clear the RX descriptors
1356  * whether basic or extended descriptors are in use.
1357  */
1358 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1359 					struct stmmac_dma_conf *dma_conf,
1360 					u32 queue)
1361 {
1362 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1363 	int i;
1364 
1365 	/* Clear the RX descriptors */
1366 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1367 		if (priv->extend_desc)
1368 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1369 					priv->use_riwt, priv->mode,
1370 					(i == dma_conf->dma_rx_size - 1),
1371 					dma_conf->dma_buf_sz);
1372 		else
1373 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1374 					priv->use_riwt, priv->mode,
1375 					(i == dma_conf->dma_rx_size - 1),
1376 					dma_conf->dma_buf_sz);
1377 }
1378 
1379 /**
1380  * stmmac_clear_tx_descriptors - clear tx descriptors
1381  * @priv: driver private structure
1382  * @dma_conf: structure to take the dma data
1383  * @queue: TX queue index.
1384  * Description: this function is called to clear the TX descriptors
1385  * in case of both basic and extended descriptors are used.
1386  */
1387 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1388 					struct stmmac_dma_conf *dma_conf,
1389 					u32 queue)
1390 {
1391 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1392 	int i;
1393 
1394 	/* Clear the TX descriptors */
1395 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1396 		int last = (i == (dma_conf->dma_tx_size - 1));
1397 		struct dma_desc *p;
1398 
1399 		if (priv->extend_desc)
1400 			p = &tx_q->dma_etx[i].basic;
1401 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1402 			p = &tx_q->dma_entx[i].basic;
1403 		else
1404 			p = &tx_q->dma_tx[i];
1405 
1406 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1407 	}
1408 }
1409 
1410 /**
1411  * stmmac_clear_descriptors - clear descriptors
1412  * @priv: driver private structure
1413  * @dma_conf: structure to take the dma data
1414  * Description: this function is called to clear the TX and RX descriptors
1415  * whether basic or extended descriptors are in use.
1416  */
1417 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1418 				     struct stmmac_dma_conf *dma_conf)
1419 {
1420 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1421 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1422 	u32 queue;
1423 
1424 	/* Clear the RX descriptors */
1425 	for (queue = 0; queue < rx_queue_cnt; queue++)
1426 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1427 
1428 	/* Clear the TX descriptors */
1429 	for (queue = 0; queue < tx_queue_cnt; queue++)
1430 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1431 }
1432 
1433 /**
1434  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1435  * @priv: driver private structure
1436  * @dma_conf: structure to take the dma data
1437  * @p: descriptor pointer
1438  * @i: descriptor index
1439  * @flags: gfp flag
1440  * @queue: RX queue index
1441  * Description: this function is called to allocate a receive buffer, perform
1442  * the DMA mapping and init the descriptor.
1443  */
1444 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1445 				  struct stmmac_dma_conf *dma_conf,
1446 				  struct dma_desc *p,
1447 				  int i, gfp_t flags, u32 queue)
1448 {
1449 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1450 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1451 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1452 
1453 	if (priv->dma_cap.host_dma_width <= 32)
1454 		gfp |= GFP_DMA32;
1455 
1456 	if (!buf->page) {
1457 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1458 		if (!buf->page)
1459 			return -ENOMEM;
1460 		buf->page_offset = stmmac_rx_offset(priv);
1461 	}
1462 
1463 	if (priv->sph && !buf->sec_page) {
1464 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1465 		if (!buf->sec_page)
1466 			return -ENOMEM;
1467 
1468 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1469 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1470 	} else {
1471 		buf->sec_page = NULL;
1472 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1473 	}
1474 
1475 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1476 
1477 	stmmac_set_desc_addr(priv, p, buf->addr);
1478 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1479 		stmmac_init_desc3(priv, p);
1480 
1481 	return 0;
1482 }
1483 
1484 /**
1485  * stmmac_free_rx_buffer - free a single RX dma buffer
1486  * @priv: private structure
1487  * @rx_q: RX queue
1488  * @i: buffer index.
1489  */
1490 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1491 				  struct stmmac_rx_queue *rx_q,
1492 				  int i)
1493 {
1494 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1495 
1496 	if (buf->page)
1497 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1498 	buf->page = NULL;
1499 
1500 	if (buf->sec_page)
1501 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1502 	buf->sec_page = NULL;
1503 }
1504 
1505 /**
1506  * stmmac_free_tx_buffer - free a single TX dma buffer
1507  * @priv: private structure
1508  * @dma_conf: structure to take the dma data
1509  * @queue: TX queue index
1510  * @i: buffer index.
1511  */
1512 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1513 				  struct stmmac_dma_conf *dma_conf,
1514 				  u32 queue, int i)
1515 {
1516 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1517 
1518 	if (tx_q->tx_skbuff_dma[i].buf &&
1519 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1520 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1521 			dma_unmap_page(priv->device,
1522 				       tx_q->tx_skbuff_dma[i].buf,
1523 				       tx_q->tx_skbuff_dma[i].len,
1524 				       DMA_TO_DEVICE);
1525 		else
1526 			dma_unmap_single(priv->device,
1527 					 tx_q->tx_skbuff_dma[i].buf,
1528 					 tx_q->tx_skbuff_dma[i].len,
1529 					 DMA_TO_DEVICE);
1530 	}
1531 
1532 	if (tx_q->xdpf[i] &&
1533 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1534 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1535 		xdp_return_frame(tx_q->xdpf[i]);
1536 		tx_q->xdpf[i] = NULL;
1537 	}
1538 
1539 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1540 		tx_q->xsk_frames_done++;
1541 
1542 	if (tx_q->tx_skbuff[i] &&
1543 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1544 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1545 		tx_q->tx_skbuff[i] = NULL;
1546 	}
1547 
1548 	tx_q->tx_skbuff_dma[i].buf = 0;
1549 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1550 }
1551 
1552 /**
1553  * dma_free_rx_skbufs - free RX dma buffers
1554  * @priv: private structure
1555  * @dma_conf: structure to take the dma data
1556  * @queue: RX queue index
1557  */
1558 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1559 			       struct stmmac_dma_conf *dma_conf,
1560 			       u32 queue)
1561 {
1562 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1563 	int i;
1564 
1565 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1566 		stmmac_free_rx_buffer(priv, rx_q, i);
1567 }
1568 
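/**
 * stmmac_alloc_rx_buffers - allocate page pool buffers for a RX queue
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag.
 */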
1569 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1570 				   struct stmmac_dma_conf *dma_conf,
1571 				   u32 queue, gfp_t flags)
1572 {
1573 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1574 	int i;
1575 
1576 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1577 		struct dma_desc *p;
1578 		int ret;
1579 
1580 		if (priv->extend_desc)
1581 			p = &((rx_q->dma_erx + i)->basic);
1582 		else
1583 			p = rx_q->dma_rx + i;
1584 
1585 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1586 					     queue);
1587 		if (ret)
1588 			return ret;
1589 
1590 		rx_q->buf_alloc_num++;
1591 	}
1592 
1593 	return 0;
1594 }
1595 
1596 /**
1597  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1598  * @priv: private structure
1599  * @dma_conf: structure to take the dma data
1600  * @queue: RX queue index
1601  */
1602 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1603 				struct stmmac_dma_conf *dma_conf,
1604 				u32 queue)
1605 {
1606 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1607 	int i;
1608 
1609 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1610 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1611 
1612 		if (!buf->xdp)
1613 			continue;
1614 
1615 		xsk_buff_free(buf->xdp);
1616 		buf->xdp = NULL;
1617 	}
1618 }
1619 
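/**
 * stmmac_alloc_rx_buffers_zc - allocate RX buffers from the XSK pool
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */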
1620 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1621 				      struct stmmac_dma_conf *dma_conf,
1622 				      u32 queue)
1623 {
1624 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1625 	int i;
1626 
1627 	/* struct stmmac_xdp_buff is using the cb field (maximum size of 24 bytes)
1628 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1629 	 * use this macro to make sure there are no size violations.
1630 	 */
1631 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1632 
1633 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1634 		struct stmmac_rx_buffer *buf;
1635 		dma_addr_t dma_addr;
1636 		struct dma_desc *p;
1637 
1638 		if (priv->extend_desc)
1639 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1640 		else
1641 			p = rx_q->dma_rx + i;
1642 
1643 		buf = &rx_q->buf_pool[i];
1644 
1645 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1646 		if (!buf->xdp)
1647 			return -ENOMEM;
1648 
1649 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1650 		stmmac_set_desc_addr(priv, p, dma_addr);
1651 		rx_q->buf_alloc_num++;
1652 	}
1653 
1654 	return 0;
1655 }
1656 
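/* Return the XSK buffer pool bound to @queue, or NULL when XDP zero-copy is
 * not enabled for it.
 */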
1657 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1658 {
1659 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1660 		return NULL;
1661 
1662 	return xsk_get_pool_from_qid(priv->dev, queue);
1663 }
1664 
1665 /**
1666  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1667  * @priv: driver private structure
1668  * @dma_conf: structure to take the dma data
1669  * @queue: RX queue index
1670  * @flags: gfp flag.
1671  * Description: this function initializes the DMA RX descriptors
1672  * and allocates the socket buffers. It supports the chained and ring
1673  * modes.
1674  */
1675 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1676 				    struct stmmac_dma_conf *dma_conf,
1677 				    u32 queue, gfp_t flags)
1678 {
1679 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1680 	int ret;
1681 
1682 	netif_dbg(priv, probe, priv->dev,
1683 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1684 		  (u32)rx_q->dma_rx_phy);
1685 
1686 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1687 
1688 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1689 
1690 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1691 
1692 	if (rx_q->xsk_pool) {
1693 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1694 						   MEM_TYPE_XSK_BUFF_POOL,
1695 						   NULL));
1696 		netdev_info(priv->dev,
1697 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1698 			    rx_q->queue_index);
1699 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1700 	} else {
1701 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1702 						   MEM_TYPE_PAGE_POOL,
1703 						   rx_q->page_pool));
1704 		netdev_info(priv->dev,
1705 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1706 			    rx_q->queue_index);
1707 	}
1708 
1709 	if (rx_q->xsk_pool) {
1710 		/* RX XDP ZC buffer pool may not be populated, e.g.
1711 		 * xdpsock TX-only.
1712 		 */
1713 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1714 	} else {
1715 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1716 		if (ret < 0)
1717 			return -ENOMEM;
1718 	}
1719 
1720 	/* Setup the chained descriptor addresses */
1721 	if (priv->mode == STMMAC_CHAIN_MODE) {
1722 		if (priv->extend_desc)
1723 			stmmac_mode_init(priv, rx_q->dma_erx,
1724 					 rx_q->dma_rx_phy,
1725 					 dma_conf->dma_rx_size, 1);
1726 		else
1727 			stmmac_mode_init(priv, rx_q->dma_rx,
1728 					 rx_q->dma_rx_phy,
1729 					 dma_conf->dma_rx_size, 0);
1730 	}
1731 
1732 	return 0;
1733 }
1734 
1735 static int init_dma_rx_desc_rings(struct net_device *dev,
1736 				  struct stmmac_dma_conf *dma_conf,
1737 				  gfp_t flags)
1738 {
1739 	struct stmmac_priv *priv = netdev_priv(dev);
1740 	u32 rx_count = priv->plat->rx_queues_to_use;
1741 	int queue;
1742 	int ret;
1743 
1744 	/* RX INITIALIZATION */
1745 	netif_dbg(priv, probe, priv->dev,
1746 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1747 
1748 	for (queue = 0; queue < rx_count; queue++) {
1749 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1750 		if (ret)
1751 			goto err_init_rx_buffers;
1752 	}
1753 
1754 	return 0;
1755 
1756 err_init_rx_buffers:
1757 	while (queue >= 0) {
1758 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1759 
1760 		if (rx_q->xsk_pool)
1761 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1762 		else
1763 			dma_free_rx_skbufs(priv, dma_conf, queue);
1764 
1765 		rx_q->buf_alloc_num = 0;
1766 		rx_q->xsk_pool = NULL;
1767 
1768 		queue--;
1769 	}
1770 
1771 	return ret;
1772 }
1773 
1774 /**
1775  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1776  * @priv: driver private structure
1777  * @dma_conf: structure to take the dma data
1778  * @queue: TX queue index
1779  * Description: this function initializes the DMA TX descriptors
1780  * and allocates the socket buffers. It supports the chained and ring
1781  * modes.
1782  */
1783 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1784 				    struct stmmac_dma_conf *dma_conf,
1785 				    u32 queue)
1786 {
1787 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1788 	int i;
1789 
1790 	netif_dbg(priv, probe, priv->dev,
1791 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1792 		  (u32)tx_q->dma_tx_phy);
1793 
1794 	/* Setup the chained descriptor addresses */
1795 	if (priv->mode == STMMAC_CHAIN_MODE) {
1796 		if (priv->extend_desc)
1797 			stmmac_mode_init(priv, tx_q->dma_etx,
1798 					 tx_q->dma_tx_phy,
1799 					 dma_conf->dma_tx_size, 1);
1800 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1801 			stmmac_mode_init(priv, tx_q->dma_tx,
1802 					 tx_q->dma_tx_phy,
1803 					 dma_conf->dma_tx_size, 0);
1804 	}
1805 
1806 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1807 
1808 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1809 		struct dma_desc *p;
1810 
1811 		if (priv->extend_desc)
1812 			p = &((tx_q->dma_etx + i)->basic);
1813 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1814 			p = &((tx_q->dma_entx + i)->basic);
1815 		else
1816 			p = tx_q->dma_tx + i;
1817 
1818 		stmmac_clear_desc(priv, p);
1819 
1820 		tx_q->tx_skbuff_dma[i].buf = 0;
1821 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1822 		tx_q->tx_skbuff_dma[i].len = 0;
1823 		tx_q->tx_skbuff_dma[i].last_segment = false;
1824 		tx_q->tx_skbuff[i] = NULL;
1825 	}
1826 
1827 	return 0;
1828 }
1829 
1830 static int init_dma_tx_desc_rings(struct net_device *dev,
1831 				  struct stmmac_dma_conf *dma_conf)
1832 {
1833 	struct stmmac_priv *priv = netdev_priv(dev);
1834 	u32 tx_queue_cnt;
1835 	u32 queue;
1836 
1837 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1838 
1839 	for (queue = 0; queue < tx_queue_cnt; queue++)
1840 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1841 
1842 	return 0;
1843 }
1844 
1845 /**
1846  * init_dma_desc_rings - init the RX/TX descriptor rings
1847  * @dev: net device structure
1848  * @dma_conf: structure to take the dma data
1849  * @flags: gfp flag.
1850  * Description: this function initializes the DMA RX/TX descriptors
1851  * and allocates the socket buffers. It supports the chained and ring
1852  * modes.
1853  */
1854 static int init_dma_desc_rings(struct net_device *dev,
1855 			       struct stmmac_dma_conf *dma_conf,
1856 			       gfp_t flags)
1857 {
1858 	struct stmmac_priv *priv = netdev_priv(dev);
1859 	int ret;
1860 
1861 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1862 	if (ret)
1863 		return ret;
1864 
1865 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1866 
1867 	stmmac_clear_descriptors(priv, dma_conf);
1868 
1869 	if (netif_msg_hw(priv))
1870 		stmmac_display_rings(priv, dma_conf);
1871 
1872 	return ret;
1873 }
1874 
1875 /**
1876  * dma_free_tx_skbufs - free TX dma buffers
1877  * @priv: private structure
1878  * @dma_conf: structure to take the dma data
1879  * @queue: TX queue index
1880  */
1881 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1882 			       struct stmmac_dma_conf *dma_conf,
1883 			       u32 queue)
1884 {
1885 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1886 	int i;
1887 
1888 	tx_q->xsk_frames_done = 0;
1889 
1890 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1891 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1892 
1893 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1894 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1895 		tx_q->xsk_frames_done = 0;
1896 		tx_q->xsk_pool = NULL;
1897 	}
1898 }
1899 
1900 /**
1901  * stmmac_free_tx_skbufs - free TX skb buffers
1902  * @priv: private structure
1903  */
1904 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1905 {
1906 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1907 	u32 queue;
1908 
1909 	for (queue = 0; queue < tx_queue_cnt; queue++)
1910 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1911 }
1912 
1913 /**
1914  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1915  * @priv: private structure
1916  * @dma_conf: structure to take the dma data
1917  * @queue: RX queue index
1918  */
1919 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1920 					 struct stmmac_dma_conf *dma_conf,
1921 					 u32 queue)
1922 {
1923 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1924 
1925 	/* Release the DMA RX socket buffers */
1926 	if (rx_q->xsk_pool)
1927 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1928 	else
1929 		dma_free_rx_skbufs(priv, dma_conf, queue);
1930 
1931 	rx_q->buf_alloc_num = 0;
1932 	rx_q->xsk_pool = NULL;
1933 
1934 	/* Free DMA regions of consistent memory previously allocated */
1935 	if (!priv->extend_desc)
1936 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1937 				  sizeof(struct dma_desc),
1938 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1939 	else
1940 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1941 				  sizeof(struct dma_extended_desc),
1942 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1943 
1944 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1945 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1946 
1947 	kfree(rx_q->buf_pool);
1948 	if (rx_q->page_pool)
1949 		page_pool_destroy(rx_q->page_pool);
1950 }
1951 
1952 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1953 				       struct stmmac_dma_conf *dma_conf)
1954 {
1955 	u32 rx_count = priv->plat->rx_queues_to_use;
1956 	u32 queue;
1957 
1958 	/* Free RX queue resources */
1959 	for (queue = 0; queue < rx_count; queue++)
1960 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1961 }
1962 
1963 /**
1964  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1965  * @priv: private structure
1966  * @dma_conf: structure to take the dma data
1967  * @queue: TX queue index
1968  */
1969 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1970 					 struct stmmac_dma_conf *dma_conf,
1971 					 u32 queue)
1972 {
1973 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1974 	size_t size;
1975 	void *addr;
1976 
1977 	/* Release the DMA TX socket buffers */
1978 	dma_free_tx_skbufs(priv, dma_conf, queue);
1979 
1980 	if (priv->extend_desc) {
1981 		size = sizeof(struct dma_extended_desc);
1982 		addr = tx_q->dma_etx;
1983 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1984 		size = sizeof(struct dma_edesc);
1985 		addr = tx_q->dma_entx;
1986 	} else {
1987 		size = sizeof(struct dma_desc);
1988 		addr = tx_q->dma_tx;
1989 	}
1990 
1991 	size *= dma_conf->dma_tx_size;
1992 
1993 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1994 
1995 	kfree(tx_q->tx_skbuff_dma);
1996 	kfree(tx_q->tx_skbuff);
1997 }
1998 
1999 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2000 				       struct stmmac_dma_conf *dma_conf)
2001 {
2002 	u32 tx_count = priv->plat->tx_queues_to_use;
2003 	u32 queue;
2004 
2005 	/* Free TX queue resources */
2006 	for (queue = 0; queue < tx_count; queue++)
2007 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2008 }
2009 
2010 /**
2011  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2012  * @priv: private structure
2013  * @dma_conf: structure to take the dma data
2014  * @queue: RX queue index
2015  * Description: according to which descriptor can be used (extended or basic)
2016  * this function allocates the resources for the RX path. For example, it
2017  * pre-allocates the RX buffer pool and descriptor ring in order to allow
2018  * the zero-copy mechanism.
2019  */
2020 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2021 					 struct stmmac_dma_conf *dma_conf,
2022 					 u32 queue)
2023 {
2024 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2025 	struct stmmac_channel *ch = &priv->channel[queue];
2026 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2027 	struct page_pool_params pp_params = { 0 };
2028 	unsigned int num_pages;
2029 	unsigned int napi_id;
2030 	int ret;
2031 
2032 	rx_q->queue_index = queue;
2033 	rx_q->priv_data = priv;
2034 
2035 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2036 	pp_params.pool_size = dma_conf->dma_rx_size;
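	/* Page pool page order is derived from the number of pages spanned
	 * by one RX buffer (dma_buf_sz).
	 */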
2037 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2038 	pp_params.order = ilog2(num_pages);
2039 	pp_params.nid = dev_to_node(priv->device);
2040 	pp_params.dev = priv->device;
2041 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2042 	pp_params.offset = stmmac_rx_offset(priv);
2043 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2044 
2045 	rx_q->page_pool = page_pool_create(&pp_params);
2046 	if (IS_ERR(rx_q->page_pool)) {
2047 		ret = PTR_ERR(rx_q->page_pool);
2048 		rx_q->page_pool = NULL;
2049 		return ret;
2050 	}
2051 
2052 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2053 				 sizeof(*rx_q->buf_pool),
2054 				 GFP_KERNEL);
2055 	if (!rx_q->buf_pool)
2056 		return -ENOMEM;
2057 
2058 	if (priv->extend_desc) {
2059 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2060 						   dma_conf->dma_rx_size *
2061 						   sizeof(struct dma_extended_desc),
2062 						   &rx_q->dma_rx_phy,
2063 						   GFP_KERNEL);
2064 		if (!rx_q->dma_erx)
2065 			return -ENOMEM;
2066 
2067 	} else {
2068 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2069 						  dma_conf->dma_rx_size *
2070 						  sizeof(struct dma_desc),
2071 						  &rx_q->dma_rx_phy,
2072 						  GFP_KERNEL);
2073 		if (!rx_q->dma_rx)
2074 			return -ENOMEM;
2075 	}
2076 
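	/* Register the XDP RxQ against the NAPI context that will service
	 * this queue: the combined rxtx NAPI when AF_XDP zero-copy is in
	 * use, the regular RX NAPI otherwise.
	 */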
2077 	if (stmmac_xdp_is_enabled(priv) &&
2078 	    test_bit(queue, priv->af_xdp_zc_qps))
2079 		napi_id = ch->rxtx_napi.napi_id;
2080 	else
2081 		napi_id = ch->rx_napi.napi_id;
2082 
2083 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2084 			       rx_q->queue_index,
2085 			       napi_id);
2086 	if (ret) {
2087 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2088 		return -EINVAL;
2089 	}
2090 
2091 	return 0;
2092 }
2093 
2094 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2095 				       struct stmmac_dma_conf *dma_conf)
2096 {
2097 	u32 rx_count = priv->plat->rx_queues_to_use;
2098 	u32 queue;
2099 	int ret;
2100 
2101 	/* RX queues buffers and DMA */
2102 	for (queue = 0; queue < rx_count; queue++) {
2103 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2104 		if (ret)
2105 			goto err_dma;
2106 	}
2107 
2108 	return 0;
2109 
2110 err_dma:
2111 	free_dma_rx_desc_resources(priv, dma_conf);
2112 
2113 	return ret;
2114 }
2115 
2116 /**
2117  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2118  * @priv: private structure
2119  * @dma_conf: structure to take the dma data
2120  * @queue: TX queue index
2121  * Description: according to which descriptor can be used (extended or basic)
2122  * this function allocates the resources for the TX path, i.e. the TX
2123  * descriptor ring and the per-descriptor bookkeeping arrays
2124  * (tx_skbuff and tx_skbuff_dma).
2125  */
2126 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2127 					 struct stmmac_dma_conf *dma_conf,
2128 					 u32 queue)
2129 {
2130 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2131 	size_t size;
2132 	void *addr;
2133 
2134 	tx_q->queue_index = queue;
2135 	tx_q->priv_data = priv;
2136 
2137 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2138 				      sizeof(*tx_q->tx_skbuff_dma),
2139 				      GFP_KERNEL);
2140 	if (!tx_q->tx_skbuff_dma)
2141 		return -ENOMEM;
2142 
2143 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2144 				  sizeof(struct sk_buff *),
2145 				  GFP_KERNEL);
2146 	if (!tx_q->tx_skbuff)
2147 		return -ENOMEM;
2148 
2149 	if (priv->extend_desc)
2150 		size = sizeof(struct dma_extended_desc);
2151 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2152 		size = sizeof(struct dma_edesc);
2153 	else
2154 		size = sizeof(struct dma_desc);
2155 
2156 	size *= dma_conf->dma_tx_size;
2157 
2158 	addr = dma_alloc_coherent(priv->device, size,
2159 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2160 	if (!addr)
2161 		return -ENOMEM;
2162 
2163 	if (priv->extend_desc)
2164 		tx_q->dma_etx = addr;
2165 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2166 		tx_q->dma_entx = addr;
2167 	else
2168 		tx_q->dma_tx = addr;
2169 
2170 	return 0;
2171 }
2172 
2173 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2174 				       struct stmmac_dma_conf *dma_conf)
2175 {
2176 	u32 tx_count = priv->plat->tx_queues_to_use;
2177 	u32 queue;
2178 	int ret;
2179 
2180 	/* TX queues buffers and DMA */
2181 	for (queue = 0; queue < tx_count; queue++) {
2182 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2183 		if (ret)
2184 			goto err_dma;
2185 	}
2186 
2187 	return 0;
2188 
2189 err_dma:
2190 	free_dma_tx_desc_resources(priv, dma_conf);
2191 	return ret;
2192 }
2193 
2194 /**
2195  * alloc_dma_desc_resources - alloc TX/RX resources.
2196  * @priv: private structure
2197  * @dma_conf: structure to take the dma data
2198  * Description: according to which descriptor can be used (extended or basic)
2199  * this function allocates the resources for the TX and RX paths. In the
2200  * case of reception, for example, it pre-allocates the RX buffers in order
2201  * to allow the zero-copy mechanism.
2202  */
2203 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2204 				    struct stmmac_dma_conf *dma_conf)
2205 {
2206 	/* RX Allocation */
2207 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2208 
2209 	if (ret)
2210 		return ret;
2211 
2212 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2213 
2214 	return ret;
2215 }
2216 
2217 /**
2218  * free_dma_desc_resources - free dma desc resources
2219  * @priv: private structure
2220  * @dma_conf: structure to take the dma data
2221  */
2222 static void free_dma_desc_resources(struct stmmac_priv *priv,
2223 				    struct stmmac_dma_conf *dma_conf)
2224 {
2225 	/* Release the DMA TX socket buffers */
2226 	free_dma_tx_desc_resources(priv, dma_conf);
2227 
2228 	/* Release the DMA RX socket buffers later
2229 	 * to ensure all pending XDP_TX buffers are returned.
2230 	 */
2231 	free_dma_rx_desc_resources(priv, dma_conf);
2232 }
2233 
2234 /**
2235  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2236  *  @priv: driver private structure
2237  *  Description: It is used for enabling the rx queues in the MAC
2238  */
2239 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2240 {
2241 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2242 	int queue;
2243 	u8 mode;
2244 
2245 	for (queue = 0; queue < rx_queues_count; queue++) {
2246 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2247 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2248 	}
2249 }
2250 
2251 /**
2252  * stmmac_start_rx_dma - start RX DMA channel
2253  * @priv: driver private structure
2254  * @chan: RX channel index
2255  * Description:
2256  * This starts a RX DMA channel
2257  */
2258 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2259 {
2260 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2261 	stmmac_start_rx(priv, priv->ioaddr, chan);
2262 }
2263 
2264 /**
2265  * stmmac_start_tx_dma - start TX DMA channel
2266  * @priv: driver private structure
2267  * @chan: TX channel index
2268  * Description:
2269  * This starts a TX DMA channel
2270  */
2271 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2272 {
2273 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2274 	stmmac_start_tx(priv, priv->ioaddr, chan);
2275 }
2276 
2277 /**
2278  * stmmac_stop_rx_dma - stop RX DMA channel
2279  * @priv: driver private structure
2280  * @chan: RX channel index
2281  * Description:
2282  * This stops a RX DMA channel
2283  */
2284 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2285 {
2286 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2287 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2288 }
2289 
2290 /**
2291  * stmmac_stop_tx_dma - stop TX DMA channel
2292  * @priv: driver private structure
2293  * @chan: TX channel index
2294  * Description:
2295  * This stops a TX DMA channel
2296  */
2297 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2298 {
2299 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2300 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2301 }
2302 
2303 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2304 {
2305 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2306 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2307 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2308 	u32 chan;
2309 
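	/* The DMA CSR/interrupt controls are per channel, so walk the
	 * larger of the RX and TX channel counts.
	 */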
2310 	for (chan = 0; chan < dma_csr_ch; chan++) {
2311 		struct stmmac_channel *ch = &priv->channel[chan];
2312 		unsigned long flags;
2313 
2314 		spin_lock_irqsave(&ch->lock, flags);
2315 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2316 		spin_unlock_irqrestore(&ch->lock, flags);
2317 	}
2318 }
2319 
2320 /**
2321  * stmmac_start_all_dma - start all RX and TX DMA channels
2322  * @priv: driver private structure
2323  * Description:
2324  * This starts all the RX and TX DMA channels
2325  */
2326 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2327 {
2328 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2329 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2330 	u32 chan = 0;
2331 
2332 	for (chan = 0; chan < rx_channels_count; chan++)
2333 		stmmac_start_rx_dma(priv, chan);
2334 
2335 	for (chan = 0; chan < tx_channels_count; chan++)
2336 		stmmac_start_tx_dma(priv, chan);
2337 }
2338 
2339 /**
2340  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2341  * @priv: driver private structure
2342  * Description:
2343  * This stops the RX and TX DMA channels
2344  */
2345 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2346 {
2347 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2348 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2349 	u32 chan = 0;
2350 
2351 	for (chan = 0; chan < rx_channels_count; chan++)
2352 		stmmac_stop_rx_dma(priv, chan);
2353 
2354 	for (chan = 0; chan < tx_channels_count; chan++)
2355 		stmmac_stop_tx_dma(priv, chan);
2356 }
2357 
2358 /**
2359  *  stmmac_dma_operation_mode - HW DMA operation mode
2360  *  @priv: driver private structure
2361  *  Description: it is used for configuring the DMA operation mode register in
2362  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2363  */
2364 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2365 {
2366 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2367 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2368 	int rxfifosz = priv->plat->rx_fifo_size;
2369 	int txfifosz = priv->plat->tx_fifo_size;
2370 	u32 txmode = 0;
2371 	u32 rxmode = 0;
2372 	u32 chan = 0;
2373 	u8 qmode = 0;
2374 
2375 	if (rxfifosz == 0)
2376 		rxfifosz = priv->dma_cap.rx_fifo_size;
2377 	if (txfifosz == 0)
2378 		txfifosz = priv->dma_cap.tx_fifo_size;
2379 
2380 	/* Adjust for real per queue fifo size */
2381 	rxfifosz /= rx_channels_count;
2382 	txfifosz /= tx_channels_count;
2383 
2384 	if (priv->plat->force_thresh_dma_mode) {
2385 		txmode = tc;
2386 		rxmode = tc;
2387 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2388 		/*
2389 		 * In case of GMAC, SF mode can be enabled
2390 		 * to perform the TX COE in HW. This depends on:
2391 		 * 1) TX COE is actually supported
2392 		 * 2) There is no buggy Jumbo frame support
2393 		 *    that requires csum not to be inserted in the TDES.
2394 		 */
2395 		txmode = SF_DMA_MODE;
2396 		rxmode = SF_DMA_MODE;
2397 		priv->xstats.threshold = SF_DMA_MODE;
2398 	} else {
2399 		txmode = tc;
2400 		rxmode = SF_DMA_MODE;
2401 	}
2402 
2403 	/* configure all channels */
2404 	for (chan = 0; chan < rx_channels_count; chan++) {
2405 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2406 		u32 buf_size;
2407 
2408 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2409 
2410 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2411 				rxfifosz, qmode);
2412 
2413 		if (rx_q->xsk_pool) {
2414 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2415 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2416 					      buf_size,
2417 					      chan);
2418 		} else {
2419 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2420 					      priv->dma_conf.dma_buf_sz,
2421 					      chan);
2422 		}
2423 	}
2424 
2425 	for (chan = 0; chan < tx_channels_count; chan++) {
2426 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2427 
2428 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2429 				txfifosz, qmode);
2430 	}
2431 }
2432 
2433 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2434 {
2435 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2436 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2437 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2438 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2439 	unsigned int entry = tx_q->cur_tx;
2440 	struct dma_desc *tx_desc = NULL;
2441 	struct xdp_desc xdp_desc;
2442 	bool work_done = true;
2443 	u32 tx_set_ic_bit = 0;
2444 	unsigned long flags;
2445 
2446 	/* Avoids TX time-out as we are sharing with slow path */
2447 	txq_trans_cond_update(nq);
2448 
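	/* Never submit more XSK frames than there are free TX descriptors */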
2449 	budget = min(budget, stmmac_tx_avail(priv, queue));
2450 
2451 	while (budget-- > 0) {
2452 		dma_addr_t dma_addr;
2453 		bool set_ic;
2454 
2455 		/* We share the ring with the slow path, so stop XSK TX desc
2456 		 * submission when the available ring space drops below the threshold.
2457 		 */
2458 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2459 		    !netif_carrier_ok(priv->dev)) {
2460 			work_done = false;
2461 			break;
2462 		}
2463 
2464 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2465 			break;
2466 
2467 		if (likely(priv->extend_desc))
2468 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2469 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2470 			tx_desc = &tx_q->dma_entx[entry].basic;
2471 		else
2472 			tx_desc = tx_q->dma_tx + entry;
2473 
2474 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2475 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2476 
2477 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2478 
2479 		/* To return the XDP buffer to the XSK pool, we simply call
2480 		 * xsk_tx_completed(), so we don't need to fill up
2481 		 * 'buf' and 'xdpf'.
2482 		 */
2483 		tx_q->tx_skbuff_dma[entry].buf = 0;
2484 		tx_q->xdpf[entry] = NULL;
2485 
2486 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2487 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2488 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2489 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2490 
2491 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2492 
2493 		tx_q->tx_count_frames++;
2494 
2495 		if (!priv->tx_coal_frames[queue])
2496 			set_ic = false;
2497 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2498 			set_ic = true;
2499 		else
2500 			set_ic = false;
2501 
2502 		if (set_ic) {
2503 			tx_q->tx_count_frames = 0;
2504 			stmmac_set_tx_ic(priv, tx_desc);
2505 			tx_set_ic_bit++;
2506 		}
2507 
2508 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2509 				       true, priv->mode, true, true,
2510 				       xdp_desc.len);
2511 
2512 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2513 
2514 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2515 		entry = tx_q->cur_tx;
2516 	}
2517 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2518 	txq_stats->tx_set_ic_bit += tx_set_ic_bit;
2519 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2520 
2521 	if (tx_desc) {
2522 		stmmac_flush_tx_descriptors(priv, queue);
2523 		xsk_tx_release(pool);
2524 	}
2525 
2526 	/* Return true only if both conditions are met:
2527 	 *  a) TX budget is still available
2528 	 *  b) work_done == true, i.e. the XSK TX desc peek came back empty
2529 	 *     (no more pending XSK TX frames for transmission)
2530 	 */
2531 	return !!budget && work_done;
2532 }
2533 
2534 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2535 {
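	/* Raise the threshold control value in steps of 64 unless the DMA is
	 * already operating in Store-and-Forward mode.
	 */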
2536 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2537 		tc += 64;
2538 
2539 		if (priv->plat->force_thresh_dma_mode)
2540 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2541 		else
2542 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2543 						      chan);
2544 
2545 		priv->xstats.threshold = tc;
2546 	}
2547 }
2548 
2549 /**
2550  * stmmac_tx_clean - to manage the transmission completion
2551  * @priv: driver private structure
2552  * @budget: napi budget limiting this functions packet handling
2553  * @queue: TX queue index
2554  * @pending_packets: signal to arm the TX coal timer
2555  * Description: it reclaims the transmit resources after transmission completes.
2556  * If some packets still need to be handled due to TX coalescing, set
2557  * pending_packets to true to make NAPI arm the TX coal timer.
2558  */
2559 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2560 			   bool *pending_packets)
2561 {
2562 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2563 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2564 	unsigned int bytes_compl = 0, pkts_compl = 0;
2565 	unsigned int entry, xmits = 0, count = 0;
2566 	u32 tx_packets = 0, tx_errors = 0;
2567 	unsigned long flags;
2568 
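	/* Serialize TX completion handling against the xmit path for this
	 * queue.
	 */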
2569 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2570 
2571 	tx_q->xsk_frames_done = 0;
2572 
2573 	entry = tx_q->dirty_tx;
2574 
2575 	/* Try to clean all TX complete frames in one shot */
2576 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2577 		struct xdp_frame *xdpf;
2578 		struct sk_buff *skb;
2579 		struct dma_desc *p;
2580 		int status;
2581 
2582 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2583 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2584 			xdpf = tx_q->xdpf[entry];
2585 			skb = NULL;
2586 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2587 			xdpf = NULL;
2588 			skb = tx_q->tx_skbuff[entry];
2589 		} else {
2590 			xdpf = NULL;
2591 			skb = NULL;
2592 		}
2593 
2594 		if (priv->extend_desc)
2595 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2596 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2597 			p = &tx_q->dma_entx[entry].basic;
2598 		else
2599 			p = tx_q->dma_tx + entry;
2600 
2601 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2602 		/* Check if the descriptor is owned by the DMA */
2603 		if (unlikely(status & tx_dma_own))
2604 			break;
2605 
2606 		count++;
2607 
2608 		/* Make sure descriptor fields are read after reading
2609 		 * the own bit.
2610 		 */
2611 		dma_rmb();
2612 
2613 		/* Just consider the last segment and ... */
2614 		if (likely(!(status & tx_not_ls))) {
2615 			/* ... verify the status error condition */
2616 			if (unlikely(status & tx_err)) {
2617 				tx_errors++;
2618 				if (unlikely(status & tx_err_bump_tc))
2619 					stmmac_bump_dma_threshold(priv, queue);
2620 			} else {
2621 				tx_packets++;
2622 			}
2623 			if (skb)
2624 				stmmac_get_tx_hwtstamp(priv, p, skb);
2625 		}
2626 
2627 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2628 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2629 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2630 				dma_unmap_page(priv->device,
2631 					       tx_q->tx_skbuff_dma[entry].buf,
2632 					       tx_q->tx_skbuff_dma[entry].len,
2633 					       DMA_TO_DEVICE);
2634 			else
2635 				dma_unmap_single(priv->device,
2636 						 tx_q->tx_skbuff_dma[entry].buf,
2637 						 tx_q->tx_skbuff_dma[entry].len,
2638 						 DMA_TO_DEVICE);
2639 			tx_q->tx_skbuff_dma[entry].buf = 0;
2640 			tx_q->tx_skbuff_dma[entry].len = 0;
2641 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2642 		}
2643 
2644 		stmmac_clean_desc3(priv, tx_q, p);
2645 
2646 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2647 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2648 
2649 		if (xdpf &&
2650 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2651 			xdp_return_frame_rx_napi(xdpf);
2652 			tx_q->xdpf[entry] = NULL;
2653 		}
2654 
2655 		if (xdpf &&
2656 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2657 			xdp_return_frame(xdpf);
2658 			tx_q->xdpf[entry] = NULL;
2659 		}
2660 
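		/* XSK TX buffers are returned to the pool in bulk via
		 * xsk_tx_completed() after the ring walk, so only count
		 * them here.
		 */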
2661 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2662 			tx_q->xsk_frames_done++;
2663 
2664 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2665 			if (likely(skb)) {
2666 				pkts_compl++;
2667 				bytes_compl += skb->len;
2668 				dev_consume_skb_any(skb);
2669 				tx_q->tx_skbuff[entry] = NULL;
2670 			}
2671 		}
2672 
2673 		stmmac_release_tx_desc(priv, p, priv->mode);
2674 
2675 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2676 	}
2677 	tx_q->dirty_tx = entry;
2678 
2679 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2680 				  pkts_compl, bytes_compl);
2681 
2682 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2683 								queue))) &&
2684 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2685 
2686 		netif_dbg(priv, tx_done, priv->dev,
2687 			  "%s: restart transmit\n", __func__);
2688 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2689 	}
2690 
2691 	if (tx_q->xsk_pool) {
2692 		bool work_done;
2693 
2694 		if (tx_q->xsk_frames_done)
2695 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2696 
2697 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2698 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2699 
2700 		/* For XSK TX, we try to send as many frames as possible.
2701 		 * If the XSK work is done (XSK TX desc queue empty and budget
2702 		 * still available), return "budget - 1" to re-enable the TX IRQ.
2703 		 * Else, return "budget" to make NAPI continue polling.
2704 		 */
2705 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2706 					       STMMAC_XSK_TX_BUDGET_MAX);
2707 		if (work_done)
2708 			xmits = budget - 1;
2709 		else
2710 			xmits = budget;
2711 	}
2712 
2713 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2714 	    priv->eee_sw_timer_en) {
2715 		if (stmmac_enable_eee_mode(priv))
2716 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2717 	}
2718 
2719 	/* We still have pending packets, let's call for a new scheduling */
2720 	if (tx_q->dirty_tx != tx_q->cur_tx)
2721 		*pending_packets = true;
2722 
2723 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2724 	txq_stats->tx_packets += tx_packets;
2725 	txq_stats->tx_pkt_n += tx_packets;
2726 	txq_stats->tx_clean++;
2727 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2728 
2729 	priv->xstats.tx_errors += tx_errors;
2730 
2731 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2732 
2733 	/* Combine decisions from TX clean and XSK TX */
2734 	return max(count, xmits);
2735 }
2736 
2737 /**
2738  * stmmac_tx_err - to manage the tx error
2739  * @priv: driver private structure
2740  * @chan: channel index
2741  * Description: it cleans the descriptors and restarts the transmission
2742  * in case of transmission errors.
2743  */
2744 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2745 {
2746 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2747 
2748 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2749 
2750 	stmmac_stop_tx_dma(priv, chan);
2751 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2752 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2753 	stmmac_reset_tx_queue(priv, chan);
2754 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2755 			    tx_q->dma_tx_phy, chan);
2756 	stmmac_start_tx_dma(priv, chan);
2757 
2758 	priv->xstats.tx_errors++;
2759 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2760 }
2761 
2762 /**
2763  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2764  *  @priv: driver private structure
2765  *  @txmode: TX operating mode
2766  *  @rxmode: RX operating mode
2767  *  @chan: channel index
2768  *  Description: it is used for configuring the DMA operation mode at
2769  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2770  *  mode.
2771  */
2772 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2773 					  u32 rxmode, u32 chan)
2774 {
2775 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2776 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2777 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2778 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2779 	int rxfifosz = priv->plat->rx_fifo_size;
2780 	int txfifosz = priv->plat->tx_fifo_size;
2781 
2782 	if (rxfifosz == 0)
2783 		rxfifosz = priv->dma_cap.rx_fifo_size;
2784 	if (txfifosz == 0)
2785 		txfifosz = priv->dma_cap.tx_fifo_size;
2786 
2787 	/* Adjust for real per queue fifo size */
2788 	rxfifosz /= rx_channels_count;
2789 	txfifosz /= tx_channels_count;
2790 
2791 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2792 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2793 }
2794 
2795 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2796 {
2797 	int ret;
2798 
2799 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2800 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2801 	if (ret && (ret != -EINVAL)) {
2802 		stmmac_global_err(priv);
2803 		return true;
2804 	}
2805 
2806 	return false;
2807 }
2808 
2809 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2810 {
2811 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2812 						 &priv->xstats, chan, dir);
2813 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2814 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2815 	struct stmmac_channel *ch = &priv->channel[chan];
2816 	struct napi_struct *rx_napi;
2817 	struct napi_struct *tx_napi;
2818 	unsigned long flags;
2819 
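	/* Queues with an attached XSK pool are serviced by the combined
	 * rxtx NAPI instance.
	 */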
2820 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2821 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2822 
2823 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2824 		if (napi_schedule_prep(rx_napi)) {
2825 			spin_lock_irqsave(&ch->lock, flags);
2826 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2827 			spin_unlock_irqrestore(&ch->lock, flags);
2828 			__napi_schedule(rx_napi);
2829 		}
2830 	}
2831 
2832 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2833 		if (napi_schedule_prep(tx_napi)) {
2834 			spin_lock_irqsave(&ch->lock, flags);
2835 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2836 			spin_unlock_irqrestore(&ch->lock, flags);
2837 			__napi_schedule(tx_napi);
2838 		}
2839 	}
2840 
2841 	return status;
2842 }
2843 
2844 /**
2845  * stmmac_dma_interrupt - DMA ISR
2846  * @priv: driver private structure
2847  * Description: this is the DMA ISR. It is called by the main ISR.
2848  * It calls the dwmac dma routine and schedules the poll method in case
2849  * some work can be done.
2850  */
2851 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2852 {
2853 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2854 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2855 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2856 				tx_channel_count : rx_channel_count;
2857 	u32 chan;
2858 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2859 
2860 	/* Make sure we never check beyond our status buffer. */
2861 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2862 		channels_to_check = ARRAY_SIZE(status);
2863 
2864 	for (chan = 0; chan < channels_to_check; chan++)
2865 		status[chan] = stmmac_napi_check(priv, chan,
2866 						 DMA_DIR_RXTX);
2867 
2868 	for (chan = 0; chan < tx_channel_count; chan++) {
2869 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2870 			/* Try to bump up the dma threshold on this failure */
2871 			stmmac_bump_dma_threshold(priv, chan);
2872 		} else if (unlikely(status[chan] == tx_hard_error)) {
2873 			stmmac_tx_err(priv, chan);
2874 		}
2875 	}
2876 }
2877 
2878 /**
2879  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2880  * @priv: driver private structure
2881  * Description: this masks the MMC irq; the counters are, in fact, managed in SW.
2882  */
2883 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2884 {
2885 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2886 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2887 
2888 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2889 
2890 	if (priv->dma_cap.rmon) {
2891 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2892 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2893 	} else
2894 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2895 }
2896 
2897 /**
2898  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2899  * @priv: driver private structure
2900  * Description:
2901  *  newer GMAC chip generations have a register that indicates the
2902  *  presence of the optional features/functions.
2903  *  It can also be used to override the values passed through the
2904  *  platform code, which remain necessary for old MAC10/100 and GMAC chips.
2905  */
2906 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2907 {
2908 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2909 }
2910 
2911 /**
2912  * stmmac_check_ether_addr - check if the MAC addr is valid
2913  * @priv: driver private structure
2914  * Description:
2915  * it verifies that the MAC address is valid; in case it is not, it
2916  * generates a random MAC address
2917  */
2918 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2919 {
2920 	u8 addr[ETH_ALEN];
2921 
2922 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2923 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2924 		if (is_valid_ether_addr(addr))
2925 			eth_hw_addr_set(priv->dev, addr);
2926 		else
2927 			eth_hw_addr_random(priv->dev);
2928 		dev_info(priv->device, "device MAC address %pM\n",
2929 			 priv->dev->dev_addr);
2930 	}
2931 }
2932 
2933 /**
2934  * stmmac_init_dma_engine - DMA init.
2935  * @priv: driver private structure
2936  * Description:
2937  * It inits the DMA by invoking the specific MAC/GMAC callback.
2938  * Some DMA parameters can be passed from the platform;
2939  * if they are not passed, a default is kept for the MAC or GMAC.
2940  */
2941 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2942 {
2943 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2944 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2945 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2946 	struct stmmac_rx_queue *rx_q;
2947 	struct stmmac_tx_queue *tx_q;
2948 	u32 chan = 0;
2949 	int atds = 0;
2950 	int ret = 0;
2951 
2952 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2953 		dev_err(priv->device, "Invalid DMA configuration\n");
2954 		return -EINVAL;
2955 	}
2956 
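	/* Use the alternate (larger) descriptor size when extended
	 * descriptors are used in ring mode.
	 */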
2957 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2958 		atds = 1;
2959 
2960 	ret = stmmac_reset(priv, priv->ioaddr);
2961 	if (ret) {
2962 		dev_err(priv->device, "Failed to reset the dma\n");
2963 		return ret;
2964 	}
2965 
2966 	/* DMA Configuration */
2967 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2968 
2969 	if (priv->plat->axi)
2970 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2971 
2972 	/* DMA CSR Channel configuration */
2973 	for (chan = 0; chan < dma_csr_ch; chan++) {
2974 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2975 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2976 	}
2977 
2978 	/* DMA RX Channel Configuration */
2979 	for (chan = 0; chan < rx_channels_count; chan++) {
2980 		rx_q = &priv->dma_conf.rx_queue[chan];
2981 
2982 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2983 				    rx_q->dma_rx_phy, chan);
2984 
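		/* Point the RX tail pointer just past the last descriptor
		 * that actually got a buffer attached.
		 */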
2985 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2986 				     (rx_q->buf_alloc_num *
2987 				      sizeof(struct dma_desc));
2988 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2989 				       rx_q->rx_tail_addr, chan);
2990 	}
2991 
2992 	/* DMA TX Channel Configuration */
2993 	for (chan = 0; chan < tx_channels_count; chan++) {
2994 		tx_q = &priv->dma_conf.tx_queue[chan];
2995 
2996 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2997 				    tx_q->dma_tx_phy, chan);
2998 
2999 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3000 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3001 				       tx_q->tx_tail_addr, chan);
3002 	}
3003 
3004 	return ret;
3005 }
3006 
3007 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3008 {
3009 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3010 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3011 	struct stmmac_channel *ch;
3012 	struct napi_struct *napi;
3013 
3014 	if (!tx_coal_timer)
3015 		return;
3016 
3017 	ch = &priv->channel[tx_q->queue_index];
3018 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3019 
3020 	/* Arm the timer only if napi is not already scheduled.
3021 	 * If napi is scheduled, try to cancel any pending timer; it will be
3022 	 * armed again by the next scheduled napi.
3023 	 */
3024 	if (unlikely(!napi_is_scheduled(napi)))
3025 		hrtimer_start(&tx_q->txtimer,
3026 			      STMMAC_COAL_TIMER(tx_coal_timer),
3027 			      HRTIMER_MODE_REL);
3028 	else
3029 		hrtimer_try_to_cancel(&tx_q->txtimer);
3030 }
3031 
3032 /**
3033  * stmmac_tx_timer - mitigation sw timer for tx.
3034  * @t: data pointer
3035  * Description:
3036  * This is the TX coalescing timer handler; it schedules the NAPI poll that runs stmmac_tx_clean.
3037  */
3038 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3039 {
3040 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3041 	struct stmmac_priv *priv = tx_q->priv_data;
3042 	struct stmmac_channel *ch;
3043 	struct napi_struct *napi;
3044 
3045 	ch = &priv->channel[tx_q->queue_index];
3046 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3047 
3048 	if (likely(napi_schedule_prep(napi))) {
3049 		unsigned long flags;
3050 
3051 		spin_lock_irqsave(&ch->lock, flags);
3052 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3053 		spin_unlock_irqrestore(&ch->lock, flags);
3054 		__napi_schedule(napi);
3055 	}
3056 
3057 	return HRTIMER_NORESTART;
3058 }
3059 
3060 /**
3061  * stmmac_init_coalesce - init mitigation options.
3062  * @priv: driver private structure
3063  * Description:
3064  * This inits the coalesce parameters: i.e. timer rate,
3065  * timer handler and default threshold used for enabling the
3066  * interrupt on completion bit.
3067  */
3068 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3069 {
3070 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3071 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3072 	u32 chan;
3073 
3074 	for (chan = 0; chan < tx_channel_count; chan++) {
3075 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3076 
3077 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3078 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3079 
3080 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3081 		tx_q->txtimer.function = stmmac_tx_timer;
3082 	}
3083 
3084 	for (chan = 0; chan < rx_channel_count; chan++)
3085 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3086 }
3087 
3088 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3089 {
3090 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3091 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3092 	u32 chan;
3093 
3094 	/* set TX ring length */
3095 	for (chan = 0; chan < tx_channels_count; chan++)
3096 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3097 				       (priv->dma_conf.dma_tx_size - 1), chan);
3098 
3099 	/* set RX ring length */
3100 	for (chan = 0; chan < rx_channels_count; chan++)
3101 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3102 				       (priv->dma_conf.dma_rx_size - 1), chan);
3103 }
3104 
3105 /**
3106  *  stmmac_set_tx_queue_weight - Set TX queue weight
3107  *  @priv: driver private structure
3108  *  Description: It is used for setting the TX queue weights
3109  */
3110 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3111 {
3112 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3113 	u32 weight;
3114 	u32 queue;
3115 
3116 	for (queue = 0; queue < tx_queues_count; queue++) {
3117 		weight = priv->plat->tx_queues_cfg[queue].weight;
3118 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3119 	}
3120 }
3121 
3122 /**
3123  *  stmmac_configure_cbs - Configure CBS in TX queue
3124  *  @priv: driver private structure
3125  *  Description: It is used for configuring CBS in AVB TX queues
3126  */
3127 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3128 {
3129 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3130 	u32 mode_to_use;
3131 	u32 queue;
3132 
3133 	/* queue 0 is reserved for legacy traffic */
3134 	for (queue = 1; queue < tx_queues_count; queue++) {
3135 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3136 		if (mode_to_use == MTL_QUEUE_DCB)
3137 			continue;
3138 
3139 		stmmac_config_cbs(priv, priv->hw,
3140 				priv->plat->tx_queues_cfg[queue].send_slope,
3141 				priv->plat->tx_queues_cfg[queue].idle_slope,
3142 				priv->plat->tx_queues_cfg[queue].high_credit,
3143 				priv->plat->tx_queues_cfg[queue].low_credit,
3144 				queue);
3145 	}
3146 }
3147 
3148 /**
3149  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3150  *  @priv: driver private structure
3151  *  Description: It is used for mapping RX queues to RX dma channels
3152  */
3153 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3154 {
3155 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3156 	u32 queue;
3157 	u32 chan;
3158 
3159 	for (queue = 0; queue < rx_queues_count; queue++) {
3160 		chan = priv->plat->rx_queues_cfg[queue].chan;
3161 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3162 	}
3163 }
3164 
3165 /**
3166  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3167  *  @priv: driver private structure
3168  *  Description: It is used for configuring the RX Queue Priority
3169  */
3170 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3171 {
3172 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3173 	u32 queue;
3174 	u32 prio;
3175 
3176 	for (queue = 0; queue < rx_queues_count; queue++) {
3177 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3178 			continue;
3179 
3180 		prio = priv->plat->rx_queues_cfg[queue].prio;
3181 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3182 	}
3183 }
3184 
3185 /**
3186  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3187  *  @priv: driver private structure
3188  *  Description: It is used for configuring the TX Queue Priority
3189  */
3190 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3191 {
3192 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3193 	u32 queue;
3194 	u32 prio;
3195 
3196 	for (queue = 0; queue < tx_queues_count; queue++) {
3197 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3198 			continue;
3199 
3200 		prio = priv->plat->tx_queues_cfg[queue].prio;
3201 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3202 	}
3203 }
3204 
3205 /**
3206  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3207  *  @priv: driver private structure
3208  *  Description: It is used for configuring the RX queue routing
3209  */
3210 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3211 {
3212 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3213 	u32 queue;
3214 	u8 packet;
3215 
3216 	for (queue = 0; queue < rx_queues_count; queue++) {
3217 		/* no specific packet type routing specified for the queue */
3218 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3219 			continue;
3220 
3221 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3222 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3223 	}
3224 }
3225 
3226 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3227 {
3228 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3229 		priv->rss.enable = false;
3230 		return;
3231 	}
3232 
3233 	if (priv->dev->features & NETIF_F_RXHASH)
3234 		priv->rss.enable = true;
3235 	else
3236 		priv->rss.enable = false;
3237 
3238 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3239 			     priv->plat->rx_queues_to_use);
3240 }
3241 
3242 /**
3243  *  stmmac_mtl_configuration - Configure MTL
3244  *  @priv: driver private structure
3245  *  Description: It is used for configuring the MTL
3246  */
3247 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3248 {
3249 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3250 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3251 
3252 	if (tx_queues_count > 1)
3253 		stmmac_set_tx_queue_weight(priv);
3254 
3255 	/* Configure MTL RX algorithms */
3256 	if (rx_queues_count > 1)
3257 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3258 				priv->plat->rx_sched_algorithm);
3259 
3260 	/* Configure MTL TX algorithms */
3261 	if (tx_queues_count > 1)
3262 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3263 				priv->plat->tx_sched_algorithm);
3264 
3265 	/* Configure CBS in AVB TX queues */
3266 	if (tx_queues_count > 1)
3267 		stmmac_configure_cbs(priv);
3268 
3269 	/* Map RX MTL to DMA channels */
3270 	stmmac_rx_queue_dma_chan_map(priv);
3271 
3272 	/* Enable MAC RX Queues */
3273 	stmmac_mac_enable_rx_queues(priv);
3274 
3275 	/* Set RX priorities */
3276 	if (rx_queues_count > 1)
3277 		stmmac_mac_config_rx_queues_prio(priv);
3278 
3279 	/* Set TX priorities */
3280 	if (tx_queues_count > 1)
3281 		stmmac_mac_config_tx_queues_prio(priv);
3282 
3283 	/* Set RX routing */
3284 	if (rx_queues_count > 1)
3285 		stmmac_mac_config_rx_queues_routing(priv);
3286 
3287 	/* Receive Side Scaling */
3288 	if (rx_queues_count > 1)
3289 		stmmac_mac_config_rss(priv);
3290 }
3291 
3292 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3293 {
3294 	if (priv->dma_cap.asp) {
3295 		netdev_info(priv->dev, "Enabling Safety Features\n");
3296 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3297 					  priv->plat->safety_feat_cfg);
3298 	} else {
3299 		netdev_info(priv->dev, "No Safety Features support found\n");
3300 	}
3301 }
3302 
3303 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3304 {
3305 	char *name;
3306 
3307 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3308 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3309 
3310 	name = priv->wq_name;
3311 	sprintf(name, "%s-fpe", priv->dev->name);
3312 
3313 	priv->fpe_wq = create_singlethread_workqueue(name);
3314 	if (!priv->fpe_wq) {
3315 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3316 
3317 		return -ENOMEM;
3318 	}
3319 	netdev_info(priv->dev, "FPE workqueue start");
3320 
3321 	return 0;
3322 }
3323 
3324 /**
3325  * stmmac_hw_setup - setup mac in a usable state.
3326  *  @dev: pointer to the device structure.
3327  *  @ptp_register: register PTP if set
3328  *  Description:
3329  *  this is the main function to setup the HW in a usable state: the
3330  *  dma engine is reset, the core registers are configured (e.g. AXI,
3331  *  Checksum features, timers) and the DMA is ready to start receiving
3332  *  and transmitting.
3333  *  Return value:
3334  *  0 on success and an appropriate negative integer as defined in
3335  *  errno.h on failure.
3336  */
3337 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3338 {
3339 	struct stmmac_priv *priv = netdev_priv(dev);
3340 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3341 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3342 	bool sph_en;
3343 	u32 chan;
3344 	int ret;
3345 
3346 	/* DMA initialization and SW reset */
3347 	ret = stmmac_init_dma_engine(priv);
3348 	if (ret < 0) {
3349 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3350 			   __func__);
3351 		return ret;
3352 	}
3353 
3354 	/* Copy the MAC addr into the HW  */
3355 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3356 
3357 	/* PS and related bits will be programmed according to the speed */
3358 	if (priv->hw->pcs) {
3359 		int speed = priv->plat->mac_port_sel_speed;
3360 
3361 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3362 		    (speed == SPEED_1000)) {
3363 			priv->hw->ps = speed;
3364 		} else {
3365 			dev_warn(priv->device, "invalid port speed\n");
3366 			priv->hw->ps = 0;
3367 		}
3368 	}
3369 
3370 	/* Initialize the MAC Core */
3371 	stmmac_core_init(priv, priv->hw, dev);
3372 
3373 	/* Initialize MTL*/
3374 	stmmac_mtl_configuration(priv);
3375 
3376 	/* Initialize Safety Features */
3377 	stmmac_safety_feat_configuration(priv);
3378 
3379 	ret = stmmac_rx_ipc(priv, priv->hw);
3380 	if (!ret) {
3381 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3382 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3383 		priv->hw->rx_csum = 0;
3384 	}
3385 
3386 	/* Enable the MAC Rx/Tx */
3387 	stmmac_mac_set(priv, priv->ioaddr, true);
3388 
3389 	/* Set the HW DMA mode and the COE */
3390 	stmmac_dma_operation_mode(priv);
3391 
3392 	stmmac_mmc_setup(priv);
3393 
3394 	if (ptp_register) {
3395 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3396 		if (ret < 0)
3397 			netdev_warn(priv->dev,
3398 				    "failed to enable PTP reference clock: %pe\n",
3399 				    ERR_PTR(ret));
3400 	}
3401 
3402 	ret = stmmac_init_ptp(priv);
3403 	if (ret == -EOPNOTSUPP)
3404 		netdev_info(priv->dev, "PTP not supported by HW\n");
3405 	else if (ret)
3406 		netdev_warn(priv->dev, "PTP init failed\n");
3407 	else if (ptp_register)
3408 		stmmac_ptp_register(priv);
3409 
3410 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3411 
3412 	/* Convert the timer from msec to usec */
3413 	if (!priv->tx_lpi_timer)
3414 		priv->tx_lpi_timer = eee_timer * 1000;
3415 
3416 	if (priv->use_riwt) {
3417 		u32 queue;
3418 
3419 		for (queue = 0; queue < rx_cnt; queue++) {
3420 			if (!priv->rx_riwt[queue])
3421 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3422 
3423 			stmmac_rx_watchdog(priv, priv->ioaddr,
3424 					   priv->rx_riwt[queue], queue);
3425 		}
3426 	}
3427 
3428 	if (priv->hw->pcs)
3429 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3430 
3431 	/* set TX and RX rings length */
3432 	stmmac_set_rings_length(priv);
3433 
3434 	/* Enable TSO */
3435 	if (priv->tso) {
3436 		for (chan = 0; chan < tx_cnt; chan++) {
3437 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3438 
3439 			/* TSO and TBS cannot co-exist */
3440 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3441 				continue;
3442 
3443 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3444 		}
3445 	}
3446 
3447 	/* Enable Split Header */
3448 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3449 	for (chan = 0; chan < rx_cnt; chan++)
3450 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3451 
3452 
3453 	/* VLAN Tag Insertion */
3454 	if (priv->dma_cap.vlins)
3455 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3456 
3457 	/* TBS */
3458 	for (chan = 0; chan < tx_cnt; chan++) {
3459 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3460 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3461 
3462 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3463 	}
3464 
3465 	/* Configure real RX and TX queues */
3466 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3467 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3468 
3469 	/* Start the ball rolling... */
3470 	stmmac_start_all_dma(priv);
3471 
3472 	if (priv->dma_cap.fpesel) {
3473 		stmmac_fpe_start_wq(priv);
3474 
3475 		if (priv->plat->fpe_cfg->enable)
3476 			stmmac_fpe_handshake(priv, true);
3477 	}
3478 
3479 	return 0;
3480 }
3481 
3482 static void stmmac_hw_teardown(struct net_device *dev)
3483 {
3484 	struct stmmac_priv *priv = netdev_priv(dev);
3485 
3486 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3487 }
3488 
3489 static void stmmac_free_irq(struct net_device *dev,
3490 			    enum request_irq_err irq_err, int irq_idx)
3491 {
3492 	struct stmmac_priv *priv = netdev_priv(dev);
3493 	int j;
3494 
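	/* Unwind in reverse order of allocation: each case falls through so
	 * that every IRQ requested before the failing step gets freed.
	 */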
3495 	switch (irq_err) {
3496 	case REQ_IRQ_ERR_ALL:
3497 		irq_idx = priv->plat->tx_queues_to_use;
3498 		fallthrough;
3499 	case REQ_IRQ_ERR_TX:
3500 		for (j = irq_idx - 1; j >= 0; j--) {
3501 			if (priv->tx_irq[j] > 0) {
3502 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3503 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3504 			}
3505 		}
3506 		irq_idx = priv->plat->rx_queues_to_use;
3507 		fallthrough;
3508 	case REQ_IRQ_ERR_RX:
3509 		for (j = irq_idx - 1; j >= 0; j--) {
3510 			if (priv->rx_irq[j] > 0) {
3511 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3512 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3513 			}
3514 		}
3515 
3516 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3517 			free_irq(priv->sfty_ue_irq, dev);
3518 		fallthrough;
3519 	case REQ_IRQ_ERR_SFTY_UE:
3520 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3521 			free_irq(priv->sfty_ce_irq, dev);
3522 		fallthrough;
3523 	case REQ_IRQ_ERR_SFTY_CE:
3524 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3525 			free_irq(priv->lpi_irq, dev);
3526 		fallthrough;
3527 	case REQ_IRQ_ERR_LPI:
3528 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3529 			free_irq(priv->wol_irq, dev);
3530 		fallthrough;
3531 	case REQ_IRQ_ERR_WOL:
3532 		free_irq(dev->irq, dev);
3533 		fallthrough;
3534 	case REQ_IRQ_ERR_MAC:
3535 	case REQ_IRQ_ERR_NO:
3536 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3537 		break;
3538 	}
3539 }
3540 
3541 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3542 {
3543 	struct stmmac_priv *priv = netdev_priv(dev);
3544 	enum request_irq_err irq_err;
3545 	cpumask_t cpu_mask;
3546 	int irq_idx = 0;
3547 	char *int_name;
3548 	int ret;
3549 	int i;
3550 
3551 	/* For common interrupt */
3552 	int_name = priv->int_name_mac;
3553 	sprintf(int_name, "%s:%s", dev->name, "mac");
3554 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3555 			  0, int_name, dev);
3556 	if (unlikely(ret < 0)) {
3557 		netdev_err(priv->dev,
3558 			   "%s: alloc mac MSI %d (error: %d)\n",
3559 			   __func__, dev->irq, ret);
3560 		irq_err = REQ_IRQ_ERR_MAC;
3561 		goto irq_error;
3562 	}
3563 
3564 	/* Request the Wake IRQ in case another line
3565 	 * is used for WoL
3566 	 */
3567 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3568 		int_name = priv->int_name_wol;
3569 		sprintf(int_name, "%s:%s", dev->name, "wol");
3570 		ret = request_irq(priv->wol_irq,
3571 				  stmmac_mac_interrupt,
3572 				  0, int_name, dev);
3573 		if (unlikely(ret < 0)) {
3574 			netdev_err(priv->dev,
3575 				   "%s: alloc wol MSI %d (error: %d)\n",
3576 				   __func__, priv->wol_irq, ret);
3577 			irq_err = REQ_IRQ_ERR_WOL;
3578 			goto irq_error;
3579 		}
3580 	}
3581 
3582 	/* Request the LPI IRQ in case another line
3583 	 * is used for LPI
3584 	 */
3585 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3586 		int_name = priv->int_name_lpi;
3587 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3588 		ret = request_irq(priv->lpi_irq,
3589 				  stmmac_mac_interrupt,
3590 				  0, int_name, dev);
3591 		if (unlikely(ret < 0)) {
3592 			netdev_err(priv->dev,
3593 				   "%s: alloc lpi MSI %d (error: %d)\n",
3594 				   __func__, priv->lpi_irq, ret);
3595 			irq_err = REQ_IRQ_ERR_LPI;
3596 			goto irq_error;
3597 		}
3598 	}
3599 
3600 	/* Request the Safety Feature Correctable Error line in
3601 	 * case another line is used
3602 	 */
3603 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3604 		int_name = priv->int_name_sfty_ce;
3605 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3606 		ret = request_irq(priv->sfty_ce_irq,
3607 				  stmmac_safety_interrupt,
3608 				  0, int_name, dev);
3609 		if (unlikely(ret < 0)) {
3610 			netdev_err(priv->dev,
3611 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3612 				   __func__, priv->sfty_ce_irq, ret);
3613 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3614 			goto irq_error;
3615 		}
3616 	}
3617 
3618 	/* Request the Safety Feature Uncorrectable Error line in
3619 	 * case another line is used
3620 	 */
3621 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3622 		int_name = priv->int_name_sfty_ue;
3623 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3624 		ret = request_irq(priv->sfty_ue_irq,
3625 				  stmmac_safety_interrupt,
3626 				  0, int_name, dev);
3627 		if (unlikely(ret < 0)) {
3628 			netdev_err(priv->dev,
3629 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3630 				   __func__, priv->sfty_ue_irq, ret);
3631 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3632 			goto irq_error;
3633 		}
3634 	}
3635 
3636 	/* Request Rx MSI irq */
3637 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3638 		if (i >= MTL_MAX_RX_QUEUES)
3639 			break;
3640 		if (priv->rx_irq[i] == 0)
3641 			continue;
3642 
3643 		int_name = priv->int_name_rx_irq[i];
3644 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3645 		ret = request_irq(priv->rx_irq[i],
3646 				  stmmac_msi_intr_rx,
3647 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3648 		if (unlikely(ret < 0)) {
3649 			netdev_err(priv->dev,
3650 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3651 				   __func__, i, priv->rx_irq[i], ret);
3652 			irq_err = REQ_IRQ_ERR_RX;
3653 			irq_idx = i;
3654 			goto irq_error;
3655 		}
3656 		cpumask_clear(&cpu_mask);
3657 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3658 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3659 	}
3660 
3661 	/* Request Tx MSI irq */
3662 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3663 		if (i >= MTL_MAX_TX_QUEUES)
3664 			break;
3665 		if (priv->tx_irq[i] == 0)
3666 			continue;
3667 
3668 		int_name = priv->int_name_tx_irq[i];
3669 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3670 		ret = request_irq(priv->tx_irq[i],
3671 				  stmmac_msi_intr_tx,
3672 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3673 		if (unlikely(ret < 0)) {
3674 			netdev_err(priv->dev,
3675 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3676 				   __func__, i, priv->tx_irq[i], ret);
3677 			irq_err = REQ_IRQ_ERR_TX;
3678 			irq_idx = i;
3679 			goto irq_error;
3680 		}
3681 		cpumask_clear(&cpu_mask);
3682 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3683 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3684 	}
3685 
3686 	return 0;
3687 
3688 irq_error:
3689 	stmmac_free_irq(dev, irq_err, irq_idx);
3690 	return ret;
3691 }
3692 
3693 static int stmmac_request_irq_single(struct net_device *dev)
3694 {
3695 	struct stmmac_priv *priv = netdev_priv(dev);
3696 	enum request_irq_err irq_err;
3697 	int ret;
3698 
3699 	ret = request_irq(dev->irq, stmmac_interrupt,
3700 			  IRQF_SHARED, dev->name, dev);
3701 	if (unlikely(ret < 0)) {
3702 		netdev_err(priv->dev,
3703 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3704 			   __func__, dev->irq, ret);
3705 		irq_err = REQ_IRQ_ERR_MAC;
3706 		goto irq_error;
3707 	}
3708 
3709 	/* Request the Wake IRQ in case another line
3710 	 * is used for WoL
3711 	 */
3712 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3713 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3714 				  IRQF_SHARED, dev->name, dev);
3715 		if (unlikely(ret < 0)) {
3716 			netdev_err(priv->dev,
3717 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3718 				   __func__, priv->wol_irq, ret);
3719 			irq_err = REQ_IRQ_ERR_WOL;
3720 			goto irq_error;
3721 		}
3722 	}
3723 
3724 	/* Request the LPI IRQ in case another line is used for LPI */
3725 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3726 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3727 				  IRQF_SHARED, dev->name, dev);
3728 		if (unlikely(ret < 0)) {
3729 			netdev_err(priv->dev,
3730 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3731 				   __func__, priv->lpi_irq, ret);
3732 			irq_err = REQ_IRQ_ERR_LPI;
3733 			goto irq_error;
3734 		}
3735 	}
3736 
3737 	return 0;
3738 
3739 irq_error:
3740 	stmmac_free_irq(dev, irq_err, 0);
3741 	return ret;
3742 }
3743 
3744 static int stmmac_request_irq(struct net_device *dev)
3745 {
3746 	struct stmmac_priv *priv = netdev_priv(dev);
3747 	int ret;
3748 
3749 	/* Request the IRQ lines */
3750 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3751 		ret = stmmac_request_irq_multi_msi(dev);
3752 	else
3753 		ret = stmmac_request_irq_single(dev);
3754 
3755 	return ret;
3756 }
3757 
3758 /**
3759  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3760  *  @priv: driver private structure
3761  *  @mtu: MTU to setup the dma queue and buf with
3762  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3763  *  Allocate the Tx/Rx DMA queues and initialize them.
3764  *  Return value:
3765  *  the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3766  */
3767 static struct stmmac_dma_conf *
3768 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3769 {
3770 	struct stmmac_dma_conf *dma_conf;
3771 	int chan, bfsize, ret;
3772 
3773 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3774 	if (!dma_conf) {
3775 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3776 			   __func__);
3777 		return ERR_PTR(-ENOMEM);
3778 	}
3779 
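	/* Work out the RX buffer size for this MTU: use a 16KiB buffer when
	 * the ring mode asks for it, otherwise derive the size from the MTU.
	 */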
3780 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3781 	if (bfsize < 0)
3782 		bfsize = 0;
3783 
3784 	if (bfsize < BUF_SIZE_16KiB)
3785 		bfsize = stmmac_set_bfsize(mtu, 0);
3786 
3787 	dma_conf->dma_buf_sz = bfsize;
3788 	/* Choose the Tx/Rx ring sizes from the ones already defined in the
3789 	 * priv struct, if any.
3790 	 */
3791 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3792 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3793 
3794 	if (!dma_conf->dma_tx_size)
3795 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3796 	if (!dma_conf->dma_rx_size)
3797 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3798 
3799 	/* Earlier check for TBS */
3800 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3801 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3802 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3803 
3804 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3805 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3806 	}
3807 
3808 	ret = alloc_dma_desc_resources(priv, dma_conf);
3809 	if (ret < 0) {
3810 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3811 			   __func__);
3812 		goto alloc_error;
3813 	}
3814 
3815 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3816 	if (ret < 0) {
3817 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3818 			   __func__);
3819 		goto init_error;
3820 	}
3821 
3822 	return dma_conf;
3823 
3824 init_error:
3825 	free_dma_desc_resources(priv, dma_conf);
3826 alloc_error:
3827 	kfree(dma_conf);
3828 	return ERR_PTR(ret);
3829 }
3830 
3831 /**
3832  *  __stmmac_open - open entry point of the driver
3833  *  @dev : pointer to the device structure.
3834  *  @dma_conf : DMA configuration to adopt for this interface
3835  *  Description:
3836  *  This function is the open entry point of the driver.
3837  *  Return value:
3838  *  0 on success and an appropriate negative errno value on failure.
3840  */
3841 static int __stmmac_open(struct net_device *dev,
3842 			 struct stmmac_dma_conf *dma_conf)
3843 {
3844 	struct stmmac_priv *priv = netdev_priv(dev);
3845 	int mode = priv->plat->phy_interface;
3846 	u32 chan;
3847 	int ret;
3848 
3849 	ret = pm_runtime_resume_and_get(priv->device);
3850 	if (ret < 0)
3851 		return ret;
3852 
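	/* Attach a PHY only when the link is not fully managed by the
	 * PCS (TBI/RTBI, C73 autoneg through XPCS, or a Lynx PCS).
	 */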
3853 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3854 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3855 	    (!priv->hw->xpcs ||
3856 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3857 	    !priv->hw->lynx_pcs) {
3858 		ret = stmmac_init_phy(dev);
3859 		if (ret) {
3860 			netdev_err(priv->dev,
3861 				   "%s: Cannot attach to PHY (error: %d)\n",
3862 				   __func__, ret);
3863 			goto init_phy_error;
3864 		}
3865 	}
3866 
3867 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3868 
3869 	buf_sz = dma_conf->dma_buf_sz;
3870 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3871 
3872 	stmmac_reset_queues_param(priv);
3873 
3874 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3875 	    priv->plat->serdes_powerup) {
3876 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3877 		if (ret < 0) {
3878 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3879 				   __func__);
3880 			goto init_error;
3881 		}
3882 	}
3883 
3884 	ret = stmmac_hw_setup(dev, true);
3885 	if (ret < 0) {
3886 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3887 		goto init_error;
3888 	}
3889 
3890 	stmmac_init_coalesce(priv);
3891 
3892 	phylink_start(priv->phylink);
3893 	/* We may have called phylink_speed_down before */
3894 	phylink_speed_up(priv->phylink);
3895 
3896 	ret = stmmac_request_irq(dev);
3897 	if (ret)
3898 		goto irq_error;
3899 
3900 	stmmac_enable_all_queues(priv);
3901 	netif_tx_start_all_queues(priv->dev);
3902 	stmmac_enable_all_dma_irq(priv);
3903 
3904 	return 0;
3905 
3906 irq_error:
3907 	phylink_stop(priv->phylink);
3908 
3909 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3910 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3911 
3912 	stmmac_hw_teardown(dev);
3913 init_error:
3914 	phylink_disconnect_phy(priv->phylink);
3915 init_phy_error:
3916 	pm_runtime_put(priv->device);
3917 	return ret;
3918 }
3919 
3920 static int stmmac_open(struct net_device *dev)
3921 {
3922 	struct stmmac_priv *priv = netdev_priv(dev);
3923 	struct stmmac_dma_conf *dma_conf;
3924 	int ret;
3925 
3926 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3927 	if (IS_ERR(dma_conf))
3928 		return PTR_ERR(dma_conf);
3929 
3930 	ret = __stmmac_open(dev, dma_conf);
3931 	if (ret)
3932 		free_dma_desc_resources(priv, dma_conf);
3933 
3934 	kfree(dma_conf);
3935 	return ret;
3936 }
3937 
3938 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3939 {
3940 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3941 
3942 	if (priv->fpe_wq)
3943 		destroy_workqueue(priv->fpe_wq);
3944 
3945 	netdev_info(priv->dev, "FPE workqueue stop");
3946 }
3947 
3948 /**
3949  *  stmmac_release - close entry point of the driver
3950  *  @dev : device pointer.
3951  *  Description:
3952  *  This is the stop entry point of the driver.
3953  */
3954 static int stmmac_release(struct net_device *dev)
3955 {
3956 	struct stmmac_priv *priv = netdev_priv(dev);
3957 	u32 chan;
3958 
3959 	if (device_may_wakeup(priv->device))
3960 		phylink_speed_down(priv->phylink, false);
3961 	/* Stop and disconnect the PHY */
3962 	phylink_stop(priv->phylink);
3963 	phylink_disconnect_phy(priv->phylink);
3964 
3965 	stmmac_disable_all_queues(priv);
3966 
3967 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3968 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3969 
3970 	netif_tx_disable(dev);
3971 
3972 	/* Free the IRQ lines */
3973 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3974 
3975 	if (priv->eee_enabled) {
3976 		priv->tx_path_in_lpi_mode = false;
3977 		del_timer_sync(&priv->eee_ctrl_timer);
3978 	}
3979 
3980 	/* Stop TX/RX DMA and clear the descriptors */
3981 	stmmac_stop_all_dma(priv);
3982 
3983 	/* Release and free the Rx/Tx resources */
3984 	free_dma_desc_resources(priv, &priv->dma_conf);
3985 
3986 	/* Disable the MAC Rx/Tx */
3987 	stmmac_mac_set(priv, priv->ioaddr, false);
3988 
3989 	/* Powerdown Serdes if there is */
3990 	if (priv->plat->serdes_powerdown)
3991 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3992 
3993 	netif_carrier_off(dev);
3994 
3995 	stmmac_release_ptp(priv);
3996 
3997 	pm_runtime_put(priv->device);
3998 
3999 	if (priv->dma_cap.fpesel)
4000 		stmmac_fpe_stop_wq(priv);
4001 
4002 	return 0;
4003 }
4004 
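/* Try to offload VLAN tag insertion to the hardware; this consumes one TX
 * descriptor and returns true when the tag was queued for insertion.
 */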
4005 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4006 			       struct stmmac_tx_queue *tx_q)
4007 {
4008 	u16 tag = 0x0, inner_tag = 0x0;
4009 	u32 inner_type = 0x0;
4010 	struct dma_desc *p;
4011 
4012 	if (!priv->dma_cap.vlins)
4013 		return false;
4014 	if (!skb_vlan_tag_present(skb))
4015 		return false;
4016 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4017 		inner_tag = skb_vlan_tag_get(skb);
4018 		inner_type = STMMAC_VLAN_INSERT;
4019 	}
4020 
4021 	tag = skb_vlan_tag_get(skb);
4022 
4023 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4024 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4025 	else
4026 		p = &tx_q->dma_tx[tx_q->cur_tx];
4027 
4028 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4029 		return false;
4030 
4031 	stmmac_set_tx_owner(priv, p);
4032 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4033 	return true;
4034 }
4035 
4036 /**
4037  *  stmmac_tso_allocator - fill TX descriptors with the TSO payload
4038  *  @priv: driver private structure
4039  *  @des: buffer start address
4040  *  @total_len: total length to fill in descriptors
4041  *  @last_segment: condition for the last descriptor
4042  *  @queue: TX queue index
4043  *  Description:
4044  *  This function fills descriptors and requests new ones according to the
4045  *  buffer length left to fill.
4046  */
4047 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4048 				 int total_len, bool last_segment, u32 queue)
4049 {
4050 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4051 	struct dma_desc *desc;
4052 	u32 buff_size;
4053 	int tmp_len;
4054 
4055 	tmp_len = total_len;
4056 
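	/* Split the payload into TSO_MAX_BUFF_SIZE chunks, consuming one
	 * descriptor per chunk and marking the last chunk when this is the
	 * final segment of the frame.
	 */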
4057 	while (tmp_len > 0) {
4058 		dma_addr_t curr_addr;
4059 
4060 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4061 						priv->dma_conf.dma_tx_size);
4062 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4063 
4064 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4065 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4066 		else
4067 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4068 
4069 		curr_addr = des + (total_len - tmp_len);
4070 		if (priv->dma_cap.addr64 <= 32)
4071 			desc->des0 = cpu_to_le32(curr_addr);
4072 		else
4073 			stmmac_set_desc_addr(priv, desc, curr_addr);
4074 
4075 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4076 			    TSO_MAX_BUFF_SIZE : tmp_len;
4077 
4078 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4079 				0, 1,
4080 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4081 				0, 0);
4082 
4083 		tmp_len -= TSO_MAX_BUFF_SIZE;
4084 	}
4085 }
4086 
4087 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4088 {
4089 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4090 	int desc_size;
4091 
4092 	if (likely(priv->extend_desc))
4093 		desc_size = sizeof(struct dma_extended_desc);
4094 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4095 		desc_size = sizeof(struct dma_edesc);
4096 	else
4097 		desc_size = sizeof(struct dma_desc);
4098 
4099 	/* The own bit must be the last thing written when preparing the
4100 	 * descriptor; a barrier is then needed to make sure everything is
4101 	 * coherent before handing the descriptor over to the DMA engine.
4102 	 */
4103 	wmb();
4104 
4105 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4106 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4107 }
4108 
4109 /**
4110  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4111  *  @skb : the socket buffer
4112  *  @dev : device pointer
4113  *  Description: this is the transmit function that is called on TSO frames
4114  *  (support available on GMAC4 and newer chips).
4115  *  The diagram below shows the ring programming in case of TSO frames:
4116  *
4117  *  First Descriptor
4118  *   --------
4119  *   | DES0 |---> buffer1 = L2/L3/L4 header
4120  *   | DES1 |---> TCP Payload (can continue on next descr...)
4121  *   | DES2 |---> buffer 1 and 2 len
4122  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4123  *   --------
4124  *	|
4125  *     ...
4126  *	|
4127  *   --------
4128  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4129  *   | DES1 | --|
4130  *   | DES2 | --> buffer 1 and 2 len
4131  *   | DES3 |
4132  *   --------
4133  *
4134  * The MSS is fixed while TSO is enabled, so the TDES3 context field only needs reprogramming when the MSS changes.
4135  */
4136 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4137 {
4138 	struct dma_desc *desc, *first, *mss_desc = NULL;
4139 	struct stmmac_priv *priv = netdev_priv(dev);
4140 	int nfrags = skb_shinfo(skb)->nr_frags;
4141 	u32 queue = skb_get_queue_mapping(skb);
4142 	unsigned int first_entry, tx_packets;
4143 	struct stmmac_txq_stats *txq_stats;
4144 	int tmp_pay_len = 0, first_tx;
4145 	struct stmmac_tx_queue *tx_q;
4146 	bool has_vlan, set_ic;
4147 	u8 proto_hdr_len, hdr;
4148 	unsigned long flags;
4149 	u32 pay_len, mss;
4150 	dma_addr_t des;
4151 	int i;
4152 
4153 	tx_q = &priv->dma_conf.tx_queue[queue];
4154 	txq_stats = &priv->xstats.txq_stats[queue];
4155 	first_tx = tx_q->cur_tx;
4156 
4157 	/* Compute header lengths */
4158 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4159 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4160 		hdr = sizeof(struct udphdr);
4161 	} else {
4162 		proto_hdr_len = skb_tcp_all_headers(skb);
4163 		hdr = tcp_hdrlen(skb);
4164 	}
4165 
4166 	/* Descriptor availability based on the threshold should be safe enough */
4167 	if (unlikely(stmmac_tx_avail(priv, queue) <
4168 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4169 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4170 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4171 								queue));
4172 			/* This is a hard error, log it. */
4173 			netdev_err(priv->dev,
4174 				   "%s: Tx Ring full when queue awake\n",
4175 				   __func__);
4176 		}
4177 		return NETDEV_TX_BUSY;
4178 	}
4179 
4180 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4181 
4182 	mss = skb_shinfo(skb)->gso_size;
4183 
4184 	/* set new MSS value if needed */
4185 	if (mss != tx_q->mss) {
4186 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4187 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4188 		else
4189 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4190 
4191 		stmmac_set_mss(priv, mss_desc, mss);
4192 		tx_q->mss = mss;
4193 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4194 						priv->dma_conf.dma_tx_size);
4195 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4196 	}
4197 
4198 	if (netif_msg_tx_queued(priv)) {
4199 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4200 			__func__, hdr, proto_hdr_len, pay_len, mss);
4201 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4202 			skb->data_len);
4203 	}
4204 
4205 	/* Check if VLAN can be inserted by HW */
4206 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4207 
4208 	first_entry = tx_q->cur_tx;
4209 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4210 
4211 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4212 		desc = &tx_q->dma_entx[first_entry].basic;
4213 	else
4214 		desc = &tx_q->dma_tx[first_entry];
4215 	first = desc;
4216 
4217 	if (has_vlan)
4218 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4219 
4220 	/* first descriptor: fill Headers on Buf1 */
4221 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4222 			     DMA_TO_DEVICE);
4223 	if (dma_mapping_error(priv->device, des))
4224 		goto dma_map_err;
4225 
4226 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4227 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4228 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4229 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4230 
4231 	if (priv->dma_cap.addr64 <= 32) {
4232 		first->des0 = cpu_to_le32(des);
4233 
4234 		/* Fill start of payload in buff2 of first descriptor */
4235 		if (pay_len)
4236 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4237 
4238 		/* If needed take extra descriptors to fill the remaining payload */
4239 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4240 	} else {
4241 		stmmac_set_desc_addr(priv, first, des);
4242 		tmp_pay_len = pay_len;
4243 		des += proto_hdr_len;
4244 		pay_len = 0;
4245 	}
4246 
4247 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4248 
4249 	/* Prepare fragments */
4250 	for (i = 0; i < nfrags; i++) {
4251 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4252 
4253 		des = skb_frag_dma_map(priv->device, frag, 0,
4254 				       skb_frag_size(frag),
4255 				       DMA_TO_DEVICE);
4256 		if (dma_mapping_error(priv->device, des))
4257 			goto dma_map_err;
4258 
4259 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4260 				     (i == nfrags - 1), queue);
4261 
4262 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4263 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4264 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4265 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4266 	}
4267 
4268 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4269 
4270 	/* Only the last descriptor gets to point to the skb. */
4271 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4272 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4273 
4274 	/* Manage tx mitigation */
4275 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4276 	tx_q->tx_count_frames += tx_packets;
4277 
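	/* Decide whether to raise an interrupt on completion for this frame:
	 * always for HW-timestamped frames, never when frame coalescing is
	 * disabled, otherwise whenever the coalesce frame count is crossed.
	 */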
4278 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4279 		set_ic = true;
4280 	else if (!priv->tx_coal_frames[queue])
4281 		set_ic = false;
4282 	else if (tx_packets > priv->tx_coal_frames[queue])
4283 		set_ic = true;
4284 	else if ((tx_q->tx_count_frames %
4285 		  priv->tx_coal_frames[queue]) < tx_packets)
4286 		set_ic = true;
4287 	else
4288 		set_ic = false;
4289 
4290 	if (set_ic) {
4291 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4292 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4293 		else
4294 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4295 
4296 		tx_q->tx_count_frames = 0;
4297 		stmmac_set_tx_ic(priv, desc);
4298 	}
4299 
4300 	/* We've used all descriptors we need for this skb, however,
4301 	 * advance cur_tx so that it references a fresh descriptor.
4302 	 * ndo_start_xmit will fill this descriptor the next time it's
4303 	 * called and stmmac_tx_clean may clean up to this descriptor.
4304 	 */
4305 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4306 
4307 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4308 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4309 			  __func__);
4310 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4311 	}
4312 
4313 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4314 	txq_stats->tx_bytes += skb->len;
4315 	txq_stats->tx_tso_frames++;
4316 	txq_stats->tx_tso_nfrags += nfrags;
4317 	if (set_ic)
4318 		txq_stats->tx_set_ic_bit++;
4319 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4320 
4321 	if (priv->sarc_type)
4322 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4323 
4324 	skb_tx_timestamp(skb);
4325 
4326 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4327 		     priv->hwts_tx_en)) {
4328 		/* declare that device is doing timestamping */
4329 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4330 		stmmac_enable_tx_timestamp(priv, first);
4331 	}
4332 
4333 	/* Complete the first descriptor before granting the DMA */
4334 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4335 			proto_hdr_len,
4336 			pay_len,
4337 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4338 			hdr / 4, (skb->len - proto_hdr_len));
4339 
4340 	/* If context desc is used to change MSS */
4341 	if (mss_desc) {
4342 		/* Make sure that the first descriptor has been completely
4343 		 * written, including its own bit. The MSS descriptor actually
4344 		 * comes before the first descriptor, so its own bit must be
4345 		 * the last thing written.
4346 		 */
4347 		dma_wmb();
4348 		stmmac_set_tx_owner(priv, mss_desc);
4349 	}
4350 
4351 	if (netif_msg_pktdata(priv)) {
4352 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4353 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4354 			tx_q->cur_tx, first, nfrags);
4355 		pr_info(">>> frame to be transmitted: ");
4356 		print_pkt(skb->data, skb_headlen(skb));
4357 	}
4358 
4359 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4360 
4361 	stmmac_flush_tx_descriptors(priv, queue);
4362 	stmmac_tx_timer_arm(priv, queue);
4363 
4364 	return NETDEV_TX_OK;
4365 
4366 dma_map_err:
4367 	dev_err(priv->device, "Tx dma map failed\n");
4368 	dev_kfree_skb(skb);
4369 	priv->xstats.tx_dropped++;
4370 	return NETDEV_TX_OK;
4371 }
4372 
4373 /**
4374  *  stmmac_xmit - Tx entry point of the driver
4375  *  @skb : the socket buffer
4376  *  @dev : device pointer
4377  *  Description : this is the tx entry point of the driver.
4378  *  It programs the chain or the ring and supports oversized frames
4379  *  and SG feature.
4380  */
4381 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4382 {
4383 	unsigned int first_entry, tx_packets, enh_desc;
4384 	struct stmmac_priv *priv = netdev_priv(dev);
4385 	unsigned int nopaged_len = skb_headlen(skb);
4386 	int i, csum_insertion = 0, is_jumbo = 0;
4387 	u32 queue = skb_get_queue_mapping(skb);
4388 	int nfrags = skb_shinfo(skb)->nr_frags;
4389 	int gso = skb_shinfo(skb)->gso_type;
4390 	struct stmmac_txq_stats *txq_stats;
4391 	struct dma_edesc *tbs_desc = NULL;
4392 	struct dma_desc *desc, *first;
4393 	struct stmmac_tx_queue *tx_q;
4394 	bool has_vlan, set_ic;
4395 	int entry, first_tx;
4396 	unsigned long flags;
4397 	dma_addr_t des;
4398 
4399 	tx_q = &priv->dma_conf.tx_queue[queue];
4400 	txq_stats = &priv->xstats.txq_stats[queue];
4401 	first_tx = tx_q->cur_tx;
4402 
4403 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4404 		stmmac_disable_eee_mode(priv);
4405 
4406 	/* Manage oversized TCP frames for GMAC4 device */
4407 	if (skb_is_gso(skb) && priv->tso) {
4408 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4409 			return stmmac_tso_xmit(skb, dev);
4410 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4411 			return stmmac_tso_xmit(skb, dev);
4412 	}
4413 
4414 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4415 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4416 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4417 								queue));
4418 			/* This is a hard error, log it. */
4419 			netdev_err(priv->dev,
4420 				   "%s: Tx Ring full when queue awake\n",
4421 				   __func__);
4422 		}
4423 		return NETDEV_TX_BUSY;
4424 	}
4425 
4426 	/* Check if VLAN can be inserted by HW */
4427 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4428 
4429 	entry = tx_q->cur_tx;
4430 	first_entry = entry;
4431 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4432 
4433 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4434 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4435 	 * queues. In that case, checksum offloading for those queues that don't
4436 	 * support tx coe needs to fall back to software checksum calculation.
4437 	 */
4438 	if (csum_insertion &&
4439 	    priv->plat->tx_queues_cfg[queue].coe_unsupported) {
4440 		if (unlikely(skb_checksum_help(skb)))
4441 			goto dma_map_err;
4442 		csum_insertion = !csum_insertion;
4443 	}
4444 
4445 	if (likely(priv->extend_desc))
4446 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4447 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4448 		desc = &tx_q->dma_entx[entry].basic;
4449 	else
4450 		desc = tx_q->dma_tx + entry;
4451 
4452 	first = desc;
4453 
4454 	if (has_vlan)
4455 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4456 
4457 	enh_desc = priv->plat->enh_desc;
4458 	/* To program the descriptors according to the size of the frame */
4459 	if (enh_desc)
4460 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4461 
4462 	if (unlikely(is_jumbo)) {
4463 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4464 		if (unlikely(entry < 0) && (entry != -EINVAL))
4465 			goto dma_map_err;
4466 	}
4467 
4468 	for (i = 0; i < nfrags; i++) {
4469 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4470 		int len = skb_frag_size(frag);
4471 		bool last_segment = (i == (nfrags - 1));
4472 
4473 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4474 		WARN_ON(tx_q->tx_skbuff[entry]);
4475 
4476 		if (likely(priv->extend_desc))
4477 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4478 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4479 			desc = &tx_q->dma_entx[entry].basic;
4480 		else
4481 			desc = tx_q->dma_tx + entry;
4482 
4483 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4484 				       DMA_TO_DEVICE);
4485 		if (dma_mapping_error(priv->device, des))
4486 			goto dma_map_err; /* should reuse desc w/o issues */
4487 
4488 		tx_q->tx_skbuff_dma[entry].buf = des;
4489 
4490 		stmmac_set_desc_addr(priv, desc, des);
4491 
4492 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4493 		tx_q->tx_skbuff_dma[entry].len = len;
4494 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4495 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4496 
4497 		/* Prepare the descriptor and set the own bit too */
4498 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4499 				priv->mode, 1, last_segment, skb->len);
4500 	}
4501 
4502 	/* Only the last descriptor gets to point to the skb. */
4503 	tx_q->tx_skbuff[entry] = skb;
4504 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4505 
4506 	/* According to the coalesce parameter, the IC bit for the latest
4507 	 * segment is reset and the timer is restarted to clean the tx status.
4508 	 * This approach takes care of the fragments: desc is the first
4509 	 * element in case of no SG.
4510 	 */
4511 	tx_packets = (entry + 1) - first_tx;
4512 	tx_q->tx_count_frames += tx_packets;
4513 
4514 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4515 		set_ic = true;
4516 	else if (!priv->tx_coal_frames[queue])
4517 		set_ic = false;
4518 	else if (tx_packets > priv->tx_coal_frames[queue])
4519 		set_ic = true;
4520 	else if ((tx_q->tx_count_frames %
4521 		  priv->tx_coal_frames[queue]) < tx_packets)
4522 		set_ic = true;
4523 	else
4524 		set_ic = false;
4525 
4526 	if (set_ic) {
4527 		if (likely(priv->extend_desc))
4528 			desc = &tx_q->dma_etx[entry].basic;
4529 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4530 			desc = &tx_q->dma_entx[entry].basic;
4531 		else
4532 			desc = &tx_q->dma_tx[entry];
4533 
4534 		tx_q->tx_count_frames = 0;
4535 		stmmac_set_tx_ic(priv, desc);
4536 	}
4537 
4538 	/* We've used all descriptors we need for this skb, however,
4539 	 * advance cur_tx so that it references a fresh descriptor.
4540 	 * ndo_start_xmit will fill this descriptor the next time it's
4541 	 * called and stmmac_tx_clean may clean up to this descriptor.
4542 	 */
4543 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4544 	tx_q->cur_tx = entry;
4545 
4546 	if (netif_msg_pktdata(priv)) {
4547 		netdev_dbg(priv->dev,
4548 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4549 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4550 			   entry, first, nfrags);
4551 
4552 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4553 		print_pkt(skb->data, skb->len);
4554 	}
4555 
4556 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4557 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4558 			  __func__);
4559 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4560 	}
4561 
4562 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4563 	txq_stats->tx_bytes += skb->len;
4564 	if (set_ic)
4565 		txq_stats->tx_set_ic_bit++;
4566 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4567 
4568 	if (priv->sarc_type)
4569 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4570 
4571 	skb_tx_timestamp(skb);
4572 
4573 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4574 	 * problems because all the descriptors are actually ready to be
4575 	 * passed to the DMA engine.
4576 	 */
4577 	if (likely(!is_jumbo)) {
4578 		bool last_segment = (nfrags == 0);
4579 
4580 		des = dma_map_single(priv->device, skb->data,
4581 				     nopaged_len, DMA_TO_DEVICE);
4582 		if (dma_mapping_error(priv->device, des))
4583 			goto dma_map_err;
4584 
4585 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4586 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4587 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4588 
4589 		stmmac_set_desc_addr(priv, first, des);
4590 
4591 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4592 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4593 
4594 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4595 			     priv->hwts_tx_en)) {
4596 			/* declare that device is doing timestamping */
4597 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4598 			stmmac_enable_tx_timestamp(priv, first);
4599 		}
4600 
4601 		/* Prepare the first descriptor setting the OWN bit too */
4602 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4603 				csum_insertion, priv->mode, 0, last_segment,
4604 				skb->len);
4605 	}
4606 
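	/* With Time-Based Scheduling enabled, program the launch time taken
	 * from skb->tstamp into the enhanced transmit descriptor.
	 */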
4607 	if (tx_q->tbs & STMMAC_TBS_EN) {
4608 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4609 
4610 		tbs_desc = &tx_q->dma_entx[first_entry];
4611 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4612 	}
4613 
4614 	stmmac_set_tx_owner(priv, first);
4615 
4616 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4617 
4618 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4619 
4620 	stmmac_flush_tx_descriptors(priv, queue);
4621 	stmmac_tx_timer_arm(priv, queue);
4622 
4623 	return NETDEV_TX_OK;
4624 
4625 dma_map_err:
4626 	netdev_err(priv->dev, "Tx DMA map failed\n");
4627 	dev_kfree_skb(skb);
4628 	priv->xstats.tx_dropped++;
4629 	return NETDEV_TX_OK;
4630 }
4631 
4632 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4633 {
4634 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4635 	__be16 vlan_proto = veth->h_vlan_proto;
4636 	u16 vlanid;
4637 
4638 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4639 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4640 	    (vlan_proto == htons(ETH_P_8021AD) &&
4641 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4642 		/* pop the vlan tag */
4643 		vlanid = ntohs(veth->h_vlan_TCI);
4644 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4645 		skb_pull(skb, VLAN_HLEN);
4646 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4647 	}
4648 }
4649 
4650 /**
4651  * stmmac_rx_refill - refill used skb preallocated buffers
4652  * @priv: driver private structure
4653  * @queue: RX queue index
4654  * Description : this reallocates the RX buffers for the reception process
4655  * that is based on zero-copy.
4656  */
4657 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4658 {
4659 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4660 	int dirty = stmmac_rx_dirty(priv, queue);
4661 	unsigned int entry = rx_q->dirty_rx;
4662 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4663 
4664 	if (priv->dma_cap.host_dma_width <= 32)
4665 		gfp |= GFP_DMA32;
4666 
4667 	while (dirty-- > 0) {
4668 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4669 		struct dma_desc *p;
4670 		bool use_rx_wd;
4671 
4672 		if (priv->extend_desc)
4673 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4674 		else
4675 			p = rx_q->dma_rx + entry;
4676 
4677 		if (!buf->page) {
4678 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4679 			if (!buf->page)
4680 				break;
4681 		}
4682 
4683 		if (priv->sph && !buf->sec_page) {
4684 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4685 			if (!buf->sec_page)
4686 				break;
4687 
4688 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4689 		}
4690 
4691 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4692 
4693 		stmmac_set_desc_addr(priv, p, buf->addr);
4694 		if (priv->sph)
4695 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4696 		else
4697 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4698 		stmmac_refill_desc3(priv, rx_q, p);
4699 
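		/* Per-descriptor interrupt mitigation: when RIWT is in use, the
		 * completion interrupt is left to the RX watchdog/coalesce
		 * settings rather than being requested on every descriptor.
		 */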
4700 		rx_q->rx_count_frames++;
4701 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4702 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4703 			rx_q->rx_count_frames = 0;
4704 
4705 		use_rx_wd = !priv->rx_coal_frames[queue];
4706 		use_rx_wd |= rx_q->rx_count_frames > 0;
4707 		if (!priv->use_riwt)
4708 			use_rx_wd = false;
4709 
4710 		dma_wmb();
4711 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4712 
4713 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4714 	}
4715 	rx_q->dirty_rx = entry;
4716 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4717 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4718 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4719 }
4720 
4721 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4722 				       struct dma_desc *p,
4723 				       int status, unsigned int len)
4724 {
4725 	unsigned int plen = 0, hlen = 0;
4726 	int coe = priv->hw->rx_csum;
4727 
4728 	/* Not the first descriptor: buffer 1 length is always zero */
4729 	if (priv->sph && len)
4730 		return 0;
4731 
4732 	/* First descriptor, get split header length */
4733 	stmmac_get_rx_header_len(priv, p, &hlen);
4734 	if (priv->sph && hlen) {
4735 		priv->xstats.rx_split_hdr_pkt_n++;
4736 		return hlen;
4737 	}
4738 
4739 	/* First descriptor, not last descriptor and not split header */
4740 	if (status & rx_not_ls)
4741 		return priv->dma_conf.dma_buf_sz;
4742 
4743 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4744 
4745 	/* First descriptor and last descriptor and not split header */
4746 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4747 }
4748 
4749 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4750 				       struct dma_desc *p,
4751 				       int status, unsigned int len)
4752 {
4753 	int coe = priv->hw->rx_csum;
4754 	unsigned int plen = 0;
4755 
4756 	/* Not split header, buffer is not available */
4757 	if (!priv->sph)
4758 		return 0;
4759 
4760 	/* Not last descriptor */
4761 	if (status & rx_not_ls)
4762 		return priv->dma_conf.dma_buf_sz;
4763 
4764 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4765 
4766 	/* Last descriptor */
4767 	return plen - len;
4768 }
4769 
4770 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4771 				struct xdp_frame *xdpf, bool dma_map)
4772 {
4773 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4774 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4775 	unsigned int entry = tx_q->cur_tx;
4776 	struct dma_desc *tx_desc;
4777 	dma_addr_t dma_addr;
4778 	bool set_ic;
4779 
4780 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4781 		return STMMAC_XDP_CONSUMED;
4782 
4783 	if (likely(priv->extend_desc))
4784 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4785 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4786 		tx_desc = &tx_q->dma_entx[entry].basic;
4787 	else
4788 		tx_desc = tx_q->dma_tx + entry;
4789 
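	/* ndo_xdp_xmit (redirected) frames must be freshly DMA mapped, while
	 * XDP_TX frames already sit in page_pool memory and only need a DMA
	 * sync before being handed to the hardware.
	 */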
4790 	if (dma_map) {
4791 		dma_addr = dma_map_single(priv->device, xdpf->data,
4792 					  xdpf->len, DMA_TO_DEVICE);
4793 		if (dma_mapping_error(priv->device, dma_addr))
4794 			return STMMAC_XDP_CONSUMED;
4795 
4796 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4797 	} else {
4798 		struct page *page = virt_to_page(xdpf->data);
4799 
4800 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4801 			   xdpf->headroom;
4802 		dma_sync_single_for_device(priv->device, dma_addr,
4803 					   xdpf->len, DMA_BIDIRECTIONAL);
4804 
4805 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4806 	}
4807 
4808 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4809 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4810 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4811 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4812 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4813 
4814 	tx_q->xdpf[entry] = xdpf;
4815 
4816 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4817 
4818 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4819 			       true, priv->mode, true, true,
4820 			       xdpf->len);
4821 
4822 	tx_q->tx_count_frames++;
4823 
4824 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4825 		set_ic = true;
4826 	else
4827 		set_ic = false;
4828 
4829 	if (set_ic) {
4830 		unsigned long flags;
4831 		tx_q->tx_count_frames = 0;
4832 		stmmac_set_tx_ic(priv, tx_desc);
4833 		flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4834 		txq_stats->tx_set_ic_bit++;
4835 		u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4836 	}
4837 
4838 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4839 
4840 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4841 	tx_q->cur_tx = entry;
4842 
4843 	return STMMAC_XDP_TX;
4844 }
4845 
4846 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4847 				   int cpu)
4848 {
4849 	int index = cpu;
4850 
4851 	if (unlikely(index < 0))
4852 		index = 0;
4853 
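	/* Map the current CPU onto a TX queue, wrapping around when there
	 * are fewer TX queues than CPUs.
	 */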
4854 	while (index >= priv->plat->tx_queues_to_use)
4855 		index -= priv->plat->tx_queues_to_use;
4856 
4857 	return index;
4858 }
4859 
4860 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4861 				struct xdp_buff *xdp)
4862 {
4863 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4864 	int cpu = smp_processor_id();
4865 	struct netdev_queue *nq;
4866 	int queue;
4867 	int res;
4868 
4869 	if (unlikely(!xdpf))
4870 		return STMMAC_XDP_CONSUMED;
4871 
4872 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4873 	nq = netdev_get_tx_queue(priv->dev, queue);
4874 
4875 	__netif_tx_lock(nq, cpu);
4876 	/* Avoid a TX time-out as we are sharing the queue with the slow path */
4877 	txq_trans_cond_update(nq);
4878 
4879 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4880 	if (res == STMMAC_XDP_TX)
4881 		stmmac_flush_tx_descriptors(priv, queue);
4882 
4883 	__netif_tx_unlock(nq);
4884 
4885 	return res;
4886 }
4887 
4888 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4889 				 struct bpf_prog *prog,
4890 				 struct xdp_buff *xdp)
4891 {
4892 	u32 act;
4893 	int res;
4894 
4895 	act = bpf_prog_run_xdp(prog, xdp);
4896 	switch (act) {
4897 	case XDP_PASS:
4898 		res = STMMAC_XDP_PASS;
4899 		break;
4900 	case XDP_TX:
4901 		res = stmmac_xdp_xmit_back(priv, xdp);
4902 		break;
4903 	case XDP_REDIRECT:
4904 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4905 			res = STMMAC_XDP_CONSUMED;
4906 		else
4907 			res = STMMAC_XDP_REDIRECT;
4908 		break;
4909 	default:
4910 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4911 		fallthrough;
4912 	case XDP_ABORTED:
4913 		trace_xdp_exception(priv->dev, prog, act);
4914 		fallthrough;
4915 	case XDP_DROP:
4916 		res = STMMAC_XDP_CONSUMED;
4917 		break;
4918 	}
4919 
4920 	return res;
4921 }
4922 
4923 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4924 					   struct xdp_buff *xdp)
4925 {
4926 	struct bpf_prog *prog;
4927 	int res;
4928 
4929 	prog = READ_ONCE(priv->xdp_prog);
4930 	if (!prog) {
4931 		res = STMMAC_XDP_PASS;
4932 		goto out;
4933 	}
4934 
4935 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4936 out:
4937 	return ERR_PTR(-res);
4938 }
4939 
4940 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4941 				   int xdp_status)
4942 {
4943 	int cpu = smp_processor_id();
4944 	int queue;
4945 
4946 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4947 
4948 	if (xdp_status & STMMAC_XDP_TX)
4949 		stmmac_tx_timer_arm(priv, queue);
4950 
4951 	if (xdp_status & STMMAC_XDP_REDIRECT)
4952 		xdp_do_flush();
4953 }
4954 
4955 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4956 					       struct xdp_buff *xdp)
4957 {
4958 	unsigned int metasize = xdp->data - xdp->data_meta;
4959 	unsigned int datasize = xdp->data_end - xdp->data;
4960 	struct sk_buff *skb;
4961 
4962 	skb = __napi_alloc_skb(&ch->rxtx_napi,
4963 			       xdp->data_end - xdp->data_hard_start,
4964 			       GFP_ATOMIC | __GFP_NOWARN);
4965 	if (unlikely(!skb))
4966 		return NULL;
4967 
4968 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
4969 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4970 	if (metasize)
4971 		skb_metadata_set(skb, metasize);
4972 
4973 	return skb;
4974 }
4975 
4976 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4977 				   struct dma_desc *p, struct dma_desc *np,
4978 				   struct xdp_buff *xdp)
4979 {
4980 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4981 	struct stmmac_channel *ch = &priv->channel[queue];
4982 	unsigned int len = xdp->data_end - xdp->data;
4983 	enum pkt_hash_types hash_type;
4984 	int coe = priv->hw->rx_csum;
4985 	unsigned long flags;
4986 	struct sk_buff *skb;
4987 	u32 hash;
4988 
4989 	skb = stmmac_construct_skb_zc(ch, xdp);
4990 	if (!skb) {
4991 		priv->xstats.rx_dropped++;
4992 		return;
4993 	}
4994 
4995 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
4996 	stmmac_rx_vlan(priv->dev, skb);
4997 	skb->protocol = eth_type_trans(skb, priv->dev);
4998 
4999 	if (unlikely(!coe))
5000 		skb_checksum_none_assert(skb);
5001 	else
5002 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5003 
5004 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5005 		skb_set_hash(skb, hash, hash_type);
5006 
5007 	skb_record_rx_queue(skb, queue);
5008 	napi_gro_receive(&ch->rxtx_napi, skb);
5009 
5010 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5011 	rxq_stats->rx_pkt_n++;
5012 	rxq_stats->rx_bytes += len;
5013 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5014 }
5015 
5016 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5017 {
5018 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5019 	unsigned int entry = rx_q->dirty_rx;
5020 	struct dma_desc *rx_desc = NULL;
5021 	bool ret = true;
5022 
5023 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5024 
5025 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5026 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5027 		dma_addr_t dma_addr;
5028 		bool use_rx_wd;
5029 
5030 		if (!buf->xdp) {
5031 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5032 			if (!buf->xdp) {
5033 				ret = false;
5034 				break;
5035 			}
5036 		}
5037 
5038 		if (priv->extend_desc)
5039 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5040 		else
5041 			rx_desc = rx_q->dma_rx + entry;
5042 
5043 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5044 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5045 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5046 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5047 
5048 		rx_q->rx_count_frames++;
5049 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5050 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5051 			rx_q->rx_count_frames = 0;
5052 
5053 		use_rx_wd = !priv->rx_coal_frames[queue];
5054 		use_rx_wd |= rx_q->rx_count_frames > 0;
5055 		if (!priv->use_riwt)
5056 			use_rx_wd = false;
5057 
5058 		dma_wmb();
5059 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5060 
5061 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5062 	}
5063 
5064 	if (rx_desc) {
5065 		rx_q->dirty_rx = entry;
5066 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5067 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5068 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5069 	}
5070 
5071 	return ret;
5072 }
5073 
5074 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5075 {
5076 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5077 	 * to represent incoming packet, whereas cb field in the same structure
5078 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5079 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5080 	 */
5081 	return (struct stmmac_xdp_buff *)xdp;
5082 }
5083 
5084 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5085 {
5086 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5087 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5088 	unsigned int count = 0, error = 0, len = 0;
5089 	int dirty = stmmac_rx_dirty(priv, queue);
5090 	unsigned int next_entry = rx_q->cur_rx;
5091 	u32 rx_errors = 0, rx_dropped = 0;
5092 	unsigned int desc_size;
5093 	struct bpf_prog *prog;
5094 	bool failure = false;
5095 	unsigned long flags;
5096 	int xdp_status = 0;
5097 	int status = 0;
5098 
5099 	if (netif_msg_rx_status(priv)) {
5100 		void *rx_head;
5101 
5102 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5103 		if (priv->extend_desc) {
5104 			rx_head = (void *)rx_q->dma_erx;
5105 			desc_size = sizeof(struct dma_extended_desc);
5106 		} else {
5107 			rx_head = (void *)rx_q->dma_rx;
5108 			desc_size = sizeof(struct dma_desc);
5109 		}
5110 
5111 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5112 				    rx_q->dma_rx_phy, desc_size);
5113 	}
5114 	while (count < limit) {
5115 		struct stmmac_rx_buffer *buf;
5116 		struct stmmac_xdp_buff *ctx;
5117 		unsigned int buf1_len = 0;
5118 		struct dma_desc *np, *p;
5119 		int entry;
5120 		int res;
5121 
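		/* Resume a frame that spilled over from the previous NAPI poll */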
5122 		if (!count && rx_q->state_saved) {
5123 			error = rx_q->state.error;
5124 			len = rx_q->state.len;
5125 		} else {
5126 			rx_q->state_saved = false;
5127 			error = 0;
5128 			len = 0;
5129 		}
5130 
5131 		if (count >= limit)
5132 			break;
5133 
5134 read_again:
5135 		buf1_len = 0;
5136 		entry = next_entry;
5137 		buf = &rx_q->buf_pool[entry];
5138 
5139 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5140 			failure = failure ||
5141 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5142 			dirty = 0;
5143 		}
5144 
5145 		if (priv->extend_desc)
5146 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5147 		else
5148 			p = rx_q->dma_rx + entry;
5149 
5150 		/* read the status of the incoming frame */
5151 		status = stmmac_rx_status(priv, &priv->xstats, p);
5152 		/* check if managed by the DMA otherwise go ahead */
5153 		if (unlikely(status & dma_own))
5154 			break;
5155 
5156 		/* Prefetch the next RX descriptor */
5157 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5158 						priv->dma_conf.dma_rx_size);
5159 		next_entry = rx_q->cur_rx;
5160 
5161 		if (priv->extend_desc)
5162 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5163 		else
5164 			np = rx_q->dma_rx + next_entry;
5165 
5166 		prefetch(np);
5167 
5168 		/* Ensure a valid XSK buffer before proceeding */
5169 		if (!buf->xdp)
5170 			break;
5171 
5172 		if (priv->extend_desc)
5173 			stmmac_rx_extended_status(priv, &priv->xstats,
5174 						  rx_q->dma_erx + entry);
5175 		if (unlikely(status == discard_frame)) {
5176 			xsk_buff_free(buf->xdp);
5177 			buf->xdp = NULL;
5178 			dirty++;
5179 			error = 1;
5180 			if (!priv->hwts_rx_en)
5181 				rx_errors++;
5182 		}
5183 
5184 		if (unlikely(error && (status & rx_not_ls)))
5185 			goto read_again;
5186 		if (unlikely(error)) {
5187 			count++;
5188 			continue;
5189 		}
5190 
5191 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5192 		if (likely(status & rx_not_ls)) {
5193 			xsk_buff_free(buf->xdp);
5194 			buf->xdp = NULL;
5195 			dirty++;
5196 			count++;
5197 			goto read_again;
5198 		}
5199 
5200 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5201 		ctx->priv = priv;
5202 		ctx->desc = p;
5203 		ctx->ndesc = np;
5204 
5205 		/* XDP ZC frames only support the primary buffer for now */
5206 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5207 		len += buf1_len;
5208 
5209 		/* ACS is disabled; strip manually. */
5210 		if (likely(!(status & rx_not_ls))) {
5211 			buf1_len -= ETH_FCS_LEN;
5212 			len -= ETH_FCS_LEN;
5213 		}
5214 
5215 		/* RX buffer is good and fit into a XSK pool buffer */
5216 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5217 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5218 
5219 		prog = READ_ONCE(priv->xdp_prog);
5220 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5221 
5222 		switch (res) {
5223 		case STMMAC_XDP_PASS:
5224 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5225 			xsk_buff_free(buf->xdp);
5226 			break;
5227 		case STMMAC_XDP_CONSUMED:
5228 			xsk_buff_free(buf->xdp);
5229 			rx_dropped++;
5230 			break;
5231 		case STMMAC_XDP_TX:
5232 		case STMMAC_XDP_REDIRECT:
5233 			xdp_status |= res;
5234 			break;
5235 		}
5236 
5237 		buf->xdp = NULL;
5238 		dirty++;
5239 		count++;
5240 	}
5241 
5242 	if (status & rx_not_ls) {
5243 		rx_q->state_saved = true;
5244 		rx_q->state.error = error;
5245 		rx_q->state.len = len;
5246 	}
5247 
5248 	stmmac_finalize_xdp_rx(priv, xdp_status);
5249 
5250 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5251 	rxq_stats->rx_pkt_n += count;
5252 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5253 
5254 	priv->xstats.rx_dropped += rx_dropped;
5255 	priv->xstats.rx_errors += rx_errors;
5256 
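	/* When the XSK pool uses the need_wakeup flag, tell user space
	 * whether it has to wake the driver again to refill the RX ring.
	 */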
5257 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5258 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5259 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5260 		else
5261 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5262 
5263 		return (int)count;
5264 	}
5265 
5266 	return failure ? limit : (int)count;
5267 }
5268 
5269 /**
5270  * stmmac_rx - manage the receive process
5271  * @priv: driver private structure
5272  * @limit: NAPI budget
5273  * @queue: RX queue index.
5274  * Description: this is the function called by the NAPI poll method.
5275  * It gets all the frames inside the ring.
5276  */
5277 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5278 {
5279 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5280 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5281 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5282 	struct stmmac_channel *ch = &priv->channel[queue];
5283 	unsigned int count = 0, error = 0, len = 0;
5284 	int status = 0, coe = priv->hw->rx_csum;
5285 	unsigned int next_entry = rx_q->cur_rx;
5286 	enum dma_data_direction dma_dir;
5287 	unsigned int desc_size;
5288 	struct sk_buff *skb = NULL;
5289 	struct stmmac_xdp_buff ctx;
5290 	unsigned long flags;
5291 	int xdp_status = 0;
5292 	int buf_sz;
5293 
5294 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
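	/* buf_sz is rounded up to whole pages before being handed to
	 * xdp_init_buff(), and the budget is clamped so a single poll
	 * never walks the entire RX ring.
	 */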
5295 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5296 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5297 
5298 	if (netif_msg_rx_status(priv)) {
5299 		void *rx_head;
5300 
5301 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5302 		if (priv->extend_desc) {
5303 			rx_head = (void *)rx_q->dma_erx;
5304 			desc_size = sizeof(struct dma_extended_desc);
5305 		} else {
5306 			rx_head = (void *)rx_q->dma_rx;
5307 			desc_size = sizeof(struct dma_desc);
5308 		}
5309 
5310 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5311 				    rx_q->dma_rx_phy, desc_size);
5312 	}
5313 	while (count < limit) {
5314 		unsigned int buf1_len = 0, buf2_len = 0;
5315 		enum pkt_hash_types hash_type;
5316 		struct stmmac_rx_buffer *buf;
5317 		struct dma_desc *np, *p;
5318 		int entry;
5319 		u32 hash;
5320 
5321 		if (!count && rx_q->state_saved) {
5322 			skb = rx_q->state.skb;
5323 			error = rx_q->state.error;
5324 			len = rx_q->state.len;
5325 		} else {
5326 			rx_q->state_saved = false;
5327 			skb = NULL;
5328 			error = 0;
5329 			len = 0;
5330 		}
5331 
5332 read_again:
5333 		if (count >= limit)
5334 			break;
5335 
5336 		buf1_len = 0;
5337 		buf2_len = 0;
5338 		entry = next_entry;
5339 		buf = &rx_q->buf_pool[entry];
5340 
5341 		if (priv->extend_desc)
5342 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5343 		else
5344 			p = rx_q->dma_rx + entry;
5345 
5346 		/* read the status of the incoming frame */
5347 		status = stmmac_rx_status(priv, &priv->xstats, p);
5348 		/* if the descriptor is still owned by the DMA, stop here; otherwise go ahead */
5349 		if (unlikely(status & dma_own))
5350 			break;
5351 
5352 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5353 						priv->dma_conf.dma_rx_size);
5354 		next_entry = rx_q->cur_rx;
5355 
5356 		if (priv->extend_desc)
5357 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5358 		else
5359 			np = rx_q->dma_rx + next_entry;
5360 
5361 		prefetch(np);
5362 
5363 		if (priv->extend_desc)
5364 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5365 		if (unlikely(status == discard_frame)) {
5366 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5367 			buf->page = NULL;
5368 			error = 1;
5369 			if (!priv->hwts_rx_en)
5370 				rx_errors++;
5371 		}
5372 
5373 		if (unlikely(error && (status & rx_not_ls)))
5374 			goto read_again;
5375 		if (unlikely(error)) {
5376 			dev_kfree_skb(skb);
5377 			skb = NULL;
5378 			count++;
5379 			continue;
5380 		}
5381 
5382 		/* Buffer is good. Go on. */
5383 
5384 		prefetch(page_address(buf->page) + buf->page_offset);
5385 		if (buf->sec_page)
5386 			prefetch(page_address(buf->sec_page));
5387 
5388 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5389 		len += buf1_len;
5390 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5391 		len += buf2_len;
5392 
5393 		/* ACS is disabled; strip manually. */
5394 		if (likely(!(status & rx_not_ls))) {
5395 			if (buf2_len) {
5396 				buf2_len -= ETH_FCS_LEN;
5397 				len -= ETH_FCS_LEN;
5398 			} else if (buf1_len) {
5399 				buf1_len -= ETH_FCS_LEN;
5400 				len -= ETH_FCS_LEN;
5401 			}
5402 		}
5403 
5404 		if (!skb) {
5405 			unsigned int pre_len, sync_len;
5406 
5407 			dma_sync_single_for_cpu(priv->device, buf->addr,
5408 						buf1_len, dma_dir);
5409 
5410 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5411 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5412 					 buf->page_offset, buf1_len, true);
5413 
5414 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5415 				  buf->page_offset;
5416 
5417 			ctx.priv = priv;
5418 			ctx.desc = p;
5419 			ctx.ndesc = np;
5420 
5421 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5422 			/* Due to xdp_adjust_tail: the DMA sync-for-device
5423 			 * length must cover the max length the CPU touched.
5424 			 */
5425 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5426 				   buf->page_offset;
5427 			sync_len = max(sync_len, pre_len);
5428 
5429 			/* Handle verdicts other than XDP_PASS */
5430 			if (IS_ERR(skb)) {
5431 				unsigned int xdp_res = -PTR_ERR(skb);
5432 
5433 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5434 					page_pool_put_page(rx_q->page_pool,
5435 							   virt_to_head_page(ctx.xdp.data),
5436 							   sync_len, true);
5437 					buf->page = NULL;
5438 					rx_dropped++;
5439 
5440 					/* Clear skb: it only carried the
5441 					 * verdict encoded by the XDP program.
5442 					 */
5443 					skb = NULL;
5444 
5445 					if (unlikely((status & rx_not_ls)))
5446 						goto read_again;
5447 
5448 					count++;
5449 					continue;
5450 				} else if (xdp_res & (STMMAC_XDP_TX |
5451 						      STMMAC_XDP_REDIRECT)) {
5452 					xdp_status |= xdp_res;
5453 					buf->page = NULL;
5454 					skb = NULL;
5455 					count++;
5456 					continue;
5457 				}
5458 			}
5459 		}
5460 
5461 		if (!skb) {
5462 			/* XDP program may expand or reduce tail */
5463 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5464 
5465 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5466 			if (!skb) {
5467 				rx_dropped++;
5468 				count++;
5469 				goto drain_data;
5470 			}
5471 
5472 			/* XDP program may adjust header */
5473 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5474 			skb_put(skb, buf1_len);
5475 
5476 			/* Data payload copied into SKB, page ready for recycle */
5477 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5478 			buf->page = NULL;
5479 		} else if (buf1_len) {
5480 			dma_sync_single_for_cpu(priv->device, buf->addr,
5481 						buf1_len, dma_dir);
5482 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5483 					buf->page, buf->page_offset, buf1_len,
5484 					priv->dma_conf.dma_buf_sz);
5485 
5486 			/* Data payload appended into SKB */
5487 			skb_mark_for_recycle(skb);
5488 			buf->page = NULL;
5489 		}
5490 
5491 		if (buf2_len) {
5492 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5493 						buf2_len, dma_dir);
5494 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5495 					buf->sec_page, 0, buf2_len,
5496 					priv->dma_conf.dma_buf_sz);
5497 
5498 			/* Data payload appended into SKB */
5499 			skb_mark_for_recycle(skb);
5500 			buf->sec_page = NULL;
5501 		}
5502 
5503 drain_data:
5504 		if (likely(status & rx_not_ls))
5505 			goto read_again;
5506 		if (!skb)
5507 			continue;
5508 
5509 		/* Got entire packet into SKB. Finish it. */
5510 
5511 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5512 		stmmac_rx_vlan(priv->dev, skb);
5513 		skb->protocol = eth_type_trans(skb, priv->dev);
5514 
5515 		if (unlikely(!coe))
5516 			skb_checksum_none_assert(skb);
5517 		else
5518 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5519 
5520 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5521 			skb_set_hash(skb, hash, hash_type);
5522 
5523 		skb_record_rx_queue(skb, queue);
5524 		napi_gro_receive(&ch->rx_napi, skb);
5525 		skb = NULL;
5526 
5527 		rx_packets++;
5528 		rx_bytes += len;
5529 		count++;
5530 	}
5531 
5532 	if (status & rx_not_ls || skb) {
5533 		rx_q->state_saved = true;
5534 		rx_q->state.skb = skb;
5535 		rx_q->state.error = error;
5536 		rx_q->state.len = len;
5537 	}
5538 
5539 	stmmac_finalize_xdp_rx(priv, xdp_status);
5540 
5541 	stmmac_rx_refill(priv, queue);
5542 
5543 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5544 	rxq_stats->rx_packets += rx_packets;
5545 	rxq_stats->rx_bytes += rx_bytes;
5546 	rxq_stats->rx_pkt_n += count;
5547 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5548 
5549 	priv->xstats.rx_dropped += rx_dropped;
5550 	priv->xstats.rx_errors += rx_errors;
5551 
5552 	return count;
5553 }
5554 
5555 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5556 {
5557 	struct stmmac_channel *ch =
5558 		container_of(napi, struct stmmac_channel, rx_napi);
5559 	struct stmmac_priv *priv = ch->priv_data;
5560 	struct stmmac_rxq_stats *rxq_stats;
5561 	u32 chan = ch->index;
5562 	unsigned long flags;
5563 	int work_done;
5564 
5565 	rxq_stats = &priv->xstats.rxq_stats[chan];
5566 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5567 	rxq_stats->napi_poll++;
5568 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5569 
5570 	work_done = stmmac_rx(priv, budget, chan);
5571 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5572 		unsigned long flags;
5573 
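		/* All RX work is done for now, so re-enable the RX DMA
		 * interrupt for this channel.
		 */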
5574 		spin_lock_irqsave(&ch->lock, flags);
5575 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5576 		spin_unlock_irqrestore(&ch->lock, flags);
5577 	}
5578 
5579 	return work_done;
5580 }
5581 
5582 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5583 {
5584 	struct stmmac_channel *ch =
5585 		container_of(napi, struct stmmac_channel, tx_napi);
5586 	struct stmmac_priv *priv = ch->priv_data;
5587 	struct stmmac_txq_stats *txq_stats;
5588 	bool pending_packets = false;
5589 	u32 chan = ch->index;
5590 	unsigned long flags;
5591 	int work_done;
5592 
5593 	txq_stats = &priv->xstats.txq_stats[chan];
5594 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5595 	txq_stats->napi_poll++;
5596 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5597 
5598 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5599 	work_done = min(work_done, budget);
5600 
5601 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5602 		unsigned long flags;
5603 
5604 		spin_lock_irqsave(&ch->lock, flags);
5605 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5606 		spin_unlock_irqrestore(&ch->lock, flags);
5607 	}
5608 
5609 	/* TX still has packets to handle, check if we need to arm the tx timer */
5610 	if (pending_packets)
5611 		stmmac_tx_timer_arm(priv, chan);
5612 
5613 	return work_done;
5614 }
5615 
5616 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5617 {
5618 	struct stmmac_channel *ch =
5619 		container_of(napi, struct stmmac_channel, rxtx_napi);
5620 	struct stmmac_priv *priv = ch->priv_data;
5621 	bool tx_pending_packets = false;
5622 	int rx_done, tx_done, rxtx_done;
5623 	struct stmmac_rxq_stats *rxq_stats;
5624 	struct stmmac_txq_stats *txq_stats;
5625 	u32 chan = ch->index;
5626 	unsigned long flags;
5627 
5628 	rxq_stats = &priv->xstats.rxq_stats[chan];
5629 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5630 	rxq_stats->napi_poll++;
5631 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5632 
5633 	txq_stats = &priv->xstats.txq_stats[chan];
5634 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5635 	txq_stats->napi_poll++;
5636 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5637 
5638 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5639 	tx_done = min(tx_done, budget);
5640 
5641 	rx_done = stmmac_rx_zc(priv, budget, chan);
5642 
5643 	rxtx_done = max(tx_done, rx_done);
5644 
5645 	/* If either TX or RX work is not complete, return budget
5646 	 * and keep polling
5647 	 */
5648 	if (rxtx_done >= budget)
5649 		return budget;
5650 
5651 	/* all work done, exit the polling mode */
5652 	if (napi_complete_done(napi, rxtx_done)) {
5653 		unsigned long flags;
5654 
5655 		spin_lock_irqsave(&ch->lock, flags);
5656 		/* Both RX and TX work are complete,
5657 		 * so enable both RX & TX IRQs.
5658 		 */
5659 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5660 		spin_unlock_irqrestore(&ch->lock, flags);
5661 	}
5662 
5663 	/* TX still has packets to handle, check if we need to arm the tx timer */
5664 	if (tx_pending_packets)
5665 		stmmac_tx_timer_arm(priv, chan);
5666 
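	/* napi_complete_done() may already have been called above, so never
	 * report the full budget back to the NAPI core from this point.
	 */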
5667 	return min(rxtx_done, budget - 1);
5668 }
5669 
5670 /**
5671  *  stmmac_tx_timeout
5672  *  @dev : Pointer to net device structure
5673  *  @txqueue: the index of the hanging transmit queue
5674  *  Description: this function is called when a packet transmission fails to
5675  *   complete within a reasonable time. The driver will mark the error in the
5676  *   netdev structure and arrange for the device to be reset to a sane state
5677  *   in order to transmit a new packet.
5678  */
5679 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5680 {
5681 	struct stmmac_priv *priv = netdev_priv(dev);
5682 
5683 	stmmac_global_err(priv);
5684 }
5685 
5686 /**
5687  *  stmmac_set_rx_mode - entry point for multicast addressing
5688  *  @dev : pointer to the device structure
5689  *  Description:
5690  *  This function is a driver entry point which gets called by the kernel
5691  *  whenever multicast addresses must be enabled/disabled.
5692  *  Return value:
5693  *  void.
5694  */
5695 static void stmmac_set_rx_mode(struct net_device *dev)
5696 {
5697 	struct stmmac_priv *priv = netdev_priv(dev);
5698 
5699 	stmmac_set_filter(priv, priv->hw, dev);
5700 }
5701 
5702 /**
5703  *  stmmac_change_mtu - entry point to change MTU size for the device.
5704  *  @dev : device pointer.
5705  *  @new_mtu : the new MTU size for the device.
5706  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5707  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5708  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5709  *  Return value:
5710  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5711  *  file on failure.
5712  */
5713 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5714 {
5715 	struct stmmac_priv *priv = netdev_priv(dev);
5716 	int txfifosz = priv->plat->tx_fifo_size;
5717 	struct stmmac_dma_conf *dma_conf;
5718 	const int mtu = new_mtu;
5719 	int ret;
5720 
5721 	if (txfifosz == 0)
5722 		txfifosz = priv->dma_cap.tx_fifo_size;
5723 
5724 	txfifosz /= priv->plat->tx_queues_to_use;
5725 
5726 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5727 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5728 		return -EINVAL;
5729 	}
5730 
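	/* The cache-aligned value is only used for the FIFO check below;
	 * the device keeps the MTU originally requested (dev->mtu = mtu).
	 */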
5731 	new_mtu = STMMAC_ALIGN(new_mtu);
5732 
5733 	/* If condition true, FIFO is too small or MTU too large */
5734 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5735 		return -EINVAL;
5736 
5737 	if (netif_running(dev)) {
5738 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5739 		/* Try to allocate the new DMA conf with the new mtu */
5740 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5741 		if (IS_ERR(dma_conf)) {
5742 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5743 				   mtu);
5744 			return PTR_ERR(dma_conf);
5745 		}
5746 
5747 		stmmac_release(dev);
5748 
5749 		ret = __stmmac_open(dev, dma_conf);
5750 		if (ret) {
5751 			free_dma_desc_resources(priv, dma_conf);
5752 			kfree(dma_conf);
5753 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5754 			return ret;
5755 		}
5756 
5757 		kfree(dma_conf);
5758 
5759 		stmmac_set_rx_mode(dev);
5760 	}
5761 
5762 	dev->mtu = mtu;
5763 	netdev_update_features(dev);
5764 
5765 	return 0;
5766 }
5767 
5768 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5769 					     netdev_features_t features)
5770 {
5771 	struct stmmac_priv *priv = netdev_priv(dev);
5772 
5773 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5774 		features &= ~NETIF_F_RXCSUM;
5775 
5776 	if (!priv->plat->tx_coe)
5777 		features &= ~NETIF_F_CSUM_MASK;
5778 
5779 	/* Some GMAC devices have buggy Jumbo frame support that
5780 	 * needs to have the Tx COE disabled for oversized frames
5781 	 * (due to limited buffer sizes). In this case we disable
5782 	 * the TX csum insertion in the TDES and do not use SF.
5783 	 */
5784 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5785 		features &= ~NETIF_F_CSUM_MASK;
5786 
5787 	/* Disable TSO if asked by ethtool */
5788 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5789 		if (features & NETIF_F_TSO)
5790 			priv->tso = true;
5791 		else
5792 			priv->tso = false;
5793 	}
5794 
5795 	return features;
5796 }
5797 
5798 static int stmmac_set_features(struct net_device *netdev,
5799 			       netdev_features_t features)
5800 {
5801 	struct stmmac_priv *priv = netdev_priv(netdev);
5802 
5803 	/* Keep the COE Type in case checksum offload is supported */
5804 	if (features & NETIF_F_RXCSUM)
5805 		priv->hw->rx_csum = priv->plat->rx_coe;
5806 	else
5807 		priv->hw->rx_csum = 0;
5808 	/* No check needed because rx_coe has been set before and it will be
5809 	 * fixed in case of issue.
5810 	 */
5811 	stmmac_rx_ipc(priv, priv->hw);
5812 
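	/* Split Header stays enabled only while RX checksum offload is on,
	 * mirroring the sph_en condition used when the queues are set up.
	 */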
5813 	if (priv->sph_cap) {
5814 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5815 		u32 chan;
5816 
5817 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5818 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5819 	}
5820 
5821 	return 0;
5822 }
5823 
5824 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5825 {
5826 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5827 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5828 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5829 	bool *hs_enable = &fpe_cfg->hs_enable;
5830 
5831 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5832 		return;
5833 
5834 	/* If LP has sent verify mPacket, LP is FPE capable */
5835 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5836 		if (*lp_state < FPE_STATE_CAPABLE)
5837 			*lp_state = FPE_STATE_CAPABLE;
5838 
5839 		/* If the user has requested FPE enable, respond quickly */
5840 		if (*hs_enable)
5841 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5842 						MPACKET_RESPONSE);
5843 	}
5844 
5845 	/* If Local has sent verify mPacket, Local is FPE capable */
5846 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5847 		if (*lo_state < FPE_STATE_CAPABLE)
5848 			*lo_state = FPE_STATE_CAPABLE;
5849 	}
5850 
5851 	/* If LP has sent response mPacket, LP is entering FPE ON */
5852 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5853 		*lp_state = FPE_STATE_ENTERING_ON;
5854 
5855 	/* If Local has sent response mPacket, Local is entering FPE ON */
5856 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5857 		*lo_state = FPE_STATE_ENTERING_ON;
5858 
5859 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5860 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5861 	    priv->fpe_wq) {
5862 		queue_work(priv->fpe_wq, &priv->fpe_task);
5863 	}
5864 }
5865 
5866 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5867 {
5868 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5869 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5870 	u32 queues_count;
5871 	u32 queue;
5872 	bool xmac;
5873 
5874 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5875 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5876 
5877 	if (priv->irq_wake)
5878 		pm_wakeup_event(priv->device, 0);
5879 
5880 	if (priv->dma_cap.estsel)
5881 		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5882 				      &priv->xstats, tx_cnt);
5883 
5884 	if (priv->dma_cap.fpesel) {
5885 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5886 						   priv->dev);
5887 
5888 		stmmac_fpe_event_status(priv, status);
5889 	}
5890 
5891 	/* To handle the GMAC's own interrupts */
5892 	if ((priv->plat->has_gmac) || xmac) {
5893 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5894 
5895 		if (unlikely(status)) {
5896 			/* For LPI we need to save the tx status */
5897 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5898 				priv->tx_path_in_lpi_mode = true;
5899 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5900 				priv->tx_path_in_lpi_mode = false;
5901 		}
5902 
5903 		for (queue = 0; queue < queues_count; queue++) {
5904 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5905 							    queue);
5906 		}
5907 
5908 		/* PCS link status */
5909 		if (priv->hw->pcs &&
5910 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5911 			if (priv->xstats.pcs_link)
5912 				netif_carrier_on(priv->dev);
5913 			else
5914 				netif_carrier_off(priv->dev);
5915 		}
5916 
5917 		stmmac_timestamp_interrupt(priv, priv);
5918 	}
5919 }
5920 
5921 /**
5922  *  stmmac_interrupt - main ISR
5923  *  @irq: interrupt number.
5924  *  @dev_id: to pass the net device pointer.
5925  *  Description: this is the main driver interrupt service routine.
5926  *  It can call:
5927  *  o DMA service routine (to manage incoming frame reception and transmission
5928  *    status)
5929  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5930  *    interrupts.
5931  */
5932 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5933 {
5934 	struct net_device *dev = (struct net_device *)dev_id;
5935 	struct stmmac_priv *priv = netdev_priv(dev);
5936 
5937 	/* Check if adapter is up */
5938 	if (test_bit(STMMAC_DOWN, &priv->state))
5939 		return IRQ_HANDLED;
5940 
5941 	/* Check if a fatal error happened */
5942 	if (stmmac_safety_feat_interrupt(priv))
5943 		return IRQ_HANDLED;
5944 
5945 	/* To handle Common interrupts */
5946 	stmmac_common_interrupt(priv);
5947 
5948 	/* To handle DMA interrupts */
5949 	stmmac_dma_interrupt(priv);
5950 
5951 	return IRQ_HANDLED;
5952 }
5953 
5954 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5955 {
5956 	struct net_device *dev = (struct net_device *)dev_id;
5957 	struct stmmac_priv *priv = netdev_priv(dev);
5958 
5959 	if (unlikely(!dev)) {
5960 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5961 		return IRQ_NONE;
5962 	}
5963 
5964 	/* Check if adapter is up */
5965 	if (test_bit(STMMAC_DOWN, &priv->state))
5966 		return IRQ_HANDLED;
5967 
5968 	/* To handle Common interrupts */
5969 	stmmac_common_interrupt(priv);
5970 
5971 	return IRQ_HANDLED;
5972 }
5973 
5974 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5975 {
5976 	struct net_device *dev = (struct net_device *)dev_id;
5977 	struct stmmac_priv *priv = netdev_priv(dev);
5978 
5979 	if (unlikely(!dev)) {
5980 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5981 		return IRQ_NONE;
5982 	}
5983 
5984 	/* Check if adapter is up */
5985 	if (test_bit(STMMAC_DOWN, &priv->state))
5986 		return IRQ_HANDLED;
5987 
5988 	/* Check if a fatal error happened */
5989 	stmmac_safety_feat_interrupt(priv);
5990 
5991 	return IRQ_HANDLED;
5992 }
5993 
5994 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5995 {
5996 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5997 	struct stmmac_dma_conf *dma_conf;
5998 	int chan = tx_q->queue_index;
5999 	struct stmmac_priv *priv;
6000 	int status;
6001 
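	/* Walk back from the TX queue to the owning stmmac_priv through
	 * the embedding dma_conf structure.
	 */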
6002 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6003 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6004 
6005 	if (unlikely(!data)) {
6006 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6007 		return IRQ_NONE;
6008 	}
6009 
6010 	/* Check if adapter is up */
6011 	if (test_bit(STMMAC_DOWN, &priv->state))
6012 		return IRQ_HANDLED;
6013 
6014 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6015 
6016 	if (unlikely(status & tx_hard_error_bump_tc)) {
6017 		/* Try to bump up the dma threshold on this failure */
6018 		stmmac_bump_dma_threshold(priv, chan);
6019 	} else if (unlikely(status == tx_hard_error)) {
6020 		stmmac_tx_err(priv, chan);
6021 	}
6022 
6023 	return IRQ_HANDLED;
6024 }
6025 
6026 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6027 {
6028 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6029 	struct stmmac_dma_conf *dma_conf;
6030 	int chan = rx_q->queue_index;
6031 	struct stmmac_priv *priv;
6032 
6033 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6034 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6035 
6036 	if (unlikely(!data)) {
6037 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6038 		return IRQ_NONE;
6039 	}
6040 
6041 	/* Check if adapter is up */
6042 	if (test_bit(STMMAC_DOWN, &priv->state))
6043 		return IRQ_HANDLED;
6044 
6045 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6046 
6047 	return IRQ_HANDLED;
6048 }
6049 
6050 /**
6051  *  stmmac_ioctl - Entry point for the Ioctl
6052  *  @dev: Device pointer.
6053  *  @rq: An IOCTL-specific structure that can contain a pointer to
6054  *  a proprietary structure used to pass information to the driver.
6055  *  @cmd: IOCTL command
6056  *  Description:
6057  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6058  */
6059 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6060 {
6061 	struct stmmac_priv *priv = netdev_priv(dev);
6062 	int ret = -EOPNOTSUPP;
6063 
6064 	if (!netif_running(dev))
6065 		return -EINVAL;
6066 
6067 	switch (cmd) {
6068 	case SIOCGMIIPHY:
6069 	case SIOCGMIIREG:
6070 	case SIOCSMIIREG:
6071 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6072 		break;
6073 	case SIOCSHWTSTAMP:
6074 		ret = stmmac_hwtstamp_set(dev, rq);
6075 		break;
6076 	case SIOCGHWTSTAMP:
6077 		ret = stmmac_hwtstamp_get(dev, rq);
6078 		break;
6079 	default:
6080 		break;
6081 	}
6082 
6083 	return ret;
6084 }
6085 
6086 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6087 				    void *cb_priv)
6088 {
6089 	struct stmmac_priv *priv = cb_priv;
6090 	int ret = -EOPNOTSUPP;
6091 
6092 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6093 		return ret;
6094 
6095 	__stmmac_disable_all_queues(priv);
6096 
6097 	switch (type) {
6098 	case TC_SETUP_CLSU32:
6099 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6100 		break;
6101 	case TC_SETUP_CLSFLOWER:
6102 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6103 		break;
6104 	default:
6105 		break;
6106 	}
6107 
6108 	stmmac_enable_all_queues(priv);
6109 	return ret;
6110 }
6111 
6112 static LIST_HEAD(stmmac_block_cb_list);
6113 
6114 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6115 			   void *type_data)
6116 {
6117 	struct stmmac_priv *priv = netdev_priv(ndev);
6118 
6119 	switch (type) {
6120 	case TC_QUERY_CAPS:
6121 		return stmmac_tc_query_caps(priv, priv, type_data);
6122 	case TC_SETUP_BLOCK:
6123 		return flow_block_cb_setup_simple(type_data,
6124 						  &stmmac_block_cb_list,
6125 						  stmmac_setup_tc_block_cb,
6126 						  priv, priv, true);
6127 	case TC_SETUP_QDISC_CBS:
6128 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6129 	case TC_SETUP_QDISC_TAPRIO:
6130 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6131 	case TC_SETUP_QDISC_ETF:
6132 		return stmmac_tc_setup_etf(priv, priv, type_data);
6133 	default:
6134 		return -EOPNOTSUPP;
6135 	}
6136 }
6137 
6138 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6139 			       struct net_device *sb_dev)
6140 {
6141 	int gso = skb_shinfo(skb)->gso_type;
6142 
6143 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6144 		/*
6145 		 * There is no way to determine the number of TSO/USO
6146 		 * capable queues. Let's always use Queue 0
6147 		 * because if TSO/USO is supported then at least this
6148 		 * one will be capable.
6149 		 */
6150 		return 0;
6151 	}
6152 
6153 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6154 }
6155 
6156 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6157 {
6158 	struct stmmac_priv *priv = netdev_priv(ndev);
6159 	int ret = 0;
6160 
6161 	ret = pm_runtime_resume_and_get(priv->device);
6162 	if (ret < 0)
6163 		return ret;
6164 
6165 	ret = eth_mac_addr(ndev, addr);
6166 	if (ret)
6167 		goto set_mac_error;
6168 
6169 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6170 
6171 set_mac_error:
6172 	pm_runtime_put(priv->device);
6173 
6174 	return ret;
6175 }
6176 
6177 #ifdef CONFIG_DEBUG_FS
6178 static struct dentry *stmmac_fs_dir;
6179 
6180 static void sysfs_display_ring(void *head, int size, int extend_desc,
6181 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6182 {
6183 	int i;
6184 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6185 	struct dma_desc *p = (struct dma_desc *)head;
6186 	dma_addr_t dma_addr;
6187 
6188 	for (i = 0; i < size; i++) {
6189 		if (extend_desc) {
6190 			dma_addr = dma_phy_addr + i * sizeof(*ep);
6191 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6192 				   i, &dma_addr,
6193 				   le32_to_cpu(ep->basic.des0),
6194 				   le32_to_cpu(ep->basic.des1),
6195 				   le32_to_cpu(ep->basic.des2),
6196 				   le32_to_cpu(ep->basic.des3));
6197 			ep++;
6198 		} else {
6199 			dma_addr = dma_phy_addr + i * sizeof(*p);
6200 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6201 				   i, &dma_addr,
6202 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6203 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6204 			p++;
6205 		}
6206 		seq_printf(seq, "\n");
6207 	}
6208 }
6209 
6210 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6211 {
6212 	struct net_device *dev = seq->private;
6213 	struct stmmac_priv *priv = netdev_priv(dev);
6214 	u32 rx_count = priv->plat->rx_queues_to_use;
6215 	u32 tx_count = priv->plat->tx_queues_to_use;
6216 	u32 queue;
6217 
6218 	if ((dev->flags & IFF_UP) == 0)
6219 		return 0;
6220 
6221 	for (queue = 0; queue < rx_count; queue++) {
6222 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6223 
6224 		seq_printf(seq, "RX Queue %d:\n", queue);
6225 
6226 		if (priv->extend_desc) {
6227 			seq_printf(seq, "Extended descriptor ring:\n");
6228 			sysfs_display_ring((void *)rx_q->dma_erx,
6229 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6230 		} else {
6231 			seq_printf(seq, "Descriptor ring:\n");
6232 			sysfs_display_ring((void *)rx_q->dma_rx,
6233 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6234 		}
6235 	}
6236 
6237 	for (queue = 0; queue < tx_count; queue++) {
6238 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6239 
6240 		seq_printf(seq, "TX Queue %d:\n", queue);
6241 
6242 		if (priv->extend_desc) {
6243 			seq_printf(seq, "Extended descriptor ring:\n");
6244 			sysfs_display_ring((void *)tx_q->dma_etx,
6245 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6246 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6247 			seq_printf(seq, "Descriptor ring:\n");
6248 			sysfs_display_ring((void *)tx_q->dma_tx,
6249 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6250 		}
6251 	}
6252 
6253 	return 0;
6254 }
6255 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6256 
6257 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6258 {
6259 	static const char * const dwxgmac_timestamp_source[] = {
6260 		"None",
6261 		"Internal",
6262 		"External",
6263 		"Both",
6264 	};
6265 	static const char * const dwxgmac_safety_feature_desc[] = {
6266 		"No",
6267 		"All Safety Features with ECC and Parity",
6268 		"All Safety Features without ECC or Parity",
6269 		"All Safety Features with Parity Only",
6270 		"ECC Only",
6271 		"UNDEFINED",
6272 		"UNDEFINED",
6273 		"UNDEFINED",
6274 	};
6275 	struct net_device *dev = seq->private;
6276 	struct stmmac_priv *priv = netdev_priv(dev);
6277 
6278 	if (!priv->hw_cap_support) {
6279 		seq_printf(seq, "DMA HW features not supported\n");
6280 		return 0;
6281 	}
6282 
6283 	seq_printf(seq, "==============================\n");
6284 	seq_printf(seq, "\tDMA HW features\n");
6285 	seq_printf(seq, "==============================\n");
6286 
6287 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6288 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6289 	seq_printf(seq, "\t1000 Mbps: %s\n",
6290 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6291 	seq_printf(seq, "\tHalf duplex: %s\n",
6292 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6293 	if (priv->plat->has_xgmac) {
6294 		seq_printf(seq,
6295 			   "\tNumber of Additional MAC address registers: %d\n",
6296 			   priv->dma_cap.multi_addr);
6297 	} else {
6298 		seq_printf(seq, "\tHash Filter: %s\n",
6299 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6300 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6301 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6302 	}
6303 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6304 		   (priv->dma_cap.pcs) ? "Y" : "N");
6305 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6306 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6307 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6308 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6309 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6310 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6311 	seq_printf(seq, "\tRMON module: %s\n",
6312 		   (priv->dma_cap.rmon) ? "Y" : "N");
6313 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6314 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6315 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6316 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6317 	if (priv->plat->has_xgmac)
6318 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6319 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6320 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6321 		   (priv->dma_cap.eee) ? "Y" : "N");
6322 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6323 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6324 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6325 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6326 	    priv->plat->has_xgmac) {
6327 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6328 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6329 	} else {
6330 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6331 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6332 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6333 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6334 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6335 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6336 	}
6337 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6338 		   priv->dma_cap.number_rx_channel);
6339 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6340 		   priv->dma_cap.number_tx_channel);
6341 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6342 		   priv->dma_cap.number_rx_queues);
6343 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6344 		   priv->dma_cap.number_tx_queues);
6345 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6346 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6347 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6348 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6349 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6350 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6351 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6352 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6353 		   priv->dma_cap.pps_out_num);
6354 	seq_printf(seq, "\tSafety Features: %s\n",
6355 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6356 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6357 		   priv->dma_cap.frpsel ? "Y" : "N");
6358 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6359 		   priv->dma_cap.host_dma_width);
6360 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6361 		   priv->dma_cap.rssen ? "Y" : "N");
6362 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6363 		   priv->dma_cap.vlhash ? "Y" : "N");
6364 	seq_printf(seq, "\tSplit Header: %s\n",
6365 		   priv->dma_cap.sphen ? "Y" : "N");
6366 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6367 		   priv->dma_cap.vlins ? "Y" : "N");
6368 	seq_printf(seq, "\tDouble VLAN: %s\n",
6369 		   priv->dma_cap.dvlan ? "Y" : "N");
6370 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6371 		   priv->dma_cap.l3l4fnum);
6372 	seq_printf(seq, "\tARP Offloading: %s\n",
6373 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6374 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6375 		   priv->dma_cap.estsel ? "Y" : "N");
6376 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6377 		   priv->dma_cap.fpesel ? "Y" : "N");
6378 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6379 		   priv->dma_cap.tbssel ? "Y" : "N");
6380 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6381 		   priv->dma_cap.tbs_ch_num);
6382 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6383 		   priv->dma_cap.sgfsel ? "Y" : "N");
6384 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6385 		   BIT(priv->dma_cap.ttsfd) >> 1);
6386 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6387 		   priv->dma_cap.numtc);
6388 	seq_printf(seq, "\tDCB Feature: %s\n",
6389 		   priv->dma_cap.dcben ? "Y" : "N");
6390 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6391 		   priv->dma_cap.advthword ? "Y" : "N");
6392 	seq_printf(seq, "\tPTP Offload: %s\n",
6393 		   priv->dma_cap.ptoen ? "Y" : "N");
6394 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6395 		   priv->dma_cap.osten ? "Y" : "N");
6396 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6397 		   priv->dma_cap.pfcen ? "Y" : "N");
6398 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6399 		   BIT(priv->dma_cap.frpes) << 6);
6400 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6401 		   BIT(priv->dma_cap.frpbs) << 6);
6402 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6403 		   priv->dma_cap.frppipe_num);
6404 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6405 		   priv->dma_cap.nrvf_num ?
6406 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6407 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6408 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6409 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6410 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6411 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6412 		   priv->dma_cap.cbtisel ? "Y" : "N");
6413 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6414 		   priv->dma_cap.aux_snapshot_n);
6415 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6416 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6417 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6418 		   priv->dma_cap.edma ? "Y" : "N");
6419 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6420 		   priv->dma_cap.ediffc ? "Y" : "N");
6421 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6422 		   priv->dma_cap.vxn ? "Y" : "N");
6423 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6424 		   priv->dma_cap.dbgmem ? "Y" : "N");
6425 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6426 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6427 	return 0;
6428 }
6429 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6430 
6431 /* Use network device events to rename debugfs file entries.
6432  */
6433 static int stmmac_device_event(struct notifier_block *unused,
6434 			       unsigned long event, void *ptr)
6435 {
6436 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6437 	struct stmmac_priv *priv = netdev_priv(dev);
6438 
6439 	if (dev->netdev_ops != &stmmac_netdev_ops)
6440 		goto done;
6441 
6442 	switch (event) {
6443 	case NETDEV_CHANGENAME:
6444 		if (priv->dbgfs_dir)
6445 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6446 							 priv->dbgfs_dir,
6447 							 stmmac_fs_dir,
6448 							 dev->name);
6449 		break;
6450 	}
6451 done:
6452 	return NOTIFY_DONE;
6453 }
6454 
6455 static struct notifier_block stmmac_notifier = {
6456 	.notifier_call = stmmac_device_event,
6457 };
6458 
6459 static void stmmac_init_fs(struct net_device *dev)
6460 {
6461 	struct stmmac_priv *priv = netdev_priv(dev);
6462 
6463 	rtnl_lock();
6464 
6465 	/* Create per netdev entries */
6466 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6467 
6468 	/* Entry to report DMA RX/TX rings */
6469 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6470 			    &stmmac_rings_status_fops);
6471 
6472 	/* Entry to report the DMA HW features */
6473 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6474 			    &stmmac_dma_cap_fops);
6475 
6476 	rtnl_unlock();
6477 }
6478 
6479 static void stmmac_exit_fs(struct net_device *dev)
6480 {
6481 	struct stmmac_priv *priv = netdev_priv(dev);
6482 
6483 	debugfs_remove_recursive(priv->dbgfs_dir);
6484 }
6485 #endif /* CONFIG_DEBUG_FS */
6486 
6487 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6488 {
6489 	unsigned char *data = (unsigned char *)&vid_le;
6490 	unsigned char data_byte = 0;
6491 	u32 crc = ~0x0;
6492 	u32 temp = 0;
6493 	int i, bits;
6494 
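	/* Bit-serial CRC-32 (reflected polynomial 0xEDB88320) over the
	 * low 12 bits of the VID.
	 */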
6495 	bits = get_bitmask_order(VLAN_VID_MASK);
6496 	for (i = 0; i < bits; i++) {
6497 		if ((i % 8) == 0)
6498 			data_byte = data[i / 8];
6499 
6500 		temp = ((crc & 1) ^ data_byte) & 1;
6501 		crc >>= 1;
6502 		data_byte >>= 1;
6503 
6504 		if (temp)
6505 			crc ^= 0xedb88320;
6506 	}
6507 
6508 	return crc;
6509 }
6510 
6511 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6512 {
6513 	u32 crc, hash = 0;
6514 	__le16 pmatch = 0;
6515 	int count = 0;
6516 	u16 vid = 0;
6517 
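	/* Fold every active VID into a hash bitmap: the top four bits of
	 * the bit-reversed CRC select which of the 16 filter bits to set.
	 */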
6518 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6519 		__le16 vid_le = cpu_to_le16(vid);
6520 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6521 		hash |= (1 << crc);
6522 		count++;
6523 	}
6524 
6525 	if (!priv->dma_cap.vlhash) {
6526 		if (count > 2) /* VID = 0 always passes filter */
6527 			return -EOPNOTSUPP;
6528 
6529 		pmatch = cpu_to_le16(vid);
6530 		hash = 0;
6531 	}
6532 
6533 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6534 }
6535 
6536 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6537 {
6538 	struct stmmac_priv *priv = netdev_priv(ndev);
6539 	bool is_double = false;
6540 	int ret;
6541 
6542 	ret = pm_runtime_resume_and_get(priv->device);
6543 	if (ret < 0)
6544 		return ret;
6545 
6546 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6547 		is_double = true;
6548 
6549 	set_bit(vid, priv->active_vlans);
6550 	ret = stmmac_vlan_update(priv, is_double);
6551 	if (ret) {
6552 		clear_bit(vid, priv->active_vlans);
6553 		goto err_pm_put;
6554 	}
6555 
6556 	if (priv->hw->num_vlan) {
6557 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6558 		if (ret)
6559 			goto err_pm_put;
6560 	}
6561 err_pm_put:
6562 	pm_runtime_put(priv->device);
6563 
6564 	return ret;
6565 }
6566 
6567 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6568 {
6569 	struct stmmac_priv *priv = netdev_priv(ndev);
6570 	bool is_double = false;
6571 	int ret;
6572 
6573 	ret = pm_runtime_resume_and_get(priv->device);
6574 	if (ret < 0)
6575 		return ret;
6576 
6577 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6578 		is_double = true;
6579 
6580 	clear_bit(vid, priv->active_vlans);
6581 
6582 	if (priv->hw->num_vlan) {
6583 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6584 		if (ret)
6585 			goto del_vlan_error;
6586 	}
6587 
6588 	ret = stmmac_vlan_update(priv, is_double);
6589 
6590 del_vlan_error:
6591 	pm_runtime_put(priv->device);
6592 
6593 	return ret;
6594 }
6595 
6596 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6597 {
6598 	struct stmmac_priv *priv = netdev_priv(dev);
6599 
6600 	switch (bpf->command) {
6601 	case XDP_SETUP_PROG:
6602 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6603 	case XDP_SETUP_XSK_POOL:
6604 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6605 					     bpf->xsk.queue_id);
6606 	default:
6607 		return -EOPNOTSUPP;
6608 	}
6609 }
6610 
6611 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6612 			   struct xdp_frame **frames, u32 flags)
6613 {
6614 	struct stmmac_priv *priv = netdev_priv(dev);
6615 	int cpu = smp_processor_id();
6616 	struct netdev_queue *nq;
6617 	int i, nxmit = 0;
6618 	int queue;
6619 
6620 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6621 		return -ENETDOWN;
6622 
6623 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6624 		return -EINVAL;
6625 
6626 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6627 	nq = netdev_get_tx_queue(priv->dev, queue);
6628 
6629 	__netif_tx_lock(nq, cpu);
6630 	/* Avoids TX time-out as we are sharing with slow path */
6631 	txq_trans_cond_update(nq);
6632 
6633 	for (i = 0; i < num_frames; i++) {
6634 		int res;
6635 
6636 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6637 		if (res == STMMAC_XDP_CONSUMED)
6638 			break;
6639 
6640 		nxmit++;
6641 	}
6642 
6643 	if (flags & XDP_XMIT_FLUSH) {
6644 		stmmac_flush_tx_descriptors(priv, queue);
6645 		stmmac_tx_timer_arm(priv, queue);
6646 	}
6647 
6648 	__netif_tx_unlock(nq);
6649 
6650 	return nxmit;
6651 }
6652 
6653 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6654 {
6655 	struct stmmac_channel *ch = &priv->channel[queue];
6656 	unsigned long flags;
6657 
6658 	spin_lock_irqsave(&ch->lock, flags);
6659 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6660 	spin_unlock_irqrestore(&ch->lock, flags);
6661 
6662 	stmmac_stop_rx_dma(priv, queue);
6663 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6664 }
6665 
6666 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6667 {
6668 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6669 	struct stmmac_channel *ch = &priv->channel[queue];
6670 	unsigned long flags;
6671 	u32 buf_size;
6672 	int ret;
6673 
6674 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6675 	if (ret) {
6676 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6677 		return;
6678 	}
6679 
6680 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6681 	if (ret) {
6682 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6683 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6684 		return;
6685 	}
6686 
6687 	stmmac_reset_rx_queue(priv, queue);
6688 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6689 
6690 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6691 			    rx_q->dma_rx_phy, rx_q->queue_index);
6692 
6693 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6694 			     sizeof(struct dma_desc));
6695 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6696 			       rx_q->rx_tail_addr, rx_q->queue_index);
6697 
6698 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6699 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6700 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6701 				      buf_size,
6702 				      rx_q->queue_index);
6703 	} else {
6704 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6705 				      priv->dma_conf.dma_buf_sz,
6706 				      rx_q->queue_index);
6707 	}
6708 
6709 	stmmac_start_rx_dma(priv, queue);
6710 
6711 	spin_lock_irqsave(&ch->lock, flags);
6712 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6713 	spin_unlock_irqrestore(&ch->lock, flags);
6714 }
6715 
6716 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6717 {
6718 	struct stmmac_channel *ch = &priv->channel[queue];
6719 	unsigned long flags;
6720 
6721 	spin_lock_irqsave(&ch->lock, flags);
6722 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6723 	spin_unlock_irqrestore(&ch->lock, flags);
6724 
6725 	stmmac_stop_tx_dma(priv, queue);
6726 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6727 }
6728 
6729 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6730 {
6731 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6732 	struct stmmac_channel *ch = &priv->channel[queue];
6733 	unsigned long flags;
6734 	int ret;
6735 
6736 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6737 	if (ret) {
6738 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6739 		return;
6740 	}
6741 
6742 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6743 	if (ret) {
6744 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6745 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6746 		return;
6747 	}
6748 
6749 	stmmac_reset_tx_queue(priv, queue);
6750 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6751 
6752 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6753 			    tx_q->dma_tx_phy, tx_q->queue_index);
6754 
6755 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6756 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6757 
6758 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6759 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6760 			       tx_q->tx_tail_addr, tx_q->queue_index);
6761 
6762 	stmmac_start_tx_dma(priv, queue);
6763 
6764 	spin_lock_irqsave(&ch->lock, flags);
6765 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6766 	spin_unlock_irqrestore(&ch->lock, flags);
6767 }
6768 
6769 void stmmac_xdp_release(struct net_device *dev)
6770 {
6771 	struct stmmac_priv *priv = netdev_priv(dev);
6772 	u32 chan;
6773 
6774 	/* Ensure tx function is not running */
6775 	netif_tx_disable(dev);
6776 
6777 	/* Disable NAPI process */
6778 	stmmac_disable_all_queues(priv);
6779 
6780 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6781 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6782 
6783 	/* Free the IRQ lines */
6784 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6785 
6786 	/* Stop TX/RX DMA channels */
6787 	stmmac_stop_all_dma(priv);
6788 
6789 	/* Release and free the Rx/Tx resources */
6790 	free_dma_desc_resources(priv, &priv->dma_conf);
6791 
6792 	/* Disable the MAC Rx/Tx */
6793 	stmmac_mac_set(priv, priv->ioaddr, false);
6794 
6795 	/* set trans_start so we don't get spurious
6796 	 * watchdogs during reset
6797 	 */
6798 	netif_trans_update(dev);
6799 	netif_carrier_off(dev);
6800 }
6801 
6802 int stmmac_xdp_open(struct net_device *dev)
6803 {
6804 	struct stmmac_priv *priv = netdev_priv(dev);
6805 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6806 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6807 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6808 	struct stmmac_rx_queue *rx_q;
6809 	struct stmmac_tx_queue *tx_q;
6810 	u32 buf_size;
6811 	bool sph_en;
6812 	u32 chan;
6813 	int ret;
6814 
6815 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6816 	if (ret < 0) {
6817 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6818 			   __func__);
6819 		goto dma_desc_error;
6820 	}
6821 
6822 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6823 	if (ret < 0) {
6824 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6825 			   __func__);
6826 		goto init_error;
6827 	}
6828 
6829 	stmmac_reset_queues_param(priv);
6830 
6831 	/* DMA CSR Channel configuration */
6832 	for (chan = 0; chan < dma_csr_ch; chan++) {
6833 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6834 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6835 	}
6836 
6837 	/* Adjust Split header */
6838 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6839 
6840 	/* DMA RX Channel Configuration */
6841 	for (chan = 0; chan < rx_cnt; chan++) {
6842 		rx_q = &priv->dma_conf.rx_queue[chan];
6843 
6844 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6845 				    rx_q->dma_rx_phy, chan);
6846 
6847 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6848 				     (rx_q->buf_alloc_num *
6849 				      sizeof(struct dma_desc));
6850 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6851 				       rx_q->rx_tail_addr, chan);
6852 
6853 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6854 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6855 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6856 					      buf_size,
6857 					      rx_q->queue_index);
6858 		} else {
6859 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6860 					      priv->dma_conf.dma_buf_sz,
6861 					      rx_q->queue_index);
6862 		}
6863 
6864 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6865 	}
6866 
6867 	/* DMA TX Channel Configuration */
6868 	for (chan = 0; chan < tx_cnt; chan++) {
6869 		tx_q = &priv->dma_conf.tx_queue[chan];
6870 
6871 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6872 				    tx_q->dma_tx_phy, chan);
6873 
6874 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6875 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6876 				       tx_q->tx_tail_addr, chan);
6877 
6878 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6879 		tx_q->txtimer.function = stmmac_tx_timer;
6880 	}
6881 
6882 	/* Enable the MAC Rx/Tx */
6883 	stmmac_mac_set(priv, priv->ioaddr, true);
6884 
6885 	/* Start Rx & Tx DMA Channels */
6886 	stmmac_start_all_dma(priv);
6887 
6888 	ret = stmmac_request_irq(dev);
6889 	if (ret)
6890 		goto irq_error;
6891 
6892 	/* Enable NAPI process */
6893 	stmmac_enable_all_queues(priv);
6894 	netif_carrier_on(dev);
6895 	netif_tx_start_all_queues(dev);
6896 	stmmac_enable_all_dma_irq(priv);
6897 
6898 	return 0;
6899 
6900 irq_error:
6901 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6902 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6903 
6904 	stmmac_hw_teardown(dev);
6905 init_error:
6906 	free_dma_desc_resources(priv, &priv->dma_conf);
6907 dma_desc_error:
6908 	return ret;
6909 }
6910 
6911 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6912 {
6913 	struct stmmac_priv *priv = netdev_priv(dev);
6914 	struct stmmac_rx_queue *rx_q;
6915 	struct stmmac_tx_queue *tx_q;
6916 	struct stmmac_channel *ch;
6917 
6918 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6919 	    !netif_carrier_ok(priv->dev))
6920 		return -ENETDOWN;
6921 
6922 	if (!stmmac_xdp_is_enabled(priv))
6923 		return -EINVAL;
6924 
6925 	if (queue >= priv->plat->rx_queues_to_use ||
6926 	    queue >= priv->plat->tx_queues_to_use)
6927 		return -EINVAL;
6928 
6929 	rx_q = &priv->dma_conf.rx_queue[queue];
6930 	tx_q = &priv->dma_conf.tx_queue[queue];
6931 	ch = &priv->channel[queue];
6932 
6933 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6934 		return -EINVAL;
6935 
6936 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6937 		/* EQoS does not have a per-DMA channel SW interrupt,
6938 		 * so we schedule the rxtx NAPI straight away.
6939 		 */
6940 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6941 			__napi_schedule(&ch->rxtx_napi);
6942 	}
6943 
6944 	return 0;
6945 }
6946 
6947 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6948 {
6949 	struct stmmac_priv *priv = netdev_priv(dev);
6950 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6951 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6952 	unsigned int start;
6953 	int q;
6954 
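	/* Per-queue counters are updated under a u64_stats seqcount, so
	 * retry each snapshot until a consistent pair of values is read.
	 */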
6955 	for (q = 0; q < tx_cnt; q++) {
6956 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6957 		u64 tx_packets;
6958 		u64 tx_bytes;
6959 
6960 		do {
6961 			start = u64_stats_fetch_begin(&txq_stats->syncp);
6962 			tx_packets = txq_stats->tx_packets;
6963 			tx_bytes   = txq_stats->tx_bytes;
6964 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
6965 
6966 		stats->tx_packets += tx_packets;
6967 		stats->tx_bytes += tx_bytes;
6968 	}
6969 
6970 	for (q = 0; q < rx_cnt; q++) {
6971 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6972 		u64 rx_packets;
6973 		u64 rx_bytes;
6974 
6975 		do {
6976 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
6977 			rx_packets = rxq_stats->rx_packets;
6978 			rx_bytes   = rxq_stats->rx_bytes;
6979 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
6980 
6981 		stats->rx_packets += rx_packets;
6982 		stats->rx_bytes += rx_bytes;
6983 	}
6984 
6985 	stats->rx_dropped = priv->xstats.rx_dropped;
6986 	stats->rx_errors = priv->xstats.rx_errors;
6987 	stats->tx_dropped = priv->xstats.tx_dropped;
6988 	stats->tx_errors = priv->xstats.tx_errors;
6989 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6990 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6991 	stats->rx_length_errors = priv->xstats.rx_length;
6992 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6993 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6994 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6995 }
6996 
6997 static const struct net_device_ops stmmac_netdev_ops = {
6998 	.ndo_open = stmmac_open,
6999 	.ndo_start_xmit = stmmac_xmit,
7000 	.ndo_stop = stmmac_release,
7001 	.ndo_change_mtu = stmmac_change_mtu,
7002 	.ndo_fix_features = stmmac_fix_features,
7003 	.ndo_set_features = stmmac_set_features,
7004 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7005 	.ndo_tx_timeout = stmmac_tx_timeout,
7006 	.ndo_eth_ioctl = stmmac_ioctl,
7007 	.ndo_get_stats64 = stmmac_get_stats64,
7008 	.ndo_setup_tc = stmmac_setup_tc,
7009 	.ndo_select_queue = stmmac_select_queue,
7010 	.ndo_set_mac_address = stmmac_set_mac_address,
7011 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7012 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7013 	.ndo_bpf = stmmac_bpf,
7014 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7015 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7016 };
7017 
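/* Handle a reset requested via STMMAC_RESET_REQUESTED: under rtnl, close
 * and re-open the device to bring the adapter back to a sane state.
 */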
7018 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7019 {
7020 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7021 		return;
7022 	if (test_bit(STMMAC_DOWN, &priv->state))
7023 		return;
7024 
7025 	netdev_err(priv->dev, "Reset adapter.\n");
7026 
7027 	rtnl_lock();
7028 	netif_trans_update(priv->dev);
7029 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7030 		usleep_range(1000, 2000);
7031 
7032 	set_bit(STMMAC_DOWN, &priv->state);
7033 	dev_close(priv->dev);
7034 	dev_open(priv->dev, NULL);
7035 	clear_bit(STMMAC_DOWN, &priv->state);
7036 	clear_bit(STMMAC_RESETING, &priv->state);
7037 	rtnl_unlock();
7038 }
7039 
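/* Deferred service work: run the reset subtask and clear the scheduled flag. */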
7040 static void stmmac_service_task(struct work_struct *work)
7041 {
7042 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7043 			service_task);
7044 
7045 	stmmac_reset_subtask(priv);
7046 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7047 }
7048 
7049 /**
7050  *  stmmac_hw_init - Init the MAC device
7051  *  @priv: driver private structure
7052  *  Description: this function configures the MAC device according to
7053  *  some platform parameters or the HW capability register. It prepares the
7054  *  driver to use either ring or chain mode and to set up either enhanced or
7055  *  normal descriptors.
7056  */
7057 static int stmmac_hw_init(struct stmmac_priv *priv)
7058 {
7059 	int ret;
7060 
7061 	/* dwmac-sun8i only works in chain mode */
7062 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7063 		chain_mode = 1;
7064 	priv->chain_mode = chain_mode;
7065 
7066 	/* Initialize HW Interface */
7067 	ret = stmmac_hwif_init(priv);
7068 	if (ret)
7069 		return ret;
7070 
7071 	/* Get the HW capability (only on GMAC cores newer than 3.50a) */
7072 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7073 	if (priv->hw_cap_support) {
7074 		dev_info(priv->device, "DMA HW capability register supported\n");
7075 
7076 		/* We can override some GMAC/DMA configuration fields
7077 		 * (e.g. enh_desc, tx_coe) that are passed through the
7078 		 * platform with the values from the HW capability
7079 		 * register (if supported).
7080 		 */
7081 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7082 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7083 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7084 		priv->hw->pmt = priv->plat->pmt;
7085 		if (priv->dma_cap.hash_tb_sz) {
7086 			priv->hw->multicast_filter_bins =
7087 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7088 			priv->hw->mcast_bits_log2 =
7089 					ilog2(priv->hw->multicast_filter_bins);
7090 		}
7091 
7092 		/* TX COE doesn't work in threshold DMA mode */
7093 		if (priv->plat->force_thresh_dma_mode)
7094 			priv->plat->tx_coe = 0;
7095 		else
7096 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7097 
7098 		/* In the case of GMAC4, rx_coe comes from the HW capability register. */
7099 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7100 
7101 		if (priv->dma_cap.rx_coe_type2)
7102 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7103 		else if (priv->dma_cap.rx_coe_type1)
7104 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7105 
7106 	} else {
7107 		dev_info(priv->device, "No HW DMA feature register supported\n");
7108 	}
7109 
7110 	if (priv->plat->rx_coe) {
7111 		priv->hw->rx_csum = priv->plat->rx_coe;
7112 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7113 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7114 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7115 	}
7116 	if (priv->plat->tx_coe)
7117 		dev_info(priv->device, "TX Checksum insertion supported\n");
7118 
7119 	if (priv->plat->pmt) {
7120 		dev_info(priv->device, "Wake-up On LAN supported\n");
7121 		device_set_wakeup_capable(priv->device, 1);
7122 	}
7123 
7124 	if (priv->dma_cap.tsoen)
7125 		dev_info(priv->device, "TSO supported\n");
7126 
7127 	priv->hw->vlan_fail_q_en =
7128 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7129 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7130 
7131 	/* Run HW quirks, if any */
7132 	if (priv->hwif_quirks) {
7133 		ret = priv->hwif_quirks(priv);
7134 		if (ret)
7135 			return ret;
7136 	}
7137 
7138 	/* The Rx watchdog is available in cores newer than 3.40.
7139 	 * In some cases, for example on buggy HW, this feature
7140 	 * has to be disabled; this can be done by passing the
7141 	 * riwt_off field from the platform.
7142 	 */
7143 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7144 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7145 		priv->use_riwt = 1;
7146 		dev_info(priv->device,
7147 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7148 	}
7149 
7150 	return 0;
7151 }
7152 
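/* Register the NAPI contexts for each channel: an RX NAPI for RX queues,
 * a TX NAPI for TX queues, and a combined RX/TX NAPI (used by the XSK
 * zero-copy path) when the channel has both an RX and a TX queue.
 */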
7153 static void stmmac_napi_add(struct net_device *dev)
7154 {
7155 	struct stmmac_priv *priv = netdev_priv(dev);
7156 	u32 queue, maxq;
7157 
7158 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7159 
7160 	for (queue = 0; queue < maxq; queue++) {
7161 		struct stmmac_channel *ch = &priv->channel[queue];
7162 
7163 		ch->priv_data = priv;
7164 		ch->index = queue;
7165 		spin_lock_init(&ch->lock);
7166 
7167 		if (queue < priv->plat->rx_queues_to_use) {
7168 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7169 		}
7170 		if (queue < priv->plat->tx_queues_to_use) {
7171 			netif_napi_add_tx(dev, &ch->tx_napi,
7172 					  stmmac_napi_poll_tx);
7173 		}
7174 		if (queue < priv->plat->rx_queues_to_use &&
7175 		    queue < priv->plat->tx_queues_to_use) {
7176 			netif_napi_add(dev, &ch->rxtx_napi,
7177 				       stmmac_napi_poll_rxtx);
7178 		}
7179 	}
7180 }
7181 
7182 static void stmmac_napi_del(struct net_device *dev)
7183 {
7184 	struct stmmac_priv *priv = netdev_priv(dev);
7185 	u32 queue, maxq;
7186 
7187 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7188 
7189 	for (queue = 0; queue < maxq; queue++) {
7190 		struct stmmac_channel *ch = &priv->channel[queue];
7191 
7192 		if (queue < priv->plat->rx_queues_to_use)
7193 			netif_napi_del(&ch->rx_napi);
7194 		if (queue < priv->plat->tx_queues_to_use)
7195 			netif_napi_del(&ch->tx_napi);
7196 		if (queue < priv->plat->rx_queues_to_use &&
7197 		    queue < priv->plat->tx_queues_to_use) {
7198 			netif_napi_del(&ch->rxtx_napi);
7199 		}
7200 	}
7201 }
7202 
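/* Change the number of RX/TX queues in use (e.g. from ethtool -L): tear
 * down the running interface, update the counts and the default RSS table,
 * re-register the NAPI contexts and re-open if the device was running.
 */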
7203 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7204 {
7205 	struct stmmac_priv *priv = netdev_priv(dev);
7206 	int ret = 0, i;
7207 
7208 	if (netif_running(dev))
7209 		stmmac_release(dev);
7210 
7211 	stmmac_napi_del(dev);
7212 
7213 	priv->plat->rx_queues_to_use = rx_cnt;
7214 	priv->plat->tx_queues_to_use = tx_cnt;
7215 	if (!netif_is_rxfh_configured(dev))
7216 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7217 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7218 									rx_cnt);
7219 
7220 	stmmac_set_half_duplex(priv);
7221 	stmmac_napi_add(dev);
7222 
7223 	if (netif_running(dev))
7224 		ret = stmmac_open(dev);
7225 
7226 	return ret;
7227 }
7228 
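/* Change the RX/TX descriptor ring sizes (e.g. from ethtool -G),
 * restarting the interface if it was running.
 */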
7229 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7230 {
7231 	struct stmmac_priv *priv = netdev_priv(dev);
7232 	int ret = 0;
7233 
7234 	if (netif_running(dev))
7235 		stmmac_release(dev);
7236 
7237 	priv->dma_conf.dma_rx_size = rx_size;
7238 	priv->dma_conf.dma_tx_size = tx_size;
7239 
7240 	if (netif_running(dev))
7241 		ret = stmmac_open(dev);
7242 
7243 	return ret;
7244 }
7245 
7246 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
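/* Link-partner FPE handshake worker: poll the local/remote FPE states and,
 * once both sides are entering ON, program the hardware and mark the
 * handshake complete; otherwise keep sending verify mPackets, with a
 * bounded number of retries.
 */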
7247 static void stmmac_fpe_lp_task(struct work_struct *work)
7248 {
7249 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7250 						fpe_task);
7251 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7252 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7253 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7254 	bool *hs_enable = &fpe_cfg->hs_enable;
7255 	bool *enable = &fpe_cfg->enable;
7256 	int retries = 20;
7257 
7258 	while (retries-- > 0) {
7259 		/* Bail out immediately if FPE handshake is OFF */
7260 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7261 			break;
7262 
7263 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7264 		    *lp_state == FPE_STATE_ENTERING_ON) {
7265 			stmmac_fpe_configure(priv, priv->ioaddr,
7266 					     priv->plat->tx_queues_to_use,
7267 					     priv->plat->rx_queues_to_use,
7268 					     *enable);
7269 
7270 			netdev_info(priv->dev, "configured FPE\n");
7271 
7272 			*lo_state = FPE_STATE_ON;
7273 			*lp_state = FPE_STATE_ON;
7274 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7275 			break;
7276 		}
7277 
7278 		if ((*lo_state == FPE_STATE_CAPABLE ||
7279 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7280 		     *lp_state != FPE_STATE_ON) {
7281 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7282 				    *lo_state, *lp_state);
7283 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7284 						MPACKET_VERIFY);
7285 		}
7286 		/* Sleep then retry */
7287 		msleep(500);
7288 	}
7289 
7290 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7291 }
7292 
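/* Start (send a verify mPacket) or stop the FPE handshake with the link
 * partner when the handshake enable state actually changes.
 */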
7293 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7294 {
7295 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7296 		if (enable) {
7297 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7298 						MPACKET_VERIFY);
7299 		} else {
7300 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7301 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7302 		}
7303 
7304 		priv->plat->fpe_cfg->hs_enable = enable;
7305 	}
7306 }
7307 
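/* XDP metadata hook: return the HW RX timestamp (adjusted for the CDC
 * error) of the frame described by the given xdp_md context.
 */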
7308 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7309 {
7310 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7311 	struct dma_desc *desc_contains_ts = ctx->desc;
7312 	struct stmmac_priv *priv = ctx->priv;
7313 	struct dma_desc *ndesc = ctx->ndesc;
7314 	struct dma_desc *desc = ctx->desc;
7315 	u64 ns = 0;
7316 
7317 	if (!priv->hwts_rx_en)
7318 		return -ENODATA;
7319 
7320 	/* For GMAC4, the valid timestamp is held in the context (CTX) next descriptor. */
7321 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7322 		desc_contains_ts = ndesc;
7323 
7324 	/* Check if timestamp is available */
7325 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7326 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7327 		ns -= priv->plat->cdc_error_adj;
7328 		*timestamp = ns_to_ktime(ns);
7329 		return 0;
7330 	}
7331 
7332 	return -ENODATA;
7333 }
7334 
7335 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7336 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7337 };
7338 
7339 /**
7340  * stmmac_dvr_probe
7341  * @device: device pointer
7342  * @plat_dat: platform data pointer
7343  * @res: stmmac resource pointer
7344  * Description: this is the main probe function used to
7345  * call alloc_etherdev and allocate the priv structure.
7346  * Return:
7347  * 0 on success, otherwise a negative errno.
7348  */
7349 int stmmac_dvr_probe(struct device *device,
7350 		     struct plat_stmmacenet_data *plat_dat,
7351 		     struct stmmac_resources *res)
7352 {
7353 	struct net_device *ndev = NULL;
7354 	struct stmmac_priv *priv;
7355 	u32 rxq;
7356 	int i, ret = 0;
7357 
7358 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7359 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7360 	if (!ndev)
7361 		return -ENOMEM;
7362 
7363 	SET_NETDEV_DEV(ndev, device);
7364 
7365 	priv = netdev_priv(ndev);
7366 	priv->device = device;
7367 	priv->dev = ndev;
7368 
7369 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7370 		u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
7371 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7372 		u64_stats_init(&priv->xstats.txq_stats[i].syncp);
7373 
7374 	stmmac_set_ethtool_ops(ndev);
7375 	priv->pause = pause;
7376 	priv->plat = plat_dat;
7377 	priv->ioaddr = res->addr;
7378 	priv->dev->base_addr = (unsigned long)res->addr;
7379 	priv->plat->dma_cfg->multi_msi_en =
7380 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7381 
7382 	priv->dev->irq = res->irq;
7383 	priv->wol_irq = res->wol_irq;
7384 	priv->lpi_irq = res->lpi_irq;
7385 	priv->sfty_ce_irq = res->sfty_ce_irq;
7386 	priv->sfty_ue_irq = res->sfty_ue_irq;
7387 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7388 		priv->rx_irq[i] = res->rx_irq[i];
7389 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7390 		priv->tx_irq[i] = res->tx_irq[i];
7391 
7392 	if (!is_zero_ether_addr(res->mac))
7393 		eth_hw_addr_set(priv->dev, res->mac);
7394 
7395 	dev_set_drvdata(device, priv->dev);
7396 
7397 	/* Verify driver arguments */
7398 	stmmac_verify_args();
7399 
7400 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7401 	if (!priv->af_xdp_zc_qps)
7402 		return -ENOMEM;
7403 
7404 	/* Allocate workqueue */
7405 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7406 	if (!priv->wq) {
7407 		dev_err(priv->device, "failed to create workqueue\n");
7408 		ret = -ENOMEM;
7409 		goto error_wq_init;
7410 	}
7411 
7412 	INIT_WORK(&priv->service_task, stmmac_service_task);
7413 
7414 	/* Initialize Link Partner FPE workqueue */
7415 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7416 
7417 	/* Override with kernel parameters if supplied XXX CRS XXX
7418 	 * this needs to have multiple instances
7419 	 */
7420 	if ((phyaddr >= 0) && (phyaddr <= 31))
7421 		priv->plat->phy_addr = phyaddr;
7422 
7423 	if (priv->plat->stmmac_rst) {
7424 		ret = reset_control_assert(priv->plat->stmmac_rst);
7425 		reset_control_deassert(priv->plat->stmmac_rst);
7426 		/* Some reset controllers only provide a reset callback
7427 		 * instead of an assert + deassert callback pair.
7428 		 */
7429 		if (ret == -ENOTSUPP)
7430 			reset_control_reset(priv->plat->stmmac_rst);
7431 	}
7432 
7433 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7434 	if (ret == -ENOTSUPP)
7435 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7436 			ERR_PTR(ret));
7437 
7438 	/* Init MAC and get the capabilities */
7439 	ret = stmmac_hw_init(priv);
7440 	if (ret)
7441 		goto error_hw_init;
7442 
7443 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7444 	 */
7445 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7446 		priv->plat->dma_cfg->dche = false;
7447 
7448 	stmmac_check_ether_addr(priv);
7449 
7450 	ndev->netdev_ops = &stmmac_netdev_ops;
7451 
7452 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7453 
7454 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7455 			    NETIF_F_RXCSUM;
7456 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7457 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7458 
7459 	ret = stmmac_tc_init(priv, priv);
7460 	if (!ret) {
7461 		ndev->hw_features |= NETIF_F_HW_TC;
7462 	}
7463 
7464 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7465 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7466 		if (priv->plat->has_gmac4)
7467 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7468 		priv->tso = true;
7469 		dev_info(priv->device, "TSO feature enabled\n");
7470 	}
7471 
7472 	if (priv->dma_cap.sphen &&
7473 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7474 		ndev->hw_features |= NETIF_F_GRO;
7475 		priv->sph_cap = true;
7476 		priv->sph = priv->sph_cap;
7477 		dev_info(priv->device, "SPH feature enabled\n");
7478 	}
7479 
7480 	/* Ideally our host DMA address width is the same as for the
7481 	 * device. However, it may differ and then we have to use our
7482 	 * host DMA width for allocation and the device DMA width for
7483 	 * register handling.
7484 	 */
7485 	if (priv->plat->host_dma_width)
7486 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7487 	else
7488 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7489 
7490 	if (priv->dma_cap.host_dma_width) {
7491 		ret = dma_set_mask_and_coherent(device,
7492 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7493 		if (!ret) {
7494 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7495 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7496 
7497 			/*
7498 			 * If more than 32 bits can be addressed, make sure to
7499 			 * enable enhanced addressing mode.
7500 			 */
7501 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7502 				priv->plat->dma_cfg->eame = true;
7503 		} else {
7504 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7505 			if (ret) {
7506 				dev_err(priv->device, "Failed to set DMA Mask\n");
7507 				goto error_hw_init;
7508 			}
7509 
7510 			priv->dma_cap.host_dma_width = 32;
7511 		}
7512 	}
7513 
7514 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7515 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7516 #ifdef STMMAC_VLAN_TAG_USED
7517 	/* Both mac100 and gmac support receive VLAN tag detection */
7518 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7519 	if (priv->dma_cap.vlhash) {
7520 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7521 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7522 	}
7523 	if (priv->dma_cap.vlins) {
7524 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7525 		if (priv->dma_cap.dvlan)
7526 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7527 	}
7528 #endif
7529 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7530 
7531 	priv->xstats.threshold = tc;
7532 
7533 	/* Initialize RSS */
7534 	rxq = priv->plat->rx_queues_to_use;
7535 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7536 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7537 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7538 
7539 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7540 		ndev->features |= NETIF_F_RXHASH;
7541 
7542 	ndev->vlan_features |= ndev->features;
7543 	/* TSO doesn't work on VLANs yet */
7544 	ndev->vlan_features &= ~NETIF_F_TSO;
7545 
7546 	/* MTU range: 46 - hw-specific max */
7547 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7548 	if (priv->plat->has_xgmac)
7549 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7550 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7551 		ndev->max_mtu = JUMBO_LEN;
7552 	else
7553 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7554 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7555 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7556 	 */
7557 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7558 	    (priv->plat->maxmtu >= ndev->min_mtu))
7559 		ndev->max_mtu = priv->plat->maxmtu;
7560 	else if (priv->plat->maxmtu < ndev->min_mtu)
7561 		dev_warn(priv->device,
7562 			 "%s: warning: maxmtu has an invalid value (%d)\n",
7563 			 __func__, priv->plat->maxmtu);
7564 
7565 	if (flow_ctrl)
7566 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7567 
7568 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7569 
7570 	/* Setup channels NAPI */
7571 	stmmac_napi_add(ndev);
7572 
7573 	mutex_init(&priv->lock);
7574 
7575 	/* If a specific clk_csr value is passed from the platform,
7576 	 * this means that the CSR Clock Range selection cannot be
7577 	 * changed at run-time and is fixed. Otherwise, the driver
7578 	 * will try to set the MDC clock dynamically according to
7579 	 * the actual CSR clock input.
7580 	 */
7581 	if (priv->plat->clk_csr >= 0)
7582 		priv->clk_csr = priv->plat->clk_csr;
7583 	else
7584 		stmmac_clk_csr_set(priv);
7585 
7586 	stmmac_check_pcs_mode(priv);
7587 
7588 	pm_runtime_get_noresume(device);
7589 	pm_runtime_set_active(device);
7590 	if (!pm_runtime_enabled(device))
7591 		pm_runtime_enable(device);
7592 
7593 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7594 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7595 		/* MDIO bus Registration */
7596 		ret = stmmac_mdio_register(ndev);
7597 		if (ret < 0) {
7598 			dev_err_probe(priv->device, ret,
7599 				      "%s: MDIO bus (id: %d) registration failed\n",
7600 				      __func__, priv->plat->bus_id);
7601 			goto error_mdio_register;
7602 		}
7603 	}
7604 
7605 	if (priv->plat->speed_mode_2500)
7606 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7607 
7608 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7609 		ret = stmmac_xpcs_setup(priv->mii);
7610 		if (ret)
7611 			goto error_xpcs_setup;
7612 	}
7613 
7614 	ret = stmmac_phy_setup(priv);
7615 	if (ret) {
7616 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7617 		goto error_phy_setup;
7618 	}
7619 
7620 	ret = register_netdev(ndev);
7621 	if (ret) {
7622 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7623 			__func__, ret);
7624 		goto error_netdev_register;
7625 	}
7626 
7627 #ifdef CONFIG_DEBUG_FS
7628 	stmmac_init_fs(ndev);
7629 #endif
7630 
7631 	if (priv->plat->dump_debug_regs)
7632 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7633 
7634 	/* Let pm_runtime_put() disable the clocks.
7635 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7636 	 */
7637 	pm_runtime_put(device);
7638 
7639 	return ret;
7640 
7641 error_netdev_register:
7642 	phylink_destroy(priv->phylink);
7643 error_xpcs_setup:
7644 error_phy_setup:
7645 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7646 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7647 		stmmac_mdio_unregister(ndev);
7648 error_mdio_register:
7649 	stmmac_napi_del(ndev);
7650 error_hw_init:
7651 	destroy_workqueue(priv->wq);
7652 error_wq_init:
7653 	bitmap_free(priv->af_xdp_zc_qps);
7654 
7655 	return ret;
7656 }
7657 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7658 
7659 /**
7660  * stmmac_dvr_remove
7661  * @dev: device pointer
7662  * Description: this function resets the TX/RX processes, disables the MAC
7663  * RX/TX, changes the link status and releases the DMA descriptor rings.
7664  */
7665 void stmmac_dvr_remove(struct device *dev)
7666 {
7667 	struct net_device *ndev = dev_get_drvdata(dev);
7668 	struct stmmac_priv *priv = netdev_priv(ndev);
7669 
7670 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7671 
7672 	pm_runtime_get_sync(dev);
7673 
7674 	stmmac_stop_all_dma(priv);
7675 	stmmac_mac_set(priv, priv->ioaddr, false);
7676 	netif_carrier_off(ndev);
7677 	unregister_netdev(ndev);
7678 
7679 #ifdef CONFIG_DEBUG_FS
7680 	stmmac_exit_fs(ndev);
7681 #endif
7682 	phylink_destroy(priv->phylink);
7683 	if (priv->plat->stmmac_rst)
7684 		reset_control_assert(priv->plat->stmmac_rst);
7685 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7686 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7687 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7688 		stmmac_mdio_unregister(ndev);
7689 	destroy_workqueue(priv->wq);
7690 	mutex_destroy(&priv->lock);
7691 	bitmap_free(priv->af_xdp_zc_qps);
7692 
7693 	pm_runtime_disable(dev);
7694 	pm_runtime_put_noidle(dev);
7695 }
7696 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7697 
7698 /**
7699  * stmmac_suspend - suspend callback
7700  * @dev: device pointer
7701  * Description: this function suspends the device; it is called by the
7702  * platform driver to stop the network queue, release the resources,
7703  * program the PMT register (for WoL) and clean up driver resources.
7704  */
7705 int stmmac_suspend(struct device *dev)
7706 {
7707 	struct net_device *ndev = dev_get_drvdata(dev);
7708 	struct stmmac_priv *priv = netdev_priv(ndev);
7709 	u32 chan;
7710 
7711 	if (!ndev || !netif_running(ndev))
7712 		return 0;
7713 
7714 	mutex_lock(&priv->lock);
7715 
7716 	netif_device_detach(ndev);
7717 
7718 	stmmac_disable_all_queues(priv);
7719 
7720 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7721 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7722 
7723 	if (priv->eee_enabled) {
7724 		priv->tx_path_in_lpi_mode = false;
7725 		del_timer_sync(&priv->eee_ctrl_timer);
7726 	}
7727 
7728 	/* Stop TX/RX DMA */
7729 	stmmac_stop_all_dma(priv);
7730 
7731 	if (priv->plat->serdes_powerdown)
7732 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7733 
7734 	/* Enable Power down mode by programming the PMT regs */
7735 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7736 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7737 		priv->irq_wake = 1;
7738 	} else {
7739 		stmmac_mac_set(priv, priv->ioaddr, false);
7740 		pinctrl_pm_select_sleep_state(priv->device);
7741 	}
7742 
7743 	mutex_unlock(&priv->lock);
7744 
7745 	rtnl_lock();
7746 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7747 		phylink_suspend(priv->phylink, true);
7748 	} else {
7749 		if (device_may_wakeup(priv->device))
7750 			phylink_speed_down(priv->phylink, false);
7751 		phylink_suspend(priv->phylink, false);
7752 	}
7753 	rtnl_unlock();
7754 
7755 	if (priv->dma_cap.fpesel) {
7756 		/* Disable FPE */
7757 		stmmac_fpe_configure(priv, priv->ioaddr,
7758 				     priv->plat->tx_queues_to_use,
7759 				     priv->plat->rx_queues_to_use, false);
7760 
7761 		stmmac_fpe_handshake(priv, false);
7762 		stmmac_fpe_stop_wq(priv);
7763 	}
7764 
7765 	priv->speed = SPEED_UNKNOWN;
7766 	return 0;
7767 }
7768 EXPORT_SYMBOL_GPL(stmmac_suspend);
7769 
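/* Reset the software ring state (cur_rx/dirty_rx, cur_tx/dirty_tx) of a
 * single queue so that processing restarts from descriptor 0.
 */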
7770 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7771 {
7772 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7773 
7774 	rx_q->cur_rx = 0;
7775 	rx_q->dirty_rx = 0;
7776 }
7777 
7778 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7779 {
7780 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7781 
7782 	tx_q->cur_tx = 0;
7783 	tx_q->dirty_tx = 0;
7784 	tx_q->mss = 0;
7785 
7786 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7787 }
7788 
7789 /**
7790  * stmmac_reset_queues_param - reset queue parameters
7791  * @priv: device pointer
7792  */
7793 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7794 {
7795 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7796 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7797 	u32 queue;
7798 
7799 	for (queue = 0; queue < rx_cnt; queue++)
7800 		stmmac_reset_rx_queue(priv, queue);
7801 
7802 	for (queue = 0; queue < tx_cnt; queue++)
7803 		stmmac_reset_tx_queue(priv, queue);
7804 }
7805 
7806 /**
7807  * stmmac_resume - resume callback
7808  * @dev: device pointer
7809  * Description: on resume, this function is invoked to set up the DMA and CORE
7810  * in a usable state.
7811  */
7812 int stmmac_resume(struct device *dev)
7813 {
7814 	struct net_device *ndev = dev_get_drvdata(dev);
7815 	struct stmmac_priv *priv = netdev_priv(ndev);
7816 	int ret;
7817 
7818 	if (!netif_running(ndev))
7819 		return 0;
7820 
7821 	/* The Power Down bit in the PMT register is cleared
7822 	 * automatically as soon as a magic packet or a Wake-up frame
7823 	 * is received. It is still better to clear this bit manually
7824 	 * because it can cause problems when resuming from other
7825 	 * devices (e.g. serial console).
7826 	 */
7827 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7828 		mutex_lock(&priv->lock);
7829 		stmmac_pmt(priv, priv->hw, 0);
7830 		mutex_unlock(&priv->lock);
7831 		priv->irq_wake = 0;
7832 	} else {
7833 		pinctrl_pm_select_default_state(priv->device);
7834 		/* reset the PHY so that it's ready */
7835 		if (priv->mii)
7836 			stmmac_mdio_reset(priv->mii);
7837 	}
7838 
7839 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7840 	    priv->plat->serdes_powerup) {
7841 		ret = priv->plat->serdes_powerup(ndev,
7842 						 priv->plat->bsp_priv);
7843 
7844 		if (ret < 0)
7845 			return ret;
7846 	}
7847 
7848 	rtnl_lock();
7849 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7850 		phylink_resume(priv->phylink);
7851 	} else {
7852 		phylink_resume(priv->phylink);
7853 		if (device_may_wakeup(priv->device))
7854 			phylink_speed_up(priv->phylink);
7855 	}
7856 	rtnl_unlock();
7857 
7858 	rtnl_lock();
7859 	mutex_lock(&priv->lock);
7860 
7861 	stmmac_reset_queues_param(priv);
7862 
7863 	stmmac_free_tx_skbufs(priv);
7864 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7865 
7866 	stmmac_hw_setup(ndev, false);
7867 	stmmac_init_coalesce(priv);
7868 	stmmac_set_rx_mode(ndev);
7869 
7870 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7871 
7872 	stmmac_enable_all_queues(priv);
7873 	stmmac_enable_all_dma_irq(priv);
7874 
7875 	mutex_unlock(&priv->lock);
7876 	rtnl_unlock();
7877 
7878 	netif_device_attach(ndev);
7879 
7880 	return 0;
7881 }
7882 EXPORT_SYMBOL_GPL(stmmac_resume);
7883 
7884 #ifndef MODULE
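/* Parse the "stmmaceth=" kernel command line: a comma-separated list of
 * key:value options, e.g. stmmaceth=debug:16,phyaddr:1,watchdog:4000
 * (an illustrative combination; any subset of the keys below is valid).
 */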
7885 static int __init stmmac_cmdline_opt(char *str)
7886 {
7887 	char *opt;
7888 
7889 	if (!str || !*str)
7890 		return 1;
7891 	while ((opt = strsep(&str, ",")) != NULL) {
7892 		if (!strncmp(opt, "debug:", 6)) {
7893 			if (kstrtoint(opt + 6, 0, &debug))
7894 				goto err;
7895 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7896 			if (kstrtoint(opt + 8, 0, &phyaddr))
7897 				goto err;
7898 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7899 			if (kstrtoint(opt + 7, 0, &buf_sz))
7900 				goto err;
7901 		} else if (!strncmp(opt, "tc:", 3)) {
7902 			if (kstrtoint(opt + 3, 0, &tc))
7903 				goto err;
7904 		} else if (!strncmp(opt, "watchdog:", 9)) {
7905 			if (kstrtoint(opt + 9, 0, &watchdog))
7906 				goto err;
7907 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7908 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7909 				goto err;
7910 		} else if (!strncmp(opt, "pause:", 6)) {
7911 			if (kstrtoint(opt + 6, 0, &pause))
7912 				goto err;
7913 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7914 			if (kstrtoint(opt + 10, 0, &eee_timer))
7915 				goto err;
7916 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7917 			if (kstrtoint(opt + 11, 0, &chain_mode))
7918 				goto err;
7919 		}
7920 	}
7921 	return 1;
7922 
7923 err:
7924 	pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
7925 	return 1;
7926 }
7927 
7928 __setup("stmmaceth=", stmmac_cmdline_opt);
7929 #endif /* MODULE */
7930 
7931 static int __init stmmac_init(void)
7932 {
7933 #ifdef CONFIG_DEBUG_FS
7934 	/* Create debugfs main directory if it doesn't exist yet */
7935 	if (!stmmac_fs_dir)
7936 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7937 	register_netdevice_notifier(&stmmac_notifier);
7938 #endif
7939 
7940 	return 0;
7941 }
7942 
7943 static void __exit stmmac_exit(void)
7944 {
7945 #ifdef CONFIG_DEBUG_FS
7946 	unregister_netdevice_notifier(&stmmac_notifier);
7947 	debugfs_remove_recursive(stmmac_fs_dir);
7948 #endif
7949 }
7950 
7951 module_init(stmmac_init)
7952 module_exit(stmmac_exit)
7953 
7954 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7955 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7956 MODULE_LICENSE("GPL");
7957