xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 8f109e91b852f159b917f5c565bcf43c26d974e2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
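/* A worked example of the alignment macro above (illustrative only, assuming
 * SMP_CACHE_BYTES == 64; the real value is architecture-dependent):
 *
 *	STMMAC_ALIGN(1500) = ALIGN(ALIGN(1500, 64), 16)
 *	                   = ALIGN(1536, 16)
 *	                   = 1536
 */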
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allows the user to force use of the chain mode instead of the ring
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
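/* These parameters are set at load time, for example
 * "modprobe stmmac chain_mode=1 eee_timer=2000", or with a built-in driver
 * via "stmmac.chain_mode=1" on the kernel command line (the "stmmac" module
 * name prefix is assumed here).
 */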
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
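/* A minimal usage sketch (illustrative only): callers pair an enable with a
 * matching disable, typically around a low-power transition:
 *
 *	ret = stmmac_bus_clks_config(priv, true);
 *	if (ret)
 *		return ret;
 *	... access MAC/DMA registers ...
 *	stmmac_bus_clks_config(priv, false);
 */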
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  * documentation). Otherwise, the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* The platform-provided default clk_csr is assumed valid for all
308 	 * cases except the ones handled below.
309 	 * For clock rates above the IEEE 802.3 specified frequency we
310 	 * cannot estimate the proper divider because the frequency of
311 	 * clk_csr_i is not known, so the default divider is left
312 	 * unchanged.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
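/* For example (illustrative rates, assuming the platform did not request a
 * high-frequency CSR range): a 75 MHz CSR clock selects STMMAC_CSR_60_100M,
 * while a 250 MHz clock selects STMMAC_CSR_250_300M.
 */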
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
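/* A worked example of the two ring-occupancy helpers above (illustrative
 * values, with dma_tx_size == dma_rx_size == 512):
 *
 *	TX, cur_tx = 10, dirty_tx = 500 (dirty ahead, ring has wrapped):
 *		avail = 500 - 10 - 1 = 489 free descriptors
 *	TX, cur_tx = 500, dirty_tx = 10 (no wrap):
 *		avail = 512 - 500 + 10 - 1 = 21 free descriptors
 *	RX, cur_rx = 300, dirty_rx = 100:
 *		dirty = 300 - 100 = 200 descriptors waiting to be refilled
 */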
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function checks that no TX work is pending and, if so,
407  * enters LPI mode when EEE is enabled.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have finished their work */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function exits and disables EEE when the LPI state
433  * is active. It is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and we are not already in the LPI state,
452  *  then the MAC transmitter can be moved to the LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts the
468  *  related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* When using the PCS we cannot access the PHY registers at this
475 	 * stage, so we do not support extra features like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function will read timestamp from the descriptor & pass it to stack.
534  * and also perform some sanity checks.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function reads the received packet's timestamp from the descriptor
577  * and passes it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure, that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing (TX)
614  *  and incoming (RX) packet timestamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate -ve integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
662 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.AS1 any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.AS1, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.AS1, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
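/* For reference, a minimal user-space sketch of the ioctl path that lands in
 * stmmac_hwtstamp_set() (SIOCSHWTSTAMP and struct hwtstamp_config are the
 * standard kernel API; the interface name and filter choice below are purely
 * illustrative):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	static int enable_hwtstamp(const char *ifname)
 *	{
 *		struct hwtstamp_config cfg = {
 *			.tx_type   = HWTSTAMP_TX_ON,
 *			.rx_filter = HWTSTAMP_FILTER_ALL,
 *		};
 *		struct ifreq ifr;
 *		int fd, ret;
 *
 *		fd = socket(AF_INET, SOCK_DGRAM, 0);
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *)&cfg;
 *
 *		ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *		close(fd);
 *		return ret;
 *	}
 */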
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure, that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * It is rerun after resuming from suspend, in which case the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* Calculate the default addend value:
858 	 * addend = 2^32 / freq_div_ratio,
859 	 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc),
860 	 * i.e. the ratio of the PTP clock rate to the counter update rate.
861 	 */
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
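/* A worked example of the addend computed above (purely illustrative
 * numbers): assume clk_ptp_rate = 62.5 MHz and sec_inc = 20 ns, i.e. a
 * desired counter update rate of 1e9 / 20 = 50 MHz. Then:
 *
 *	addend = 2^32 * (50 MHz / 62.5 MHz) = 3435973836 (0xCCCCCCCC)
 *
 * so roughly four out of every five PTP clock cycles overflow the
 * accumulator and advance the system time by sec_inc.
 */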
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify whether the HW supports PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968 					MPACKET_VERIFY);
969 	} else {
970 		*lo_state = FPE_STATE_OFF;
971 		*lp_state = FPE_STATE_OFF;
972 	}
973 }
974 
975 static void stmmac_mac_link_down(struct phylink_config *config,
976 				 unsigned int mode, phy_interface_t interface)
977 {
978 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
979 
980 	stmmac_mac_set(priv, priv->ioaddr, false);
981 	priv->eee_active = false;
982 	priv->tx_lpi_enabled = false;
983 	priv->eee_enabled = stmmac_eee_init(priv);
984 	stmmac_set_eee_pls(priv, priv->hw, false);
985 
986 	if (priv->dma_cap.fpesel)
987 		stmmac_fpe_link_state_handle(priv, false);
988 }
989 
990 static void stmmac_mac_link_up(struct phylink_config *config,
991 			       struct phy_device *phy,
992 			       unsigned int mode, phy_interface_t interface,
993 			       int speed, int duplex,
994 			       bool tx_pause, bool rx_pause)
995 {
996 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997 	u32 old_ctrl, ctrl;
998 
999 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000 	    priv->plat->serdes_powerup)
1001 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002 
1003 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1005 
1006 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1007 		switch (speed) {
1008 		case SPEED_10000:
1009 			ctrl |= priv->hw->link.xgmii.speed10000;
1010 			break;
1011 		case SPEED_5000:
1012 			ctrl |= priv->hw->link.xgmii.speed5000;
1013 			break;
1014 		case SPEED_2500:
1015 			ctrl |= priv->hw->link.xgmii.speed2500;
1016 			break;
1017 		default:
1018 			return;
1019 		}
1020 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1021 		switch (speed) {
1022 		case SPEED_100000:
1023 			ctrl |= priv->hw->link.xlgmii.speed100000;
1024 			break;
1025 		case SPEED_50000:
1026 			ctrl |= priv->hw->link.xlgmii.speed50000;
1027 			break;
1028 		case SPEED_40000:
1029 			ctrl |= priv->hw->link.xlgmii.speed40000;
1030 			break;
1031 		case SPEED_25000:
1032 			ctrl |= priv->hw->link.xlgmii.speed25000;
1033 			break;
1034 		case SPEED_10000:
1035 			ctrl |= priv->hw->link.xgmii.speed10000;
1036 			break;
1037 		case SPEED_2500:
1038 			ctrl |= priv->hw->link.speed2500;
1039 			break;
1040 		case SPEED_1000:
1041 			ctrl |= priv->hw->link.speed1000;
1042 			break;
1043 		default:
1044 			return;
1045 		}
1046 	} else {
1047 		switch (speed) {
1048 		case SPEED_2500:
1049 			ctrl |= priv->hw->link.speed2500;
1050 			break;
1051 		case SPEED_1000:
1052 			ctrl |= priv->hw->link.speed1000;
1053 			break;
1054 		case SPEED_100:
1055 			ctrl |= priv->hw->link.speed100;
1056 			break;
1057 		case SPEED_10:
1058 			ctrl |= priv->hw->link.speed10;
1059 			break;
1060 		default:
1061 			return;
1062 		}
1063 	}
1064 
1065 	priv->speed = speed;
1066 
1067 	if (priv->plat->fix_mac_speed)
1068 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1069 
1070 	if (!duplex)
1071 		ctrl &= ~priv->hw->link.duplex;
1072 	else
1073 		ctrl |= priv->hw->link.duplex;
1074 
1075 	/* Flow Control operation */
1076 	if (rx_pause && tx_pause)
1077 		priv->flow_ctrl = FLOW_AUTO;
1078 	else if (rx_pause && !tx_pause)
1079 		priv->flow_ctrl = FLOW_RX;
1080 	else if (!rx_pause && tx_pause)
1081 		priv->flow_ctrl = FLOW_TX;
1082 	else
1083 		priv->flow_ctrl = FLOW_OFF;
1084 
1085 	stmmac_mac_flow_ctrl(priv, duplex);
1086 
1087 	if (ctrl != old_ctrl)
1088 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089 
1090 	stmmac_mac_set(priv, priv->ioaddr, true);
1091 	if (phy && priv->dma_cap.eee) {
1092 		priv->eee_active =
1093 			phy_init_eee(phy, !(priv->plat->flags &
1094 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1095 		priv->eee_enabled = stmmac_eee_init(priv);
1096 		priv->tx_lpi_enabled = priv->eee_enabled;
1097 		stmmac_set_eee_pls(priv, priv->hw, true);
1098 	}
1099 
1100 	if (priv->dma_cap.fpesel)
1101 		stmmac_fpe_link_state_handle(priv, true);
1102 
1103 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1104 		stmmac_hwtstamp_correct_latency(priv, priv);
1105 }
1106 
1107 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1108 	.mac_select_pcs = stmmac_mac_select_pcs,
1109 	.mac_config = stmmac_mac_config,
1110 	.mac_link_down = stmmac_mac_link_down,
1111 	.mac_link_up = stmmac_mac_link_up,
1112 };
1113 
1114 /**
1115  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116  * @priv: driver private structure
1117  * Description: this is to verify if the HW supports the PCS.
1118  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1119  * configured for the TBI, RTBI, or SGMII PHY interface.
1120  */
1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122 {
1123 	int interface = priv->plat->mac_interface;
1124 
1125 	if (priv->dma_cap.pcs) {
1126 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_RGMII;
1132 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134 			priv->hw->pcs = STMMAC_PCS_SGMII;
1135 		}
1136 	}
1137 }
1138 
1139 /**
1140  * stmmac_init_phy - PHY initialization
1141  * @dev: net device structure
1142  * Description: it initializes the driver's PHY state, and attaches the PHY
1143  * to the mac driver.
1144  *  Return value:
1145  *  0 on success
1146  */
1147 static int stmmac_init_phy(struct net_device *dev)
1148 {
1149 	struct stmmac_priv *priv = netdev_priv(dev);
1150 	struct fwnode_handle *phy_fwnode;
1151 	struct fwnode_handle *fwnode;
1152 	int ret;
1153 
1154 	if (!phylink_expects_phy(priv->phylink))
1155 		return 0;
1156 
1157 	fwnode = priv->plat->port_node;
1158 	if (!fwnode)
1159 		fwnode = dev_fwnode(priv->device);
1160 
1161 	if (fwnode)
1162 		phy_fwnode = fwnode_get_phy_node(fwnode);
1163 	else
1164 		phy_fwnode = NULL;
1165 
1166 	/* Some DT bindings do not set up the PHY handle. Let's try to
1167 	 * manually parse it
1168 	 */
1169 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170 		int addr = priv->plat->phy_addr;
1171 		struct phy_device *phydev;
1172 
1173 		if (addr < 0) {
1174 			netdev_err(priv->dev, "no phy found\n");
1175 			return -ENODEV;
1176 		}
1177 
1178 		phydev = mdiobus_get_phy(priv->mii, addr);
1179 		if (!phydev) {
1180 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181 			return -ENODEV;
1182 		}
1183 
1184 		ret = phylink_connect_phy(priv->phylink, phydev);
1185 	} else {
1186 		fwnode_handle_put(phy_fwnode);
1187 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 	}
1189 
1190 	if (!priv->plat->pmt) {
1191 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192 
1193 		phylink_ethtool_get_wol(priv->phylink, &wol);
1194 		device_set_wakeup_capable(priv->device, !!wol.supported);
1195 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 	}
1197 
1198 	return ret;
1199 }
1200 
1201 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1202 {
1203 	/* Half-duplex can only work with a single TX queue */
1204 	if (priv->plat->tx_queues_to_use > 1)
1205 		priv->phylink_config.mac_capabilities &=
1206 			~(MAC_10HD | MAC_100HD | MAC_1000HD);
1207 	else
1208 		priv->phylink_config.mac_capabilities |=
1209 			(MAC_10HD | MAC_100HD | MAC_1000HD);
1210 }
1211 
1212 static int stmmac_phy_setup(struct stmmac_priv *priv)
1213 {
1214 	struct stmmac_mdio_bus_data *mdio_bus_data;
1215 	int mode = priv->plat->phy_interface;
1216 	struct fwnode_handle *fwnode;
1217 	struct phylink *phylink;
1218 	int max_speed;
1219 
1220 	priv->phylink_config.dev = &priv->dev->dev;
1221 	priv->phylink_config.type = PHYLINK_NETDEV;
1222 	priv->phylink_config.mac_managed_pm = true;
1223 
1224 	mdio_bus_data = priv->plat->mdio_bus_data;
1225 	if (mdio_bus_data)
1226 		priv->phylink_config.ovr_an_inband =
1227 			mdio_bus_data->xpcs_an_inband;
1228 
1229 	/* Set the platform/firmware specified interface mode. Note, phylink
1230 	 * deals with the PHY interface mode, not the MAC interface mode.
1231 	 */
1232 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1233 
1234 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1235 	if (priv->hw->xpcs)
1236 		xpcs_get_interfaces(priv->hw->xpcs,
1237 				    priv->phylink_config.supported_interfaces);
1238 
1239 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1240 						MAC_10FD | MAC_100FD |
1241 						MAC_1000FD;
1242 
1243 	stmmac_set_half_duplex(priv);
1244 
1245 	/* Get the MAC specific capabilities */
1246 	stmmac_mac_phylink_get_caps(priv);
1247 
1248 	max_speed = priv->plat->max_speed;
1249 	if (max_speed)
1250 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1251 
1252 	fwnode = priv->plat->port_node;
1253 	if (!fwnode)
1254 		fwnode = dev_fwnode(priv->device);
1255 
1256 	phylink = phylink_create(&priv->phylink_config, fwnode,
1257 				 mode, &stmmac_phylink_mac_ops);
1258 	if (IS_ERR(phylink))
1259 		return PTR_ERR(phylink);
1260 
1261 	priv->phylink = phylink;
1262 	return 0;
1263 }
1264 
1265 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1266 				    struct stmmac_dma_conf *dma_conf)
1267 {
1268 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1269 	unsigned int desc_size;
1270 	void *head_rx;
1271 	u32 queue;
1272 
1273 	/* Display RX rings */
1274 	for (queue = 0; queue < rx_cnt; queue++) {
1275 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1276 
1277 		pr_info("\tRX Queue %u rings\n", queue);
1278 
1279 		if (priv->extend_desc) {
1280 			head_rx = (void *)rx_q->dma_erx;
1281 			desc_size = sizeof(struct dma_extended_desc);
1282 		} else {
1283 			head_rx = (void *)rx_q->dma_rx;
1284 			desc_size = sizeof(struct dma_desc);
1285 		}
1286 
1287 		/* Display RX ring */
1288 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1289 				    rx_q->dma_rx_phy, desc_size);
1290 	}
1291 }
1292 
1293 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1294 				    struct stmmac_dma_conf *dma_conf)
1295 {
1296 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1297 	unsigned int desc_size;
1298 	void *head_tx;
1299 	u32 queue;
1300 
1301 	/* Display TX rings */
1302 	for (queue = 0; queue < tx_cnt; queue++) {
1303 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1304 
1305 		pr_info("\tTX Queue %d rings\n", queue);
1306 
1307 		if (priv->extend_desc) {
1308 			head_tx = (void *)tx_q->dma_etx;
1309 			desc_size = sizeof(struct dma_extended_desc);
1310 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1311 			head_tx = (void *)tx_q->dma_entx;
1312 			desc_size = sizeof(struct dma_edesc);
1313 		} else {
1314 			head_tx = (void *)tx_q->dma_tx;
1315 			desc_size = sizeof(struct dma_desc);
1316 		}
1317 
1318 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1319 				    tx_q->dma_tx_phy, desc_size);
1320 	}
1321 }
1322 
1323 static void stmmac_display_rings(struct stmmac_priv *priv,
1324 				 struct stmmac_dma_conf *dma_conf)
1325 {
1326 	/* Display RX ring */
1327 	stmmac_display_rx_rings(priv, dma_conf);
1328 
1329 	/* Display TX ring */
1330 	stmmac_display_tx_rings(priv, dma_conf);
1331 }
1332 
1333 static int stmmac_set_bfsize(int mtu, int bufsize)
1334 {
1335 	int ret = bufsize;
1336 
1337 	if (mtu >= BUF_SIZE_8KiB)
1338 		ret = BUF_SIZE_16KiB;
1339 	else if (mtu >= BUF_SIZE_4KiB)
1340 		ret = BUF_SIZE_8KiB;
1341 	else if (mtu >= BUF_SIZE_2KiB)
1342 		ret = BUF_SIZE_4KiB;
1343 	else if (mtu > DEFAULT_BUFSIZE)
1344 		ret = BUF_SIZE_2KiB;
1345 	else
1346 		ret = DEFAULT_BUFSIZE;
1347 
1348 	return ret;
1349 }
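/* For example (illustrative MTUs): an MTU of 1500 keeps the default
 * 1536-byte buffers, an MTU of 3000 selects 4 KiB buffers, and a 9000-byte
 * jumbo MTU selects 16 KiB buffers.
 */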
1350 
1351 /**
1352  * stmmac_clear_rx_descriptors - clear RX descriptors
1353  * @priv: driver private structure
1354  * @dma_conf: structure to take the dma data
1355  * @queue: RX queue index
1356  * Description: this function is called to clear the RX descriptors
1357  * whether basic or extended descriptors are used.
1358  */
1359 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1360 					struct stmmac_dma_conf *dma_conf,
1361 					u32 queue)
1362 {
1363 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1364 	int i;
1365 
1366 	/* Clear the RX descriptors */
1367 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1368 		if (priv->extend_desc)
1369 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1370 					priv->use_riwt, priv->mode,
1371 					(i == dma_conf->dma_rx_size - 1),
1372 					dma_conf->dma_buf_sz);
1373 		else
1374 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1375 					priv->use_riwt, priv->mode,
1376 					(i == dma_conf->dma_rx_size - 1),
1377 					dma_conf->dma_buf_sz);
1378 }
1379 
1380 /**
1381  * stmmac_clear_tx_descriptors - clear tx descriptors
1382  * @priv: driver private structure
1383  * @dma_conf: structure to take the dma data
1384  * @queue: TX queue index.
1385  * Description: this function is called to clear the TX descriptors
1386  * whether basic or extended descriptors are used.
1387  */
1388 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1389 					struct stmmac_dma_conf *dma_conf,
1390 					u32 queue)
1391 {
1392 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1393 	int i;
1394 
1395 	/* Clear the TX descriptors */
1396 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1397 		int last = (i == (dma_conf->dma_tx_size - 1));
1398 		struct dma_desc *p;
1399 
1400 		if (priv->extend_desc)
1401 			p = &tx_q->dma_etx[i].basic;
1402 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1403 			p = &tx_q->dma_entx[i].basic;
1404 		else
1405 			p = &tx_q->dma_tx[i];
1406 
1407 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1408 	}
1409 }
1410 
1411 /**
1412  * stmmac_clear_descriptors - clear descriptors
1413  * @priv: driver private structure
1414  * @dma_conf: structure to take the dma data
1415  * Description: this function is called to clear the TX and RX descriptors
1416  * in case of both basic and extended descriptors are used.
1417  */
1418 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1419 				     struct stmmac_dma_conf *dma_conf)
1420 {
1421 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1422 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1423 	u32 queue;
1424 
1425 	/* Clear the RX descriptors */
1426 	for (queue = 0; queue < rx_queue_cnt; queue++)
1427 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1428 
1429 	/* Clear the TX descriptors */
1430 	for (queue = 0; queue < tx_queue_cnt; queue++)
1431 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1432 }
1433 
1434 /**
1435  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1436  * @priv: driver private structure
1437  * @dma_conf: structure to take the dma data
1438  * @p: descriptor pointer
1439  * @i: descriptor index
1440  * @flags: gfp flag
1441  * @queue: RX queue index
1442  * Description: this function is called to allocate a receive buffer, perform
1443  * the DMA mapping and init the descriptor.
1444  */
1445 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1446 				  struct stmmac_dma_conf *dma_conf,
1447 				  struct dma_desc *p,
1448 				  int i, gfp_t flags, u32 queue)
1449 {
1450 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1451 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1452 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1453 
1454 	if (priv->dma_cap.host_dma_width <= 32)
1455 		gfp |= GFP_DMA32;
1456 
1457 	if (!buf->page) {
1458 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1459 		if (!buf->page)
1460 			return -ENOMEM;
1461 		buf->page_offset = stmmac_rx_offset(priv);
1462 	}
1463 
1464 	if (priv->sph && !buf->sec_page) {
1465 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1466 		if (!buf->sec_page)
1467 			return -ENOMEM;
1468 
1469 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1470 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1471 	} else {
1472 		buf->sec_page = NULL;
1473 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1474 	}
1475 
1476 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1477 
1478 	stmmac_set_desc_addr(priv, p, buf->addr);
1479 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1480 		stmmac_init_desc3(priv, p);
1481 
1482 	return 0;
1483 }
1484 
1485 /**
1486  * stmmac_free_rx_buffer - free RX dma buffers
1487  * @priv: private structure
1488  * @rx_q: RX queue
1489  * @i: buffer index.
1490  */
1491 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1492 				  struct stmmac_rx_queue *rx_q,
1493 				  int i)
1494 {
1495 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1496 
1497 	if (buf->page)
1498 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1499 	buf->page = NULL;
1500 
1501 	if (buf->sec_page)
1502 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1503 	buf->sec_page = NULL;
1504 }
1505 
1506 /**
1507  * stmmac_free_tx_buffer - free TX dma buffers
1508  * @priv: private structure
1509  * @dma_conf: structure to take the dma data
1510  * @queue: TX queue index
1511  * @i: buffer index.
1512  */
1513 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1514 				  struct stmmac_dma_conf *dma_conf,
1515 				  u32 queue, int i)
1516 {
1517 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1518 
1519 	if (tx_q->tx_skbuff_dma[i].buf &&
1520 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1521 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1522 			dma_unmap_page(priv->device,
1523 				       tx_q->tx_skbuff_dma[i].buf,
1524 				       tx_q->tx_skbuff_dma[i].len,
1525 				       DMA_TO_DEVICE);
1526 		else
1527 			dma_unmap_single(priv->device,
1528 					 tx_q->tx_skbuff_dma[i].buf,
1529 					 tx_q->tx_skbuff_dma[i].len,
1530 					 DMA_TO_DEVICE);
1531 	}
1532 
1533 	if (tx_q->xdpf[i] &&
1534 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1535 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1536 		xdp_return_frame(tx_q->xdpf[i]);
1537 		tx_q->xdpf[i] = NULL;
1538 	}
1539 
1540 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1541 		tx_q->xsk_frames_done++;
1542 
1543 	if (tx_q->tx_skbuff[i] &&
1544 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1545 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1546 		tx_q->tx_skbuff[i] = NULL;
1547 	}
1548 
1549 	tx_q->tx_skbuff_dma[i].buf = 0;
1550 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1551 }
1552 
1553 /**
1554  * dma_free_rx_skbufs - free RX dma buffers
1555  * @priv: private structure
1556  * @dma_conf: structure to take the dma data
1557  * @queue: RX queue index
1558  */
1559 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1560 			       struct stmmac_dma_conf *dma_conf,
1561 			       u32 queue)
1562 {
1563 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564 	int i;
1565 
1566 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1567 		stmmac_free_rx_buffer(priv, rx_q, i);
1568 }
1569 
1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1571 				   struct stmmac_dma_conf *dma_conf,
1572 				   u32 queue, gfp_t flags)
1573 {
1574 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1575 	int i;
1576 
1577 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1578 		struct dma_desc *p;
1579 		int ret;
1580 
1581 		if (priv->extend_desc)
1582 			p = &((rx_q->dma_erx + i)->basic);
1583 		else
1584 			p = rx_q->dma_rx + i;
1585 
1586 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1587 					     queue);
1588 		if (ret)
1589 			return ret;
1590 
1591 		rx_q->buf_alloc_num++;
1592 	}
1593 
1594 	return 0;
1595 }
1596 
1597 /**
1598  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1599  * @priv: private structure
1600  * @dma_conf: structure to take the dma data
1601  * @queue: RX queue index
1602  */
1603 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1604 				struct stmmac_dma_conf *dma_conf,
1605 				u32 queue)
1606 {
1607 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1608 	int i;
1609 
1610 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1611 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1612 
1613 		if (!buf->xdp)
1614 			continue;
1615 
1616 		xsk_buff_free(buf->xdp);
1617 		buf->xdp = NULL;
1618 	}
1619 }
1620 
1621 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1622 				      struct stmmac_dma_conf *dma_conf,
1623 				      u32 queue)
1624 {
1625 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1626 	int i;
1627 
1628 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1629 	 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1630 	 * use this macro to make sure there are no size violations.
1631 	 */
1632 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1633 
1634 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1635 		struct stmmac_rx_buffer *buf;
1636 		dma_addr_t dma_addr;
1637 		struct dma_desc *p;
1638 
1639 		if (priv->extend_desc)
1640 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1641 		else
1642 			p = rx_q->dma_rx + i;
1643 
1644 		buf = &rx_q->buf_pool[i];
1645 
1646 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1647 		if (!buf->xdp)
1648 			return -ENOMEM;
1649 
1650 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1651 		stmmac_set_desc_addr(priv, p, dma_addr);
1652 		rx_q->buf_alloc_num++;
1653 	}
1654 
1655 	return 0;
1656 }
1657 
1658 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1659 {
1660 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1661 		return NULL;
1662 
1663 	return xsk_get_pool_from_qid(priv->dev, queue);
1664 }
1665 
1666 /**
1667  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1668  * @priv: driver private structure
1669  * @dma_conf: structure to take the dma data
1670  * @queue: RX queue index
1671  * @flags: gfp flag.
1672  * Description: this function initializes the DMA RX descriptors
1673  * and allocates the socket buffers. It supports the chained and ring
1674  * modes.
1675  */
1676 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1677 				    struct stmmac_dma_conf *dma_conf,
1678 				    u32 queue, gfp_t flags)
1679 {
1680 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1681 	int ret;
1682 
1683 	netif_dbg(priv, probe, priv->dev,
1684 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1685 		  (u32)rx_q->dma_rx_phy);
1686 
1687 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1688 
1689 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1690 
1691 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1692 
1693 	if (rx_q->xsk_pool) {
1694 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1695 						   MEM_TYPE_XSK_BUFF_POOL,
1696 						   NULL));
1697 		netdev_info(priv->dev,
1698 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1699 			    rx_q->queue_index);
1700 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1701 	} else {
1702 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1703 						   MEM_TYPE_PAGE_POOL,
1704 						   rx_q->page_pool));
1705 		netdev_info(priv->dev,
1706 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1707 			    rx_q->queue_index);
1708 	}
1709 
1710 	if (rx_q->xsk_pool) {
1711 		/* RX XDP ZC buffer pool may not be populated, e.g.
1712 		 * xdpsock TX-only.
1713 		 */
1714 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1715 	} else {
1716 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1717 		if (ret < 0)
1718 			return -ENOMEM;
1719 	}
1720 
1721 	/* Setup the chained descriptor addresses */
1722 	if (priv->mode == STMMAC_CHAIN_MODE) {
1723 		if (priv->extend_desc)
1724 			stmmac_mode_init(priv, rx_q->dma_erx,
1725 					 rx_q->dma_rx_phy,
1726 					 dma_conf->dma_rx_size, 1);
1727 		else
1728 			stmmac_mode_init(priv, rx_q->dma_rx,
1729 					 rx_q->dma_rx_phy,
1730 					 dma_conf->dma_rx_size, 0);
1731 	}
1732 
1733 	return 0;
1734 }
1735 
1736 static int init_dma_rx_desc_rings(struct net_device *dev,
1737 				  struct stmmac_dma_conf *dma_conf,
1738 				  gfp_t flags)
1739 {
1740 	struct stmmac_priv *priv = netdev_priv(dev);
1741 	u32 rx_count = priv->plat->rx_queues_to_use;
1742 	int queue;
1743 	int ret;
1744 
1745 	/* RX INITIALIZATION */
1746 	netif_dbg(priv, probe, priv->dev,
1747 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1748 
1749 	for (queue = 0; queue < rx_count; queue++) {
1750 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1751 		if (ret)
1752 			goto err_init_rx_buffers;
1753 	}
1754 
1755 	return 0;
1756 
1757 err_init_rx_buffers:
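	/* Unwind: free the buffers of every queue initialized so far,
	 * including the partially initialized one that failed.
	 */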
1758 	while (queue >= 0) {
1759 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1760 
1761 		if (rx_q->xsk_pool)
1762 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1763 		else
1764 			dma_free_rx_skbufs(priv, dma_conf, queue);
1765 
1766 		rx_q->buf_alloc_num = 0;
1767 		rx_q->xsk_pool = NULL;
1768 
1769 		queue--;
1770 	}
1771 
1772 	return ret;
1773 }
1774 
1775 /**
1776  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1777  * @priv: driver private structure
1778  * @dma_conf: structure to take the dma data
1779  * @queue: TX queue index
1780  * Description: this function initializes the DMA TX descriptors
1781  * and allocates the socket buffers. It supports the chained and ring
1782  * modes.
1783  */
1784 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1785 				    struct stmmac_dma_conf *dma_conf,
1786 				    u32 queue)
1787 {
1788 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1789 	int i;
1790 
1791 	netif_dbg(priv, probe, priv->dev,
1792 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1793 		  (u32)tx_q->dma_tx_phy);
1794 
1795 	/* Setup the chained descriptor addresses */
1796 	if (priv->mode == STMMAC_CHAIN_MODE) {
1797 		if (priv->extend_desc)
1798 			stmmac_mode_init(priv, tx_q->dma_etx,
1799 					 tx_q->dma_tx_phy,
1800 					 dma_conf->dma_tx_size, 1);
1801 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1802 			stmmac_mode_init(priv, tx_q->dma_tx,
1803 					 tx_q->dma_tx_phy,
1804 					 dma_conf->dma_tx_size, 0);
1805 	}
1806 
1807 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1808 
1809 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1810 		struct dma_desc *p;
1811 
1812 		if (priv->extend_desc)
1813 			p = &((tx_q->dma_etx + i)->basic);
1814 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1815 			p = &((tx_q->dma_entx + i)->basic);
1816 		else
1817 			p = tx_q->dma_tx + i;
1818 
1819 		stmmac_clear_desc(priv, p);
1820 
1821 		tx_q->tx_skbuff_dma[i].buf = 0;
1822 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1823 		tx_q->tx_skbuff_dma[i].len = 0;
1824 		tx_q->tx_skbuff_dma[i].last_segment = false;
1825 		tx_q->tx_skbuff[i] = NULL;
1826 	}
1827 
1828 	return 0;
1829 }
1830 
1831 static int init_dma_tx_desc_rings(struct net_device *dev,
1832 				  struct stmmac_dma_conf *dma_conf)
1833 {
1834 	struct stmmac_priv *priv = netdev_priv(dev);
1835 	u32 tx_queue_cnt;
1836 	u32 queue;
1837 
1838 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1839 
1840 	for (queue = 0; queue < tx_queue_cnt; queue++)
1841 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1842 
1843 	return 0;
1844 }
1845 
1846 /**
1847  * init_dma_desc_rings - init the RX/TX descriptor rings
1848  * @dev: net device structure
1849  * @dma_conf: structure to take the dma data
1850  * @flags: gfp flag.
1851  * Description: this function initializes the DMA RX/TX descriptors
1852  * and allocates the socket buffers. It supports the chained and ring
1853  * modes.
1854  */
1855 static int init_dma_desc_rings(struct net_device *dev,
1856 			       struct stmmac_dma_conf *dma_conf,
1857 			       gfp_t flags)
1858 {
1859 	struct stmmac_priv *priv = netdev_priv(dev);
1860 	int ret;
1861 
1862 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1863 	if (ret)
1864 		return ret;
1865 
1866 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1867 
1868 	stmmac_clear_descriptors(priv, dma_conf);
1869 
1870 	if (netif_msg_hw(priv))
1871 		stmmac_display_rings(priv, dma_conf);
1872 
1873 	return ret;
1874 }
1875 
1876 /**
1877  * dma_free_tx_skbufs - free TX dma buffers
1878  * @priv: private structure
1879  * @dma_conf: structure to take the dma data
1880  * @queue: TX queue index
1881  */
1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1883 			       struct stmmac_dma_conf *dma_conf,
1884 			       u32 queue)
1885 {
1886 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1887 	int i;
1888 
1889 	tx_q->xsk_frames_done = 0;
1890 
1891 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1892 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1893 
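	/* Report any frames completed during the free back to the XSK pool
	 * before dropping the pool reference.
	 */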
1894 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1895 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1896 		tx_q->xsk_frames_done = 0;
1897 		tx_q->xsk_pool = NULL;
1898 	}
1899 }
1900 
1901 /**
1902  * stmmac_free_tx_skbufs - free TX skb buffers
1903  * @priv: private structure
1904  */
1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1906 {
1907 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1908 	u32 queue;
1909 
1910 	for (queue = 0; queue < tx_queue_cnt; queue++)
1911 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1912 }
1913 
1914 /**
1915  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1916  * @priv: private structure
1917  * @dma_conf: structure to take the dma data
1918  * @queue: RX queue index
1919  */
1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1921 					 struct stmmac_dma_conf *dma_conf,
1922 					 u32 queue)
1923 {
1924 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1925 
1926 	/* Release the DMA RX socket buffers */
1927 	if (rx_q->xsk_pool)
1928 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1929 	else
1930 		dma_free_rx_skbufs(priv, dma_conf, queue);
1931 
1932 	rx_q->buf_alloc_num = 0;
1933 	rx_q->xsk_pool = NULL;
1934 
1935 	/* Free DMA regions of consistent memory previously allocated */
1936 	if (!priv->extend_desc)
1937 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1938 				  sizeof(struct dma_desc),
1939 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1940 	else
1941 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1942 				  sizeof(struct dma_extended_desc),
1943 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1944 
1945 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1946 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1947 
1948 	kfree(rx_q->buf_pool);
1949 	if (rx_q->page_pool)
1950 		page_pool_destroy(rx_q->page_pool);
1951 }
1952 
1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1954 				       struct stmmac_dma_conf *dma_conf)
1955 {
1956 	u32 rx_count = priv->plat->rx_queues_to_use;
1957 	u32 queue;
1958 
1959 	/* Free RX queue resources */
1960 	for (queue = 0; queue < rx_count; queue++)
1961 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1962 }
1963 
1964 /**
1965  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1966  * @priv: private structure
1967  * @dma_conf: structure to take the dma data
1968  * @queue: TX queue index
1969  */
1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1971 					 struct stmmac_dma_conf *dma_conf,
1972 					 u32 queue)
1973 {
1974 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1975 	size_t size;
1976 	void *addr;
1977 
1978 	/* Release the DMA TX socket buffers */
1979 	dma_free_tx_skbufs(priv, dma_conf, queue);
1980 
1981 	if (priv->extend_desc) {
1982 		size = sizeof(struct dma_extended_desc);
1983 		addr = tx_q->dma_etx;
1984 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1985 		size = sizeof(struct dma_edesc);
1986 		addr = tx_q->dma_entx;
1987 	} else {
1988 		size = sizeof(struct dma_desc);
1989 		addr = tx_q->dma_tx;
1990 	}
1991 
1992 	size *= dma_conf->dma_tx_size;
1993 
1994 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1995 
1996 	kfree(tx_q->tx_skbuff_dma);
1997 	kfree(tx_q->tx_skbuff);
1998 }
1999 
2000 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2001 				       struct stmmac_dma_conf *dma_conf)
2002 {
2003 	u32 tx_count = priv->plat->tx_queues_to_use;
2004 	u32 queue;
2005 
2006 	/* Free TX queue resources */
2007 	for (queue = 0; queue < tx_count; queue++)
2008 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2009 }
2010 
2011 /**
2012  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2013  * @priv: private structure
2014  * @dma_conf: structure to take the dma data
2015  * @queue: RX queue index
2016  * Description: according to which descriptor can be used (extended or basic)
2017  * this function allocates the resources for the RX path. For example, it
2018  * pre-allocates the RX buffer pool and page pool in order to allow the
2019  * zero-copy mechanism.
2020  */
2021 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2022 					 struct stmmac_dma_conf *dma_conf,
2023 					 u32 queue)
2024 {
2025 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2026 	struct stmmac_channel *ch = &priv->channel[queue];
2027 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2028 	struct page_pool_params pp_params = { 0 };
2029 	unsigned int num_pages;
2030 	unsigned int napi_id;
2031 	int ret;
2032 
2033 	rx_q->queue_index = queue;
2034 	rx_q->priv_data = priv;
2035 
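	/* Let the page pool handle DMA mapping and sync-for-device. Buffers
	 * may be written by XDP_TX, so map them bidirectionally whenever an
	 * XDP program is attached.
	 */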
2036 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2037 	pp_params.pool_size = dma_conf->dma_rx_size;
2038 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2039 	pp_params.order = ilog2(num_pages);
2040 	pp_params.nid = dev_to_node(priv->device);
2041 	pp_params.dev = priv->device;
2042 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2043 	pp_params.offset = stmmac_rx_offset(priv);
2044 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2045 
2046 	rx_q->page_pool = page_pool_create(&pp_params);
2047 	if (IS_ERR(rx_q->page_pool)) {
2048 		ret = PTR_ERR(rx_q->page_pool);
2049 		rx_q->page_pool = NULL;
2050 		return ret;
2051 	}
2052 
2053 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2054 				 sizeof(*rx_q->buf_pool),
2055 				 GFP_KERNEL);
2056 	if (!rx_q->buf_pool)
2057 		return -ENOMEM;
2058 
2059 	if (priv->extend_desc) {
2060 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2061 						   dma_conf->dma_rx_size *
2062 						   sizeof(struct dma_extended_desc),
2063 						   &rx_q->dma_rx_phy,
2064 						   GFP_KERNEL);
2065 		if (!rx_q->dma_erx)
2066 			return -ENOMEM;
2067 
2068 	} else {
2069 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2070 						  dma_conf->dma_rx_size *
2071 						  sizeof(struct dma_desc),
2072 						  &rx_q->dma_rx_phy,
2073 						  GFP_KERNEL);
2074 		if (!rx_q->dma_rx)
2075 			return -ENOMEM;
2076 	}
2077 
2078 	if (stmmac_xdp_is_enabled(priv) &&
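	/* Queues in AF_XDP zero-copy mode are serviced by the combined
	 * rx/tx NAPI instance, so register the XDP RxQ against that NAPI.
	 */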
2079 	    test_bit(queue, priv->af_xdp_zc_qps))
2080 		napi_id = ch->rxtx_napi.napi_id;
2081 	else
2082 		napi_id = ch->rx_napi.napi_id;
2083 
2084 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2085 			       rx_q->queue_index,
2086 			       napi_id);
2087 	if (ret) {
2088 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2089 		return -EINVAL;
2090 	}
2091 
2092 	return 0;
2093 }
2094 
2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2096 				       struct stmmac_dma_conf *dma_conf)
2097 {
2098 	u32 rx_count = priv->plat->rx_queues_to_use;
2099 	u32 queue;
2100 	int ret;
2101 
2102 	/* RX queues buffers and DMA */
2103 	for (queue = 0; queue < rx_count; queue++) {
2104 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2105 		if (ret)
2106 			goto err_dma;
2107 	}
2108 
2109 	return 0;
2110 
2111 err_dma:
2112 	free_dma_rx_desc_resources(priv, dma_conf);
2113 
2114 	return ret;
2115 }
2116 
2117 /**
2118  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2119  * @priv: private structure
2120  * @dma_conf: structure to take the dma data
2121  * @queue: TX queue index
2122  * Description: according to which descriptor can be used (extended or basic)
2123  * this function allocates the resources for the TX path: the descriptor
2124  * ring and the tx_skbuff/tx_skbuff_dma bookkeeping arrays used by the
2125  * transmit completion path.
2126  */
2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2128 					 struct stmmac_dma_conf *dma_conf,
2129 					 u32 queue)
2130 {
2131 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2132 	size_t size;
2133 	void *addr;
2134 
2135 	tx_q->queue_index = queue;
2136 	tx_q->priv_data = priv;
2137 
2138 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2139 				      sizeof(*tx_q->tx_skbuff_dma),
2140 				      GFP_KERNEL);
2141 	if (!tx_q->tx_skbuff_dma)
2142 		return -ENOMEM;
2143 
2144 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2145 				  sizeof(struct sk_buff *),
2146 				  GFP_KERNEL);
2147 	if (!tx_q->tx_skbuff)
2148 		return -ENOMEM;
2149 
2150 	if (priv->extend_desc)
2151 		size = sizeof(struct dma_extended_desc);
2152 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2153 		size = sizeof(struct dma_edesc);
2154 	else
2155 		size = sizeof(struct dma_desc);
2156 
2157 	size *= dma_conf->dma_tx_size;
2158 
2159 	addr = dma_alloc_coherent(priv->device, size,
2160 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2161 	if (!addr)
2162 		return -ENOMEM;
2163 
2164 	if (priv->extend_desc)
2165 		tx_q->dma_etx = addr;
2166 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2167 		tx_q->dma_entx = addr;
2168 	else
2169 		tx_q->dma_tx = addr;
2170 
2171 	return 0;
2172 }
2173 
2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2175 				       struct stmmac_dma_conf *dma_conf)
2176 {
2177 	u32 tx_count = priv->plat->tx_queues_to_use;
2178 	u32 queue;
2179 	int ret;
2180 
2181 	/* TX queues buffers and DMA */
2182 	for (queue = 0; queue < tx_count; queue++) {
2183 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2184 		if (ret)
2185 			goto err_dma;
2186 	}
2187 
2188 	return 0;
2189 
2190 err_dma:
2191 	free_dma_tx_desc_resources(priv, dma_conf);
2192 	return ret;
2193 }
2194 
2195 /**
2196  * alloc_dma_desc_resources - alloc TX/RX resources.
2197  * @priv: private structure
2198  * @dma_conf: structure to take the dma data
2199  * Description: according to which descriptor can be used (extended or basic)
2200  * this function allocates the resources for the TX and RX paths. In case of
2201  * reception, for example, it pre-allocates the RX buffers in order to
2202  * allow the zero-copy mechanism.
2203  */
2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2205 				    struct stmmac_dma_conf *dma_conf)
2206 {
2207 	/* RX Allocation */
2208 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2209 
2210 	if (ret)
2211 		return ret;
2212 
2213 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2214 
2215 	return ret;
2216 }
2217 
2218 /**
2219  * free_dma_desc_resources - free dma desc resources
2220  * @priv: private structure
2221  * @dma_conf: structure to take the dma data
2222  */
2223 static void free_dma_desc_resources(struct stmmac_priv *priv,
2224 				    struct stmmac_dma_conf *dma_conf)
2225 {
2226 	/* Release the DMA TX socket buffers */
2227 	free_dma_tx_desc_resources(priv, dma_conf);
2228 
2229 	/* Release the DMA RX socket buffers later
2230 	 * to ensure all pending XDP_TX buffers are returned.
2231 	 */
2232 	free_dma_rx_desc_resources(priv, dma_conf);
2233 }
2234 
2235 /**
2236  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2237  *  @priv: driver private structure
2238  *  Description: It is used for enabling the rx queues in the MAC
2239  */
2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2241 {
2242 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2243 	int queue;
2244 	u8 mode;
2245 
2246 	for (queue = 0; queue < rx_queues_count; queue++) {
2247 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2248 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2249 	}
2250 }
2251 
2252 /**
2253  * stmmac_start_rx_dma - start RX DMA channel
2254  * @priv: driver private structure
2255  * @chan: RX channel index
2256  * Description:
2257  * This starts an RX DMA channel
2258  */
2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2260 {
2261 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2262 	stmmac_start_rx(priv, priv->ioaddr, chan);
2263 }
2264 
2265 /**
2266  * stmmac_start_tx_dma - start TX DMA channel
2267  * @priv: driver private structure
2268  * @chan: TX channel index
2269  * Description:
2270  * This starts a TX DMA channel
2271  */
2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2273 {
2274 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2275 	stmmac_start_tx(priv, priv->ioaddr, chan);
2276 }
2277 
2278 /**
2279  * stmmac_stop_rx_dma - stop RX DMA channel
2280  * @priv: driver private structure
2281  * @chan: RX channel index
2282  * Description:
2283  * This stops a RX DMA channel
2284  */
2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2286 {
2287 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2288 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2289 }
2290 
2291 /**
2292  * stmmac_stop_tx_dma - stop TX DMA channel
2293  * @priv: driver private structure
2294  * @chan: TX channel index
2295  * Description:
2296  * This stops a TX DMA channel
2297  */
2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2299 {
2300 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2301 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2302 }
2303 
2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2305 {
2306 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2309 	u32 chan;
2310 
2311 	for (chan = 0; chan < dma_csr_ch; chan++) {
2312 		struct stmmac_channel *ch = &priv->channel[chan];
2313 		unsigned long flags;
2314 
2315 		spin_lock_irqsave(&ch->lock, flags);
2316 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2317 		spin_unlock_irqrestore(&ch->lock, flags);
2318 	}
2319 }
2320 
2321 /**
2322  * stmmac_start_all_dma - start all RX and TX DMA channels
2323  * @priv: driver private structure
2324  * Description:
2325  * This starts all the RX and TX DMA channels
2326  */
2327 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2328 {
2329 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2330 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2331 	u32 chan = 0;
2332 
2333 	for (chan = 0; chan < rx_channels_count; chan++)
2334 		stmmac_start_rx_dma(priv, chan);
2335 
2336 	for (chan = 0; chan < tx_channels_count; chan++)
2337 		stmmac_start_tx_dma(priv, chan);
2338 }
2339 
2340 /**
2341  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2342  * @priv: driver private structure
2343  * Description:
2344  * This stops the RX and TX DMA channels
2345  */
2346 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2347 {
2348 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2349 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2350 	u32 chan = 0;
2351 
2352 	for (chan = 0; chan < rx_channels_count; chan++)
2353 		stmmac_stop_rx_dma(priv, chan);
2354 
2355 	for (chan = 0; chan < tx_channels_count; chan++)
2356 		stmmac_stop_tx_dma(priv, chan);
2357 }
2358 
2359 /**
2360  *  stmmac_dma_operation_mode - HW DMA operation mode
2361  *  @priv: driver private structure
2362  *  Description: it is used for configuring the DMA operation mode register in
2363  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2364  */
2365 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2366 {
2367 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2368 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2369 	int rxfifosz = priv->plat->rx_fifo_size;
2370 	int txfifosz = priv->plat->tx_fifo_size;
2371 	u32 txmode = 0;
2372 	u32 rxmode = 0;
2373 	u32 chan = 0;
2374 	u8 qmode = 0;
2375 
2376 	if (rxfifosz == 0)
2377 		rxfifosz = priv->dma_cap.rx_fifo_size;
2378 	if (txfifosz == 0)
2379 		txfifosz = priv->dma_cap.tx_fifo_size;
2380 
2381 	/* Adjust for real per queue fifo size */
2382 	rxfifosz /= rx_channels_count;
2383 	txfifosz /= tx_channels_count;
2384 
2385 	if (priv->plat->force_thresh_dma_mode) {
2386 		txmode = tc;
2387 		rxmode = tc;
2388 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2389 		/*
2390 		 * In case of GMAC, SF mode can be enabled
2391 		 * to perform the TX COE in HW. This depends on:
2392 		 * 1) TX COE being actually supported;
2393 		 * 2) there being no bugged Jumbo frame support
2394 		 *    that requires not inserting the csum in the TDES.
2395 		 */
2396 		txmode = SF_DMA_MODE;
2397 		rxmode = SF_DMA_MODE;
2398 		priv->xstats.threshold = SF_DMA_MODE;
2399 	} else {
2400 		txmode = tc;
2401 		rxmode = SF_DMA_MODE;
2402 	}
2403 
2404 	/* configure all channels */
2405 	for (chan = 0; chan < rx_channels_count; chan++) {
2406 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2407 		u32 buf_size;
2408 
2409 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2410 
2411 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2412 				rxfifosz, qmode);
2413 
2414 		if (rx_q->xsk_pool) {
2415 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2416 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2417 					      buf_size,
2418 					      chan);
2419 		} else {
2420 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421 					      priv->dma_conf.dma_buf_sz,
2422 					      chan);
2423 		}
2424 	}
2425 
2426 	for (chan = 0; chan < tx_channels_count; chan++) {
2427 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2428 
2429 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2430 				txfifosz, qmode);
2431 	}
2432 }
2433 
2434 static void stmmac_xsk_request_timestamp(void *_priv)
2435 {
2436 	struct stmmac_metadata_request *meta_req = _priv;
2437 
2438 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2439 	*meta_req->set_ic = true;
2440 }
2441 
2442 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2443 {
2444 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2445 	struct stmmac_priv *priv = tx_compl->priv;
2446 	struct dma_desc *desc = tx_compl->desc;
2447 	bool found = false;
2448 	u64 ns = 0;
2449 
2450 	if (!priv->hwts_tx_en)
2451 		return 0;
2452 
2453 	/* check tx tstamp status */
2454 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2455 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2456 		found = true;
2457 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2458 		found = true;
2459 	}
2460 
2461 	if (found) {
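		/* Compensate for the clock domain crossing (CDC) error of
		 * the PTP timestamp before reporting it.
		 */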
2462 		ns -= priv->plat->cdc_error_adj;
2463 		return ns_to_ktime(ns);
2464 	}
2465 
2466 	return 0;
2467 }
2468 
2469 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2470 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2471 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2472 };
2473 
2474 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2475 {
2476 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2477 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2478 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2479 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2480 	unsigned int entry = tx_q->cur_tx;
2481 	struct dma_desc *tx_desc = NULL;
2482 	struct xdp_desc xdp_desc;
2483 	bool work_done = true;
2484 	u32 tx_set_ic_bit = 0;
2485 	unsigned long flags;
2486 
2487 	/* Avoids TX time-out as we are sharing with slow path */
2488 	txq_trans_cond_update(nq);
2489 
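	/* Never submit more frames than the TX ring currently has room for */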
2490 	budget = min(budget, stmmac_tx_avail(priv, queue));
2491 
2492 	while (budget-- > 0) {
2493 		struct stmmac_metadata_request meta_req;
2494 		struct xsk_tx_metadata *meta = NULL;
2495 		dma_addr_t dma_addr;
2496 		bool set_ic;
2497 
2498 		/* We share the TX ring with the slow path, so stop XSK TX desc
2499 		 * submission when the available ring space drops below the threshold.
2500 		 */
2501 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2502 		    !netif_carrier_ok(priv->dev)) {
2503 			work_done = false;
2504 			break;
2505 		}
2506 
2507 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2508 			break;
2509 
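		/* Drop frames exceeding the per-queue max SDU configured
		 * through EST and account them in the stats.
		 */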
2510 		if (priv->plat->est && priv->plat->est->enable &&
2511 		    priv->plat->est->max_sdu[queue] &&
2512 		    xdp_desc.len > priv->plat->est->max_sdu[queue]) {
2513 			priv->xstats.max_sdu_txq_drop[queue]++;
2514 			continue;
2515 		}
2516 
2517 		if (likely(priv->extend_desc))
2518 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2519 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2520 			tx_desc = &tx_q->dma_entx[entry].basic;
2521 		else
2522 			tx_desc = tx_q->dma_tx + entry;
2523 
2524 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2525 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2526 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2527 
2528 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2529 
2530 		/* To return the XDP buffer to the XSK pool, we simply call
2531 		 * xsk_tx_completed(), so we don't need to fill up
2532 		 * 'buf' and 'xdpf'.
2533 		 */
2534 		tx_q->tx_skbuff_dma[entry].buf = 0;
2535 		tx_q->xdpf[entry] = NULL;
2536 
2537 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2538 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2539 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2540 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2541 
2542 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2543 
2544 		tx_q->tx_count_frames++;
2545 
2546 		if (!priv->tx_coal_frames[queue])
2547 			set_ic = false;
2548 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2549 			set_ic = true;
2550 		else
2551 			set_ic = false;
2552 
2553 		meta_req.priv = priv;
2554 		meta_req.tx_desc = tx_desc;
2555 		meta_req.set_ic = &set_ic;
2556 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2557 					&meta_req);
2558 		if (set_ic) {
2559 			tx_q->tx_count_frames = 0;
2560 			stmmac_set_tx_ic(priv, tx_desc);
2561 			tx_set_ic_bit++;
2562 		}
2563 
2564 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2565 				       true, priv->mode, true, true,
2566 				       xdp_desc.len);
2567 
2568 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2569 
2570 		xsk_tx_metadata_to_compl(meta,
2571 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2572 
2573 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2574 		entry = tx_q->cur_tx;
2575 	}
2576 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2577 	txq_stats->tx_set_ic_bit += tx_set_ic_bit;
2578 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2579 
2580 	if (tx_desc) {
2581 		stmmac_flush_tx_descriptors(priv, queue);
2582 		xsk_tx_release(pool);
2583 	}
2584 
2585 	/* Return true if both of the following conditions are met:
2586 	 *  a) TX budget is still available
2587 	 *  b) work_done is true, i.e. the XSK TX desc peek found no more
2588 	 *     pending XSK TX frames to transmit
2589 	 */
2590 	return !!budget && work_done;
2591 }
2592 
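/* Raise the threshold-mode DMA threshold in steps of 64 (up to 256) when the
 * hardware reports an error suggesting the current threshold is too low.
 * Nothing is done while operating in Store-And-Forward mode.
 */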
2593 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2594 {
2595 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2596 		tc += 64;
2597 
2598 		if (priv->plat->force_thresh_dma_mode)
2599 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2600 		else
2601 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2602 						      chan);
2603 
2604 		priv->xstats.threshold = tc;
2605 	}
2606 }
2607 
2608 /**
2609  * stmmac_tx_clean - to manage the transmission completion
2610  * @priv: driver private structure
2611  * @budget: napi budget limiting this function's packet handling
2612  * @queue: TX queue index
2613  * @pending_packets: signal to arm the TX coal timer
2614  * Description: it reclaims the transmit resources after transmission completes.
2615  * If some packets still need to be handled due to TX coalescing, set
2616  * pending_packets to true to make NAPI arm the TX coal timer.
2617  */
2618 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2619 			   bool *pending_packets)
2620 {
2621 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2622 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2623 	unsigned int bytes_compl = 0, pkts_compl = 0;
2624 	unsigned int entry, xmits = 0, count = 0;
2625 	u32 tx_packets = 0, tx_errors = 0;
2626 	unsigned long flags;
2627 
2628 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2629 
2630 	tx_q->xsk_frames_done = 0;
2631 
2632 	entry = tx_q->dirty_tx;
2633 
2634 	/* Try to clean all TX complete frame in 1 shot */
2635 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2636 		struct xdp_frame *xdpf;
2637 		struct sk_buff *skb;
2638 		struct dma_desc *p;
2639 		int status;
2640 
2641 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2642 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2643 			xdpf = tx_q->xdpf[entry];
2644 			skb = NULL;
2645 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2646 			xdpf = NULL;
2647 			skb = tx_q->tx_skbuff[entry];
2648 		} else {
2649 			xdpf = NULL;
2650 			skb = NULL;
2651 		}
2652 
2653 		if (priv->extend_desc)
2654 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2655 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2656 			p = &tx_q->dma_entx[entry].basic;
2657 		else
2658 			p = tx_q->dma_tx + entry;
2659 
2660 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2661 		/* Check if the descriptor is owned by the DMA */
2662 		if (unlikely(status & tx_dma_own))
2663 			break;
2664 
2665 		count++;
2666 
2667 		/* Make sure descriptor fields are read after reading
2668 		 * the own bit.
2669 		 */
2670 		dma_rmb();
2671 
2672 		/* Just consider the last segment and ...*/
2673 		if (likely(!(status & tx_not_ls))) {
2674 			/* ... verify the status error condition */
2675 			if (unlikely(status & tx_err)) {
2676 				tx_errors++;
2677 				if (unlikely(status & tx_err_bump_tc))
2678 					stmmac_bump_dma_threshold(priv, queue);
2679 			} else {
2680 				tx_packets++;
2681 			}
2682 			if (skb) {
2683 				stmmac_get_tx_hwtstamp(priv, p, skb);
2684 			} else {
2685 				struct stmmac_xsk_tx_complete tx_compl = {
2686 					.priv = priv,
2687 					.desc = p,
2688 				};
2689 
2690 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2691 							 &stmmac_xsk_tx_metadata_ops,
2692 							 &tx_compl);
2693 			}
2694 		}
2695 
2696 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2697 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2698 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2699 				dma_unmap_page(priv->device,
2700 					       tx_q->tx_skbuff_dma[entry].buf,
2701 					       tx_q->tx_skbuff_dma[entry].len,
2702 					       DMA_TO_DEVICE);
2703 			else
2704 				dma_unmap_single(priv->device,
2705 						 tx_q->tx_skbuff_dma[entry].buf,
2706 						 tx_q->tx_skbuff_dma[entry].len,
2707 						 DMA_TO_DEVICE);
2708 			tx_q->tx_skbuff_dma[entry].buf = 0;
2709 			tx_q->tx_skbuff_dma[entry].len = 0;
2710 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2711 		}
2712 
2713 		stmmac_clean_desc3(priv, tx_q, p);
2714 
2715 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2716 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2717 
2718 		if (xdpf &&
2719 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2720 			xdp_return_frame_rx_napi(xdpf);
2721 			tx_q->xdpf[entry] = NULL;
2722 		}
2723 
2724 		if (xdpf &&
2725 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2726 			xdp_return_frame(xdpf);
2727 			tx_q->xdpf[entry] = NULL;
2728 		}
2729 
2730 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2731 			tx_q->xsk_frames_done++;
2732 
2733 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2734 			if (likely(skb)) {
2735 				pkts_compl++;
2736 				bytes_compl += skb->len;
2737 				dev_consume_skb_any(skb);
2738 				tx_q->tx_skbuff[entry] = NULL;
2739 			}
2740 		}
2741 
2742 		stmmac_release_tx_desc(priv, p, priv->mode);
2743 
2744 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2745 	}
2746 	tx_q->dirty_tx = entry;
2747 
2748 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2749 				  pkts_compl, bytes_compl);
2750 
2751 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2752 								queue))) &&
2753 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2754 
2755 		netif_dbg(priv, tx_done, priv->dev,
2756 			  "%s: restart transmit\n", __func__);
2757 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2758 	}
2759 
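	/* XSK TX completion: report reclaimed frames to the pool, handle the
	 * need_wakeup flag and try to transmit more zero-copy frames.
	 */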
2760 	if (tx_q->xsk_pool) {
2761 		bool work_done;
2762 
2763 		if (tx_q->xsk_frames_done)
2764 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2765 
2766 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2767 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2768 
2769 		/* For XSK TX, we try to send as many as possible.
2770 		 * If XSK work done (XSK TX desc empty and budget still
2771 		 * available), return "budget - 1" to reenable TX IRQ.
2772 		 * Else, return "budget" to make NAPI continue polling.
2773 		 */
2774 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2775 					       STMMAC_XSK_TX_BUDGET_MAX);
2776 		if (work_done)
2777 			xmits = budget - 1;
2778 		else
2779 			xmits = budget;
2780 	}
2781 
2782 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2783 	    priv->eee_sw_timer_en) {
2784 		if (stmmac_enable_eee_mode(priv))
2785 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2786 	}
2787 
2788 	/* We still have pending packets, let's call for a new scheduling */
2789 	if (tx_q->dirty_tx != tx_q->cur_tx)
2790 		*pending_packets = true;
2791 
2792 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2793 	txq_stats->tx_packets += tx_packets;
2794 	txq_stats->tx_pkt_n += tx_packets;
2795 	txq_stats->tx_clean++;
2796 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2797 
2798 	priv->xstats.tx_errors += tx_errors;
2799 
2800 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2801 
2802 	/* Combine decisions from TX clean and XSK TX */
2803 	return max(count, xmits);
2804 }
2805 
2806 /**
2807  * stmmac_tx_err - to manage the tx error
2808  * @priv: driver private structure
2809  * @chan: channel index
2810  * Description: it cleans the descriptors and restarts the transmission
2811  * in case of transmission errors.
2812  */
2813 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2814 {
2815 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2816 
2817 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2818 
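	/* Recovery sequence: stop the channel, drop all pending TX buffers,
	 * reset the ring to a clean state and restart DMA from the ring base.
	 */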
2819 	stmmac_stop_tx_dma(priv, chan);
2820 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2821 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2822 	stmmac_reset_tx_queue(priv, chan);
2823 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2824 			    tx_q->dma_tx_phy, chan);
2825 	stmmac_start_tx_dma(priv, chan);
2826 
2827 	priv->xstats.tx_errors++;
2828 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2829 }
2830 
2831 /**
2832  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2833  *  @priv: driver private structure
2834  *  @txmode: TX operating mode
2835  *  @rxmode: RX operating mode
2836  *  @chan: channel index
2837  *  Description: it is used for configuring the DMA operation mode at
2838  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2839  *  mode.
2840  */
2841 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2842 					  u32 rxmode, u32 chan)
2843 {
2844 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2845 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2846 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2847 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2848 	int rxfifosz = priv->plat->rx_fifo_size;
2849 	int txfifosz = priv->plat->tx_fifo_size;
2850 
2851 	if (rxfifosz == 0)
2852 		rxfifosz = priv->dma_cap.rx_fifo_size;
2853 	if (txfifosz == 0)
2854 		txfifosz = priv->dma_cap.tx_fifo_size;
2855 
2856 	/* Adjust for real per queue fifo size */
2857 	rxfifosz /= rx_channels_count;
2858 	txfifosz /= tx_channels_count;
2859 
2860 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2861 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2862 }
2863 
2864 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2865 {
2866 	int ret;
2867 
2868 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2869 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2870 	if (ret && (ret != -EINVAL)) {
2871 		stmmac_global_err(priv);
2872 		return true;
2873 	}
2874 
2875 	return false;
2876 }
2877 
2878 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2879 {
2880 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2881 						 &priv->xstats, chan, dir);
2882 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2883 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2884 	struct stmmac_channel *ch = &priv->channel[chan];
2885 	struct napi_struct *rx_napi;
2886 	struct napi_struct *tx_napi;
2887 	unsigned long flags;
2888 
2889 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
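	/* Zero-copy (XSK) queues are polled by the combined rxtx NAPI;
	 * other queues use the dedicated RX or TX NAPI instance.
	 */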
2890 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2891 
2892 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2893 		if (napi_schedule_prep(rx_napi)) {
2894 			spin_lock_irqsave(&ch->lock, flags);
2895 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2896 			spin_unlock_irqrestore(&ch->lock, flags);
2897 			__napi_schedule(rx_napi);
2898 		}
2899 	}
2900 
2901 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2902 		if (napi_schedule_prep(tx_napi)) {
2903 			spin_lock_irqsave(&ch->lock, flags);
2904 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2905 			spin_unlock_irqrestore(&ch->lock, flags);
2906 			__napi_schedule(tx_napi);
2907 		}
2908 	}
2909 
2910 	return status;
2911 }
2912 
2913 /**
2914  * stmmac_dma_interrupt - DMA ISR
2915  * @priv: driver private structure
2916  * Description: this is the DMA ISR. It is called by the main ISR.
2917  * It calls the dwmac dma routine and schedules the poll method in case
2918  * some work can be done.
2919  */
2920 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2921 {
2922 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2923 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2924 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2925 				tx_channel_count : rx_channel_count;
2926 	u32 chan;
2927 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2928 
2929 	/* Make sure we never check beyond our status buffer. */
2930 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2931 		channels_to_check = ARRAY_SIZE(status);
2932 
2933 	for (chan = 0; chan < channels_to_check; chan++)
2934 		status[chan] = stmmac_napi_check(priv, chan,
2935 						 DMA_DIR_RXTX);
2936 
2937 	for (chan = 0; chan < tx_channel_count; chan++) {
2938 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2939 			/* Try to bump up the dma threshold on this failure */
2940 			stmmac_bump_dma_threshold(priv, chan);
2941 		} else if (unlikely(status[chan] == tx_hard_error)) {
2942 			stmmac_tx_err(priv, chan);
2943 		}
2944 	}
2945 }
2946 
2947 /**
2948  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2949  * @priv: driver private structure
2950  * Description: this masks the MMC irq since the counters are managed in SW.
2951  */
2952 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2953 {
2954 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2955 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2956 
2957 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2958 
2959 	if (priv->dma_cap.rmon) {
2960 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2961 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2962 	} else
2963 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2964 }
2965 
2966 /**
2967  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2968  * @priv: driver private structure
2969  * Description:
2970  *  newer GMAC chip generations have a register to indicate the
2971  *  presence of the optional features/functions.
2972  *  This can also be used to override the values passed through the
2973  *  platform, which are necessary for old MAC10/100 and GMAC chips.
2974  */
2975 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2976 {
2977 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2978 }
2979 
2980 /**
2981  * stmmac_check_ether_addr - check if the MAC addr is valid
2982  * @priv: driver private structure
2983  * Description:
2984  * it verifies that the MAC address is valid; in case of failure it
2985  * generates a random MAC address
2986  */
2987 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2988 {
2989 	u8 addr[ETH_ALEN];
2990 
2991 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2992 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2993 		if (is_valid_ether_addr(addr))
2994 			eth_hw_addr_set(priv->dev, addr);
2995 		else
2996 			eth_hw_addr_random(priv->dev);
2997 		dev_info(priv->device, "device MAC address %pM\n",
2998 			 priv->dev->dev_addr);
2999 	}
3000 }
3001 
3002 /**
3003  * stmmac_init_dma_engine - DMA init.
3004  * @priv: driver private structure
3005  * Description:
3006  * It inits the DMA invoking the specific MAC/GMAC callback.
3007  * Some DMA parameters can be passed from the platform;
3008  * if they are not passed, a default is kept for the MAC or GMAC.
3009  */
3010 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3011 {
3012 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3013 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3014 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3015 	struct stmmac_rx_queue *rx_q;
3016 	struct stmmac_tx_queue *tx_q;
3017 	u32 chan = 0;
3018 	int atds = 0;
3019 	int ret = 0;
3020 
3021 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3022 		dev_err(priv->device, "Invalid DMA configuration\n");
3023 		return -EINVAL;
3024 	}
3025 
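	/* The Alternate Descriptor Size (ATDS) bit is only needed when
	 * extended descriptors are used in ring mode.
	 */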
3026 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3027 		atds = 1;
3028 
3029 	ret = stmmac_reset(priv, priv->ioaddr);
3030 	if (ret) {
3031 		dev_err(priv->device, "Failed to reset the dma\n");
3032 		return ret;
3033 	}
3034 
3035 	/* DMA Configuration */
3036 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
3037 
3038 	if (priv->plat->axi)
3039 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3040 
3041 	/* DMA CSR Channel configuration */
3042 	for (chan = 0; chan < dma_csr_ch; chan++) {
3043 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3044 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3045 	}
3046 
3047 	/* DMA RX Channel Configuration */
3048 	for (chan = 0; chan < rx_channels_count; chan++) {
3049 		rx_q = &priv->dma_conf.rx_queue[chan];
3050 
3051 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3052 				    rx_q->dma_rx_phy, chan);
3053 
3054 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3055 				     (rx_q->buf_alloc_num *
3056 				      sizeof(struct dma_desc));
3057 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3058 				       rx_q->rx_tail_addr, chan);
3059 	}
3060 
3061 	/* DMA TX Channel Configuration */
3062 	for (chan = 0; chan < tx_channels_count; chan++) {
3063 		tx_q = &priv->dma_conf.tx_queue[chan];
3064 
3065 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3066 				    tx_q->dma_tx_phy, chan);
3067 
3068 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3069 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3070 				       tx_q->tx_tail_addr, chan);
3071 	}
3072 
3073 	return ret;
3074 }
3075 
3076 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3077 {
3078 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3079 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3080 	struct stmmac_channel *ch;
3081 	struct napi_struct *napi;
3082 
3083 	if (!tx_coal_timer)
3084 		return;
3085 
3086 	ch = &priv->channel[tx_q->queue_index];
3087 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3088 
3089 	/* Arm timer only if napi is not already scheduled.
3090 	 * Try to cancel any timer if napi is scheduled; the timer will be armed
3091 	 * again in the next scheduled napi.
3092 	 */
3093 	if (unlikely(!napi_is_scheduled(napi)))
3094 		hrtimer_start(&tx_q->txtimer,
3095 			      STMMAC_COAL_TIMER(tx_coal_timer),
3096 			      HRTIMER_MODE_REL);
3097 	else
3098 		hrtimer_try_to_cancel(&tx_q->txtimer);
3099 }
3100 
3101 /**
3102  * stmmac_tx_timer - mitigation sw timer for tx.
3103  * @t: pointer to the hrtimer embedded in the TX queue
3104  * Description:
3105  * This is the timer handler that schedules the TX NAPI to run stmmac_tx_clean.
3106  */
3107 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3108 {
3109 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3110 	struct stmmac_priv *priv = tx_q->priv_data;
3111 	struct stmmac_channel *ch;
3112 	struct napi_struct *napi;
3113 
3114 	ch = &priv->channel[tx_q->queue_index];
3115 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3116 
3117 	if (likely(napi_schedule_prep(napi))) {
3118 		unsigned long flags;
3119 
3120 		spin_lock_irqsave(&ch->lock, flags);
3121 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3122 		spin_unlock_irqrestore(&ch->lock, flags);
3123 		__napi_schedule(napi);
3124 	}
3125 
3126 	return HRTIMER_NORESTART;
3127 }
3128 
3129 /**
3130  * stmmac_init_coalesce - init mitigation options.
3131  * @priv: driver private structure
3132  * Description:
3133  * This inits the coalesce parameters: i.e. timer rate,
3134  * timer handler and default threshold used for enabling the
3135  * interrupt on completion bit.
3136  */
3137 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3138 {
3139 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3140 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3141 	u32 chan;
3142 
3143 	for (chan = 0; chan < tx_channel_count; chan++) {
3144 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3145 
3146 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3147 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3148 
3149 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3150 		tx_q->txtimer.function = stmmac_tx_timer;
3151 	}
3152 
3153 	for (chan = 0; chan < rx_channel_count; chan++)
3154 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3155 }
3156 
3157 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3158 {
3159 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3160 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3161 	u32 chan;
3162 
3163 	/* set TX ring length */
3164 	for (chan = 0; chan < tx_channels_count; chan++)
3165 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3166 				       (priv->dma_conf.dma_tx_size - 1), chan);
3167 
3168 	/* set RX ring length */
3169 	for (chan = 0; chan < rx_channels_count; chan++)
3170 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3171 				       (priv->dma_conf.dma_rx_size - 1), chan);
3172 }
3173 
3174 /**
3175  *  stmmac_set_tx_queue_weight - Set TX queue weight
3176  *  @priv: driver private structure
3177  *  Description: It is used for setting the TX queue weights
3178  */
3179 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3180 {
3181 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3182 	u32 weight;
3183 	u32 queue;
3184 
3185 	for (queue = 0; queue < tx_queues_count; queue++) {
3186 		weight = priv->plat->tx_queues_cfg[queue].weight;
3187 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3188 	}
3189 }
3190 
3191 /**
3192  *  stmmac_configure_cbs - Configure CBS in TX queue
3193  *  @priv: driver private structure
3194  *  Description: It is used for configuring CBS in AVB TX queues
3195  */
3196 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3197 {
3198 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3199 	u32 mode_to_use;
3200 	u32 queue;
3201 
3202 	/* queue 0 is reserved for legacy traffic */
3203 	for (queue = 1; queue < tx_queues_count; queue++) {
3204 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3205 		if (mode_to_use == MTL_QUEUE_DCB)
3206 			continue;
3207 
3208 		stmmac_config_cbs(priv, priv->hw,
3209 				priv->plat->tx_queues_cfg[queue].send_slope,
3210 				priv->plat->tx_queues_cfg[queue].idle_slope,
3211 				priv->plat->tx_queues_cfg[queue].high_credit,
3212 				priv->plat->tx_queues_cfg[queue].low_credit,
3213 				queue);
3214 	}
3215 }
3216 
3217 /**
3218  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3219  *  @priv: driver private structure
3220  *  Description: It is used for mapping RX queues to RX dma channels
3221  */
3222 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3223 {
3224 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3225 	u32 queue;
3226 	u32 chan;
3227 
3228 	for (queue = 0; queue < rx_queues_count; queue++) {
3229 		chan = priv->plat->rx_queues_cfg[queue].chan;
3230 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3231 	}
3232 }
3233 
3234 /**
3235  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3236  *  @priv: driver private structure
3237  *  Description: It is used for configuring the RX Queue Priority
3238  */
3239 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3240 {
3241 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3242 	u32 queue;
3243 	u32 prio;
3244 
3245 	for (queue = 0; queue < rx_queues_count; queue++) {
3246 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3247 			continue;
3248 
3249 		prio = priv->plat->rx_queues_cfg[queue].prio;
3250 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3251 	}
3252 }
3253 
3254 /**
3255  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3256  *  @priv: driver private structure
3257  *  Description: It is used for configuring the TX Queue Priority
3258  */
3259 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3260 {
3261 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3262 	u32 queue;
3263 	u32 prio;
3264 
3265 	for (queue = 0; queue < tx_queues_count; queue++) {
3266 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3267 			continue;
3268 
3269 		prio = priv->plat->tx_queues_cfg[queue].prio;
3270 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3271 	}
3272 }
3273 
3274 /**
3275  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3276  *  @priv: driver private structure
3277  *  Description: It is used for configuring the RX queue routing
3278  */
3279 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3280 {
3281 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3282 	u32 queue;
3283 	u8 packet;
3284 
3285 	for (queue = 0; queue < rx_queues_count; queue++) {
3286 		/* no specific packet type routing specified for the queue */
3287 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3288 			continue;
3289 
3290 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3291 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3292 	}
3293 }
3294 
3295 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3296 {
3297 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3298 		priv->rss.enable = false;
3299 		return;
3300 	}
3301 
3302 	if (priv->dev->features & NETIF_F_RXHASH)
3303 		priv->rss.enable = true;
3304 	else
3305 		priv->rss.enable = false;
3306 
3307 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3308 			     priv->plat->rx_queues_to_use);
3309 }
3310 
3311 /**
3312  *  stmmac_mtl_configuration - Configure MTL
3313  *  @priv: driver private structure
3314  *  Description: It is used for configuring the MTL
3315  */
3316 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3317 {
3318 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3319 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3320 
3321 	if (tx_queues_count > 1)
3322 		stmmac_set_tx_queue_weight(priv);
3323 
3324 	/* Configure MTL RX algorithms */
3325 	if (rx_queues_count > 1)
3326 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3327 				priv->plat->rx_sched_algorithm);
3328 
3329 	/* Configure MTL TX algorithms */
3330 	if (tx_queues_count > 1)
3331 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3332 				priv->plat->tx_sched_algorithm);
3333 
3334 	/* Configure CBS in AVB TX queues */
3335 	if (tx_queues_count > 1)
3336 		stmmac_configure_cbs(priv);
3337 
3338 	/* Map RX MTL to DMA channels */
3339 	stmmac_rx_queue_dma_chan_map(priv);
3340 
3341 	/* Enable MAC RX Queues */
3342 	stmmac_mac_enable_rx_queues(priv);
3343 
3344 	/* Set RX priorities */
3345 	if (rx_queues_count > 1)
3346 		stmmac_mac_config_rx_queues_prio(priv);
3347 
3348 	/* Set TX priorities */
3349 	if (tx_queues_count > 1)
3350 		stmmac_mac_config_tx_queues_prio(priv);
3351 
3352 	/* Set RX routing */
3353 	if (rx_queues_count > 1)
3354 		stmmac_mac_config_rx_queues_routing(priv);
3355 
3356 	/* Receive Side Scaling */
3357 	if (rx_queues_count > 1)
3358 		stmmac_mac_config_rss(priv);
3359 }
3360 
3361 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3362 {
3363 	if (priv->dma_cap.asp) {
3364 		netdev_info(priv->dev, "Enabling Safety Features\n");
3365 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3366 					  priv->plat->safety_feat_cfg);
3367 	} else {
3368 		netdev_info(priv->dev, "No Safety Features support found\n");
3369 	}
3370 }
3371 
3372 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3373 {
3374 	char *name;
3375 
3376 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3377 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3378 
3379 	name = priv->wq_name;
3380 	sprintf(name, "%s-fpe", priv->dev->name);
3381 
3382 	priv->fpe_wq = create_singlethread_workqueue(name);
3383 	if (!priv->fpe_wq) {
3384 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3385 
3386 		return -ENOMEM;
3387 	}
3388 	netdev_info(priv->dev, "FPE workqueue start");
3389 
3390 	return 0;
3391 }
3392 
3393 /**
3394  * stmmac_hw_setup - setup mac in a usable state.
3395  *  @dev : pointer to the device structure.
3396  *  @ptp_register: register PTP if set
3397  *  Description:
3398  *  this is the main function to set up the HW in a usable state: the
3399  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3400  *  checksum features, timers) and the DMA is ready to start receiving
3401  *  and transmitting.
3402  *  Return value:
3403  *  0 on success and an appropriate negative errno value, as defined in
3404  *  errno.h, on failure.
3405  */
3406 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3407 {
3408 	struct stmmac_priv *priv = netdev_priv(dev);
3409 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3410 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3411 	bool sph_en;
3412 	u32 chan;
3413 	int ret;
3414 
3415 	/* DMA initialization and SW reset */
3416 	ret = stmmac_init_dma_engine(priv);
3417 	if (ret < 0) {
3418 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3419 			   __func__);
3420 		return ret;
3421 	}
3422 
3423 	/* Copy the MAC addr into the HW  */
3424 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3425 
3426 	/* PS and related bits will be programmed according to the speed */
3427 	if (priv->hw->pcs) {
3428 		int speed = priv->plat->mac_port_sel_speed;
3429 
3430 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3431 		    (speed == SPEED_1000)) {
3432 			priv->hw->ps = speed;
3433 		} else {
3434 			dev_warn(priv->device, "invalid port speed\n");
3435 			priv->hw->ps = 0;
3436 		}
3437 	}
3438 
3439 	/* Initialize the MAC Core */
3440 	stmmac_core_init(priv, priv->hw, dev);
3441 
3442 	/* Initialize MTL*/
3443 	stmmac_mtl_configuration(priv);
3444 
3445 	/* Initialize Safety Features */
3446 	stmmac_safety_feat_configuration(priv);
3447 
3448 	ret = stmmac_rx_ipc(priv, priv->hw);
3449 	if (!ret) {
3450 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3451 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3452 		priv->hw->rx_csum = 0;
3453 	}
3454 
3455 	/* Enable the MAC Rx/Tx */
3456 	stmmac_mac_set(priv, priv->ioaddr, true);
3457 
3458 	/* Set the HW DMA mode and the COE */
3459 	stmmac_dma_operation_mode(priv);
3460 
3461 	stmmac_mmc_setup(priv);
3462 
3463 	if (ptp_register) {
3464 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3465 		if (ret < 0)
3466 			netdev_warn(priv->dev,
3467 				    "failed to enable PTP reference clock: %pe\n",
3468 				    ERR_PTR(ret));
3469 	}
3470 
3471 	ret = stmmac_init_ptp(priv);
3472 	if (ret == -EOPNOTSUPP)
3473 		netdev_info(priv->dev, "PTP not supported by HW\n");
3474 	else if (ret)
3475 		netdev_warn(priv->dev, "PTP init failed\n");
3476 	else if (ptp_register)
3477 		stmmac_ptp_register(priv);
3478 
3479 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3480 
3481 	/* Convert the timer from msec to usec */
3482 	if (!priv->tx_lpi_timer)
3483 		priv->tx_lpi_timer = eee_timer * 1000;
3484 
3485 	if (priv->use_riwt) {
3486 		u32 queue;
3487 
3488 		for (queue = 0; queue < rx_cnt; queue++) {
3489 			if (!priv->rx_riwt[queue])
3490 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3491 
3492 			stmmac_rx_watchdog(priv, priv->ioaddr,
3493 					   priv->rx_riwt[queue], queue);
3494 		}
3495 	}
3496 
3497 	if (priv->hw->pcs)
3498 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3499 
3500 	/* set TX and RX rings length */
3501 	stmmac_set_rings_length(priv);
3502 
3503 	/* Enable TSO */
3504 	if (priv->tso) {
3505 		for (chan = 0; chan < tx_cnt; chan++) {
3506 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3507 
3508 			/* TSO and TBS cannot co-exist */
3509 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3510 				continue;
3511 
3512 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3513 		}
3514 	}
3515 
3516 	/* Enable Split Header */
3517 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3518 	for (chan = 0; chan < rx_cnt; chan++)
3519 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3520 
3521 
3522 	/* VLAN Tag Insertion */
3523 	if (priv->dma_cap.vlins)
3524 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3525 
3526 	/* TBS */
3527 	for (chan = 0; chan < tx_cnt; chan++) {
3528 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3529 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3530 
3531 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3532 	}
3533 
3534 	/* Configure real RX and TX queues */
3535 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3536 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3537 
3538 	/* Start the ball rolling... */
3539 	stmmac_start_all_dma(priv);
3540 
3541 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3542 
3543 	if (priv->dma_cap.fpesel) {
3544 		stmmac_fpe_start_wq(priv);
3545 
3546 		if (priv->plat->fpe_cfg->enable)
3547 			stmmac_fpe_handshake(priv, true);
3548 	}
3549 
3550 	return 0;
3551 }
3552 
3553 static void stmmac_hw_teardown(struct net_device *dev)
3554 {
3555 	struct stmmac_priv *priv = netdev_priv(dev);
3556 
3557 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3558 }
3559 
3560 static void stmmac_free_irq(struct net_device *dev,
3561 			    enum request_irq_err irq_err, int irq_idx)
3562 {
3563 	struct stmmac_priv *priv = netdev_priv(dev);
3564 	int j;
3565 
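	/* Tear down in the reverse order of the request path: each case falls
	 * through to the previously requested IRQs, so only the lines that
	 * were actually allocated get freed.
	 */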
3566 	switch (irq_err) {
3567 	case REQ_IRQ_ERR_ALL:
3568 		irq_idx = priv->plat->tx_queues_to_use;
3569 		fallthrough;
3570 	case REQ_IRQ_ERR_TX:
3571 		for (j = irq_idx - 1; j >= 0; j--) {
3572 			if (priv->tx_irq[j] > 0) {
3573 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3574 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3575 			}
3576 		}
3577 		irq_idx = priv->plat->rx_queues_to_use;
3578 		fallthrough;
3579 	case REQ_IRQ_ERR_RX:
3580 		for (j = irq_idx - 1; j >= 0; j--) {
3581 			if (priv->rx_irq[j] > 0) {
3582 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3583 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3584 			}
3585 		}
3586 
3587 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3588 			free_irq(priv->sfty_ue_irq, dev);
3589 		fallthrough;
3590 	case REQ_IRQ_ERR_SFTY_UE:
3591 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3592 			free_irq(priv->sfty_ce_irq, dev);
3593 		fallthrough;
3594 	case REQ_IRQ_ERR_SFTY_CE:
3595 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3596 			free_irq(priv->lpi_irq, dev);
3597 		fallthrough;
3598 	case REQ_IRQ_ERR_LPI:
3599 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3600 			free_irq(priv->wol_irq, dev);
3601 		fallthrough;
3602 	case REQ_IRQ_ERR_WOL:
3603 		free_irq(dev->irq, dev);
3604 		fallthrough;
3605 	case REQ_IRQ_ERR_MAC:
3606 	case REQ_IRQ_ERR_NO:
3607 		/* If the MAC IRQ request failed, there is no more IRQ to free */
3608 		break;
3609 	}
3610 }
3611 
3612 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3613 {
3614 	struct stmmac_priv *priv = netdev_priv(dev);
3615 	enum request_irq_err irq_err;
3616 	cpumask_t cpu_mask;
3617 	int irq_idx = 0;
3618 	char *int_name;
3619 	int ret;
3620 	int i;
3621 
3622 	/* For common interrupt */
3623 	int_name = priv->int_name_mac;
3624 	sprintf(int_name, "%s:%s", dev->name, "mac");
3625 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3626 			  0, int_name, dev);
3627 	if (unlikely(ret < 0)) {
3628 		netdev_err(priv->dev,
3629 			   "%s: alloc mac MSI %d (error: %d)\n",
3630 			   __func__, dev->irq, ret);
3631 		irq_err = REQ_IRQ_ERR_MAC;
3632 		goto irq_error;
3633 	}
3634 
3635 	/* Request the Wake IRQ in case another line
3636 	 * is used for WoL
3637 	 */
3638 	priv->wol_irq_disabled = true;
3639 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3640 		int_name = priv->int_name_wol;
3641 		sprintf(int_name, "%s:%s", dev->name, "wol");
3642 		ret = request_irq(priv->wol_irq,
3643 				  stmmac_mac_interrupt,
3644 				  0, int_name, dev);
3645 		if (unlikely(ret < 0)) {
3646 			netdev_err(priv->dev,
3647 				   "%s: alloc wol MSI %d (error: %d)\n",
3648 				   __func__, priv->wol_irq, ret);
3649 			irq_err = REQ_IRQ_ERR_WOL;
3650 			goto irq_error;
3651 		}
3652 	}
3653 
3654 	/* Request the LPI IRQ in case another line
3655 	 * is used for LPI
3656 	 */
3657 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3658 		int_name = priv->int_name_lpi;
3659 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3660 		ret = request_irq(priv->lpi_irq,
3661 				  stmmac_mac_interrupt,
3662 				  0, int_name, dev);
3663 		if (unlikely(ret < 0)) {
3664 			netdev_err(priv->dev,
3665 				   "%s: alloc lpi MSI %d (error: %d)\n",
3666 				   __func__, priv->lpi_irq, ret);
3667 			irq_err = REQ_IRQ_ERR_LPI;
3668 			goto irq_error;
3669 		}
3670 	}
3671 
3672 	/* Request the Safety Feature Correctable Error line in
3673 	 * case another line is used
3674 	 */
3675 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3676 		int_name = priv->int_name_sfty_ce;
3677 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3678 		ret = request_irq(priv->sfty_ce_irq,
3679 				  stmmac_safety_interrupt,
3680 				  0, int_name, dev);
3681 		if (unlikely(ret < 0)) {
3682 			netdev_err(priv->dev,
3683 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3684 				   __func__, priv->sfty_ce_irq, ret);
3685 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3686 			goto irq_error;
3687 		}
3688 	}
3689 
3690 	/* Request the Safety Feature Uncorrectable Error line in
3691 	 * case another line is used
3692 	 */
3693 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3694 		int_name = priv->int_name_sfty_ue;
3695 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3696 		ret = request_irq(priv->sfty_ue_irq,
3697 				  stmmac_safety_interrupt,
3698 				  0, int_name, dev);
3699 		if (unlikely(ret < 0)) {
3700 			netdev_err(priv->dev,
3701 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3702 				   __func__, priv->sfty_ue_irq, ret);
3703 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3704 			goto irq_error;
3705 		}
3706 	}
3707 
3708 	/* Request Rx MSI irq */
3709 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3710 		if (i >= MTL_MAX_RX_QUEUES)
3711 			break;
3712 		if (priv->rx_irq[i] == 0)
3713 			continue;
3714 
3715 		int_name = priv->int_name_rx_irq[i];
3716 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3717 		ret = request_irq(priv->rx_irq[i],
3718 				  stmmac_msi_intr_rx,
3719 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3720 		if (unlikely(ret < 0)) {
3721 			netdev_err(priv->dev,
3722 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3723 				   __func__, i, priv->rx_irq[i], ret);
3724 			irq_err = REQ_IRQ_ERR_RX;
3725 			irq_idx = i;
3726 			goto irq_error;
3727 		}
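		/* Spread the per-queue RX vectors across the online CPUs in a
		 * simple round-robin fashion.
		 */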
3728 		cpumask_clear(&cpu_mask);
3729 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3730 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3731 	}
3732 
3733 	/* Request Tx MSI irq */
3734 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3735 		if (i >= MTL_MAX_TX_QUEUES)
3736 			break;
3737 		if (priv->tx_irq[i] == 0)
3738 			continue;
3739 
3740 		int_name = priv->int_name_tx_irq[i];
3741 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3742 		ret = request_irq(priv->tx_irq[i],
3743 				  stmmac_msi_intr_tx,
3744 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3745 		if (unlikely(ret < 0)) {
3746 			netdev_err(priv->dev,
3747 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3748 				   __func__, i, priv->tx_irq[i], ret);
3749 			irq_err = REQ_IRQ_ERR_TX;
3750 			irq_idx = i;
3751 			goto irq_error;
3752 		}
3753 		cpumask_clear(&cpu_mask);
3754 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3755 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3756 	}
3757 
3758 	return 0;
3759 
3760 irq_error:
3761 	stmmac_free_irq(dev, irq_err, irq_idx);
3762 	return ret;
3763 }
3764 
3765 static int stmmac_request_irq_single(struct net_device *dev)
3766 {
3767 	struct stmmac_priv *priv = netdev_priv(dev);
3768 	enum request_irq_err irq_err;
3769 	int ret;
3770 
3771 	ret = request_irq(dev->irq, stmmac_interrupt,
3772 			  IRQF_SHARED, dev->name, dev);
3773 	if (unlikely(ret < 0)) {
3774 		netdev_err(priv->dev,
3775 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3776 			   __func__, dev->irq, ret);
3777 		irq_err = REQ_IRQ_ERR_MAC;
3778 		goto irq_error;
3779 	}
3780 
3781 	/* Request the Wake IRQ in case another line
3782 	 * is used for WoL
3783 	 */
3784 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3785 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3786 				  IRQF_SHARED, dev->name, dev);
3787 		if (unlikely(ret < 0)) {
3788 			netdev_err(priv->dev,
3789 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3790 				   __func__, priv->wol_irq, ret);
3791 			irq_err = REQ_IRQ_ERR_WOL;
3792 			goto irq_error;
3793 		}
3794 	}
3795 
3796 	/* Request the IRQ lines */
3797 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3798 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3799 				  IRQF_SHARED, dev->name, dev);
3800 		if (unlikely(ret < 0)) {
3801 			netdev_err(priv->dev,
3802 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3803 				   __func__, priv->lpi_irq, ret);
3804 			irq_err = REQ_IRQ_ERR_LPI;
3805 			goto irq_error;
3806 		}
3807 	}
3808 
3809 	return 0;
3810 
3811 irq_error:
3812 	stmmac_free_irq(dev, irq_err, 0);
3813 	return ret;
3814 }
3815 
3816 static int stmmac_request_irq(struct net_device *dev)
3817 {
3818 	struct stmmac_priv *priv = netdev_priv(dev);
3819 	int ret;
3820 
3821 	/* Request the IRQ lines */
3822 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3823 		ret = stmmac_request_irq_multi_msi(dev);
3824 	else
3825 		ret = stmmac_request_irq_single(dev);
3826 
3827 	return ret;
3828 }
3829 
3830 /**
3831  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3832  *  @priv: driver private structure
3833  *  @mtu: MTU used to size the DMA queues and buffers
3834  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3835  *  Allocate the Tx/Rx DMA queue and init them.
3836  *  Return value:
3837  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3838  */
3839 static struct stmmac_dma_conf *
3840 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3841 {
3842 	struct stmmac_dma_conf *dma_conf;
3843 	int chan, bfsize, ret;
3844 
3845 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3846 	if (!dma_conf) {
3847 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3848 			   __func__);
3849 		return ERR_PTR(-ENOMEM);
3850 	}
3851 
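	/* Pick the RX buffer size: use 16KiB buffers when the configured MTU
	 * calls for them and the core supports it, otherwise derive a standard
	 * buffer size from the MTU.
	 */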
3852 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3853 	if (bfsize < 0)
3854 		bfsize = 0;
3855 
3856 	if (bfsize < BUF_SIZE_16KiB)
3857 		bfsize = stmmac_set_bfsize(mtu, 0);
3858 
3859 	dma_conf->dma_buf_sz = bfsize;
3860 	/* Choose the Tx/Rx ring sizes from the ones already defined in the
3861 	 * priv struct, if any.
3862 	 */
3863 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3864 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3865 
3866 	if (!dma_conf->dma_tx_size)
3867 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3868 	if (!dma_conf->dma_rx_size)
3869 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3870 
3871 	/* TBS availability must be known before the TX descriptors are allocated */
3872 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3873 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3874 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3875 
3876 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3877 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3878 	}
3879 
3880 	ret = alloc_dma_desc_resources(priv, dma_conf);
3881 	if (ret < 0) {
3882 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3883 			   __func__);
3884 		goto alloc_error;
3885 	}
3886 
3887 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3888 	if (ret < 0) {
3889 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3890 			   __func__);
3891 		goto init_error;
3892 	}
3893 
3894 	return dma_conf;
3895 
3896 init_error:
3897 	free_dma_desc_resources(priv, dma_conf);
3898 alloc_error:
3899 	kfree(dma_conf);
3900 	return ERR_PTR(ret);
3901 }
3902 
3903 /**
3904  *  __stmmac_open - open entry point of the driver
3905  *  @dev : pointer to the device structure.
3906  *  @dma_conf: structure holding the DMA configuration to install
3907  *  Description:
3908  *  This function is the open entry point of the driver.
3909  *  Return value:
3910  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3911  *  file on failure.
3912  */
3913 static int __stmmac_open(struct net_device *dev,
3914 			 struct stmmac_dma_conf *dma_conf)
3915 {
3916 	struct stmmac_priv *priv = netdev_priv(dev);
3917 	int mode = priv->plat->phy_interface;
3918 	u32 chan;
3919 	int ret;
3920 
3921 	ret = pm_runtime_resume_and_get(priv->device);
3922 	if (ret < 0)
3923 		return ret;
3924 
3925 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3926 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3927 	    (!priv->hw->xpcs ||
3928 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3929 	    !priv->hw->lynx_pcs) {
3930 		ret = stmmac_init_phy(dev);
3931 		if (ret) {
3932 			netdev_err(priv->dev,
3933 				   "%s: Cannot attach to PHY (error: %d)\n",
3934 				   __func__, ret);
3935 			goto init_phy_error;
3936 		}
3937 	}
3938 
3939 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3940 
3941 	buf_sz = dma_conf->dma_buf_sz;
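	/* Keep the per-queue TBS enable state across the dma_conf swap so a
	 * previously configured launch-time (TBS) setup survives a reopen.
	 */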
3942 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3943 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3944 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3945 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3946 
3947 	stmmac_reset_queues_param(priv);
3948 
3949 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3950 	    priv->plat->serdes_powerup) {
3951 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3952 		if (ret < 0) {
3953 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3954 				   __func__);
3955 			goto init_error;
3956 		}
3957 	}
3958 
3959 	ret = stmmac_hw_setup(dev, true);
3960 	if (ret < 0) {
3961 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3962 		goto init_error;
3963 	}
3964 
3965 	stmmac_init_coalesce(priv);
3966 
3967 	phylink_start(priv->phylink);
3968 	/* We may have called phylink_speed_down before */
3969 	phylink_speed_up(priv->phylink);
3970 
3971 	ret = stmmac_request_irq(dev);
3972 	if (ret)
3973 		goto irq_error;
3974 
3975 	stmmac_enable_all_queues(priv);
3976 	netif_tx_start_all_queues(priv->dev);
3977 	stmmac_enable_all_dma_irq(priv);
3978 
3979 	return 0;
3980 
3981 irq_error:
3982 	phylink_stop(priv->phylink);
3983 
3984 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3985 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3986 
3987 	stmmac_hw_teardown(dev);
3988 init_error:
3989 	phylink_disconnect_phy(priv->phylink);
3990 init_phy_error:
3991 	pm_runtime_put(priv->device);
3992 	return ret;
3993 }
3994 
3995 static int stmmac_open(struct net_device *dev)
3996 {
3997 	struct stmmac_priv *priv = netdev_priv(dev);
3998 	struct stmmac_dma_conf *dma_conf;
3999 	int ret;
4000 
4001 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4002 	if (IS_ERR(dma_conf))
4003 		return PTR_ERR(dma_conf);
4004 
4005 	ret = __stmmac_open(dev, dma_conf);
4006 	if (ret)
4007 		free_dma_desc_resources(priv, dma_conf);
4008 
4009 	kfree(dma_conf);
4010 	return ret;
4011 }
4012 
4013 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
4014 {
4015 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
4016 
4017 	if (priv->fpe_wq)
4018 		destroy_workqueue(priv->fpe_wq);
4019 
4020 	netdev_info(priv->dev, "FPE workqueue stop\n");
4021 }
4022 
4023 /**
4024  *  stmmac_release - close entry point of the driver
4025  *  @dev : device pointer.
4026  *  Description:
4027  *  This is the stop entry point of the driver.
4028  */
4029 static int stmmac_release(struct net_device *dev)
4030 {
4031 	struct stmmac_priv *priv = netdev_priv(dev);
4032 	u32 chan;
4033 
4034 	if (device_may_wakeup(priv->device))
4035 		phylink_speed_down(priv->phylink, false);
4036 	/* Stop and disconnect the PHY */
4037 	phylink_stop(priv->phylink);
4038 	phylink_disconnect_phy(priv->phylink);
4039 
4040 	stmmac_disable_all_queues(priv);
4041 
4042 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4043 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4044 
4045 	netif_tx_disable(dev);
4046 
4047 	/* Free the IRQ lines */
4048 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4049 
4050 	if (priv->eee_enabled) {
4051 		priv->tx_path_in_lpi_mode = false;
4052 		del_timer_sync(&priv->eee_ctrl_timer);
4053 	}
4054 
4055 	/* Stop TX/RX DMA and clear the descriptors */
4056 	stmmac_stop_all_dma(priv);
4057 
4058 	/* Release and free the Rx/Tx resources */
4059 	free_dma_desc_resources(priv, &priv->dma_conf);
4060 
4061 	/* Disable the MAC Rx/Tx */
4062 	stmmac_mac_set(priv, priv->ioaddr, false);
4063 
4064 	/* Power down the SerDes if present */
4065 	if (priv->plat->serdes_powerdown)
4066 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4067 
4068 	netif_carrier_off(dev);
4069 
4070 	stmmac_release_ptp(priv);
4071 
4072 	pm_runtime_put(priv->device);
4073 
4074 	if (priv->dma_cap.fpesel)
4075 		stmmac_fpe_stop_wq(priv);
4076 
4077 	return 0;
4078 }
4079 
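/* Ask the hardware to insert the VLAN tag of @skb: when the offload is
 * available, a descriptor carrying the tag is prepared and handed to the
 * DMA. Returns true when such a descriptor was consumed.
 */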
4080 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4081 			       struct stmmac_tx_queue *tx_q)
4082 {
4083 	u16 tag = 0x0, inner_tag = 0x0;
4084 	u32 inner_type = 0x0;
4085 	struct dma_desc *p;
4086 
4087 	if (!priv->dma_cap.vlins)
4088 		return false;
4089 	if (!skb_vlan_tag_present(skb))
4090 		return false;
4091 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4092 		inner_tag = skb_vlan_tag_get(skb);
4093 		inner_type = STMMAC_VLAN_INSERT;
4094 	}
4095 
4096 	tag = skb_vlan_tag_get(skb);
4097 
4098 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4099 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4100 	else
4101 		p = &tx_q->dma_tx[tx_q->cur_tx];
4102 
4103 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4104 		return false;
4105 
4106 	stmmac_set_tx_owner(priv, p);
4107 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4108 	return true;
4109 }
4110 
4111 /**
4112  *  stmmac_tso_allocator - allocate TSO payload descriptors
4113  *  @priv: driver private structure
4114  *  @des: buffer start address
4115  *  @total_len: total length to fill in descriptors
4116  *  @last_segment: condition for the last descriptor
4117  *  @queue: TX queue index
4118  *  Description:
4119  *  This function fills the descriptors, taking new ones as needed to
4120  *  cover the whole buffer length.
4121  */
4122 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4123 				 int total_len, bool last_segment, u32 queue)
4124 {
4125 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4126 	struct dma_desc *desc;
4127 	u32 buff_size;
4128 	int tmp_len;
4129 
4130 	tmp_len = total_len;
4131 
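	/* Each descriptor can carry at most TSO_MAX_BUFF_SIZE (16KiB - 1)
	 * bytes, e.g. a 40000 byte payload is spread over three descriptors
	 * of 16383, 16383 and 7234 bytes.
	 */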
4132 	while (tmp_len > 0) {
4133 		dma_addr_t curr_addr;
4134 
4135 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4136 						priv->dma_conf.dma_tx_size);
4137 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4138 
4139 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4140 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4141 		else
4142 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4143 
4144 		curr_addr = des + (total_len - tmp_len);
4145 		if (priv->dma_cap.addr64 <= 32)
4146 			desc->des0 = cpu_to_le32(curr_addr);
4147 		else
4148 			stmmac_set_desc_addr(priv, desc, curr_addr);
4149 
4150 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4151 			    TSO_MAX_BUFF_SIZE : tmp_len;
4152 
4153 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4154 				0, 1,
4155 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4156 				0, 0);
4157 
4158 		tmp_len -= TSO_MAX_BUFF_SIZE;
4159 	}
4160 }
4161 
4162 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4163 {
4164 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4165 	int desc_size;
4166 
4167 	if (likely(priv->extend_desc))
4168 		desc_size = sizeof(struct dma_extended_desc);
4169 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4170 		desc_size = sizeof(struct dma_edesc);
4171 	else
4172 		desc_size = sizeof(struct dma_desc);
4173 
4174 	/* The own bit must be the last thing written when preparing the
4175 	 * descriptor, and a barrier is needed to make sure everything is
4176 	 * coherent before handing it over to the DMA engine.
4177 	 */
4178 	wmb();
4179 
4180 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4181 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4182 }
4183 
4184 /**
4185  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4186  *  @skb : the socket buffer
4187  *  @dev : device pointer
4188  *  Description: this is the transmit function that is called on TSO frames
4189  *  (support available on GMAC4 and newer chips).
4190  *  Diagram below show the ring programming in case of TSO frames:
4191  *
4192  *  First Descriptor
4193  *   --------
4194  *   | DES0 |---> buffer1 = L2/L3/L4 header
4195  *   | DES1 |---> TCP Payload (can continue on next descr...)
4196  *   | DES2 |---> buffer 1 and 2 len
4197  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4198  *   --------
4199  *	|
4200  *     ...
4201  *	|
4202  *   --------
4203  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4204  *   | DES1 | --|
4205  *   | DES2 | --> buffer 1 and 2 len
4206  *   | DES3 |
4207  *   --------
4208  *
4209  * The MSS is fixed while TSO is enabled, so the TDES3 context field is only programmed when the MSS changes.
4210  */
4211 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4212 {
4213 	struct dma_desc *desc, *first, *mss_desc = NULL;
4214 	struct stmmac_priv *priv = netdev_priv(dev);
4215 	int nfrags = skb_shinfo(skb)->nr_frags;
4216 	u32 queue = skb_get_queue_mapping(skb);
4217 	unsigned int first_entry, tx_packets;
4218 	struct stmmac_txq_stats *txq_stats;
4219 	int tmp_pay_len = 0, first_tx;
4220 	struct stmmac_tx_queue *tx_q;
4221 	bool has_vlan, set_ic;
4222 	u8 proto_hdr_len, hdr;
4223 	unsigned long flags;
4224 	u32 pay_len, mss;
4225 	dma_addr_t des;
4226 	int i;
4227 
4228 	tx_q = &priv->dma_conf.tx_queue[queue];
4229 	txq_stats = &priv->xstats.txq_stats[queue];
4230 	first_tx = tx_q->cur_tx;
4231 
4232 	/* Compute header lengths */
4233 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4234 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4235 		hdr = sizeof(struct udphdr);
4236 	} else {
4237 		proto_hdr_len = skb_tcp_all_headers(skb);
4238 		hdr = tcp_hdrlen(skb);
4239 	}
4240 
4241 	/* Desc availability based on the threshold should be safe enough */
4242 	if (unlikely(stmmac_tx_avail(priv, queue) <
4243 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4244 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4245 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4246 								queue));
4247 			/* This is a hard error, log it. */
4248 			netdev_err(priv->dev,
4249 				   "%s: Tx Ring full when queue awake\n",
4250 				   __func__);
4251 		}
4252 		return NETDEV_TX_BUSY;
4253 	}
4254 
4255 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4256 
4257 	mss = skb_shinfo(skb)->gso_size;
4258 
4259 	/* set new MSS value if needed */
4260 	if (mss != tx_q->mss) {
4261 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4262 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4263 		else
4264 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4265 
4266 		stmmac_set_mss(priv, mss_desc, mss);
4267 		tx_q->mss = mss;
4268 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4269 						priv->dma_conf.dma_tx_size);
4270 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4271 	}
4272 
4273 	if (netif_msg_tx_queued(priv)) {
4274 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4275 			__func__, hdr, proto_hdr_len, pay_len, mss);
4276 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4277 			skb->data_len);
4278 	}
4279 
4280 	/* Check if VLAN can be inserted by HW */
4281 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4282 
4283 	first_entry = tx_q->cur_tx;
4284 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4285 
4286 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4287 		desc = &tx_q->dma_entx[first_entry].basic;
4288 	else
4289 		desc = &tx_q->dma_tx[first_entry];
4290 	first = desc;
4291 
4292 	if (has_vlan)
4293 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4294 
4295 	/* first descriptor: fill Headers on Buf1 */
4296 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4297 			     DMA_TO_DEVICE);
4298 	if (dma_mapping_error(priv->device, des))
4299 		goto dma_map_err;
4300 
4301 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4302 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4303 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4304 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4305 
4306 	if (priv->dma_cap.addr64 <= 32) {
4307 		first->des0 = cpu_to_le32(des);
4308 
4309 		/* Fill start of payload in buff2 of first descriptor */
4310 		if (pay_len)
4311 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4312 
4313 		/* If needed take extra descriptors to fill the remaining payload */
4314 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4315 	} else {
4316 		stmmac_set_desc_addr(priv, first, des);
4317 		tmp_pay_len = pay_len;
4318 		des += proto_hdr_len;
4319 		pay_len = 0;
4320 	}
4321 
4322 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4323 
4324 	/* Prepare fragments */
4325 	for (i = 0; i < nfrags; i++) {
4326 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4327 
4328 		des = skb_frag_dma_map(priv->device, frag, 0,
4329 				       skb_frag_size(frag),
4330 				       DMA_TO_DEVICE);
4331 		if (dma_mapping_error(priv->device, des))
4332 			goto dma_map_err;
4333 
4334 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4335 				     (i == nfrags - 1), queue);
4336 
4337 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4338 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4339 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4340 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4341 	}
4342 
4343 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4344 
4345 	/* Only the last descriptor gets to point to the skb. */
4346 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4347 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4348 
4349 	/* Manage tx mitigation */
4350 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4351 	tx_q->tx_count_frames += tx_packets;
4352 
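	/* Request a completion interrupt (IC bit) when the frame is HW
	 * timestamped or once enough packets have accumulated to cross the
	 * tx_coal_frames threshold; otherwise leave the cleanup to the TX
	 * coalescing timer.
	 */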
4353 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4354 		set_ic = true;
4355 	else if (!priv->tx_coal_frames[queue])
4356 		set_ic = false;
4357 	else if (tx_packets > priv->tx_coal_frames[queue])
4358 		set_ic = true;
4359 	else if ((tx_q->tx_count_frames %
4360 		  priv->tx_coal_frames[queue]) < tx_packets)
4361 		set_ic = true;
4362 	else
4363 		set_ic = false;
4364 
4365 	if (set_ic) {
4366 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4367 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4368 		else
4369 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4370 
4371 		tx_q->tx_count_frames = 0;
4372 		stmmac_set_tx_ic(priv, desc);
4373 	}
4374 
4375 	/* We've used all descriptors we need for this skb, however,
4376 	 * advance cur_tx so that it references a fresh descriptor.
4377 	 * ndo_start_xmit will fill this descriptor the next time it's
4378 	 * called and stmmac_tx_clean may clean up to this descriptor.
4379 	 */
4380 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4381 
4382 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4383 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4384 			  __func__);
4385 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4386 	}
4387 
4388 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4389 	txq_stats->tx_bytes += skb->len;
4390 	txq_stats->tx_tso_frames++;
4391 	txq_stats->tx_tso_nfrags += nfrags;
4392 	if (set_ic)
4393 		txq_stats->tx_set_ic_bit++;
4394 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4395 
4396 	if (priv->sarc_type)
4397 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4398 
4399 	skb_tx_timestamp(skb);
4400 
4401 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4402 		     priv->hwts_tx_en)) {
4403 		/* declare that device is doing timestamping */
4404 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4405 		stmmac_enable_tx_timestamp(priv, first);
4406 	}
4407 
4408 	/* Complete the first descriptor before granting the DMA */
4409 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4410 			proto_hdr_len,
4411 			pay_len,
4412 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4413 			hdr / 4, (skb->len - proto_hdr_len));
4414 
4415 	/* If context desc is used to change MSS */
4416 	if (mss_desc) {
4417 		/* Make sure that first descriptor has been completely
4418 		 * written, including its own bit. This is because MSS is
4419 		 * actually before first descriptor, so we need to make
4420 		 * sure that MSS's own bit is the last thing written.
4421 		 */
4422 		dma_wmb();
4423 		stmmac_set_tx_owner(priv, mss_desc);
4424 	}
4425 
4426 	if (netif_msg_pktdata(priv)) {
4427 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4428 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4429 			tx_q->cur_tx, first, nfrags);
4430 		pr_info(">>> frame to be transmitted: ");
4431 		print_pkt(skb->data, skb_headlen(skb));
4432 	}
4433 
4434 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4435 
4436 	stmmac_flush_tx_descriptors(priv, queue);
4437 	stmmac_tx_timer_arm(priv, queue);
4438 
4439 	return NETDEV_TX_OK;
4440 
4441 dma_map_err:
4442 	dev_err(priv->device, "Tx dma map failed\n");
4443 	dev_kfree_skb(skb);
4444 	priv->xstats.tx_dropped++;
4445 	return NETDEV_TX_OK;
4446 }
4447 
4448 /**
4449  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4450  * @skb: socket buffer to check
4451  *
4452  * Check if a packet has an ethertype that will trigger the IP header checks
4453  * and IP/TCP checksum engine of the stmmac core.
4454  *
4455  * Return: true if the ethertype can trigger the checksum engine, false
4456  * otherwise
4457  */
4458 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4459 {
4460 	int depth = 0;
4461 	__be16 proto;
4462 
4463 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4464 				    &depth);
4465 
4466 	return (depth <= ETH_HLEN) &&
4467 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4468 }
4469 
4470 /**
4471  *  stmmac_xmit - Tx entry point of the driver
4472  *  @skb : the socket buffer
4473  *  @dev : device pointer
4474  *  Description : this is the tx entry point of the driver.
4475  *  It programs the chain or the ring and supports oversized frames
4476  *  and SG feature.
4477  */
4478 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4479 {
4480 	unsigned int first_entry, tx_packets, enh_desc;
4481 	struct stmmac_priv *priv = netdev_priv(dev);
4482 	unsigned int nopaged_len = skb_headlen(skb);
4483 	int i, csum_insertion = 0, is_jumbo = 0;
4484 	u32 queue = skb_get_queue_mapping(skb);
4485 	int nfrags = skb_shinfo(skb)->nr_frags;
4486 	int gso = skb_shinfo(skb)->gso_type;
4487 	struct stmmac_txq_stats *txq_stats;
4488 	struct dma_edesc *tbs_desc = NULL;
4489 	struct dma_desc *desc, *first;
4490 	struct stmmac_tx_queue *tx_q;
4491 	bool has_vlan, set_ic;
4492 	int entry, first_tx;
4493 	unsigned long flags;
4494 	dma_addr_t des;
4495 
4496 	tx_q = &priv->dma_conf.tx_queue[queue];
4497 	txq_stats = &priv->xstats.txq_stats[queue];
4498 	first_tx = tx_q->cur_tx;
4499 
4500 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4501 		stmmac_disable_eee_mode(priv);
4502 
4503 	/* Manage oversized TCP frames for GMAC4 device */
4504 	if (skb_is_gso(skb) && priv->tso) {
4505 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4506 			return stmmac_tso_xmit(skb, dev);
4507 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4508 			return stmmac_tso_xmit(skb, dev);
4509 	}
4510 
4511 	if (priv->plat->est && priv->plat->est->enable &&
4512 	    priv->plat->est->max_sdu[queue] &&
4513 	    skb->len > priv->plat->est->max_sdu[queue]) {
4514 		priv->xstats.max_sdu_txq_drop[queue]++;
4515 		goto max_sdu_err;
4516 	}
4517 
4518 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4519 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4520 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4521 								queue));
4522 			/* This is a hard error, log it. */
4523 			netdev_err(priv->dev,
4524 				   "%s: Tx Ring full when queue awake\n",
4525 				   __func__);
4526 		}
4527 		return NETDEV_TX_BUSY;
4528 	}
4529 
4530 	/* Check if VLAN can be inserted by HW */
4531 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4532 
4533 	entry = tx_q->cur_tx;
4534 	first_entry = entry;
4535 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4536 
4537 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4538 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4539 	 * queues. In that case, checksum offloading for those queues that don't
4540 	 * support tx coe needs to fallback to software checksum calculation.
4541 	 *
4542 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4543 	 * also have to be checksummed in software.
4544 	 */
4545 	if (csum_insertion &&
4546 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4547 	     !stmmac_has_ip_ethertype(skb))) {
4548 		if (unlikely(skb_checksum_help(skb)))
4549 			goto dma_map_err;
4550 		csum_insertion = !csum_insertion;
4551 	}
4552 
4553 	if (likely(priv->extend_desc))
4554 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4555 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4556 		desc = &tx_q->dma_entx[entry].basic;
4557 	else
4558 		desc = tx_q->dma_tx + entry;
4559 
4560 	first = desc;
4561 
4562 	if (has_vlan)
4563 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4564 
4565 	enh_desc = priv->plat->enh_desc;
4566 	/* To program the descriptors according to the size of the frame */
4567 	if (enh_desc)
4568 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4569 
4570 	if (unlikely(is_jumbo)) {
4571 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4572 		if (unlikely(entry < 0) && (entry != -EINVAL))
4573 			goto dma_map_err;
4574 	}
4575 
4576 	for (i = 0; i < nfrags; i++) {
4577 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4578 		int len = skb_frag_size(frag);
4579 		bool last_segment = (i == (nfrags - 1));
4580 
4581 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4582 		WARN_ON(tx_q->tx_skbuff[entry]);
4583 
4584 		if (likely(priv->extend_desc))
4585 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4586 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4587 			desc = &tx_q->dma_entx[entry].basic;
4588 		else
4589 			desc = tx_q->dma_tx + entry;
4590 
4591 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4592 				       DMA_TO_DEVICE);
4593 		if (dma_mapping_error(priv->device, des))
4594 			goto dma_map_err; /* should reuse desc w/o issues */
4595 
4596 		tx_q->tx_skbuff_dma[entry].buf = des;
4597 
4598 		stmmac_set_desc_addr(priv, desc, des);
4599 
4600 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4601 		tx_q->tx_skbuff_dma[entry].len = len;
4602 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4603 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4604 
4605 		/* Prepare the descriptor and set the own bit too */
4606 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4607 				priv->mode, 1, last_segment, skb->len);
4608 	}
4609 
4610 	/* Only the last descriptor gets to point to the skb. */
4611 	tx_q->tx_skbuff[entry] = skb;
4612 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4613 
4614 	/* According to the coalesce parameter the IC bit for the latest
4615 	 * segment is reset and the timer is restarted to clean the tx status.
4616 	 * This approach also takes care of the fragments: with no SG, desc is
4617 	 * the first element.
4618 	 */
4619 	tx_packets = (entry + 1) - first_tx;
4620 	tx_q->tx_count_frames += tx_packets;
4621 
4622 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4623 		set_ic = true;
4624 	else if (!priv->tx_coal_frames[queue])
4625 		set_ic = false;
4626 	else if (tx_packets > priv->tx_coal_frames[queue])
4627 		set_ic = true;
4628 	else if ((tx_q->tx_count_frames %
4629 		  priv->tx_coal_frames[queue]) < tx_packets)
4630 		set_ic = true;
4631 	else
4632 		set_ic = false;
4633 
4634 	if (set_ic) {
4635 		if (likely(priv->extend_desc))
4636 			desc = &tx_q->dma_etx[entry].basic;
4637 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4638 			desc = &tx_q->dma_entx[entry].basic;
4639 		else
4640 			desc = &tx_q->dma_tx[entry];
4641 
4642 		tx_q->tx_count_frames = 0;
4643 		stmmac_set_tx_ic(priv, desc);
4644 	}
4645 
4646 	/* We've used all descriptors we need for this skb, however,
4647 	 * advance cur_tx so that it references a fresh descriptor.
4648 	 * ndo_start_xmit will fill this descriptor the next time it's
4649 	 * called and stmmac_tx_clean may clean up to this descriptor.
4650 	 */
4651 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4652 	tx_q->cur_tx = entry;
4653 
4654 	if (netif_msg_pktdata(priv)) {
4655 		netdev_dbg(priv->dev,
4656 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4657 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4658 			   entry, first, nfrags);
4659 
4660 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4661 		print_pkt(skb->data, skb->len);
4662 	}
4663 
4664 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4665 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4666 			  __func__);
4667 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4668 	}
4669 
4670 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4671 	txq_stats->tx_bytes += skb->len;
4672 	if (set_ic)
4673 		txq_stats->tx_set_ic_bit++;
4674 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4675 
4676 	if (priv->sarc_type)
4677 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4678 
4679 	skb_tx_timestamp(skb);
4680 
4681 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4682 	 * problems because all the descriptors are actually ready to be
4683 	 * passed to the DMA engine.
4684 	 */
4685 	if (likely(!is_jumbo)) {
4686 		bool last_segment = (nfrags == 0);
4687 
4688 		des = dma_map_single(priv->device, skb->data,
4689 				     nopaged_len, DMA_TO_DEVICE);
4690 		if (dma_mapping_error(priv->device, des))
4691 			goto dma_map_err;
4692 
4693 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4694 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4695 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4696 
4697 		stmmac_set_desc_addr(priv, first, des);
4698 
4699 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4700 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4701 
4702 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4703 			     priv->hwts_tx_en)) {
4704 			/* declare that device is doing timestamping */
4705 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4706 			stmmac_enable_tx_timestamp(priv, first);
4707 		}
4708 
4709 		/* Prepare the first descriptor setting the OWN bit too */
4710 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4711 				csum_insertion, priv->mode, 0, last_segment,
4712 				skb->len);
4713 	}
4714 
4715 	if (tx_q->tbs & STMMAC_TBS_EN) {
4716 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4717 
4718 		tbs_desc = &tx_q->dma_entx[first_entry];
4719 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4720 	}
4721 
4722 	stmmac_set_tx_owner(priv, first);
4723 
4724 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4725 
4726 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4727 
4728 	stmmac_flush_tx_descriptors(priv, queue);
4729 	stmmac_tx_timer_arm(priv, queue);
4730 
4731 	return NETDEV_TX_OK;
4732 
4733 dma_map_err:
4734 	netdev_err(priv->dev, "Tx DMA map failed\n");
4735 max_sdu_err:
4736 	dev_kfree_skb(skb);
4737 	priv->xstats.tx_dropped++;
4738 	return NETDEV_TX_OK;
4739 }
4740 
4741 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4742 {
4743 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4744 	__be16 vlan_proto = veth->h_vlan_proto;
4745 	u16 vlanid;
4746 
4747 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4748 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4749 	    (vlan_proto == htons(ETH_P_8021AD) &&
4750 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4751 		/* pop the vlan tag */
4752 		vlanid = ntohs(veth->h_vlan_TCI);
4753 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4754 		skb_pull(skb, VLAN_HLEN);
4755 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4756 	}
4757 }
4758 
4759 /**
4760  * stmmac_rx_refill - refill the used RX buffers
4761  * @priv: driver private structure
4762  * @queue: RX queue index
4763  * Description : this is to refill the RX buffers used by the zero-copy
4764  * reception process with fresh pages from the page pool.
4765  */
4766 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4767 {
4768 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4769 	int dirty = stmmac_rx_dirty(priv, queue);
4770 	unsigned int entry = rx_q->dirty_rx;
4771 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4772 
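	/* Hosts limited to 32-bit DMA addressing must get their RX pages from
	 * the DMA32 zone.
	 */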
4773 	if (priv->dma_cap.host_dma_width <= 32)
4774 		gfp |= GFP_DMA32;
4775 
4776 	while (dirty-- > 0) {
4777 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4778 		struct dma_desc *p;
4779 		bool use_rx_wd;
4780 
4781 		if (priv->extend_desc)
4782 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4783 		else
4784 			p = rx_q->dma_rx + entry;
4785 
4786 		if (!buf->page) {
4787 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4788 			if (!buf->page)
4789 				break;
4790 		}
4791 
4792 		if (priv->sph && !buf->sec_page) {
4793 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4794 			if (!buf->sec_page)
4795 				break;
4796 
4797 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4798 		}
4799 
4800 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4801 
4802 		stmmac_set_desc_addr(priv, p, buf->addr);
4803 		if (priv->sph)
4804 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4805 		else
4806 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4807 		stmmac_refill_desc3(priv, rx_q, p);
4808 
4809 		rx_q->rx_count_frames++;
4810 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4811 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4812 			rx_q->rx_count_frames = 0;
4813 
4814 		use_rx_wd = !priv->rx_coal_frames[queue];
4815 		use_rx_wd |= rx_q->rx_count_frames > 0;
4816 		if (!priv->use_riwt)
4817 			use_rx_wd = false;
4818 
4819 		dma_wmb();
4820 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4821 
4822 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4823 	}
4824 	rx_q->dirty_rx = entry;
4825 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4826 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4827 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4828 }
4829 
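/* Length of the data landing in buffer 1 of @p. With Split Header enabled
 * only the first descriptor carries data (the header) in buffer 1, the
 * payload continues in buffer 2.
 */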
4830 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4831 				       struct dma_desc *p,
4832 				       int status, unsigned int len)
4833 {
4834 	unsigned int plen = 0, hlen = 0;
4835 	int coe = priv->hw->rx_csum;
4836 
4837 	/* Not first descriptor, buffer is always zero */
4838 	if (priv->sph && len)
4839 		return 0;
4840 
4841 	/* First descriptor, get split header length */
4842 	stmmac_get_rx_header_len(priv, p, &hlen);
4843 	if (priv->sph && hlen) {
4844 		priv->xstats.rx_split_hdr_pkt_n++;
4845 		return hlen;
4846 	}
4847 
4848 	/* First descriptor, not last descriptor and not split header */
4849 	if (status & rx_not_ls)
4850 		return priv->dma_conf.dma_buf_sz;
4851 
4852 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4853 
4854 	/* First descriptor and last descriptor and not split header */
4855 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4856 }
4857 
4858 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4859 				       struct dma_desc *p,
4860 				       int status, unsigned int len)
4861 {
4862 	int coe = priv->hw->rx_csum;
4863 	unsigned int plen = 0;
4864 
4865 	/* Not split header, buffer is not available */
4866 	if (!priv->sph)
4867 		return 0;
4868 
4869 	/* Not last descriptor */
4870 	if (status & rx_not_ls)
4871 		return priv->dma_conf.dma_buf_sz;
4872 
4873 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4874 
4875 	/* Last descriptor */
4876 	return plen - len;
4877 }
4878 
4879 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4880 				struct xdp_frame *xdpf, bool dma_map)
4881 {
4882 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4883 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4884 	unsigned int entry = tx_q->cur_tx;
4885 	struct dma_desc *tx_desc;
4886 	dma_addr_t dma_addr;
4887 	bool set_ic;
4888 
4889 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4890 		return STMMAC_XDP_CONSUMED;
4891 
4892 	if (priv->plat->est && priv->plat->est->enable &&
4893 	    priv->plat->est->max_sdu[queue] &&
4894 	    xdpf->len > priv->plat->est->max_sdu[queue]) {
4895 		priv->xstats.max_sdu_txq_drop[queue]++;
4896 		return STMMAC_XDP_CONSUMED;
4897 	}
4898 
4899 	if (likely(priv->extend_desc))
4900 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4901 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4902 		tx_desc = &tx_q->dma_entx[entry].basic;
4903 	else
4904 		tx_desc = tx_q->dma_tx + entry;
4905 
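	/* @dma_map distinguishes the two XDP TX flavours: frames redirected to
	 * us via ndo_xdp_xmit() need a fresh DMA mapping, while XDP_TX frames
	 * come from our own page pool and are already mapped.
	 */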
4906 	if (dma_map) {
4907 		dma_addr = dma_map_single(priv->device, xdpf->data,
4908 					  xdpf->len, DMA_TO_DEVICE);
4909 		if (dma_mapping_error(priv->device, dma_addr))
4910 			return STMMAC_XDP_CONSUMED;
4911 
4912 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4913 	} else {
4914 		struct page *page = virt_to_page(xdpf->data);
4915 
4916 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4917 			   xdpf->headroom;
4918 		dma_sync_single_for_device(priv->device, dma_addr,
4919 					   xdpf->len, DMA_BIDIRECTIONAL);
4920 
4921 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4922 	}
4923 
4924 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4925 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4926 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4927 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4928 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4929 
4930 	tx_q->xdpf[entry] = xdpf;
4931 
4932 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4933 
4934 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4935 			       true, priv->mode, true, true,
4936 			       xdpf->len);
4937 
4938 	tx_q->tx_count_frames++;
4939 
4940 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4941 		set_ic = true;
4942 	else
4943 		set_ic = false;
4944 
4945 	if (set_ic) {
4946 		unsigned long flags;
4947 		tx_q->tx_count_frames = 0;
4948 		stmmac_set_tx_ic(priv, tx_desc);
4949 		flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4950 		txq_stats->tx_set_ic_bit++;
4951 		u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4952 	}
4953 
4954 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4955 
4956 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4957 	tx_q->cur_tx = entry;
4958 
4959 	return STMMAC_XDP_TX;
4960 }
4961 
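/* Map the executing CPU onto one of the available TX queues (effectively
 * cpu % tx_queues_to_use) so XDP transmissions are spread across queues.
 */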
4962 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4963 				   int cpu)
4964 {
4965 	int index = cpu;
4966 
4967 	if (unlikely(index < 0))
4968 		index = 0;
4969 
4970 	while (index >= priv->plat->tx_queues_to_use)
4971 		index -= priv->plat->tx_queues_to_use;
4972 
4973 	return index;
4974 }
4975 
4976 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4977 				struct xdp_buff *xdp)
4978 {
4979 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4980 	int cpu = smp_processor_id();
4981 	struct netdev_queue *nq;
4982 	int queue;
4983 	int res;
4984 
4985 	if (unlikely(!xdpf))
4986 		return STMMAC_XDP_CONSUMED;
4987 
4988 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4989 	nq = netdev_get_tx_queue(priv->dev, queue);
4990 
4991 	__netif_tx_lock(nq, cpu);
4992 	/* Avoids TX time-out as we are sharing with slow path */
4993 	txq_trans_cond_update(nq);
4994 
4995 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4996 	if (res == STMMAC_XDP_TX)
4997 		stmmac_flush_tx_descriptors(priv, queue);
4998 
4999 	__netif_tx_unlock(nq);
5000 
5001 	return res;
5002 }
5003 
5004 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5005 				 struct bpf_prog *prog,
5006 				 struct xdp_buff *xdp)
5007 {
5008 	u32 act;
5009 	int res;
5010 
5011 	act = bpf_prog_run_xdp(prog, xdp);
5012 	switch (act) {
5013 	case XDP_PASS:
5014 		res = STMMAC_XDP_PASS;
5015 		break;
5016 	case XDP_TX:
5017 		res = stmmac_xdp_xmit_back(priv, xdp);
5018 		break;
5019 	case XDP_REDIRECT:
5020 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5021 			res = STMMAC_XDP_CONSUMED;
5022 		else
5023 			res = STMMAC_XDP_REDIRECT;
5024 		break;
5025 	default:
5026 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5027 		fallthrough;
5028 	case XDP_ABORTED:
5029 		trace_xdp_exception(priv->dev, prog, act);
5030 		fallthrough;
5031 	case XDP_DROP:
5032 		res = STMMAC_XDP_CONSUMED;
5033 		break;
5034 	}
5035 
5036 	return res;
5037 }
5038 
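/* Run the attached XDP program, if any, on @xdp and return the verdict
 * encoded as an ERR_PTR (XDP_PASS, being 0, maps to NULL) so the RX path
 * can tell it apart from a real skb pointer.
 */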
5039 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5040 					   struct xdp_buff *xdp)
5041 {
5042 	struct bpf_prog *prog;
5043 	int res;
5044 
5045 	prog = READ_ONCE(priv->xdp_prog);
5046 	if (!prog) {
5047 		res = STMMAC_XDP_PASS;
5048 		goto out;
5049 	}
5050 
5051 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5052 out:
5053 	return ERR_PTR(-res);
5054 }
5055 
5056 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5057 				   int xdp_status)
5058 {
5059 	int cpu = smp_processor_id();
5060 	int queue;
5061 
5062 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5063 
5064 	if (xdp_status & STMMAC_XDP_TX)
5065 		stmmac_tx_timer_arm(priv, queue);
5066 
5067 	if (xdp_status & STMMAC_XDP_REDIRECT)
5068 		xdp_do_flush();
5069 }
5070 
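/* XSK buffers have to go back to the pool, so a frame that is passed up
 * the stack is first copied (data and XDP metadata) into a regular skb.
 */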
5071 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5072 					       struct xdp_buff *xdp)
5073 {
5074 	unsigned int metasize = xdp->data - xdp->data_meta;
5075 	unsigned int datasize = xdp->data_end - xdp->data;
5076 	struct sk_buff *skb;
5077 
5078 	skb = __napi_alloc_skb(&ch->rxtx_napi,
5079 			       xdp->data_end - xdp->data_hard_start,
5080 			       GFP_ATOMIC | __GFP_NOWARN);
5081 	if (unlikely(!skb))
5082 		return NULL;
5083 
5084 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5085 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5086 	if (metasize)
5087 		skb_metadata_set(skb, metasize);
5088 
5089 	return skb;
5090 }
5091 
5092 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5093 				   struct dma_desc *p, struct dma_desc *np,
5094 				   struct xdp_buff *xdp)
5095 {
5096 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5097 	struct stmmac_channel *ch = &priv->channel[queue];
5098 	unsigned int len = xdp->data_end - xdp->data;
5099 	enum pkt_hash_types hash_type;
5100 	int coe = priv->hw->rx_csum;
5101 	unsigned long flags;
5102 	struct sk_buff *skb;
5103 	u32 hash;
5104 
5105 	skb = stmmac_construct_skb_zc(ch, xdp);
5106 	if (!skb) {
5107 		priv->xstats.rx_dropped++;
5108 		return;
5109 	}
5110 
5111 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5112 	if (priv->hw->hw_vlan_en)
5113 		/* MAC level stripping. */
5114 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5115 	else
5116 		/* Driver level stripping. */
5117 		stmmac_rx_vlan(priv->dev, skb);
5118 	skb->protocol = eth_type_trans(skb, priv->dev);
5119 
5120 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5121 		skb_checksum_none_assert(skb);
5122 	else
5123 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5124 
5125 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5126 		skb_set_hash(skb, hash, hash_type);
5127 
5128 	skb_record_rx_queue(skb, queue);
5129 	napi_gro_receive(&ch->rxtx_napi, skb);
5130 
5131 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5132 	rxq_stats->rx_pkt_n++;
5133 	rxq_stats->rx_bytes += len;
5134 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5135 }
5136 
5137 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5138 {
5139 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5140 	unsigned int entry = rx_q->dirty_rx;
5141 	struct dma_desc *rx_desc = NULL;
5142 	bool ret = true;
5143 
5144 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5145 
5146 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5147 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5148 		dma_addr_t dma_addr;
5149 		bool use_rx_wd;
5150 
5151 		if (!buf->xdp) {
5152 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5153 			if (!buf->xdp) {
5154 				ret = false;
5155 				break;
5156 			}
5157 		}
5158 
5159 		if (priv->extend_desc)
5160 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5161 		else
5162 			rx_desc = rx_q->dma_rx + entry;
5163 
5164 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5165 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5166 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5167 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5168 
5169 		rx_q->rx_count_frames++;
5170 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5171 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5172 			rx_q->rx_count_frames = 0;
5173 
5174 		use_rx_wd = !priv->rx_coal_frames[queue];
5175 		use_rx_wd |= rx_q->rx_count_frames > 0;
5176 		if (!priv->use_riwt)
5177 			use_rx_wd = false;
5178 
5179 		dma_wmb();
5180 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5181 
5182 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5183 	}
5184 
5185 	if (rx_desc) {
5186 		rx_q->dirty_rx = entry;
5187 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5188 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5189 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5190 	}
5191 
5192 	return ret;
5193 }
5194 
5195 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5196 {
5197 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5198 	 * to represent incoming packet, whereas cb field in the same structure
5199 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5200 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5201 	 */
5202 	return (struct stmmac_xdp_buff *)xdp;
5203 }
5204 
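/* stmmac_rx_zc - receive frames on an XDP zero-copy (XSK) RX queue.
 * Each completed frame is run through the attached XDP program: XDP_PASS
 * frames are copied into an skb and handed to the stack, while XDP_TX and
 * XDP_REDIRECT verdicts are accumulated and flushed through
 * stmmac_finalize_xdp_rx().
 */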
5205 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5206 {
5207 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5208 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5209 	unsigned int count = 0, error = 0, len = 0;
5210 	int dirty = stmmac_rx_dirty(priv, queue);
5211 	unsigned int next_entry = rx_q->cur_rx;
5212 	u32 rx_errors = 0, rx_dropped = 0;
5213 	unsigned int desc_size;
5214 	struct bpf_prog *prog;
5215 	bool failure = false;
5216 	unsigned long flags;
5217 	int xdp_status = 0;
5218 	int status = 0;
5219 
5220 	if (netif_msg_rx_status(priv)) {
5221 		void *rx_head;
5222 
5223 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5224 		if (priv->extend_desc) {
5225 			rx_head = (void *)rx_q->dma_erx;
5226 			desc_size = sizeof(struct dma_extended_desc);
5227 		} else {
5228 			rx_head = (void *)rx_q->dma_rx;
5229 			desc_size = sizeof(struct dma_desc);
5230 		}
5231 
5232 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5233 				    rx_q->dma_rx_phy, desc_size);
5234 	}
5235 	while (count < limit) {
5236 		struct stmmac_rx_buffer *buf;
5237 		struct stmmac_xdp_buff *ctx;
5238 		unsigned int buf1_len = 0;
5239 		struct dma_desc *np, *p;
5240 		int entry;
5241 		int res;
5242 
5243 		if (!count && rx_q->state_saved) {
5244 			error = rx_q->state.error;
5245 			len = rx_q->state.len;
5246 		} else {
5247 			rx_q->state_saved = false;
5248 			error = 0;
5249 			len = 0;
5250 		}
5251 
5252 		if (count >= limit)
5253 			break;
5254 
5255 read_again:
5256 		buf1_len = 0;
5257 		entry = next_entry;
5258 		buf = &rx_q->buf_pool[entry];
5259 
5260 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5261 			failure = failure ||
5262 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5263 			dirty = 0;
5264 		}
5265 
5266 		if (priv->extend_desc)
5267 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5268 		else
5269 			p = rx_q->dma_rx + entry;
5270 
5271 		/* read the status of the incoming frame */
5272 		status = stmmac_rx_status(priv, &priv->xstats, p);
5273 		/* check if managed by the DMA otherwise go ahead */
5274 		if (unlikely(status & dma_own))
5275 			break;
5276 
5277 		/* Prefetch the next RX descriptor */
5278 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5279 						priv->dma_conf.dma_rx_size);
5280 		next_entry = rx_q->cur_rx;
5281 
5282 		if (priv->extend_desc)
5283 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5284 		else
5285 			np = rx_q->dma_rx + next_entry;
5286 
5287 		prefetch(np);
5288 
5289 		/* Ensure a valid XSK buffer before proceeding */
5290 		if (!buf->xdp)
5291 			break;
5292 
5293 		if (priv->extend_desc)
5294 			stmmac_rx_extended_status(priv, &priv->xstats,
5295 						  rx_q->dma_erx + entry);
5296 		if (unlikely(status == discard_frame)) {
5297 			xsk_buff_free(buf->xdp);
5298 			buf->xdp = NULL;
5299 			dirty++;
5300 			error = 1;
5301 			if (!priv->hwts_rx_en)
5302 				rx_errors++;
5303 		}
5304 
5305 		if (unlikely(error && (status & rx_not_ls)))
5306 			goto read_again;
5307 		if (unlikely(error)) {
5308 			count++;
5309 			continue;
5310 		}
5311 
5312 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5313 		if (likely(status & rx_not_ls)) {
5314 			xsk_buff_free(buf->xdp);
5315 			buf->xdp = NULL;
5316 			dirty++;
5317 			count++;
5318 			goto read_again;
5319 		}
5320 
5321 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5322 		ctx->priv = priv;
5323 		ctx->desc = p;
5324 		ctx->ndesc = np;
5325 
5326 		/* XDP ZC frames only support primary buffers for now */
5327 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5328 		len += buf1_len;
5329 
5330 		/* ACS is disabled; strip manually. */
5331 		if (likely(!(status & rx_not_ls))) {
5332 			buf1_len -= ETH_FCS_LEN;
5333 			len -= ETH_FCS_LEN;
5334 		}
5335 
5336 		/* RX buffer is good and fits into an XSK pool buffer */
5337 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5338 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5339 
5340 		prog = READ_ONCE(priv->xdp_prog);
5341 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5342 
5343 		switch (res) {
5344 		case STMMAC_XDP_PASS:
5345 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5346 			xsk_buff_free(buf->xdp);
5347 			break;
5348 		case STMMAC_XDP_CONSUMED:
5349 			xsk_buff_free(buf->xdp);
5350 			rx_dropped++;
5351 			break;
5352 		case STMMAC_XDP_TX:
5353 		case STMMAC_XDP_REDIRECT:
5354 			xdp_status |= res;
5355 			break;
5356 		}
5357 
5358 		buf->xdp = NULL;
5359 		dirty++;
5360 		count++;
5361 	}
5362 
5363 	if (status & rx_not_ls) {
5364 		rx_q->state_saved = true;
5365 		rx_q->state.error = error;
5366 		rx_q->state.len = len;
5367 	}
5368 
5369 	stmmac_finalize_xdp_rx(priv, xdp_status);
5370 
5371 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5372 	rxq_stats->rx_pkt_n += count;
5373 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5374 
5375 	priv->xstats.rx_dropped += rx_dropped;
5376 	priv->xstats.rx_errors += rx_errors;
5377 
5378 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5379 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5380 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5381 		else
5382 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5383 
5384 		return (int)count;
5385 	}
5386 
5387 	return failure ? limit : (int)count;
5388 }
5389 
5390 /**
5391  * stmmac_rx - manage the receive process
5392  * @priv: driver private structure
5393  * @limit: napi budget
5394  * @queue: RX queue index.
5395  * Description :  this is the function called by the napi poll method.
5396  * It gets all the frames inside the ring.
5397  */
5398 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5399 {
5400 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5401 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5402 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5403 	struct stmmac_channel *ch = &priv->channel[queue];
5404 	unsigned int count = 0, error = 0, len = 0;
5405 	int status = 0, coe = priv->hw->rx_csum;
5406 	unsigned int next_entry = rx_q->cur_rx;
5407 	enum dma_data_direction dma_dir;
5408 	unsigned int desc_size;
5409 	struct sk_buff *skb = NULL;
5410 	struct stmmac_xdp_buff ctx;
5411 	unsigned long flags;
5412 	int xdp_status = 0;
5413 	int buf_sz;
5414 
5415 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5416 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5417 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5418 
5419 	if (netif_msg_rx_status(priv)) {
5420 		void *rx_head;
5421 
5422 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5423 		if (priv->extend_desc) {
5424 			rx_head = (void *)rx_q->dma_erx;
5425 			desc_size = sizeof(struct dma_extended_desc);
5426 		} else {
5427 			rx_head = (void *)rx_q->dma_rx;
5428 			desc_size = sizeof(struct dma_desc);
5429 		}
5430 
5431 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5432 				    rx_q->dma_rx_phy, desc_size);
5433 	}
5434 	while (count < limit) {
5435 		unsigned int buf1_len = 0, buf2_len = 0;
5436 		enum pkt_hash_types hash_type;
5437 		struct stmmac_rx_buffer *buf;
5438 		struct dma_desc *np, *p;
5439 		int entry;
5440 		u32 hash;
5441 
5442 		if (!count && rx_q->state_saved) {
5443 			skb = rx_q->state.skb;
5444 			error = rx_q->state.error;
5445 			len = rx_q->state.len;
5446 		} else {
5447 			rx_q->state_saved = false;
5448 			skb = NULL;
5449 			error = 0;
5450 			len = 0;
5451 		}
5452 
5453 read_again:
5454 		if (count >= limit)
5455 			break;
5456 
5457 		buf1_len = 0;
5458 		buf2_len = 0;
5459 		entry = next_entry;
5460 		buf = &rx_q->buf_pool[entry];
5461 
5462 		if (priv->extend_desc)
5463 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5464 		else
5465 			p = rx_q->dma_rx + entry;
5466 
5467 		/* read the status of the incoming frame */
5468 		status = stmmac_rx_status(priv, &priv->xstats, p);
5469 		/* check if managed by the DMA otherwise go ahead */
5470 		if (unlikely(status & dma_own))
5471 			break;
5472 
5473 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5474 						priv->dma_conf.dma_rx_size);
5475 		next_entry = rx_q->cur_rx;
5476 
5477 		if (priv->extend_desc)
5478 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5479 		else
5480 			np = rx_q->dma_rx + next_entry;
5481 
5482 		prefetch(np);
5483 
5484 		if (priv->extend_desc)
5485 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5486 		if (unlikely(status == discard_frame)) {
5487 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5488 			buf->page = NULL;
5489 			error = 1;
5490 			if (!priv->hwts_rx_en)
5491 				rx_errors++;
5492 		}
5493 
5494 		if (unlikely(error && (status & rx_not_ls)))
5495 			goto read_again;
5496 		if (unlikely(error)) {
5497 			dev_kfree_skb(skb);
5498 			skb = NULL;
5499 			count++;
5500 			continue;
5501 		}
5502 
5503 		/* Buffer is good. Go on. */
5504 
5505 		prefetch(page_address(buf->page) + buf->page_offset);
5506 		if (buf->sec_page)
5507 			prefetch(page_address(buf->sec_page));
5508 
5509 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5510 		len += buf1_len;
5511 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5512 		len += buf2_len;
5513 
5514 		/* ACS is disabled; strip manually. */
5515 		if (likely(!(status & rx_not_ls))) {
5516 			if (buf2_len) {
5517 				buf2_len -= ETH_FCS_LEN;
5518 				len -= ETH_FCS_LEN;
5519 			} else if (buf1_len) {
5520 				buf1_len -= ETH_FCS_LEN;
5521 				len -= ETH_FCS_LEN;
5522 			}
5523 		}
5524 
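		/* No skb yet: this is the first buffer of the frame, so run
		 * the XDP program on it before building an skb. Any further
		 * buffers of the same frame are appended below as page
		 * fragments.
		 */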
5525 		if (!skb) {
5526 			unsigned int pre_len, sync_len;
5527 
5528 			dma_sync_single_for_cpu(priv->device, buf->addr,
5529 						buf1_len, dma_dir);
5530 
5531 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5532 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5533 					 buf->page_offset, buf1_len, true);
5534 
5535 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5536 				  buf->page_offset;
5537 
5538 			ctx.priv = priv;
5539 			ctx.desc = p;
5540 			ctx.ndesc = np;
5541 
5542 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5543 			/* Due to xdp_adjust_tail, the DMA sync for_device must
5544 			 * cover the max length the CPU touched.
5545 			 */
5546 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5547 				   buf->page_offset;
5548 			sync_len = max(sync_len, pre_len);
5549 
5550 			/* Handle verdicts other than XDP_PASS */
5551 			if (IS_ERR(skb)) {
5552 				unsigned int xdp_res = -PTR_ERR(skb);
5553 
5554 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5555 					page_pool_put_page(rx_q->page_pool,
5556 							   virt_to_head_page(ctx.xdp.data),
5557 							   sync_len, true);
5558 					buf->page = NULL;
5559 					rx_dropped++;
5560 
5561 					/* Clear skb: it held the ERR_PTR-encoded
5562 					 * XDP verdict, not a real buffer.
5563 					 */
5564 					skb = NULL;
5565 
5566 					if (unlikely((status & rx_not_ls)))
5567 						goto read_again;
5568 
5569 					count++;
5570 					continue;
5571 				} else if (xdp_res & (STMMAC_XDP_TX |
5572 						      STMMAC_XDP_REDIRECT)) {
5573 					xdp_status |= xdp_res;
5574 					buf->page = NULL;
5575 					skb = NULL;
5576 					count++;
5577 					continue;
5578 				}
5579 			}
5580 		}
5581 
5582 		if (!skb) {
5583 			/* XDP program may expand or reduce tail */
5584 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5585 
5586 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5587 			if (!skb) {
5588 				rx_dropped++;
5589 				count++;
5590 				goto drain_data;
5591 			}
5592 
5593 			/* XDP program may adjust header */
5594 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5595 			skb_put(skb, buf1_len);
5596 
5597 			/* Data payload copied into SKB, page ready for recycle */
5598 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5599 			buf->page = NULL;
5600 		} else if (buf1_len) {
5601 			dma_sync_single_for_cpu(priv->device, buf->addr,
5602 						buf1_len, dma_dir);
5603 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5604 					buf->page, buf->page_offset, buf1_len,
5605 					priv->dma_conf.dma_buf_sz);
5606 
5607 			/* Data payload appended into SKB */
5608 			skb_mark_for_recycle(skb);
5609 			buf->page = NULL;
5610 		}
5611 
5612 		if (buf2_len) {
5613 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5614 						buf2_len, dma_dir);
5615 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5616 					buf->sec_page, 0, buf2_len,
5617 					priv->dma_conf.dma_buf_sz);
5618 
5619 			/* Data payload appended into SKB */
5620 			skb_mark_for_recycle(skb);
5621 			buf->sec_page = NULL;
5622 		}
5623 
5624 drain_data:
5625 		if (likely(status & rx_not_ls))
5626 			goto read_again;
5627 		if (!skb)
5628 			continue;
5629 
5630 		/* Got entire packet into SKB. Finish it. */
5631 
5632 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5633 
5634 		if (priv->hw->hw_vlan_en)
5635 			/* MAC level stripping. */
5636 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5637 		else
5638 			/* Driver level stripping. */
5639 			stmmac_rx_vlan(priv->dev, skb);
5640 
5641 		skb->protocol = eth_type_trans(skb, priv->dev);
5642 
5643 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5644 			skb_checksum_none_assert(skb);
5645 		else
5646 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5647 
5648 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5649 			skb_set_hash(skb, hash, hash_type);
5650 
5651 		skb_record_rx_queue(skb, queue);
5652 		napi_gro_receive(&ch->rx_napi, skb);
5653 		skb = NULL;
5654 
5655 		rx_packets++;
5656 		rx_bytes += len;
5657 		count++;
5658 	}
5659 
5660 	if (status & rx_not_ls || skb) {
5661 		rx_q->state_saved = true;
5662 		rx_q->state.skb = skb;
5663 		rx_q->state.error = error;
5664 		rx_q->state.len = len;
5665 	}
5666 
5667 	stmmac_finalize_xdp_rx(priv, xdp_status);
5668 
5669 	stmmac_rx_refill(priv, queue);
5670 
5671 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5672 	rxq_stats->rx_packets += rx_packets;
5673 	rxq_stats->rx_bytes += rx_bytes;
5674 	rxq_stats->rx_pkt_n += count;
5675 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5676 
5677 	priv->xstats.rx_dropped += rx_dropped;
5678 	priv->xstats.rx_errors += rx_errors;
5679 
5680 	return count;
5681 }
5682 
5683 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5684 {
5685 	struct stmmac_channel *ch =
5686 		container_of(napi, struct stmmac_channel, rx_napi);
5687 	struct stmmac_priv *priv = ch->priv_data;
5688 	struct stmmac_rxq_stats *rxq_stats;
5689 	u32 chan = ch->index;
5690 	unsigned long flags;
5691 	int work_done;
5692 
5693 	rxq_stats = &priv->xstats.rxq_stats[chan];
5694 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5695 	rxq_stats->napi_poll++;
5696 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5697 
5698 	work_done = stmmac_rx(priv, budget, chan);
5699 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5700 		unsigned long flags;
5701 
5702 		spin_lock_irqsave(&ch->lock, flags);
5703 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5704 		spin_unlock_irqrestore(&ch->lock, flags);
5705 	}
5706 
5707 	return work_done;
5708 }
5709 
5710 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5711 {
5712 	struct stmmac_channel *ch =
5713 		container_of(napi, struct stmmac_channel, tx_napi);
5714 	struct stmmac_priv *priv = ch->priv_data;
5715 	struct stmmac_txq_stats *txq_stats;
5716 	bool pending_packets = false;
5717 	u32 chan = ch->index;
5718 	unsigned long flags;
5719 	int work_done;
5720 
5721 	txq_stats = &priv->xstats.txq_stats[chan];
5722 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5723 	txq_stats->napi_poll++;
5724 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5725 
5726 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5727 	work_done = min(work_done, budget);
5728 
5729 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5730 		unsigned long flags;
5731 
5732 		spin_lock_irqsave(&ch->lock, flags);
5733 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5734 		spin_unlock_irqrestore(&ch->lock, flags);
5735 	}
5736 
5737 	/* TX still has packets to handle; check whether we need to arm the tx timer */
5738 	if (pending_packets)
5739 		stmmac_tx_timer_arm(priv, chan);
5740 
5741 	return work_done;
5742 }
5743 
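/* Combined RX/TX NAPI poll used for channels running in XDP zero-copy (XSK)
 * mode, where a single NAPI instance services both directions of the channel.
 */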
5744 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5745 {
5746 	struct stmmac_channel *ch =
5747 		container_of(napi, struct stmmac_channel, rxtx_napi);
5748 	struct stmmac_priv *priv = ch->priv_data;
5749 	bool tx_pending_packets = false;
5750 	int rx_done, tx_done, rxtx_done;
5751 	struct stmmac_rxq_stats *rxq_stats;
5752 	struct stmmac_txq_stats *txq_stats;
5753 	u32 chan = ch->index;
5754 	unsigned long flags;
5755 
5756 	rxq_stats = &priv->xstats.rxq_stats[chan];
5757 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5758 	rxq_stats->napi_poll++;
5759 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5760 
5761 	txq_stats = &priv->xstats.txq_stats[chan];
5762 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5763 	txq_stats->napi_poll++;
5764 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5765 
5766 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5767 	tx_done = min(tx_done, budget);
5768 
5769 	rx_done = stmmac_rx_zc(priv, budget, chan);
5770 
5771 	rxtx_done = max(tx_done, rx_done);
5772 
5773 	/* If either TX or RX work is not complete, return budget
5774 	 * and keep polling
5775 	 */
5776 	if (rxtx_done >= budget)
5777 		return budget;
5778 
5779 	/* all work done, exit the polling mode */
5780 	if (napi_complete_done(napi, rxtx_done)) {
5781 		unsigned long flags;
5782 
5783 		spin_lock_irqsave(&ch->lock, flags);
5784 		/* Both RX and TX work are complete,
5785 		 * so enable both RX & TX IRQs.
5786 		 */
5787 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5788 		spin_unlock_irqrestore(&ch->lock, flags);
5789 	}
5790 
5791 	/* TX still has packets to handle; check whether we need to arm the tx timer */
5792 	if (tx_pending_packets)
5793 		stmmac_tx_timer_arm(priv, chan);
5794 
5795 	return min(rxtx_done, budget - 1);
5796 }
5797 
5798 /**
5799  *  stmmac_tx_timeout
5800  *  @dev : Pointer to net device structure
5801  *  @txqueue: the index of the hanging transmit queue
5802  *  Description: this function is called when a packet transmission fails to
5803  *   complete within a reasonable time. The driver will mark the error in the
5804  *   netdev structure and arrange for the device to be reset to a sane state
5805  *   in order to transmit a new packet.
5806  */
5807 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5808 {
5809 	struct stmmac_priv *priv = netdev_priv(dev);
5810 
5811 	stmmac_global_err(priv);
5812 }
5813 
5814 /**
5815  *  stmmac_set_rx_mode - entry point for multicast addressing
5816  *  @dev : pointer to the device structure
5817  *  Description:
5818  *  This function is a driver entry point which gets called by the kernel
5819  *  whenever multicast addresses must be enabled/disabled.
5820  *  Return value:
5821  *  void.
5822  */
5823 static void stmmac_set_rx_mode(struct net_device *dev)
5824 {
5825 	struct stmmac_priv *priv = netdev_priv(dev);
5826 
5827 	stmmac_set_filter(priv, priv->hw, dev);
5828 }
5829 
5830 /**
5831  *  stmmac_change_mtu - entry point to change MTU size for the device.
5832  *  @dev : device pointer.
5833  *  @new_mtu : the new MTU size for the device.
5834  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5835  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5836  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5837  *  Return value:
5838  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5839  *  file on failure.
5840  */
5841 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5842 {
5843 	struct stmmac_priv *priv = netdev_priv(dev);
5844 	int txfifosz = priv->plat->tx_fifo_size;
5845 	struct stmmac_dma_conf *dma_conf;
5846 	const int mtu = new_mtu;
5847 	int ret;
5848 
5849 	if (txfifosz == 0)
5850 		txfifosz = priv->dma_cap.tx_fifo_size;
5851 
5852 	txfifosz /= priv->plat->tx_queues_to_use;
5853 
5854 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5855 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5856 		return -EINVAL;
5857 	}
5858 
5859 	new_mtu = STMMAC_ALIGN(new_mtu);
5860 
5861 	/* If condition true, FIFO is too small or MTU too large */
5862 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5863 		return -EINVAL;
5864 
5865 	if (netif_running(dev)) {
5866 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5867 		/* Try to allocate the new DMA conf with the new mtu */
5868 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5869 		if (IS_ERR(dma_conf)) {
5870 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5871 				   mtu);
5872 			return PTR_ERR(dma_conf);
5873 		}
5874 
5875 		stmmac_release(dev);
5876 
5877 		ret = __stmmac_open(dev, dma_conf);
5878 		if (ret) {
5879 			free_dma_desc_resources(priv, dma_conf);
5880 			kfree(dma_conf);
5881 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5882 			return ret;
5883 		}
5884 
5885 		kfree(dma_conf);
5886 
5887 		stmmac_set_rx_mode(dev);
5888 	}
5889 
5890 	dev->mtu = mtu;
5891 	netdev_update_features(dev);
5892 
5893 	return 0;
5894 }
5895 
5896 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5897 					     netdev_features_t features)
5898 {
5899 	struct stmmac_priv *priv = netdev_priv(dev);
5900 
5901 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5902 		features &= ~NETIF_F_RXCSUM;
5903 
5904 	if (!priv->plat->tx_coe)
5905 		features &= ~NETIF_F_CSUM_MASK;
5906 
5907 	/* Some GMAC devices have a bugged Jumbo frame support that
5908 	 * needs to have the Tx COE disabled for oversized frames
5909 	 * (due to limited buffer sizes). In this case we disable
5910 	 * the TX csum insertion in the TDES and not use SF.
5911 	 */
5912 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5913 		features &= ~NETIF_F_CSUM_MASK;
5914 
5915 	/* Enable or disable TSO as requested via ethtool */
5916 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5917 		if (features & NETIF_F_TSO)
5918 			priv->tso = true;
5919 		else
5920 			priv->tso = false;
5921 	}
5922 
5923 	return features;
5924 }
5925 
5926 static int stmmac_set_features(struct net_device *netdev,
5927 			       netdev_features_t features)
5928 {
5929 	struct stmmac_priv *priv = netdev_priv(netdev);
5930 
5931 	/* Keep the COE type only if RX checksum offload is supported */
5932 	if (features & NETIF_F_RXCSUM)
5933 		priv->hw->rx_csum = priv->plat->rx_coe;
5934 	else
5935 		priv->hw->rx_csum = 0;
5936 	/* No check needed because rx_coe has been set before and it will be
5937 	 * fixed in case of issue.
5938 	 */
5939 	stmmac_rx_ipc(priv, priv->hw);
5940 
5941 	if (priv->sph_cap) {
5942 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5943 		u32 chan;
5944 
5945 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5946 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5947 	}
5948 
5949 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5950 		priv->hw->hw_vlan_en = true;
5951 	else
5952 		priv->hw->hw_vlan_en = false;
5953 
5954 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5955 
5956 	return 0;
5957 }
5958 
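/* Track the Frame Preemption (FPE) verify/response mPacket handshake: update
 * the local and link-partner FPE states from the IRQ status bits and schedule
 * the FPE workqueue task to progress the handshake.
 */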
5959 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5960 {
5961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5964 	bool *hs_enable = &fpe_cfg->hs_enable;
5965 
5966 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5967 		return;
5968 
5969 	/* If LP has sent verify mPacket, LP is FPE capable */
5970 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5971 		if (*lp_state < FPE_STATE_CAPABLE)
5972 			*lp_state = FPE_STATE_CAPABLE;
5973 
5974 		/* If the user has requested FPE enable, respond quickly */
5975 		if (*hs_enable)
5976 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5977 						fpe_cfg,
5978 						MPACKET_RESPONSE);
5979 	}
5980 
5981 	/* If Local has sent verify mPacket, Local is FPE capable */
5982 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5983 		if (*lo_state < FPE_STATE_CAPABLE)
5984 			*lo_state = FPE_STATE_CAPABLE;
5985 	}
5986 
5987 	/* If LP has sent response mPacket, LP is entering FPE ON */
5988 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5989 		*lp_state = FPE_STATE_ENTERING_ON;
5990 
5991 	/* If Local has sent response mPacket, Local is entering FPE ON */
5992 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5993 		*lo_state = FPE_STATE_ENTERING_ON;
5994 
5995 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5996 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5997 	    priv->fpe_wq) {
5998 		queue_work(priv->fpe_wq, &priv->fpe_task);
5999 	}
6000 }
6001 
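/* Handle interrupt sources shared by the MAC variants: EST and FPE events,
 * GMAC/XGMAC core and per-queue MTL status, PCS link changes and timestamp
 * interrupts. Called from both the main ISR and the dedicated MAC IRQ
 * handler.
 */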
6002 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6003 {
6004 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6005 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6006 	u32 queues_count;
6007 	u32 queue;
6008 	bool xmac;
6009 
6010 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6011 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6012 
6013 	if (priv->irq_wake)
6014 		pm_wakeup_event(priv->device, 0);
6015 
6016 	if (priv->dma_cap.estsel)
6017 		stmmac_est_irq_status(priv, priv, priv->dev,
6018 				      &priv->xstats, tx_cnt);
6019 
6020 	if (priv->dma_cap.fpesel) {
6021 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
6022 						   priv->dev);
6023 
6024 		stmmac_fpe_event_status(priv, status);
6025 	}
6026 
6027 	/* To handle GMAC own interrupts */
6028 	if ((priv->plat->has_gmac) || xmac) {
6029 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6030 
6031 		if (unlikely(status)) {
6032 			/* For LPI we need to save the tx status */
6033 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6034 				priv->tx_path_in_lpi_mode = true;
6035 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6036 				priv->tx_path_in_lpi_mode = false;
6037 		}
6038 
6039 		for (queue = 0; queue < queues_count; queue++) {
6040 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
6041 							    queue);
6042 		}
6043 
6044 		/* PCS link status */
6045 		if (priv->hw->pcs &&
6046 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6047 			if (priv->xstats.pcs_link)
6048 				netif_carrier_on(priv->dev);
6049 			else
6050 				netif_carrier_off(priv->dev);
6051 		}
6052 
6053 		stmmac_timestamp_interrupt(priv, priv);
6054 	}
6055 }
6056 
6057 /**
6058  *  stmmac_interrupt - main ISR
6059  *  @irq: interrupt number.
6060  *  @dev_id: to pass the net device pointer.
6061  *  Description: this is the main driver interrupt service routine.
6062  *  It can call:
6063  *  o DMA service routine (to manage incoming frame reception and transmission
6064  *    status)
6065  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6066  *    interrupts.
6067  */
6068 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6069 {
6070 	struct net_device *dev = (struct net_device *)dev_id;
6071 	struct stmmac_priv *priv = netdev_priv(dev);
6072 
6073 	/* Check if adapter is up */
6074 	if (test_bit(STMMAC_DOWN, &priv->state))
6075 		return IRQ_HANDLED;
6076 
6077 	/* Check if a fatal error happened */
6078 	if (stmmac_safety_feat_interrupt(priv))
6079 		return IRQ_HANDLED;
6080 
6081 	/* To handle Common interrupts */
6082 	stmmac_common_interrupt(priv);
6083 
6084 	/* To handle DMA interrupts */
6085 	stmmac_dma_interrupt(priv);
6086 
6087 	return IRQ_HANDLED;
6088 }
6089 
6090 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6091 {
6092 	struct net_device *dev = (struct net_device *)dev_id;
6093 	struct stmmac_priv *priv = netdev_priv(dev);
6094 
6095 	if (unlikely(!dev)) {
6096 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6097 		return IRQ_NONE;
6098 	}
6099 
6100 	/* Check if adapter is up */
6101 	if (test_bit(STMMAC_DOWN, &priv->state))
6102 		return IRQ_HANDLED;
6103 
6104 	/* To handle Common interrupts */
6105 	stmmac_common_interrupt(priv);
6106 
6107 	return IRQ_HANDLED;
6108 }
6109 
6110 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6111 {
6112 	struct net_device *dev = (struct net_device *)dev_id;
6113 	struct stmmac_priv *priv = netdev_priv(dev);
6114 
6115 	if (unlikely(!dev)) {
6116 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6117 		return IRQ_NONE;
6118 	}
6119 
6120 	/* Check if adapter is up */
6121 	if (test_bit(STMMAC_DOWN, &priv->state))
6122 		return IRQ_HANDLED;
6123 
6124 	/* Check if a fatal error happened */
6125 	stmmac_safety_feat_interrupt(priv);
6126 
6127 	return IRQ_HANDLED;
6128 }
6129 
6130 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6131 {
6132 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6133 	struct stmmac_dma_conf *dma_conf;
6134 	int chan = tx_q->queue_index;
6135 	struct stmmac_priv *priv;
6136 	int status;
6137 
6138 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6139 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6140 
6141 	if (unlikely(!data)) {
6142 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6143 		return IRQ_NONE;
6144 	}
6145 
6146 	/* Check if adapter is up */
6147 	if (test_bit(STMMAC_DOWN, &priv->state))
6148 		return IRQ_HANDLED;
6149 
6150 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6151 
6152 	if (unlikely(status & tx_hard_error_bump_tc)) {
6153 		/* Try to bump up the dma threshold on this failure */
6154 		stmmac_bump_dma_threshold(priv, chan);
6155 	} else if (unlikely(status == tx_hard_error)) {
6156 		stmmac_tx_err(priv, chan);
6157 	}
6158 
6159 	return IRQ_HANDLED;
6160 }
6161 
6162 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6163 {
6164 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6165 	struct stmmac_dma_conf *dma_conf;
6166 	int chan = rx_q->queue_index;
6167 	struct stmmac_priv *priv;
6168 
6169 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6170 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6171 
6172 	if (unlikely(!data)) {
6173 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6174 		return IRQ_NONE;
6175 	}
6176 
6177 	/* Check if adapter is up */
6178 	if (test_bit(STMMAC_DOWN, &priv->state))
6179 		return IRQ_HANDLED;
6180 
6181 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6182 
6183 	return IRQ_HANDLED;
6184 }
6185 
6186 /**
6187  *  stmmac_ioctl - Entry point for the Ioctl
6188  *  @dev: Device pointer.
6189  *  @rq: An IOCTL-specific structure that can contain a pointer to
6190  *  a proprietary structure used to pass information to the driver.
6191  *  @cmd: IOCTL command
6192  *  Description:
6193  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6194  */
6195 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6196 {
6197 	struct stmmac_priv *priv = netdev_priv (dev);
6198 	int ret = -EOPNOTSUPP;
6199 
6200 	if (!netif_running(dev))
6201 		return -EINVAL;
6202 
6203 	switch (cmd) {
6204 	case SIOCGMIIPHY:
6205 	case SIOCGMIIREG:
6206 	case SIOCSMIIREG:
6207 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6208 		break;
6209 	case SIOCSHWTSTAMP:
6210 		ret = stmmac_hwtstamp_set(dev, rq);
6211 		break;
6212 	case SIOCGHWTSTAMP:
6213 		ret = stmmac_hwtstamp_get(dev, rq);
6214 		break;
6215 	default:
6216 		break;
6217 	}
6218 
6219 	return ret;
6220 }
6221 
6222 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6223 				    void *cb_priv)
6224 {
6225 	struct stmmac_priv *priv = cb_priv;
6226 	int ret = -EOPNOTSUPP;
6227 
6228 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6229 		return ret;
6230 
6231 	__stmmac_disable_all_queues(priv);
6232 
6233 	switch (type) {
6234 	case TC_SETUP_CLSU32:
6235 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6236 		break;
6237 	case TC_SETUP_CLSFLOWER:
6238 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6239 		break;
6240 	default:
6241 		break;
6242 	}
6243 
6244 	stmmac_enable_all_queues(priv);
6245 	return ret;
6246 }
6247 
6248 static LIST_HEAD(stmmac_block_cb_list);
6249 
6250 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6251 			   void *type_data)
6252 {
6253 	struct stmmac_priv *priv = netdev_priv(ndev);
6254 
6255 	switch (type) {
6256 	case TC_QUERY_CAPS:
6257 		return stmmac_tc_query_caps(priv, priv, type_data);
6258 	case TC_SETUP_BLOCK:
6259 		return flow_block_cb_setup_simple(type_data,
6260 						  &stmmac_block_cb_list,
6261 						  stmmac_setup_tc_block_cb,
6262 						  priv, priv, true);
6263 	case TC_SETUP_QDISC_CBS:
6264 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6265 	case TC_SETUP_QDISC_TAPRIO:
6266 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6267 	case TC_SETUP_QDISC_ETF:
6268 		return stmmac_tc_setup_etf(priv, priv, type_data);
6269 	default:
6270 		return -EOPNOTSUPP;
6271 	}
6272 }
6273 
6274 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6275 			       struct net_device *sb_dev)
6276 {
6277 	int gso = skb_shinfo(skb)->gso_type;
6278 
6279 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6280 		/*
6281 		 * There is no way to determine the number of TSO/USO
6282 		 * capable Queues. Let's use always the Queue 0
6283 		 * capable Queues. Let's always use Queue 0
6284 		 * one will be capable.
6285 		 */
6286 		return 0;
6287 	}
6288 
6289 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6290 }
6291 
6292 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6293 {
6294 	struct stmmac_priv *priv = netdev_priv(ndev);
6295 	int ret = 0;
6296 
6297 	ret = pm_runtime_resume_and_get(priv->device);
6298 	if (ret < 0)
6299 		return ret;
6300 
6301 	ret = eth_mac_addr(ndev, addr);
6302 	if (ret)
6303 		goto set_mac_error;
6304 
6305 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6306 
6307 set_mac_error:
6308 	pm_runtime_put(priv->device);
6309 
6310 	return ret;
6311 }
6312 
6313 #ifdef CONFIG_DEBUG_FS
6314 static struct dentry *stmmac_fs_dir;
6315 
6316 static void sysfs_display_ring(void *head, int size, int extend_desc,
6317 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6318 {
6319 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6320 	struct dma_desc *p = (struct dma_desc *)head;
6321 	unsigned int desc_size;
6322 	dma_addr_t dma_addr;
6323 	int i;
6324 
6325 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6326 	for (i = 0; i < size; i++) {
6327 		dma_addr = dma_phy_addr + i * desc_size;
6328 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6329 				i, &dma_addr,
6330 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6331 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6332 		if (extend_desc)
6333 			p = &(++ep)->basic;
6334 		else
6335 			p++;
6336 	}
6337 }
6338 
6339 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6340 {
6341 	struct net_device *dev = seq->private;
6342 	struct stmmac_priv *priv = netdev_priv(dev);
6343 	u32 rx_count = priv->plat->rx_queues_to_use;
6344 	u32 tx_count = priv->plat->tx_queues_to_use;
6345 	u32 queue;
6346 
6347 	if ((dev->flags & IFF_UP) == 0)
6348 		return 0;
6349 
6350 	for (queue = 0; queue < rx_count; queue++) {
6351 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6352 
6353 		seq_printf(seq, "RX Queue %d:\n", queue);
6354 
6355 		if (priv->extend_desc) {
6356 			seq_printf(seq, "Extended descriptor ring:\n");
6357 			sysfs_display_ring((void *)rx_q->dma_erx,
6358 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6359 		} else {
6360 			seq_printf(seq, "Descriptor ring:\n");
6361 			sysfs_display_ring((void *)rx_q->dma_rx,
6362 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6363 		}
6364 	}
6365 
6366 	for (queue = 0; queue < tx_count; queue++) {
6367 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6368 
6369 		seq_printf(seq, "TX Queue %d:\n", queue);
6370 
6371 		if (priv->extend_desc) {
6372 			seq_printf(seq, "Extended descriptor ring:\n");
6373 			sysfs_display_ring((void *)tx_q->dma_etx,
6374 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6375 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6376 			seq_printf(seq, "Descriptor ring:\n");
6377 			sysfs_display_ring((void *)tx_q->dma_tx,
6378 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6379 		}
6380 	}
6381 
6382 	return 0;
6383 }
6384 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6385 
6386 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6387 {
6388 	static const char * const dwxgmac_timestamp_source[] = {
6389 		"None",
6390 		"Internal",
6391 		"External",
6392 		"Both",
6393 	};
6394 	static const char * const dwxgmac_safety_feature_desc[] = {
6395 		"No",
6396 		"All Safety Features with ECC and Parity",
6397 		"All Safety Features without ECC or Parity",
6398 		"All Safety Features with Parity Only",
6399 		"ECC Only",
6400 		"UNDEFINED",
6401 		"UNDEFINED",
6402 		"UNDEFINED",
6403 	};
6404 	struct net_device *dev = seq->private;
6405 	struct stmmac_priv *priv = netdev_priv(dev);
6406 
6407 	if (!priv->hw_cap_support) {
6408 		seq_printf(seq, "DMA HW features not supported\n");
6409 		return 0;
6410 	}
6411 
6412 	seq_printf(seq, "==============================\n");
6413 	seq_printf(seq, "\tDMA HW features\n");
6414 	seq_printf(seq, "==============================\n");
6415 
6416 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6417 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6418 	seq_printf(seq, "\t1000 Mbps: %s\n",
6419 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6420 	seq_printf(seq, "\tHalf duplex: %s\n",
6421 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6422 	if (priv->plat->has_xgmac) {
6423 		seq_printf(seq,
6424 			   "\tNumber of Additional MAC address registers: %d\n",
6425 			   priv->dma_cap.multi_addr);
6426 	} else {
6427 		seq_printf(seq, "\tHash Filter: %s\n",
6428 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6429 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6430 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6431 	}
6432 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6433 		   (priv->dma_cap.pcs) ? "Y" : "N");
6434 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6435 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6436 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6437 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6438 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6439 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6440 	seq_printf(seq, "\tRMON module: %s\n",
6441 		   (priv->dma_cap.rmon) ? "Y" : "N");
6442 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6443 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6444 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6445 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6446 	if (priv->plat->has_xgmac)
6447 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6448 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6449 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6450 		   (priv->dma_cap.eee) ? "Y" : "N");
6451 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6452 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6453 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6454 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6455 	    priv->plat->has_xgmac) {
6456 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6457 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6458 	} else {
6459 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6460 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6461 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6462 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6463 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6464 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6465 	}
6466 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6467 		   priv->dma_cap.number_rx_channel);
6468 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6469 		   priv->dma_cap.number_tx_channel);
6470 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6471 		   priv->dma_cap.number_rx_queues);
6472 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6473 		   priv->dma_cap.number_tx_queues);
6474 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6475 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6476 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6477 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6478 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6479 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6480 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6481 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6482 		   priv->dma_cap.pps_out_num);
6483 	seq_printf(seq, "\tSafety Features: %s\n",
6484 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6485 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6486 		   priv->dma_cap.frpsel ? "Y" : "N");
6487 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6488 		   priv->dma_cap.host_dma_width);
6489 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6490 		   priv->dma_cap.rssen ? "Y" : "N");
6491 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6492 		   priv->dma_cap.vlhash ? "Y" : "N");
6493 	seq_printf(seq, "\tSplit Header: %s\n",
6494 		   priv->dma_cap.sphen ? "Y" : "N");
6495 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6496 		   priv->dma_cap.vlins ? "Y" : "N");
6497 	seq_printf(seq, "\tDouble VLAN: %s\n",
6498 		   priv->dma_cap.dvlan ? "Y" : "N");
6499 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6500 		   priv->dma_cap.l3l4fnum);
6501 	seq_printf(seq, "\tARP Offloading: %s\n",
6502 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6503 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6504 		   priv->dma_cap.estsel ? "Y" : "N");
6505 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6506 		   priv->dma_cap.fpesel ? "Y" : "N");
6507 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6508 		   priv->dma_cap.tbssel ? "Y" : "N");
6509 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6510 		   priv->dma_cap.tbs_ch_num);
6511 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6512 		   priv->dma_cap.sgfsel ? "Y" : "N");
6513 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6514 		   BIT(priv->dma_cap.ttsfd) >> 1);
6515 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6516 		   priv->dma_cap.numtc);
6517 	seq_printf(seq, "\tDCB Feature: %s\n",
6518 		   priv->dma_cap.dcben ? "Y" : "N");
6519 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6520 		   priv->dma_cap.advthword ? "Y" : "N");
6521 	seq_printf(seq, "\tPTP Offload: %s\n",
6522 		   priv->dma_cap.ptoen ? "Y" : "N");
6523 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6524 		   priv->dma_cap.osten ? "Y" : "N");
6525 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6526 		   priv->dma_cap.pfcen ? "Y" : "N");
6527 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6528 		   BIT(priv->dma_cap.frpes) << 6);
6529 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6530 		   BIT(priv->dma_cap.frpbs) << 6);
6531 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6532 		   priv->dma_cap.frppipe_num);
6533 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6534 		   priv->dma_cap.nrvf_num ?
6535 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6536 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6537 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6538 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6539 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6540 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6541 		   priv->dma_cap.cbtisel ? "Y" : "N");
6542 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6543 		   priv->dma_cap.aux_snapshot_n);
6544 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6545 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6546 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6547 		   priv->dma_cap.edma ? "Y" : "N");
6548 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6549 		   priv->dma_cap.ediffc ? "Y" : "N");
6550 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6551 		   priv->dma_cap.vxn ? "Y" : "N");
6552 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6553 		   priv->dma_cap.dbgmem ? "Y" : "N");
6554 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6555 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6556 	return 0;
6557 }
6558 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6559 
6560 /* Use network device events to rename debugfs file entries.
6561  */
6562 static int stmmac_device_event(struct notifier_block *unused,
6563 			       unsigned long event, void *ptr)
6564 {
6565 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6566 	struct stmmac_priv *priv = netdev_priv(dev);
6567 
6568 	if (dev->netdev_ops != &stmmac_netdev_ops)
6569 		goto done;
6570 
6571 	switch (event) {
6572 	case NETDEV_CHANGENAME:
6573 		if (priv->dbgfs_dir)
6574 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6575 							 priv->dbgfs_dir,
6576 							 stmmac_fs_dir,
6577 							 dev->name);
6578 		break;
6579 	}
6580 done:
6581 	return NOTIFY_DONE;
6582 }
6583 
6584 static struct notifier_block stmmac_notifier = {
6585 	.notifier_call = stmmac_device_event,
6586 };
6587 
6588 static void stmmac_init_fs(struct net_device *dev)
6589 {
6590 	struct stmmac_priv *priv = netdev_priv(dev);
6591 
6592 	rtnl_lock();
6593 
6594 	/* Create per netdev entries */
6595 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6596 
6597 	/* Entry to report DMA RX/TX rings */
6598 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6599 			    &stmmac_rings_status_fops);
6600 
6601 	/* Entry to report the DMA HW features */
6602 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6603 			    &stmmac_dma_cap_fops);
6604 
6605 	rtnl_unlock();
6606 }
6607 
6608 static void stmmac_exit_fs(struct net_device *dev)
6609 {
6610 	struct stmmac_priv *priv = netdev_priv(dev);
6611 
6612 	debugfs_remove_recursive(priv->dbgfs_dir);
6613 }
6614 #endif /* CONFIG_DEBUG_FS */
6615 
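/* Compute the CRC-32 (reflected polynomial 0xedb88320) over the 12 VID bits
 * of a little-endian VLAN ID; the caller reduces it to a 4-bit index into the
 * 16-bit VLAN hash filter.
 */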
6616 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6617 {
6618 	unsigned char *data = (unsigned char *)&vid_le;
6619 	unsigned char data_byte = 0;
6620 	u32 crc = ~0x0;
6621 	u32 temp = 0;
6622 	int i, bits;
6623 
6624 	bits = get_bitmask_order(VLAN_VID_MASK);
6625 	for (i = 0; i < bits; i++) {
6626 		if ((i % 8) == 0)
6627 			data_byte = data[i / 8];
6628 
6629 		temp = ((crc & 1) ^ data_byte) & 1;
6630 		crc >>= 1;
6631 		data_byte >>= 1;
6632 
6633 		if (temp)
6634 			crc ^= 0xedb88320;
6635 	}
6636 
6637 	return crc;
6638 }
6639 
6640 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6641 {
6642 	u32 crc, hash = 0;
6643 	__le16 pmatch = 0;
6644 	int count = 0;
6645 	u16 vid = 0;
6646 
6647 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6648 		__le16 vid_le = cpu_to_le16(vid);
6649 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6650 		hash |= (1 << crc);
6651 		count++;
6652 	}
6653 
6654 	if (!priv->dma_cap.vlhash) {
6655 		if (count > 2) /* VID = 0 always passes filter */
6656 			return -EOPNOTSUPP;
6657 
6658 		pmatch = cpu_to_le16(vid);
6659 		hash = 0;
6660 	}
6661 
6662 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6663 }
6664 
6665 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6666 {
6667 	struct stmmac_priv *priv = netdev_priv(ndev);
6668 	bool is_double = false;
6669 	int ret;
6670 
6671 	ret = pm_runtime_resume_and_get(priv->device);
6672 	if (ret < 0)
6673 		return ret;
6674 
6675 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6676 		is_double = true;
6677 
6678 	set_bit(vid, priv->active_vlans);
6679 	ret = stmmac_vlan_update(priv, is_double);
6680 	if (ret) {
6681 		clear_bit(vid, priv->active_vlans);
6682 		goto err_pm_put;
6683 	}
6684 
6685 	if (priv->hw->num_vlan) {
6686 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6687 		if (ret)
6688 			goto err_pm_put;
6689 	}
6690 err_pm_put:
6691 	pm_runtime_put(priv->device);
6692 
6693 	return ret;
6694 }
6695 
6696 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6697 {
6698 	struct stmmac_priv *priv = netdev_priv(ndev);
6699 	bool is_double = false;
6700 	int ret;
6701 
6702 	ret = pm_runtime_resume_and_get(priv->device);
6703 	if (ret < 0)
6704 		return ret;
6705 
6706 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6707 		is_double = true;
6708 
6709 	clear_bit(vid, priv->active_vlans);
6710 
6711 	if (priv->hw->num_vlan) {
6712 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6713 		if (ret)
6714 			goto del_vlan_error;
6715 	}
6716 
6717 	ret = stmmac_vlan_update(priv, is_double);
6718 
6719 del_vlan_error:
6720 	pm_runtime_put(priv->device);
6721 
6722 	return ret;
6723 }
6724 
6725 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6726 {
6727 	struct stmmac_priv *priv = netdev_priv(dev);
6728 
6729 	switch (bpf->command) {
6730 	case XDP_SETUP_PROG:
6731 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6732 	case XDP_SETUP_XSK_POOL:
6733 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6734 					     bpf->xsk.queue_id);
6735 	default:
6736 		return -EOPNOTSUPP;
6737 	}
6738 }
6739 
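/* .ndo_xdp_xmit handler: transmit a batch of XDP frames on the XDP TX queue
 * selected for the current CPU, holding the netdev TX queue lock so the XDP
 * path can coexist with the regular transmit path. Returns the number of
 * frames actually queued.
 */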
6740 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6741 			   struct xdp_frame **frames, u32 flags)
6742 {
6743 	struct stmmac_priv *priv = netdev_priv(dev);
6744 	int cpu = smp_processor_id();
6745 	struct netdev_queue *nq;
6746 	int i, nxmit = 0;
6747 	int queue;
6748 
6749 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6750 		return -ENETDOWN;
6751 
6752 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6753 		return -EINVAL;
6754 
6755 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6756 	nq = netdev_get_tx_queue(priv->dev, queue);
6757 
6758 	__netif_tx_lock(nq, cpu);
6759 	/* Avoids TX time-out as we are sharing with slow path */
6760 	txq_trans_cond_update(nq);
6761 
6762 	for (i = 0; i < num_frames; i++) {
6763 		int res;
6764 
6765 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6766 		if (res == STMMAC_XDP_CONSUMED)
6767 			break;
6768 
6769 		nxmit++;
6770 	}
6771 
6772 	if (flags & XDP_XMIT_FLUSH) {
6773 		stmmac_flush_tx_descriptors(priv, queue);
6774 		stmmac_tx_timer_arm(priv, queue);
6775 	}
6776 
6777 	__netif_tx_unlock(nq);
6778 
6779 	return nxmit;
6780 }
6781 
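/* The per-queue teardown/bring-up helpers below are used when an XSK buffer
 * pool is attached to or detached from a single RX/TX queue at runtime.
 */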
6782 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6783 {
6784 	struct stmmac_channel *ch = &priv->channel[queue];
6785 	unsigned long flags;
6786 
6787 	spin_lock_irqsave(&ch->lock, flags);
6788 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6789 	spin_unlock_irqrestore(&ch->lock, flags);
6790 
6791 	stmmac_stop_rx_dma(priv, queue);
6792 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6793 }
6794 
6795 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6796 {
6797 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6798 	struct stmmac_channel *ch = &priv->channel[queue];
6799 	unsigned long flags;
6800 	u32 buf_size;
6801 	int ret;
6802 
6803 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6804 	if (ret) {
6805 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6806 		return;
6807 	}
6808 
6809 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6810 	if (ret) {
6811 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6812 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6813 		return;
6814 	}
6815 
6816 	stmmac_reset_rx_queue(priv, queue);
6817 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6818 
6819 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6820 			    rx_q->dma_rx_phy, rx_q->queue_index);
6821 
6822 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6823 			     sizeof(struct dma_desc));
6824 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6825 			       rx_q->rx_tail_addr, rx_q->queue_index);
6826 
6827 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6828 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6829 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6830 				      buf_size,
6831 				      rx_q->queue_index);
6832 	} else {
6833 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6834 				      priv->dma_conf.dma_buf_sz,
6835 				      rx_q->queue_index);
6836 	}
6837 
6838 	stmmac_start_rx_dma(priv, queue);
6839 
6840 	spin_lock_irqsave(&ch->lock, flags);
6841 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6842 	spin_unlock_irqrestore(&ch->lock, flags);
6843 }
6844 
6845 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6846 {
6847 	struct stmmac_channel *ch = &priv->channel[queue];
6848 	unsigned long flags;
6849 
6850 	spin_lock_irqsave(&ch->lock, flags);
6851 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6852 	spin_unlock_irqrestore(&ch->lock, flags);
6853 
6854 	stmmac_stop_tx_dma(priv, queue);
6855 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6856 }
6857 
6858 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6859 {
6860 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6861 	struct stmmac_channel *ch = &priv->channel[queue];
6862 	unsigned long flags;
6863 	int ret;
6864 
6865 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6866 	if (ret) {
6867 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6868 		return;
6869 	}
6870 
6871 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6872 	if (ret) {
6873 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6874 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6875 		return;
6876 	}
6877 
6878 	stmmac_reset_tx_queue(priv, queue);
6879 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6880 
6881 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6882 			    tx_q->dma_tx_phy, tx_q->queue_index);
6883 
6884 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6885 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6886 
6887 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6888 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6889 			       tx_q->tx_tail_addr, tx_q->queue_index);
6890 
6891 	stmmac_start_tx_dma(priv, queue);
6892 
6893 	spin_lock_irqsave(&ch->lock, flags);
6894 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6895 	spin_unlock_irqrestore(&ch->lock, flags);
6896 }
6897 
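/* Tear down the data path before an XDP reconfiguration: stop the TX queues,
 * NAPI and DMA, release the IRQ lines and free the descriptor resources.
 */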
6898 void stmmac_xdp_release(struct net_device *dev)
6899 {
6900 	struct stmmac_priv *priv = netdev_priv(dev);
6901 	u32 chan;
6902 
6903 	/* Ensure tx function is not running */
6904 	netif_tx_disable(dev);
6905 
6906 	/* Disable NAPI process */
6907 	stmmac_disable_all_queues(priv);
6908 
6909 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6910 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6911 
6912 	/* Free the IRQ lines */
6913 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6914 
6915 	/* Stop TX/RX DMA channels */
6916 	stmmac_stop_all_dma(priv);
6917 
6918 	/* Release and free the Rx/Tx resources */
6919 	free_dma_desc_resources(priv, &priv->dma_conf);
6920 
6921 	/* Disable the MAC Rx/Tx */
6922 	stmmac_mac_set(priv, priv->ioaddr, false);
6923 
6924 	/* set trans_start so we don't get spurious
6925 	 * watchdogs during reset
6926 	 */
6927 	netif_trans_update(dev);
6928 	netif_carrier_off(dev);
6929 }
6930 
6931 int stmmac_xdp_open(struct net_device *dev)
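/* Bring the data path back up after an XDP reconfiguration: reallocate and
 * reinitialise the DMA rings, reprogram the RX/TX DMA channels (including any
 * XSK frame sizes), re-request the IRQs and restart the queues.
 */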
6932 {
6933 	struct stmmac_priv *priv = netdev_priv(dev);
6934 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6935 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6936 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6937 	struct stmmac_rx_queue *rx_q;
6938 	struct stmmac_tx_queue *tx_q;
6939 	u32 buf_size;
6940 	bool sph_en;
6941 	u32 chan;
6942 	int ret;
6943 
6944 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6945 	if (ret < 0) {
6946 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6947 			   __func__);
6948 		goto dma_desc_error;
6949 	}
6950 
6951 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6952 	if (ret < 0) {
6953 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6954 			   __func__);
6955 		goto init_error;
6956 	}
6957 
6958 	stmmac_reset_queues_param(priv);
6959 
6960 	/* DMA CSR Channel configuration */
6961 	for (chan = 0; chan < dma_csr_ch; chan++) {
6962 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6963 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6964 	}
6965 
6966 	/* Adjust Split header */
6967 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6968 
6969 	/* DMA RX Channel Configuration */
6970 	for (chan = 0; chan < rx_cnt; chan++) {
6971 		rx_q = &priv->dma_conf.rx_queue[chan];
6972 
6973 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6974 				    rx_q->dma_rx_phy, chan);
6975 
6976 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6977 				     (rx_q->buf_alloc_num *
6978 				      sizeof(struct dma_desc));
6979 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6980 				       rx_q->rx_tail_addr, chan);
6981 
6982 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6983 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6984 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6985 					      buf_size,
6986 					      rx_q->queue_index);
6987 		} else {
6988 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6989 					      priv->dma_conf.dma_buf_sz,
6990 					      rx_q->queue_index);
6991 		}
6992 
6993 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6994 	}
6995 
6996 	/* DMA TX Channel Configuration */
6997 	for (chan = 0; chan < tx_cnt; chan++) {
6998 		tx_q = &priv->dma_conf.tx_queue[chan];
6999 
7000 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7001 				    tx_q->dma_tx_phy, chan);
7002 
7003 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7004 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7005 				       tx_q->tx_tail_addr, chan);
7006 
7007 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7008 		tx_q->txtimer.function = stmmac_tx_timer;
7009 	}
7010 
7011 	/* Enable the MAC Rx/Tx */
7012 	stmmac_mac_set(priv, priv->ioaddr, true);
7013 
7014 	/* Start Rx & Tx DMA Channels */
7015 	stmmac_start_all_dma(priv);
7016 
7017 	ret = stmmac_request_irq(dev);
7018 	if (ret)
7019 		goto irq_error;
7020 
7021 	/* Enable NAPI process */
7022 	stmmac_enable_all_queues(priv);
7023 	netif_carrier_on(dev);
7024 	netif_tx_start_all_queues(dev);
7025 	stmmac_enable_all_dma_irq(priv);
7026 
7027 	return 0;
7028 
7029 irq_error:
7030 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7031 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7032 
7033 	stmmac_hw_teardown(dev);
7034 init_error:
7035 	free_dma_desc_resources(priv, &priv->dma_conf);
7036 dma_desc_error:
7037 	return ret;
7038 }
7039 
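/**
 * stmmac_xsk_wakeup - .ndo_xsk_wakeup callback for AF_XDP zero-copy
 * @dev: network device
 * @queue: queue index to wake up
 * @flags: XDP_WAKEUP_RX and/or XDP_WAKEUP_TX (not used here)
 * Description: checks that the interface is up, XDP is enabled and the
 * queue has an XSK pool attached, then schedules the rxtx NAPI instance of
 * the corresponding channel so that pending descriptors are processed.
 * Return: 0 on success, -ENETDOWN or -EINVAL otherwise.
 */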
7040 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7041 {
7042 	struct stmmac_priv *priv = netdev_priv(dev);
7043 	struct stmmac_rx_queue *rx_q;
7044 	struct stmmac_tx_queue *tx_q;
7045 	struct stmmac_channel *ch;
7046 
7047 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7048 	    !netif_carrier_ok(priv->dev))
7049 		return -ENETDOWN;
7050 
7051 	if (!stmmac_xdp_is_enabled(priv))
7052 		return -EINVAL;
7053 
7054 	if (queue >= priv->plat->rx_queues_to_use ||
7055 	    queue >= priv->plat->tx_queues_to_use)
7056 		return -EINVAL;
7057 
7058 	rx_q = &priv->dma_conf.rx_queue[queue];
7059 	tx_q = &priv->dma_conf.tx_queue[queue];
7060 	ch = &priv->channel[queue];
7061 
7062 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7063 		return -EINVAL;
7064 
7065 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7066 		/* EQoS does not have a per-DMA channel SW interrupt,
7067 		 * so we schedule the rxtx NAPI straight away.
7068 		 */
7069 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7070 			__napi_schedule(&ch->rxtx_napi);
7071 	}
7072 
7073 	return 0;
7074 }
7075 
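/**
 * stmmac_get_stats64 - .ndo_get_stats64 callback
 * @dev: network device
 * @stats: structure to be filled with the aggregated counters
 * Description: sums the per-queue packet and byte counters under their
 * u64_stats seqcount (retrying if an update raced with the read) and copies
 * the error counters from the extended statistics.
 */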
7076 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7077 {
7078 	struct stmmac_priv *priv = netdev_priv(dev);
7079 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7080 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7081 	unsigned int start;
7082 	int q;
7083 
7084 	for (q = 0; q < tx_cnt; q++) {
7085 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7086 		u64 tx_packets;
7087 		u64 tx_bytes;
7088 
7089 		do {
7090 			start = u64_stats_fetch_begin(&txq_stats->syncp);
7091 			tx_packets = txq_stats->tx_packets;
7092 			tx_bytes   = txq_stats->tx_bytes;
7093 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
7094 
7095 		stats->tx_packets += tx_packets;
7096 		stats->tx_bytes += tx_bytes;
7097 	}
7098 
7099 	for (q = 0; q < rx_cnt; q++) {
7100 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7101 		u64 rx_packets;
7102 		u64 rx_bytes;
7103 
7104 		do {
7105 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
7106 			rx_packets = rxq_stats->rx_packets;
7107 			rx_bytes   = rxq_stats->rx_bytes;
7108 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
7109 
7110 		stats->rx_packets += rx_packets;
7111 		stats->rx_bytes += rx_bytes;
7112 	}
7113 
7114 	stats->rx_dropped = priv->xstats.rx_dropped;
7115 	stats->rx_errors = priv->xstats.rx_errors;
7116 	stats->tx_dropped = priv->xstats.tx_dropped;
7117 	stats->tx_errors = priv->xstats.tx_errors;
7118 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7119 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7120 	stats->rx_length_errors = priv->xstats.rx_length;
7121 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7122 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7123 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7124 }
7125 
7126 static const struct net_device_ops stmmac_netdev_ops = {
7127 	.ndo_open = stmmac_open,
7128 	.ndo_start_xmit = stmmac_xmit,
7129 	.ndo_stop = stmmac_release,
7130 	.ndo_change_mtu = stmmac_change_mtu,
7131 	.ndo_fix_features = stmmac_fix_features,
7132 	.ndo_set_features = stmmac_set_features,
7133 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7134 	.ndo_tx_timeout = stmmac_tx_timeout,
7135 	.ndo_eth_ioctl = stmmac_ioctl,
7136 	.ndo_get_stats64 = stmmac_get_stats64,
7137 	.ndo_setup_tc = stmmac_setup_tc,
7138 	.ndo_select_queue = stmmac_select_queue,
7139 	.ndo_set_mac_address = stmmac_set_mac_address,
7140 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7141 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7142 	.ndo_bpf = stmmac_bpf,
7143 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7144 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7145 };
7146 
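/**
 * stmmac_reset_subtask - restart the interface after a requested reset
 * @priv: driver private structure
 * Description: runs from the service task. If STMMAC_RESET_REQUESTED is set
 * and the interface is not already down, close and re-open the device under
 * the rtnl lock to recover from a fatal error.
 */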
7147 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7148 {
7149 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7150 		return;
7151 	if (test_bit(STMMAC_DOWN, &priv->state))
7152 		return;
7153 
7154 	netdev_err(priv->dev, "Reset adapter.\n");
7155 
7156 	rtnl_lock();
7157 	netif_trans_update(priv->dev);
7158 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7159 		usleep_range(1000, 2000);
7160 
7161 	set_bit(STMMAC_DOWN, &priv->state);
7162 	dev_close(priv->dev);
7163 	dev_open(priv->dev, NULL);
7164 	clear_bit(STMMAC_DOWN, &priv->state);
7165 	clear_bit(STMMAC_RESETING, &priv->state);
7166 	rtnl_unlock();
7167 }
7168 
7169 static void stmmac_service_task(struct work_struct *work)
7170 {
7171 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7172 			service_task);
7173 
7174 	stmmac_reset_subtask(priv);
7175 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7176 }
7177 
7178 /**
7179  *  stmmac_hw_init - Init the MAC device
7180  *  @priv: driver private structure
7181  *  Description: this function is to configure the MAC device according to
7182  *  some platform parameters or the HW capability register. It prepares the
7183  *  driver to use either ring or chain modes and to setup either enhanced or
7184  *  normal descriptors.
7185  */
7186 static int stmmac_hw_init(struct stmmac_priv *priv)
7187 {
7188 	int ret;
7189 
7190 	/* dwmac-sun8i only works in chain mode */
7191 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7192 		chain_mode = 1;
7193 	priv->chain_mode = chain_mode;
7194 
7195 	/* Initialize HW Interface */
7196 	ret = stmmac_hwif_init(priv);
7197 	if (ret)
7198 		return ret;
7199 
7200 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7201 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7202 	if (priv->hw_cap_support) {
7203 		dev_info(priv->device, "DMA HW capability register supported\n");
7204 
7205 		/* Some GMAC/DMA configuration fields passed in through the
7206 		 * platform data (e.g. enh_desc, tx_coe) can be overridden
7207 		 * with the values from the HW capability register, if
7208 		 * supported.
7209 		 */
7210 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7211 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7212 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7213 		priv->hw->pmt = priv->plat->pmt;
7214 		if (priv->dma_cap.hash_tb_sz) {
7215 			priv->hw->multicast_filter_bins =
7216 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7217 			priv->hw->mcast_bits_log2 =
7218 					ilog2(priv->hw->multicast_filter_bins);
7219 		}
7220 
7221 		/* TXCOE doesn't work in thresh DMA mode */
7222 		if (priv->plat->force_thresh_dma_mode)
7223 			priv->plat->tx_coe = 0;
7224 		else
7225 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7226 
7227 		/* For GMAC4, rx_coe comes from the HW capability register. */
7228 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7229 
7230 		if (priv->dma_cap.rx_coe_type2)
7231 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7232 		else if (priv->dma_cap.rx_coe_type1)
7233 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7234 
7235 	} else {
7236 		dev_info(priv->device, "No HW DMA feature register supported\n");
7237 	}
7238 
7239 	if (priv->plat->rx_coe) {
7240 		priv->hw->rx_csum = priv->plat->rx_coe;
7241 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7242 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7243 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7244 	}
7245 	if (priv->plat->tx_coe)
7246 		dev_info(priv->device, "TX Checksum insertion supported\n");
7247 
7248 	if (priv->plat->pmt) {
7249 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7250 		device_set_wakeup_capable(priv->device, 1);
7251 	}
7252 
7253 	if (priv->dma_cap.tsoen)
7254 		dev_info(priv->device, "TSO supported\n");
7255 
7256 	priv->hw->vlan_fail_q_en =
7257 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7258 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7259 
7260 	/* Run HW quirks, if any */
7261 	if (priv->hwif_quirks) {
7262 		ret = priv->hwif_quirks(priv);
7263 		if (ret)
7264 			return ret;
7265 	}
7266 
7267 	/* The RX watchdog is available on cores newer than 3.40.
7268 	 * In some cases, for example on buggy HW, this feature
7269 	 * has to be disabled; this can be done by setting the
7270 	 * riwt_off field in the platform data.
7271 	 */
7272 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7273 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7274 		priv->use_riwt = 1;
7275 		dev_info(priv->device,
7276 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7277 	}
7278 
7279 	return 0;
7280 }
7281 
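/**
 * stmmac_napi_add - register the NAPI instances of every channel
 * @dev: network device
 * Description: for each DMA channel, registers an RX NAPI, a TX NAPI and,
 * when the channel has both an RX and a TX queue, the combined rxtx NAPI
 * used by the XDP/XSK zero-copy path.
 */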
7282 static void stmmac_napi_add(struct net_device *dev)
7283 {
7284 	struct stmmac_priv *priv = netdev_priv(dev);
7285 	u32 queue, maxq;
7286 
7287 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7288 
7289 	for (queue = 0; queue < maxq; queue++) {
7290 		struct stmmac_channel *ch = &priv->channel[queue];
7291 
7292 		ch->priv_data = priv;
7293 		ch->index = queue;
7294 		spin_lock_init(&ch->lock);
7295 
7296 		if (queue < priv->plat->rx_queues_to_use) {
7297 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7298 		}
7299 		if (queue < priv->plat->tx_queues_to_use) {
7300 			netif_napi_add_tx(dev, &ch->tx_napi,
7301 					  stmmac_napi_poll_tx);
7302 		}
7303 		if (queue < priv->plat->rx_queues_to_use &&
7304 		    queue < priv->plat->tx_queues_to_use) {
7305 			netif_napi_add(dev, &ch->rxtx_napi,
7306 				       stmmac_napi_poll_rxtx);
7307 		}
7308 	}
7309 }
7310 
7311 static void stmmac_napi_del(struct net_device *dev)
7312 {
7313 	struct stmmac_priv *priv = netdev_priv(dev);
7314 	u32 queue, maxq;
7315 
7316 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7317 
7318 	for (queue = 0; queue < maxq; queue++) {
7319 		struct stmmac_channel *ch = &priv->channel[queue];
7320 
7321 		if (queue < priv->plat->rx_queues_to_use)
7322 			netif_napi_del(&ch->rx_napi);
7323 		if (queue < priv->plat->tx_queues_to_use)
7324 			netif_napi_del(&ch->tx_napi);
7325 		if (queue < priv->plat->rx_queues_to_use &&
7326 		    queue < priv->plat->tx_queues_to_use) {
7327 			netif_napi_del(&ch->rxtx_napi);
7328 		}
7329 	}
7330 }
7331 
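/**
 * stmmac_reinit_queues - change the number of RX/TX queues at runtime
 * @dev: network device
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: closes the interface if it is running, re-registers the NAPI
 * instances for the new queue counts, refreshes the default RSS indirection
 * table and re-opens the device.
 * Return: 0 on success, otherwise the error returned by stmmac_open().
 */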
7332 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7333 {
7334 	struct stmmac_priv *priv = netdev_priv(dev);
7335 	int ret = 0, i;
7336 
7337 	if (netif_running(dev))
7338 		stmmac_release(dev);
7339 
7340 	stmmac_napi_del(dev);
7341 
7342 	priv->plat->rx_queues_to_use = rx_cnt;
7343 	priv->plat->tx_queues_to_use = tx_cnt;
7344 	if (!netif_is_rxfh_configured(dev))
7345 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7346 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7347 									rx_cnt);
7348 
7349 	stmmac_set_half_duplex(priv);
7350 	stmmac_napi_add(dev);
7351 
7352 	if (netif_running(dev))
7353 		ret = stmmac_open(dev);
7354 
7355 	return ret;
7356 }
7357 
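/**
 * stmmac_reinit_ringparam - change the RX/TX descriptor ring sizes
 * @dev: network device
 * @rx_size: new number of RX descriptors per ring
 * @tx_size: new number of TX descriptors per ring
 * Description: closes the interface if it is running, updates the DMA ring
 * sizes and re-opens it so that the rings are reallocated with the new
 * sizes.
 * Return: 0 on success, otherwise the error returned by stmmac_open().
 */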
7358 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7359 {
7360 	struct stmmac_priv *priv = netdev_priv(dev);
7361 	int ret = 0;
7362 
7363 	if (netif_running(dev))
7364 		stmmac_release(dev);
7365 
7366 	priv->dma_conf.dma_rx_size = rx_size;
7367 	priv->dma_conf.dma_tx_size = tx_size;
7368 
7369 	if (netif_running(dev))
7370 		ret = stmmac_open(dev);
7371 
7372 	return ret;
7373 }
7374 
7375 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
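/**
 * stmmac_fpe_lp_task - FPE (Frame Preemption) handshake worker
 * @work: work_struct embedded in the driver private structure
 * Description: polls the local and link-partner FPE states for up to 20
 * iterations, sleeping 500ms between attempts. While the partner has not
 * reached the ON state it keeps sending verify mPackets; once both sides
 * are entering ON it programs the FPE configuration in hardware. It bails
 * out immediately if the handshake is switched off.
 */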
7376 static void stmmac_fpe_lp_task(struct work_struct *work)
7377 {
7378 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7379 						fpe_task);
7380 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7381 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7382 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7383 	bool *hs_enable = &fpe_cfg->hs_enable;
7384 	bool *enable = &fpe_cfg->enable;
7385 	int retries = 20;
7386 
7387 	while (retries-- > 0) {
7388 		/* Bail out immediately if FPE handshake is OFF */
7389 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7390 			break;
7391 
7392 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7393 		    *lp_state == FPE_STATE_ENTERING_ON) {
7394 			stmmac_fpe_configure(priv, priv->ioaddr,
7395 					     fpe_cfg,
7396 					     priv->plat->tx_queues_to_use,
7397 					     priv->plat->rx_queues_to_use,
7398 					     *enable);
7399 
7400 			netdev_info(priv->dev, "configured FPE\n");
7401 
7402 			*lo_state = FPE_STATE_ON;
7403 			*lp_state = FPE_STATE_ON;
7404 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7405 			break;
7406 		}
7407 
7408 		if ((*lo_state == FPE_STATE_CAPABLE ||
7409 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7410 		     *lp_state != FPE_STATE_ON) {
7411 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7412 				    *lo_state, *lp_state);
7413 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7414 						fpe_cfg,
7415 						MPACKET_VERIFY);
7416 		}
7417 		/* Sleep then retry */
7418 		msleep(500);
7419 	}
7420 
7421 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7422 }
7423 
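/**
 * stmmac_fpe_handshake - start or stop the FPE verification handshake
 * @priv: driver private structure
 * @enable: true to start the handshake, false to stop it
 * Description: on enable, sends a verify mPacket to the link partner; on
 * disable, resets both the local and link-partner FPE states to OFF. The
 * requested state is cached in fpe_cfg->hs_enable.
 */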
7424 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7425 {
7426 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7427 		if (enable) {
7428 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7429 						priv->plat->fpe_cfg,
7430 						MPACKET_VERIFY);
7431 		} else {
7432 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7433 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7434 		}
7435 
7436 		priv->plat->fpe_cfg->hs_enable = enable;
7437 	}
7438 }
7439 
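/**
 * stmmac_xdp_rx_timestamp - XDP metadata hook returning the RX HW timestamp
 * @_ctx: XDP metadata context (actually a struct stmmac_xdp_buff)
 * @timestamp: where to store the timestamp
 * Description: if RX timestamping is enabled and the descriptor carries a
 * valid timestamp, extracts it (from the context descriptor on GMAC4 and
 * XGMAC), applies the CDC error adjustment and hands it to the XDP program.
 * Return: 0 on success, -ENODATA if no timestamp is available.
 */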
7440 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7441 {
7442 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7443 	struct dma_desc *desc_contains_ts = ctx->desc;
7444 	struct stmmac_priv *priv = ctx->priv;
7445 	struct dma_desc *ndesc = ctx->ndesc;
7446 	struct dma_desc *desc = ctx->desc;
7447 	u64 ns = 0;
7448 
7449 	if (!priv->hwts_rx_en)
7450 		return -ENODATA;
7451 
7452 	/* For GMAC4, the valid timestamp is held in the context (next) desc. */
7453 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7454 		desc_contains_ts = ndesc;
7455 
7456 	/* Check if timestamp is available */
7457 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7458 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7459 		ns -= priv->plat->cdc_error_adj;
7460 		*timestamp = ns_to_ktime(ns);
7461 		return 0;
7462 	}
7463 
7464 	return -ENODATA;
7465 }
7466 
7467 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7468 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7469 };
7470 
7471 /**
7472  * stmmac_dvr_probe
7473  * @device: device pointer
7474  * @plat_dat: platform data pointer
7475  * @res: stmmac resource pointer
7476  * Description: this is the main probe function: it allocates the
7477  * net_device, sets up the private structure and registers the device.
7478  * Return:
7479  * returns 0 on success, otherwise a negative errno.
7480  */
7481 int stmmac_dvr_probe(struct device *device,
7482 		     struct plat_stmmacenet_data *plat_dat,
7483 		     struct stmmac_resources *res)
7484 {
7485 	struct net_device *ndev = NULL;
7486 	struct stmmac_priv *priv;
7487 	u32 rxq;
7488 	int i, ret = 0;
7489 
7490 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7491 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7492 	if (!ndev)
7493 		return -ENOMEM;
7494 
7495 	SET_NETDEV_DEV(ndev, device);
7496 
7497 	priv = netdev_priv(ndev);
7498 	priv->device = device;
7499 	priv->dev = ndev;
7500 
7501 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7502 		u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
7503 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7504 		u64_stats_init(&priv->xstats.txq_stats[i].syncp);
7505 
7506 	stmmac_set_ethtool_ops(ndev);
7507 	priv->pause = pause;
7508 	priv->plat = plat_dat;
7509 	priv->ioaddr = res->addr;
7510 	priv->dev->base_addr = (unsigned long)res->addr;
7511 	priv->plat->dma_cfg->multi_msi_en =
7512 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7513 
7514 	priv->dev->irq = res->irq;
7515 	priv->wol_irq = res->wol_irq;
7516 	priv->lpi_irq = res->lpi_irq;
7517 	priv->sfty_ce_irq = res->sfty_ce_irq;
7518 	priv->sfty_ue_irq = res->sfty_ue_irq;
7519 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7520 		priv->rx_irq[i] = res->rx_irq[i];
7521 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7522 		priv->tx_irq[i] = res->tx_irq[i];
7523 
7524 	if (!is_zero_ether_addr(res->mac))
7525 		eth_hw_addr_set(priv->dev, res->mac);
7526 
7527 	dev_set_drvdata(device, priv->dev);
7528 
7529 	/* Verify driver arguments */
7530 	stmmac_verify_args();
7531 
7532 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7533 	if (!priv->af_xdp_zc_qps)
7534 		return -ENOMEM;
7535 
7536 	/* Allocate workqueue */
7537 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7538 	if (!priv->wq) {
7539 		dev_err(priv->device, "failed to create workqueue\n");
7540 		ret = -ENOMEM;
7541 		goto error_wq_init;
7542 	}
7543 
7544 	INIT_WORK(&priv->service_task, stmmac_service_task);
7545 
7546 	/* Initialize Link Partner FPE workqueue */
7547 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7548 
7549 	/* Override with kernel parameters if supplied XXX CRS XXX
7550 	 * this needs to have multiple instances
7551 	 */
7552 	if ((phyaddr >= 0) && (phyaddr <= 31))
7553 		priv->plat->phy_addr = phyaddr;
7554 
7555 	if (priv->plat->stmmac_rst) {
7556 		ret = reset_control_assert(priv->plat->stmmac_rst);
7557 		reset_control_deassert(priv->plat->stmmac_rst);
7558 		/* Some reset controllers only have a reset callback instead of
7559 		 * an assert + deassert callback pair.
7560 		 */
7561 		if (ret == -ENOTSUPP)
7562 			reset_control_reset(priv->plat->stmmac_rst);
7563 	}
7564 
7565 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7566 	if (ret == -ENOTSUPP)
7567 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7568 			ERR_PTR(ret));
7569 
7570 	/* Wait a bit for the reset to take effect */
7571 	udelay(10);
7572 
7573 	/* Init MAC and get the capabilities */
7574 	ret = stmmac_hw_init(priv);
7575 	if (ret)
7576 		goto error_hw_init;
7577 
7578 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7579 	 */
7580 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7581 		priv->plat->dma_cfg->dche = false;
7582 
7583 	stmmac_check_ether_addr(priv);
7584 
7585 	ndev->netdev_ops = &stmmac_netdev_ops;
7586 
7587 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7588 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7589 
7590 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7591 			    NETIF_F_RXCSUM;
7592 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7593 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7594 
7595 	ret = stmmac_tc_init(priv, priv);
7596 	if (!ret) {
7597 		ndev->hw_features |= NETIF_F_HW_TC;
7598 	}
7599 
7600 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7601 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7602 		if (priv->plat->has_gmac4)
7603 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7604 		priv->tso = true;
7605 		dev_info(priv->device, "TSO feature enabled\n");
7606 	}
7607 
7608 	if (priv->dma_cap.sphen &&
7609 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7610 		ndev->hw_features |= NETIF_F_GRO;
7611 		priv->sph_cap = true;
7612 		priv->sph = priv->sph_cap;
7613 		dev_info(priv->device, "SPH feature enabled\n");
7614 	}
7615 
7616 	/* Ideally our host DMA address width is the same as for the
7617 	 * device. However, it may differ and then we have to use our
7618 	 * host DMA width for allocation and the device DMA width for
7619 	 * register handling.
7620 	 */
7621 	if (priv->plat->host_dma_width)
7622 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7623 	else
7624 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7625 
7626 	if (priv->dma_cap.host_dma_width) {
7627 		ret = dma_set_mask_and_coherent(device,
7628 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7629 		if (!ret) {
7630 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7631 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7632 
7633 			/*
7634 			 * If more than 32 bits can be addressed, make sure to
7635 			 * enable enhanced addressing mode.
7636 			 */
7637 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7638 				priv->plat->dma_cfg->eame = true;
7639 		} else {
7640 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7641 			if (ret) {
7642 				dev_err(priv->device, "Failed to set DMA Mask\n");
7643 				goto error_hw_init;
7644 			}
7645 
7646 			priv->dma_cap.host_dma_width = 32;
7647 		}
7648 	}
7649 
7650 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7651 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7652 #ifdef STMMAC_VLAN_TAG_USED
7653 	/* Both mac100 and gmac support receive VLAN tag detection */
7654 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7655 	ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7656 	priv->hw->hw_vlan_en = true;
7657 
7658 	if (priv->dma_cap.vlhash) {
7659 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7660 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7661 	}
7662 	if (priv->dma_cap.vlins) {
7663 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7664 		if (priv->dma_cap.dvlan)
7665 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7666 	}
7667 #endif
7668 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7669 
7670 	priv->xstats.threshold = tc;
7671 
7672 	/* Initialize RSS */
7673 	rxq = priv->plat->rx_queues_to_use;
7674 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7675 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7676 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7677 
7678 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7679 		ndev->features |= NETIF_F_RXHASH;
7680 
7681 	ndev->vlan_features |= ndev->features;
7682 	/* TSO doesn't work on VLANs yet */
7683 	ndev->vlan_features &= ~NETIF_F_TSO;
7684 
7685 	/* MTU range: 46 - hw-specific max */
7686 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7687 	if (priv->plat->has_xgmac)
7688 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7689 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7690 		ndev->max_mtu = JUMBO_LEN;
7691 	else
7692 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7693 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7694 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7695 	 */
7696 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7697 	    (priv->plat->maxmtu >= ndev->min_mtu))
7698 		ndev->max_mtu = priv->plat->maxmtu;
7699 	else if (priv->plat->maxmtu < ndev->min_mtu)
7700 		dev_warn(priv->device,
7701 			 "%s: warning: maxmtu having invalid value (%d)\n",
7702 			 __func__, priv->plat->maxmtu);
7703 
7704 	if (flow_ctrl)
7705 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7706 
7707 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7708 
7709 	/* Setup channels NAPI */
7710 	stmmac_napi_add(ndev);
7711 
7712 	mutex_init(&priv->lock);
7713 
7714 	/* If a specific clk_csr value is passed from the platform,
7715 	 * the CSR Clock Range selection is fixed and cannot be
7716 	 * changed at run-time. Otherwise the driver will try to
7717 	 * set the MDC clock dynamically according to the actual
7718 	 * CSR clock input.
7719 	 */
7720 	if (priv->plat->clk_csr >= 0)
7721 		priv->clk_csr = priv->plat->clk_csr;
7722 	else
7723 		stmmac_clk_csr_set(priv);
7724 
7725 	stmmac_check_pcs_mode(priv);
7726 
7727 	pm_runtime_get_noresume(device);
7728 	pm_runtime_set_active(device);
7729 	if (!pm_runtime_enabled(device))
7730 		pm_runtime_enable(device);
7731 
7732 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7733 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7734 		/* MDIO bus Registration */
7735 		ret = stmmac_mdio_register(ndev);
7736 		if (ret < 0) {
7737 			dev_err_probe(priv->device, ret,
7738 				      "%s: MDIO bus (id: %d) registration failed\n",
7739 				      __func__, priv->plat->bus_id);
7740 			goto error_mdio_register;
7741 		}
7742 	}
7743 
7744 	if (priv->plat->speed_mode_2500)
7745 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7746 
7747 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7748 		ret = stmmac_xpcs_setup(priv->mii);
7749 		if (ret)
7750 			goto error_xpcs_setup;
7751 	}
7752 
7753 	ret = stmmac_phy_setup(priv);
7754 	if (ret) {
7755 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7756 		goto error_phy_setup;
7757 	}
7758 
7759 	ret = register_netdev(ndev);
7760 	if (ret) {
7761 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7762 			__func__, ret);
7763 		goto error_netdev_register;
7764 	}
7765 
7766 #ifdef CONFIG_DEBUG_FS
7767 	stmmac_init_fs(ndev);
7768 #endif
7769 
7770 	if (priv->plat->dump_debug_regs)
7771 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7772 
7773 	/* Let pm_runtime_put() disable the clocks.
7774 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7775 	 */
7776 	pm_runtime_put(device);
7777 
7778 	return ret;
7779 
7780 error_netdev_register:
7781 	phylink_destroy(priv->phylink);
7782 error_xpcs_setup:
7783 error_phy_setup:
7784 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7785 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7786 		stmmac_mdio_unregister(ndev);
7787 error_mdio_register:
7788 	stmmac_napi_del(ndev);
7789 error_hw_init:
7790 	destroy_workqueue(priv->wq);
7791 error_wq_init:
7792 	bitmap_free(priv->af_xdp_zc_qps);
7793 
7794 	return ret;
7795 }
7796 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7797 
7798 /**
7799  * stmmac_dvr_remove
7800  * @dev: device pointer
7801  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7802  * changes the link status and releases the DMA descriptor rings.
7803  */
7804 void stmmac_dvr_remove(struct device *dev)
7805 {
7806 	struct net_device *ndev = dev_get_drvdata(dev);
7807 	struct stmmac_priv *priv = netdev_priv(ndev);
7808 
7809 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7810 
7811 	pm_runtime_get_sync(dev);
7812 
7813 	stmmac_stop_all_dma(priv);
7814 	stmmac_mac_set(priv, priv->ioaddr, false);
7815 	netif_carrier_off(ndev);
7816 	unregister_netdev(ndev);
7817 
7818 #ifdef CONFIG_DEBUG_FS
7819 	stmmac_exit_fs(ndev);
7820 #endif
7821 	phylink_destroy(priv->phylink);
7822 	if (priv->plat->stmmac_rst)
7823 		reset_control_assert(priv->plat->stmmac_rst);
7824 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7825 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7826 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7827 		stmmac_mdio_unregister(ndev);
7828 	destroy_workqueue(priv->wq);
7829 	mutex_destroy(&priv->lock);
7830 	bitmap_free(priv->af_xdp_zc_qps);
7831 
7832 	pm_runtime_disable(dev);
7833 	pm_runtime_put_noidle(dev);
7834 }
7835 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7836 
7837 /**
7838  * stmmac_suspend - suspend callback
7839  * @dev: device pointer
7840  * Description: suspends the device. It is called by the platform driver
7841  * to stop the network queues, clean and release the driver resources,
7842  * and program the PMT register (for WoL).
7843  */
7844 int stmmac_suspend(struct device *dev)
7845 {
7846 	struct net_device *ndev = dev_get_drvdata(dev);
7847 	struct stmmac_priv *priv = netdev_priv(ndev);
7848 	u32 chan;
7849 
7850 	if (!ndev || !netif_running(ndev))
7851 		return 0;
7852 
7853 	mutex_lock(&priv->lock);
7854 
7855 	netif_device_detach(ndev);
7856 
7857 	stmmac_disable_all_queues(priv);
7858 
7859 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7860 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7861 
7862 	if (priv->eee_enabled) {
7863 		priv->tx_path_in_lpi_mode = false;
7864 		del_timer_sync(&priv->eee_ctrl_timer);
7865 	}
7866 
7867 	/* Stop TX/RX DMA */
7868 	stmmac_stop_all_dma(priv);
7869 
7870 	if (priv->plat->serdes_powerdown)
7871 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7872 
7873 	/* Enable Power down mode by programming the PMT regs */
7874 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7875 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7876 		priv->irq_wake = 1;
7877 	} else {
7878 		stmmac_mac_set(priv, priv->ioaddr, false);
7879 		pinctrl_pm_select_sleep_state(priv->device);
7880 	}
7881 
7882 	mutex_unlock(&priv->lock);
7883 
7884 	rtnl_lock();
7885 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7886 		phylink_suspend(priv->phylink, true);
7887 	} else {
7888 		if (device_may_wakeup(priv->device))
7889 			phylink_speed_down(priv->phylink, false);
7890 		phylink_suspend(priv->phylink, false);
7891 	}
7892 	rtnl_unlock();
7893 
7894 	if (priv->dma_cap.fpesel) {
7895 		/* Disable FPE */
7896 		stmmac_fpe_configure(priv, priv->ioaddr,
7897 				     priv->plat->fpe_cfg,
7898 				     priv->plat->tx_queues_to_use,
7899 				     priv->plat->rx_queues_to_use, false);
7900 
7901 		stmmac_fpe_handshake(priv, false);
7902 		stmmac_fpe_stop_wq(priv);
7903 	}
7904 
7905 	priv->speed = SPEED_UNKNOWN;
7906 	return 0;
7907 }
7908 EXPORT_SYMBOL_GPL(stmmac_suspend);
7909 
7910 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7911 {
7912 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7913 
7914 	rx_q->cur_rx = 0;
7915 	rx_q->dirty_rx = 0;
7916 }
7917 
7918 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7919 {
7920 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7921 
7922 	tx_q->cur_tx = 0;
7923 	tx_q->dirty_tx = 0;
7924 	tx_q->mss = 0;
7925 
7926 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7927 }
7928 
7929 /**
7930  * stmmac_reset_queues_param - reset queue parameters
7931  * @priv: device pointer
7932  */
7933 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7934 {
7935 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7936 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7937 	u32 queue;
7938 
7939 	for (queue = 0; queue < rx_cnt; queue++)
7940 		stmmac_reset_rx_queue(priv, queue);
7941 
7942 	for (queue = 0; queue < tx_cnt; queue++)
7943 		stmmac_reset_tx_queue(priv, queue);
7944 }
7945 
7946 /**
7947  * stmmac_resume - resume callback
7948  * @dev: device pointer
7949  * Description: invoked on resume to bring the DMA engine and the core
7950  * back into a usable state.
7951  */
7952 int stmmac_resume(struct device *dev)
7953 {
7954 	struct net_device *ndev = dev_get_drvdata(dev);
7955 	struct stmmac_priv *priv = netdev_priv(ndev);
7956 	int ret;
7957 
7958 	if (!netif_running(ndev))
7959 		return 0;
7960 
7961 	/* The Power Down bit in the PMT register is cleared
7962 	 * automatically as soon as a magic packet or a Wake-up frame
7963 	 * is received. Even so, it's better to clear this bit manually
7964 	 * because it can cause problems when resuming from other
7965 	 * devices (e.g. a serial console).
7966 	 */
7967 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7968 		mutex_lock(&priv->lock);
7969 		stmmac_pmt(priv, priv->hw, 0);
7970 		mutex_unlock(&priv->lock);
7971 		priv->irq_wake = 0;
7972 	} else {
7973 		pinctrl_pm_select_default_state(priv->device);
7974 		/* reset the phy so that it's ready */
7975 		if (priv->mii)
7976 			stmmac_mdio_reset(priv->mii);
7977 	}
7978 
7979 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7980 	    priv->plat->serdes_powerup) {
7981 		ret = priv->plat->serdes_powerup(ndev,
7982 						 priv->plat->bsp_priv);
7983 
7984 		if (ret < 0)
7985 			return ret;
7986 	}
7987 
7988 	rtnl_lock();
7989 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7990 		phylink_resume(priv->phylink);
7991 	} else {
7992 		phylink_resume(priv->phylink);
7993 		if (device_may_wakeup(priv->device))
7994 			phylink_speed_up(priv->phylink);
7995 	}
7996 	rtnl_unlock();
7997 
7998 	rtnl_lock();
7999 	mutex_lock(&priv->lock);
8000 
8001 	stmmac_reset_queues_param(priv);
8002 
8003 	stmmac_free_tx_skbufs(priv);
8004 	stmmac_clear_descriptors(priv, &priv->dma_conf);
8005 
8006 	stmmac_hw_setup(ndev, false);
8007 	stmmac_init_coalesce(priv);
8008 	stmmac_set_rx_mode(ndev);
8009 
8010 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8011 
8012 	stmmac_enable_all_queues(priv);
8013 	stmmac_enable_all_dma_irq(priv);
8014 
8015 	mutex_unlock(&priv->lock);
8016 	rtnl_unlock();
8017 
8018 	netif_device_attach(ndev);
8019 
8020 	return 0;
8021 }
8022 EXPORT_SYMBOL_GPL(stmmac_resume);
8023 
8024 #ifndef MODULE
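/* Parse the "stmmaceth=" kernel command line option when the driver is
 * built in. The accepted keys mirror the module parameters, e.g.:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,tc:256
 *
 * Unknown keys are silently ignored; a malformed value logs an error and
 * aborts further parsing.
 */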
8025 static int __init stmmac_cmdline_opt(char *str)
8026 {
8027 	char *opt;
8028 
8029 	if (!str || !*str)
8030 		return 1;
8031 	while ((opt = strsep(&str, ",")) != NULL) {
8032 		if (!strncmp(opt, "debug:", 6)) {
8033 			if (kstrtoint(opt + 6, 0, &debug))
8034 				goto err;
8035 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8036 			if (kstrtoint(opt + 8, 0, &phyaddr))
8037 				goto err;
8038 		} else if (!strncmp(opt, "buf_sz:", 7)) {
8039 			if (kstrtoint(opt + 7, 0, &buf_sz))
8040 				goto err;
8041 		} else if (!strncmp(opt, "tc:", 3)) {
8042 			if (kstrtoint(opt + 3, 0, &tc))
8043 				goto err;
8044 		} else if (!strncmp(opt, "watchdog:", 9)) {
8045 			if (kstrtoint(opt + 9, 0, &watchdog))
8046 				goto err;
8047 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8048 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8049 				goto err;
8050 		} else if (!strncmp(opt, "pause:", 6)) {
8051 			if (kstrtoint(opt + 6, 0, &pause))
8052 				goto err;
8053 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8054 			if (kstrtoint(opt + 10, 0, &eee_timer))
8055 				goto err;
8056 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8057 			if (kstrtoint(opt + 11, 0, &chain_mode))
8058 				goto err;
8059 		}
8060 	}
8061 	return 1;
8062 
8063 err:
8064 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
8065 	return 1;
8066 }
8067 
8068 __setup("stmmaceth=", stmmac_cmdline_opt);
8069 #endif /* MODULE */
8070 
8071 static int __init stmmac_init(void)
8072 {
8073 #ifdef CONFIG_DEBUG_FS
8074 	/* Create debugfs main directory if it doesn't exist yet */
8075 	if (!stmmac_fs_dir)
8076 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8077 	register_netdevice_notifier(&stmmac_notifier);
8078 #endif
8079 
8080 	return 0;
8081 }
8082 
8083 static void __exit stmmac_exit(void)
8084 {
8085 #ifdef CONFIG_DEBUG_FS
8086 	unregister_netdevice_notifier(&stmmac_notifier);
8087 	debugfs_remove_recursive(stmmac_fs_dir);
8088 #endif
8089 }
8090 
8091 module_init(stmmac_init)
8092 module_exit(stmmac_exit)
8093 
8094 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8095 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8096 MODULE_LICENSE("GPL");
8097