xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 9fc31a9251de4acaab2d0704450d70ddc99f5ea2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoid non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allow user to force to use the chain instead of the ring
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and set a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Viceversa the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* Platform provided default clk_csr would be assumed valid
308 	 * for all other cases except for the below mentioned ones.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we can not estimate the proper divider as it is not known
311 	 * the frequency of clk_csr_i. So we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function is to verify and enter in LPI mode in case of
407  * EEE.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function is to exit and disable EEE in case of
433  * LPI state is true. This is called by the xmit.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then MAC Transmitter can be moved to LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enable the LPI state and start related
468  *  timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Using PCS we cannot dial with the phy registers at this stage
475 	 * so we do not support extra feature like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function will read timestamp from the descriptor & pass it to stack.
534  * and also perform some sanity checks.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function will read received packet's timestamp from the descriptor
577  * and pass it to stack. It also perform some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure, that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing(TX)
614  *  and incoming(RX) packets time stamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate -ve integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.AS1 any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.AS1, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.AS1, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure, that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtain the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * Will be rerun after resuming from suspend, case in which the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* calculate default added value:
858 	 * formula is :
859 	 * addend = (2^32)/freq_div_ratio;
860 	 * where, freq_div_ratio = 1e9ns/sec_inc
861 	 */
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	return priv->hw->phylink_pcs;
948 }
949 
950 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
951 			      const struct phylink_link_state *state)
952 {
953 	/* Nothing to do, xpcs_config() handles everything */
954 }
955 
956 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
957 {
958 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
959 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
960 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
961 	bool *hs_enable = &fpe_cfg->hs_enable;
962 
963 	if (is_up && *hs_enable) {
964 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
965 					MPACKET_VERIFY);
966 	} else {
967 		*lo_state = FPE_STATE_OFF;
968 		*lp_state = FPE_STATE_OFF;
969 	}
970 }
971 
972 static void stmmac_mac_link_down(struct phylink_config *config,
973 				 unsigned int mode, phy_interface_t interface)
974 {
975 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
976 
977 	stmmac_mac_set(priv, priv->ioaddr, false);
978 	priv->eee_active = false;
979 	priv->tx_lpi_enabled = false;
980 	priv->eee_enabled = stmmac_eee_init(priv);
981 	stmmac_set_eee_pls(priv, priv->hw, false);
982 
983 	if (priv->dma_cap.fpesel)
984 		stmmac_fpe_link_state_handle(priv, false);
985 }
986 
987 static void stmmac_mac_link_up(struct phylink_config *config,
988 			       struct phy_device *phy,
989 			       unsigned int mode, phy_interface_t interface,
990 			       int speed, int duplex,
991 			       bool tx_pause, bool rx_pause)
992 {
993 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
994 	u32 old_ctrl, ctrl;
995 
996 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
997 	    priv->plat->serdes_powerup)
998 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
999 
1000 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1001 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1002 
1003 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1004 		switch (speed) {
1005 		case SPEED_10000:
1006 			ctrl |= priv->hw->link.xgmii.speed10000;
1007 			break;
1008 		case SPEED_5000:
1009 			ctrl |= priv->hw->link.xgmii.speed5000;
1010 			break;
1011 		case SPEED_2500:
1012 			ctrl |= priv->hw->link.xgmii.speed2500;
1013 			break;
1014 		default:
1015 			return;
1016 		}
1017 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1018 		switch (speed) {
1019 		case SPEED_100000:
1020 			ctrl |= priv->hw->link.xlgmii.speed100000;
1021 			break;
1022 		case SPEED_50000:
1023 			ctrl |= priv->hw->link.xlgmii.speed50000;
1024 			break;
1025 		case SPEED_40000:
1026 			ctrl |= priv->hw->link.xlgmii.speed40000;
1027 			break;
1028 		case SPEED_25000:
1029 			ctrl |= priv->hw->link.xlgmii.speed25000;
1030 			break;
1031 		case SPEED_10000:
1032 			ctrl |= priv->hw->link.xgmii.speed10000;
1033 			break;
1034 		case SPEED_2500:
1035 			ctrl |= priv->hw->link.speed2500;
1036 			break;
1037 		case SPEED_1000:
1038 			ctrl |= priv->hw->link.speed1000;
1039 			break;
1040 		default:
1041 			return;
1042 		}
1043 	} else {
1044 		switch (speed) {
1045 		case SPEED_2500:
1046 			ctrl |= priv->hw->link.speed2500;
1047 			break;
1048 		case SPEED_1000:
1049 			ctrl |= priv->hw->link.speed1000;
1050 			break;
1051 		case SPEED_100:
1052 			ctrl |= priv->hw->link.speed100;
1053 			break;
1054 		case SPEED_10:
1055 			ctrl |= priv->hw->link.speed10;
1056 			break;
1057 		default:
1058 			return;
1059 		}
1060 	}
1061 
1062 	priv->speed = speed;
1063 
1064 	if (priv->plat->fix_mac_speed)
1065 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1066 
1067 	if (!duplex)
1068 		ctrl &= ~priv->hw->link.duplex;
1069 	else
1070 		ctrl |= priv->hw->link.duplex;
1071 
1072 	/* Flow Control operation */
1073 	if (rx_pause && tx_pause)
1074 		priv->flow_ctrl = FLOW_AUTO;
1075 	else if (rx_pause && !tx_pause)
1076 		priv->flow_ctrl = FLOW_RX;
1077 	else if (!rx_pause && tx_pause)
1078 		priv->flow_ctrl = FLOW_TX;
1079 	else
1080 		priv->flow_ctrl = FLOW_OFF;
1081 
1082 	stmmac_mac_flow_ctrl(priv, duplex);
1083 
1084 	if (ctrl != old_ctrl)
1085 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1086 
1087 	stmmac_mac_set(priv, priv->ioaddr, true);
1088 	if (phy && priv->dma_cap.eee) {
1089 		priv->eee_active =
1090 			phy_init_eee(phy, !(priv->plat->flags &
1091 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1092 		priv->eee_enabled = stmmac_eee_init(priv);
1093 		priv->tx_lpi_enabled = priv->eee_enabled;
1094 		stmmac_set_eee_pls(priv, priv->hw, true);
1095 	}
1096 
1097 	if (priv->dma_cap.fpesel)
1098 		stmmac_fpe_link_state_handle(priv, true);
1099 
1100 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1101 		stmmac_hwtstamp_correct_latency(priv, priv);
1102 }
1103 
1104 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1105 	.mac_select_pcs = stmmac_mac_select_pcs,
1106 	.mac_config = stmmac_mac_config,
1107 	.mac_link_down = stmmac_mac_link_down,
1108 	.mac_link_up = stmmac_mac_link_up,
1109 };
1110 
1111 /**
1112  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1113  * @priv: driver private structure
1114  * Description: this is to verify if the HW supports the PCS.
1115  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1116  * configured for the TBI, RTBI, or SGMII PHY interface.
1117  */
1118 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1119 {
1120 	int interface = priv->plat->mac_interface;
1121 
1122 	if (priv->dma_cap.pcs) {
1123 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1124 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1125 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1127 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1128 			priv->hw->pcs = STMMAC_PCS_RGMII;
1129 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1130 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_SGMII;
1132 		}
1133 	}
1134 }
1135 
1136 /**
1137  * stmmac_init_phy - PHY initialization
1138  * @dev: net device structure
1139  * Description: it initializes the driver's PHY state, and attaches the PHY
1140  * to the mac driver.
1141  *  Return value:
1142  *  0 on success
1143  */
1144 static int stmmac_init_phy(struct net_device *dev)
1145 {
1146 	struct stmmac_priv *priv = netdev_priv(dev);
1147 	struct fwnode_handle *phy_fwnode;
1148 	struct fwnode_handle *fwnode;
1149 	int ret;
1150 
1151 	if (!phylink_expects_phy(priv->phylink))
1152 		return 0;
1153 
1154 	fwnode = priv->plat->port_node;
1155 	if (!fwnode)
1156 		fwnode = dev_fwnode(priv->device);
1157 
1158 	if (fwnode)
1159 		phy_fwnode = fwnode_get_phy_node(fwnode);
1160 	else
1161 		phy_fwnode = NULL;
1162 
1163 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1164 	 * manually parse it
1165 	 */
1166 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1167 		int addr = priv->plat->phy_addr;
1168 		struct phy_device *phydev;
1169 
1170 		if (addr < 0) {
1171 			netdev_err(priv->dev, "no phy found\n");
1172 			return -ENODEV;
1173 		}
1174 
1175 		phydev = mdiobus_get_phy(priv->mii, addr);
1176 		if (!phydev) {
1177 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1178 			return -ENODEV;
1179 		}
1180 
1181 		ret = phylink_connect_phy(priv->phylink, phydev);
1182 	} else {
1183 		fwnode_handle_put(phy_fwnode);
1184 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1185 	}
1186 
1187 	if (!priv->plat->pmt) {
1188 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1189 
1190 		phylink_ethtool_get_wol(priv->phylink, &wol);
1191 		device_set_wakeup_capable(priv->device, !!wol.supported);
1192 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1193 	}
1194 
1195 	return ret;
1196 }
1197 
1198 static int stmmac_phy_setup(struct stmmac_priv *priv)
1199 {
1200 	struct stmmac_mdio_bus_data *mdio_bus_data;
1201 	int mode = priv->plat->phy_interface;
1202 	struct fwnode_handle *fwnode;
1203 	struct phylink *phylink;
1204 	int max_speed;
1205 
1206 	priv->phylink_config.dev = &priv->dev->dev;
1207 	priv->phylink_config.type = PHYLINK_NETDEV;
1208 	priv->phylink_config.mac_managed_pm = true;
1209 
1210 	/* Stmmac always requires an RX clock for hardware initialization */
1211 	priv->phylink_config.mac_requires_rxc = true;
1212 
1213 	mdio_bus_data = priv->plat->mdio_bus_data;
1214 	if (mdio_bus_data)
1215 		priv->phylink_config.ovr_an_inband =
1216 			mdio_bus_data->xpcs_an_inband;
1217 
1218 	/* Set the platform/firmware specified interface mode. Note, phylink
1219 	 * deals with the PHY interface mode, not the MAC interface mode.
1220 	 */
1221 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1222 
1223 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1224 	if (priv->hw->xpcs)
1225 		xpcs_get_interfaces(priv->hw->xpcs,
1226 				    priv->phylink_config.supported_interfaces);
1227 
1228 	/* Get the MAC specific capabilities */
1229 	stmmac_mac_phylink_get_caps(priv);
1230 
1231 	priv->phylink_config.mac_capabilities = priv->hw->link.caps;
1232 
1233 	max_speed = priv->plat->max_speed;
1234 	if (max_speed)
1235 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1236 
1237 	fwnode = priv->plat->port_node;
1238 	if (!fwnode)
1239 		fwnode = dev_fwnode(priv->device);
1240 
1241 	phylink = phylink_create(&priv->phylink_config, fwnode,
1242 				 mode, &stmmac_phylink_mac_ops);
1243 	if (IS_ERR(phylink))
1244 		return PTR_ERR(phylink);
1245 
1246 	priv->phylink = phylink;
1247 	return 0;
1248 }
1249 
1250 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1251 				    struct stmmac_dma_conf *dma_conf)
1252 {
1253 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1254 	unsigned int desc_size;
1255 	void *head_rx;
1256 	u32 queue;
1257 
1258 	/* Display RX rings */
1259 	for (queue = 0; queue < rx_cnt; queue++) {
1260 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1261 
1262 		pr_info("\tRX Queue %u rings\n", queue);
1263 
1264 		if (priv->extend_desc) {
1265 			head_rx = (void *)rx_q->dma_erx;
1266 			desc_size = sizeof(struct dma_extended_desc);
1267 		} else {
1268 			head_rx = (void *)rx_q->dma_rx;
1269 			desc_size = sizeof(struct dma_desc);
1270 		}
1271 
1272 		/* Display RX ring */
1273 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1274 				    rx_q->dma_rx_phy, desc_size);
1275 	}
1276 }
1277 
1278 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1279 				    struct stmmac_dma_conf *dma_conf)
1280 {
1281 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1282 	unsigned int desc_size;
1283 	void *head_tx;
1284 	u32 queue;
1285 
1286 	/* Display TX rings */
1287 	for (queue = 0; queue < tx_cnt; queue++) {
1288 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1289 
1290 		pr_info("\tTX Queue %d rings\n", queue);
1291 
1292 		if (priv->extend_desc) {
1293 			head_tx = (void *)tx_q->dma_etx;
1294 			desc_size = sizeof(struct dma_extended_desc);
1295 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1296 			head_tx = (void *)tx_q->dma_entx;
1297 			desc_size = sizeof(struct dma_edesc);
1298 		} else {
1299 			head_tx = (void *)tx_q->dma_tx;
1300 			desc_size = sizeof(struct dma_desc);
1301 		}
1302 
1303 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1304 				    tx_q->dma_tx_phy, desc_size);
1305 	}
1306 }
1307 
1308 static void stmmac_display_rings(struct stmmac_priv *priv,
1309 				 struct stmmac_dma_conf *dma_conf)
1310 {
1311 	/* Display RX ring */
1312 	stmmac_display_rx_rings(priv, dma_conf);
1313 
1314 	/* Display TX ring */
1315 	stmmac_display_tx_rings(priv, dma_conf);
1316 }
1317 
1318 static int stmmac_set_bfsize(int mtu, int bufsize)
1319 {
1320 	int ret = bufsize;
1321 
1322 	if (mtu >= BUF_SIZE_8KiB)
1323 		ret = BUF_SIZE_16KiB;
1324 	else if (mtu >= BUF_SIZE_4KiB)
1325 		ret = BUF_SIZE_8KiB;
1326 	else if (mtu >= BUF_SIZE_2KiB)
1327 		ret = BUF_SIZE_4KiB;
1328 	else if (mtu > DEFAULT_BUFSIZE)
1329 		ret = BUF_SIZE_2KiB;
1330 	else
1331 		ret = DEFAULT_BUFSIZE;
1332 
1333 	return ret;
1334 }
1335 
1336 /**
1337  * stmmac_clear_rx_descriptors - clear RX descriptors
1338  * @priv: driver private structure
1339  * @dma_conf: structure to take the dma data
1340  * @queue: RX queue index
1341  * Description: this function is called to clear the RX descriptors
1342  * in case of both basic and extended descriptors are used.
1343  */
1344 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1345 					struct stmmac_dma_conf *dma_conf,
1346 					u32 queue)
1347 {
1348 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1349 	int i;
1350 
1351 	/* Clear the RX descriptors */
1352 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1353 		if (priv->extend_desc)
1354 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1355 					priv->use_riwt, priv->mode,
1356 					(i == dma_conf->dma_rx_size - 1),
1357 					dma_conf->dma_buf_sz);
1358 		else
1359 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1360 					priv->use_riwt, priv->mode,
1361 					(i == dma_conf->dma_rx_size - 1),
1362 					dma_conf->dma_buf_sz);
1363 }
1364 
1365 /**
1366  * stmmac_clear_tx_descriptors - clear tx descriptors
1367  * @priv: driver private structure
1368  * @dma_conf: structure to take the dma data
1369  * @queue: TX queue index.
1370  * Description: this function is called to clear the TX descriptors
1371  * in case of both basic and extended descriptors are used.
1372  */
1373 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1374 					struct stmmac_dma_conf *dma_conf,
1375 					u32 queue)
1376 {
1377 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1378 	int i;
1379 
1380 	/* Clear the TX descriptors */
1381 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1382 		int last = (i == (dma_conf->dma_tx_size - 1));
1383 		struct dma_desc *p;
1384 
1385 		if (priv->extend_desc)
1386 			p = &tx_q->dma_etx[i].basic;
1387 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1388 			p = &tx_q->dma_entx[i].basic;
1389 		else
1390 			p = &tx_q->dma_tx[i];
1391 
1392 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1393 	}
1394 }
1395 
1396 /**
1397  * stmmac_clear_descriptors - clear descriptors
1398  * @priv: driver private structure
1399  * @dma_conf: structure to take the dma data
1400  * Description: this function is called to clear the TX and RX descriptors
1401  * in case of both basic and extended descriptors are used.
1402  */
1403 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1404 				     struct stmmac_dma_conf *dma_conf)
1405 {
1406 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1407 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1408 	u32 queue;
1409 
1410 	/* Clear the RX descriptors */
1411 	for (queue = 0; queue < rx_queue_cnt; queue++)
1412 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1413 
1414 	/* Clear the TX descriptors */
1415 	for (queue = 0; queue < tx_queue_cnt; queue++)
1416 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1417 }
1418 
1419 /**
1420  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1421  * @priv: driver private structure
1422  * @dma_conf: structure to take the dma data
1423  * @p: descriptor pointer
1424  * @i: descriptor index
1425  * @flags: gfp flag
1426  * @queue: RX queue index
1427  * Description: this function is called to allocate a receive buffer, perform
1428  * the DMA mapping and init the descriptor.
1429  */
1430 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1431 				  struct stmmac_dma_conf *dma_conf,
1432 				  struct dma_desc *p,
1433 				  int i, gfp_t flags, u32 queue)
1434 {
1435 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1436 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1437 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1438 
1439 	if (priv->dma_cap.host_dma_width <= 32)
1440 		gfp |= GFP_DMA32;
1441 
1442 	if (!buf->page) {
1443 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1444 		if (!buf->page)
1445 			return -ENOMEM;
1446 		buf->page_offset = stmmac_rx_offset(priv);
1447 	}
1448 
1449 	if (priv->sph && !buf->sec_page) {
1450 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1451 		if (!buf->sec_page)
1452 			return -ENOMEM;
1453 
1454 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1455 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1456 	} else {
1457 		buf->sec_page = NULL;
1458 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1459 	}
1460 
1461 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1462 
1463 	stmmac_set_desc_addr(priv, p, buf->addr);
1464 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1465 		stmmac_init_desc3(priv, p);
1466 
1467 	return 0;
1468 }
1469 
1470 /**
1471  * stmmac_free_rx_buffer - free RX dma buffers
1472  * @priv: private structure
1473  * @rx_q: RX queue
1474  * @i: buffer index.
1475  */
1476 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1477 				  struct stmmac_rx_queue *rx_q,
1478 				  int i)
1479 {
1480 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1481 
1482 	if (buf->page)
1483 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1484 	buf->page = NULL;
1485 
1486 	if (buf->sec_page)
1487 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1488 	buf->sec_page = NULL;
1489 }
1490 
1491 /**
1492  * stmmac_free_tx_buffer - free RX dma buffers
1493  * @priv: private structure
1494  * @dma_conf: structure to take the dma data
1495  * @queue: RX queue index
1496  * @i: buffer index.
1497  */
1498 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1499 				  struct stmmac_dma_conf *dma_conf,
1500 				  u32 queue, int i)
1501 {
1502 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1503 
1504 	if (tx_q->tx_skbuff_dma[i].buf &&
1505 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1506 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1507 			dma_unmap_page(priv->device,
1508 				       tx_q->tx_skbuff_dma[i].buf,
1509 				       tx_q->tx_skbuff_dma[i].len,
1510 				       DMA_TO_DEVICE);
1511 		else
1512 			dma_unmap_single(priv->device,
1513 					 tx_q->tx_skbuff_dma[i].buf,
1514 					 tx_q->tx_skbuff_dma[i].len,
1515 					 DMA_TO_DEVICE);
1516 	}
1517 
1518 	if (tx_q->xdpf[i] &&
1519 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1520 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1521 		xdp_return_frame(tx_q->xdpf[i]);
1522 		tx_q->xdpf[i] = NULL;
1523 	}
1524 
1525 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1526 		tx_q->xsk_frames_done++;
1527 
1528 	if (tx_q->tx_skbuff[i] &&
1529 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1530 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1531 		tx_q->tx_skbuff[i] = NULL;
1532 	}
1533 
1534 	tx_q->tx_skbuff_dma[i].buf = 0;
1535 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1536 }
1537 
1538 /**
1539  * dma_free_rx_skbufs - free RX dma buffers
1540  * @priv: private structure
1541  * @dma_conf: structure to take the dma data
1542  * @queue: RX queue index
1543  */
1544 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1545 			       struct stmmac_dma_conf *dma_conf,
1546 			       u32 queue)
1547 {
1548 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1549 	int i;
1550 
1551 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1552 		stmmac_free_rx_buffer(priv, rx_q, i);
1553 }
1554 
1555 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1556 				   struct stmmac_dma_conf *dma_conf,
1557 				   u32 queue, gfp_t flags)
1558 {
1559 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1560 	int i;
1561 
1562 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1563 		struct dma_desc *p;
1564 		int ret;
1565 
1566 		if (priv->extend_desc)
1567 			p = &((rx_q->dma_erx + i)->basic);
1568 		else
1569 			p = rx_q->dma_rx + i;
1570 
1571 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1572 					     queue);
1573 		if (ret)
1574 			return ret;
1575 
1576 		rx_q->buf_alloc_num++;
1577 	}
1578 
1579 	return 0;
1580 }
1581 
1582 /**
1583  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1584  * @priv: private structure
1585  * @dma_conf: structure to take the dma data
1586  * @queue: RX queue index
1587  */
1588 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1589 				struct stmmac_dma_conf *dma_conf,
1590 				u32 queue)
1591 {
1592 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1593 	int i;
1594 
1595 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1596 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1597 
1598 		if (!buf->xdp)
1599 			continue;
1600 
1601 		xsk_buff_free(buf->xdp);
1602 		buf->xdp = NULL;
1603 	}
1604 }
1605 
1606 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1607 				      struct stmmac_dma_conf *dma_conf,
1608 				      u32 queue)
1609 {
1610 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1611 	int i;
1612 
1613 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1614 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1615 	 * use this macro to make sure no size violations.
1616 	 */
1617 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1618 
1619 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1620 		struct stmmac_rx_buffer *buf;
1621 		dma_addr_t dma_addr;
1622 		struct dma_desc *p;
1623 
1624 		if (priv->extend_desc)
1625 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1626 		else
1627 			p = rx_q->dma_rx + i;
1628 
1629 		buf = &rx_q->buf_pool[i];
1630 
1631 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1632 		if (!buf->xdp)
1633 			return -ENOMEM;
1634 
1635 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1636 		stmmac_set_desc_addr(priv, p, dma_addr);
1637 		rx_q->buf_alloc_num++;
1638 	}
1639 
1640 	return 0;
1641 }
1642 
1643 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1644 {
1645 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1646 		return NULL;
1647 
1648 	return xsk_get_pool_from_qid(priv->dev, queue);
1649 }
1650 
1651 /**
1652  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1653  * @priv: driver private structure
1654  * @dma_conf: structure to take the dma data
1655  * @queue: RX queue index
1656  * @flags: gfp flag.
1657  * Description: this function initializes the DMA RX descriptors
1658  * and allocates the socket buffers. It supports the chained and ring
1659  * modes.
1660  */
1661 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1662 				    struct stmmac_dma_conf *dma_conf,
1663 				    u32 queue, gfp_t flags)
1664 {
1665 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1666 	int ret;
1667 
1668 	netif_dbg(priv, probe, priv->dev,
1669 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1670 		  (u32)rx_q->dma_rx_phy);
1671 
1672 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1673 
1674 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1675 
1676 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1677 
1678 	if (rx_q->xsk_pool) {
1679 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1680 						   MEM_TYPE_XSK_BUFF_POOL,
1681 						   NULL));
1682 		netdev_info(priv->dev,
1683 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1684 			    rx_q->queue_index);
1685 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1686 	} else {
1687 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1688 						   MEM_TYPE_PAGE_POOL,
1689 						   rx_q->page_pool));
1690 		netdev_info(priv->dev,
1691 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1692 			    rx_q->queue_index);
1693 	}
1694 
1695 	if (rx_q->xsk_pool) {
1696 		/* RX XDP ZC buffer pool may not be populated, e.g.
1697 		 * xdpsock TX-only.
1698 		 */
1699 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1700 	} else {
1701 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1702 		if (ret < 0)
1703 			return -ENOMEM;
1704 	}
1705 
1706 	/* Setup the chained descriptor addresses */
1707 	if (priv->mode == STMMAC_CHAIN_MODE) {
1708 		if (priv->extend_desc)
1709 			stmmac_mode_init(priv, rx_q->dma_erx,
1710 					 rx_q->dma_rx_phy,
1711 					 dma_conf->dma_rx_size, 1);
1712 		else
1713 			stmmac_mode_init(priv, rx_q->dma_rx,
1714 					 rx_q->dma_rx_phy,
1715 					 dma_conf->dma_rx_size, 0);
1716 	}
1717 
1718 	return 0;
1719 }
1720 
1721 static int init_dma_rx_desc_rings(struct net_device *dev,
1722 				  struct stmmac_dma_conf *dma_conf,
1723 				  gfp_t flags)
1724 {
1725 	struct stmmac_priv *priv = netdev_priv(dev);
1726 	u32 rx_count = priv->plat->rx_queues_to_use;
1727 	int queue;
1728 	int ret;
1729 
1730 	/* RX INITIALIZATION */
1731 	netif_dbg(priv, probe, priv->dev,
1732 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1733 
1734 	for (queue = 0; queue < rx_count; queue++) {
1735 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1736 		if (ret)
1737 			goto err_init_rx_buffers;
1738 	}
1739 
1740 	return 0;
1741 
1742 err_init_rx_buffers:
1743 	while (queue >= 0) {
1744 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1745 
1746 		if (rx_q->xsk_pool)
1747 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1748 		else
1749 			dma_free_rx_skbufs(priv, dma_conf, queue);
1750 
1751 		rx_q->buf_alloc_num = 0;
1752 		rx_q->xsk_pool = NULL;
1753 
1754 		queue--;
1755 	}
1756 
1757 	return ret;
1758 }
1759 
1760 /**
1761  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1762  * @priv: driver private structure
1763  * @dma_conf: structure to take the dma data
1764  * @queue: TX queue index
1765  * Description: this function initializes the DMA TX descriptors
1766  * and allocates the socket buffers. It supports the chained and ring
1767  * modes.
1768  */
1769 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1770 				    struct stmmac_dma_conf *dma_conf,
1771 				    u32 queue)
1772 {
1773 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1774 	int i;
1775 
1776 	netif_dbg(priv, probe, priv->dev,
1777 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1778 		  (u32)tx_q->dma_tx_phy);
1779 
1780 	/* Setup the chained descriptor addresses */
1781 	if (priv->mode == STMMAC_CHAIN_MODE) {
1782 		if (priv->extend_desc)
1783 			stmmac_mode_init(priv, tx_q->dma_etx,
1784 					 tx_q->dma_tx_phy,
1785 					 dma_conf->dma_tx_size, 1);
1786 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1787 			stmmac_mode_init(priv, tx_q->dma_tx,
1788 					 tx_q->dma_tx_phy,
1789 					 dma_conf->dma_tx_size, 0);
1790 	}
1791 
1792 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1793 
1794 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1795 		struct dma_desc *p;
1796 
1797 		if (priv->extend_desc)
1798 			p = &((tx_q->dma_etx + i)->basic);
1799 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1800 			p = &((tx_q->dma_entx + i)->basic);
1801 		else
1802 			p = tx_q->dma_tx + i;
1803 
1804 		stmmac_clear_desc(priv, p);
1805 
1806 		tx_q->tx_skbuff_dma[i].buf = 0;
1807 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1808 		tx_q->tx_skbuff_dma[i].len = 0;
1809 		tx_q->tx_skbuff_dma[i].last_segment = false;
1810 		tx_q->tx_skbuff[i] = NULL;
1811 	}
1812 
1813 	return 0;
1814 }
1815 
1816 static int init_dma_tx_desc_rings(struct net_device *dev,
1817 				  struct stmmac_dma_conf *dma_conf)
1818 {
1819 	struct stmmac_priv *priv = netdev_priv(dev);
1820 	u32 tx_queue_cnt;
1821 	u32 queue;
1822 
1823 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1824 
1825 	for (queue = 0; queue < tx_queue_cnt; queue++)
1826 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1827 
1828 	return 0;
1829 }
1830 
1831 /**
1832  * init_dma_desc_rings - init the RX/TX descriptor rings
1833  * @dev: net device structure
1834  * @dma_conf: structure to take the dma data
1835  * @flags: gfp flag.
1836  * Description: this function initializes the DMA RX/TX descriptors
1837  * and allocates the socket buffers. It supports the chained and ring
1838  * modes.
1839  */
1840 static int init_dma_desc_rings(struct net_device *dev,
1841 			       struct stmmac_dma_conf *dma_conf,
1842 			       gfp_t flags)
1843 {
1844 	struct stmmac_priv *priv = netdev_priv(dev);
1845 	int ret;
1846 
1847 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1848 	if (ret)
1849 		return ret;
1850 
1851 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1852 
1853 	stmmac_clear_descriptors(priv, dma_conf);
1854 
1855 	if (netif_msg_hw(priv))
1856 		stmmac_display_rings(priv, dma_conf);
1857 
1858 	return ret;
1859 }
1860 
1861 /**
1862  * dma_free_tx_skbufs - free TX dma buffers
1863  * @priv: private structure
1864  * @dma_conf: structure to take the dma data
1865  * @queue: TX queue index
1866  */
1867 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1868 			       struct stmmac_dma_conf *dma_conf,
1869 			       u32 queue)
1870 {
1871 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1872 	int i;
1873 
1874 	tx_q->xsk_frames_done = 0;
1875 
1876 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1877 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1878 
1879 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1880 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1881 		tx_q->xsk_frames_done = 0;
1882 		tx_q->xsk_pool = NULL;
1883 	}
1884 }
1885 
1886 /**
1887  * stmmac_free_tx_skbufs - free TX skb buffers
1888  * @priv: private structure
1889  */
1890 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1891 {
1892 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1893 	u32 queue;
1894 
1895 	for (queue = 0; queue < tx_queue_cnt; queue++)
1896 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1897 }
1898 
1899 /**
1900  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1901  * @priv: private structure
1902  * @dma_conf: structure to take the dma data
1903  * @queue: RX queue index
1904  */
1905 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1906 					 struct stmmac_dma_conf *dma_conf,
1907 					 u32 queue)
1908 {
1909 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1910 
1911 	/* Release the DMA RX socket buffers */
1912 	if (rx_q->xsk_pool)
1913 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1914 	else
1915 		dma_free_rx_skbufs(priv, dma_conf, queue);
1916 
1917 	rx_q->buf_alloc_num = 0;
1918 	rx_q->xsk_pool = NULL;
1919 
1920 	/* Free DMA regions of consistent memory previously allocated */
1921 	if (!priv->extend_desc)
1922 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1923 				  sizeof(struct dma_desc),
1924 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1925 	else
1926 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1927 				  sizeof(struct dma_extended_desc),
1928 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1929 
1930 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1931 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1932 
1933 	kfree(rx_q->buf_pool);
1934 	if (rx_q->page_pool)
1935 		page_pool_destroy(rx_q->page_pool);
1936 }
1937 
1938 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1939 				       struct stmmac_dma_conf *dma_conf)
1940 {
1941 	u32 rx_count = priv->plat->rx_queues_to_use;
1942 	u32 queue;
1943 
1944 	/* Free RX queue resources */
1945 	for (queue = 0; queue < rx_count; queue++)
1946 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1947 }
1948 
1949 /**
1950  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1951  * @priv: private structure
1952  * @dma_conf: structure to take the dma data
1953  * @queue: TX queue index
1954  */
1955 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1956 					 struct stmmac_dma_conf *dma_conf,
1957 					 u32 queue)
1958 {
1959 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1960 	size_t size;
1961 	void *addr;
1962 
1963 	/* Release the DMA TX socket buffers */
1964 	dma_free_tx_skbufs(priv, dma_conf, queue);
1965 
1966 	if (priv->extend_desc) {
1967 		size = sizeof(struct dma_extended_desc);
1968 		addr = tx_q->dma_etx;
1969 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1970 		size = sizeof(struct dma_edesc);
1971 		addr = tx_q->dma_entx;
1972 	} else {
1973 		size = sizeof(struct dma_desc);
1974 		addr = tx_q->dma_tx;
1975 	}
1976 
1977 	size *= dma_conf->dma_tx_size;
1978 
1979 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1980 
1981 	kfree(tx_q->tx_skbuff_dma);
1982 	kfree(tx_q->tx_skbuff);
1983 }
1984 
1985 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1986 				       struct stmmac_dma_conf *dma_conf)
1987 {
1988 	u32 tx_count = priv->plat->tx_queues_to_use;
1989 	u32 queue;
1990 
1991 	/* Free TX queue resources */
1992 	for (queue = 0; queue < tx_count; queue++)
1993 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1994 }
1995 
1996 /**
1997  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1998  * @priv: private structure
1999  * @dma_conf: structure to take the dma data
2000  * @queue: RX queue index
2001  * Description: according to which descriptor can be used (extend or basic)
2002  * this function allocates the resources for TX and RX paths. In case of
2003  * reception, for example, it pre-allocated the RX socket buffer in order to
2004  * allow zero-copy mechanism.
2005  */
2006 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2007 					 struct stmmac_dma_conf *dma_conf,
2008 					 u32 queue)
2009 {
2010 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2011 	struct stmmac_channel *ch = &priv->channel[queue];
2012 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2013 	struct page_pool_params pp_params = { 0 };
2014 	unsigned int num_pages;
2015 	unsigned int napi_id;
2016 	int ret;
2017 
2018 	rx_q->queue_index = queue;
2019 	rx_q->priv_data = priv;
2020 
2021 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2022 	pp_params.pool_size = dma_conf->dma_rx_size;
2023 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2024 	pp_params.order = ilog2(num_pages);
2025 	pp_params.nid = dev_to_node(priv->device);
2026 	pp_params.dev = priv->device;
2027 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2028 	pp_params.offset = stmmac_rx_offset(priv);
2029 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2030 
2031 	rx_q->page_pool = page_pool_create(&pp_params);
2032 	if (IS_ERR(rx_q->page_pool)) {
2033 		ret = PTR_ERR(rx_q->page_pool);
2034 		rx_q->page_pool = NULL;
2035 		return ret;
2036 	}
2037 
2038 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2039 				 sizeof(*rx_q->buf_pool),
2040 				 GFP_KERNEL);
2041 	if (!rx_q->buf_pool)
2042 		return -ENOMEM;
2043 
2044 	if (priv->extend_desc) {
2045 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2046 						   dma_conf->dma_rx_size *
2047 						   sizeof(struct dma_extended_desc),
2048 						   &rx_q->dma_rx_phy,
2049 						   GFP_KERNEL);
2050 		if (!rx_q->dma_erx)
2051 			return -ENOMEM;
2052 
2053 	} else {
2054 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2055 						  dma_conf->dma_rx_size *
2056 						  sizeof(struct dma_desc),
2057 						  &rx_q->dma_rx_phy,
2058 						  GFP_KERNEL);
2059 		if (!rx_q->dma_rx)
2060 			return -ENOMEM;
2061 	}
2062 
2063 	if (stmmac_xdp_is_enabled(priv) &&
2064 	    test_bit(queue, priv->af_xdp_zc_qps))
2065 		napi_id = ch->rxtx_napi.napi_id;
2066 	else
2067 		napi_id = ch->rx_napi.napi_id;
2068 
2069 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2070 			       rx_q->queue_index,
2071 			       napi_id);
2072 	if (ret) {
2073 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2074 		return -EINVAL;
2075 	}
2076 
2077 	return 0;
2078 }
2079 
2080 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2081 				       struct stmmac_dma_conf *dma_conf)
2082 {
2083 	u32 rx_count = priv->plat->rx_queues_to_use;
2084 	u32 queue;
2085 	int ret;
2086 
2087 	/* RX queues buffers and DMA */
2088 	for (queue = 0; queue < rx_count; queue++) {
2089 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2090 		if (ret)
2091 			goto err_dma;
2092 	}
2093 
2094 	return 0;
2095 
2096 err_dma:
2097 	free_dma_rx_desc_resources(priv, dma_conf);
2098 
2099 	return ret;
2100 }
2101 
2102 /**
2103  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2104  * @priv: private structure
2105  * @dma_conf: structure to take the dma data
2106  * @queue: TX queue index
2107  * Description: according to which descriptor can be used (extend or basic)
2108  * this function allocates the resources for TX and RX paths. In case of
2109  * reception, for example, it pre-allocated the RX socket buffer in order to
2110  * allow zero-copy mechanism.
2111  */
2112 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2113 					 struct stmmac_dma_conf *dma_conf,
2114 					 u32 queue)
2115 {
2116 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2117 	size_t size;
2118 	void *addr;
2119 
2120 	tx_q->queue_index = queue;
2121 	tx_q->priv_data = priv;
2122 
2123 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2124 				      sizeof(*tx_q->tx_skbuff_dma),
2125 				      GFP_KERNEL);
2126 	if (!tx_q->tx_skbuff_dma)
2127 		return -ENOMEM;
2128 
2129 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2130 				  sizeof(struct sk_buff *),
2131 				  GFP_KERNEL);
2132 	if (!tx_q->tx_skbuff)
2133 		return -ENOMEM;
2134 
2135 	if (priv->extend_desc)
2136 		size = sizeof(struct dma_extended_desc);
2137 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2138 		size = sizeof(struct dma_edesc);
2139 	else
2140 		size = sizeof(struct dma_desc);
2141 
2142 	size *= dma_conf->dma_tx_size;
2143 
2144 	addr = dma_alloc_coherent(priv->device, size,
2145 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2146 	if (!addr)
2147 		return -ENOMEM;
2148 
2149 	if (priv->extend_desc)
2150 		tx_q->dma_etx = addr;
2151 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2152 		tx_q->dma_entx = addr;
2153 	else
2154 		tx_q->dma_tx = addr;
2155 
2156 	return 0;
2157 }
2158 
2159 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2160 				       struct stmmac_dma_conf *dma_conf)
2161 {
2162 	u32 tx_count = priv->plat->tx_queues_to_use;
2163 	u32 queue;
2164 	int ret;
2165 
2166 	/* TX queues buffers and DMA */
2167 	for (queue = 0; queue < tx_count; queue++) {
2168 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2169 		if (ret)
2170 			goto err_dma;
2171 	}
2172 
2173 	return 0;
2174 
2175 err_dma:
2176 	free_dma_tx_desc_resources(priv, dma_conf);
2177 	return ret;
2178 }
2179 
2180 /**
2181  * alloc_dma_desc_resources - alloc TX/RX resources.
2182  * @priv: private structure
2183  * @dma_conf: structure to take the dma data
2184  * Description: according to which descriptor can be used (extend or basic)
2185  * this function allocates the resources for TX and RX paths. In case of
2186  * reception, for example, it pre-allocated the RX socket buffer in order to
2187  * allow zero-copy mechanism.
2188  */
2189 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2190 				    struct stmmac_dma_conf *dma_conf)
2191 {
2192 	/* RX Allocation */
2193 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2194 
2195 	if (ret)
2196 		return ret;
2197 
2198 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2199 
2200 	return ret;
2201 }
2202 
2203 /**
2204  * free_dma_desc_resources - free dma desc resources
2205  * @priv: private structure
2206  * @dma_conf: structure to take the dma data
2207  */
2208 static void free_dma_desc_resources(struct stmmac_priv *priv,
2209 				    struct stmmac_dma_conf *dma_conf)
2210 {
2211 	/* Release the DMA TX socket buffers */
2212 	free_dma_tx_desc_resources(priv, dma_conf);
2213 
2214 	/* Release the DMA RX socket buffers later
2215 	 * to ensure all pending XDP_TX buffers are returned.
2216 	 */
2217 	free_dma_rx_desc_resources(priv, dma_conf);
2218 }
2219 
2220 /**
2221  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2222  *  @priv: driver private structure
2223  *  Description: It is used for enabling the rx queues in the MAC
2224  */
2225 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2226 {
2227 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2228 	int queue;
2229 	u8 mode;
2230 
2231 	for (queue = 0; queue < rx_queues_count; queue++) {
2232 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2233 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2234 	}
2235 }
2236 
2237 /**
2238  * stmmac_start_rx_dma - start RX DMA channel
2239  * @priv: driver private structure
2240  * @chan: RX channel index
2241  * Description:
2242  * This starts a RX DMA channel
2243  */
2244 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2245 {
2246 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2247 	stmmac_start_rx(priv, priv->ioaddr, chan);
2248 }
2249 
2250 /**
2251  * stmmac_start_tx_dma - start TX DMA channel
2252  * @priv: driver private structure
2253  * @chan: TX channel index
2254  * Description:
2255  * This starts a TX DMA channel
2256  */
2257 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2258 {
2259 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2260 	stmmac_start_tx(priv, priv->ioaddr, chan);
2261 }
2262 
2263 /**
2264  * stmmac_stop_rx_dma - stop RX DMA channel
2265  * @priv: driver private structure
2266  * @chan: RX channel index
2267  * Description:
2268  * This stops a RX DMA channel
2269  */
2270 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2271 {
2272 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2273 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2274 }
2275 
2276 /**
2277  * stmmac_stop_tx_dma - stop TX DMA channel
2278  * @priv: driver private structure
2279  * @chan: TX channel index
2280  * Description:
2281  * This stops a TX DMA channel
2282  */
2283 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2284 {
2285 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2286 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2287 }
2288 
2289 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2290 {
2291 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2292 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2293 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2294 	u32 chan;
2295 
2296 	for (chan = 0; chan < dma_csr_ch; chan++) {
2297 		struct stmmac_channel *ch = &priv->channel[chan];
2298 		unsigned long flags;
2299 
2300 		spin_lock_irqsave(&ch->lock, flags);
2301 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2302 		spin_unlock_irqrestore(&ch->lock, flags);
2303 	}
2304 }
2305 
2306 /**
2307  * stmmac_start_all_dma - start all RX and TX DMA channels
2308  * @priv: driver private structure
2309  * Description:
2310  * This starts all the RX and TX DMA channels
2311  */
2312 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2313 {
2314 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2315 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2316 	u32 chan = 0;
2317 
2318 	for (chan = 0; chan < rx_channels_count; chan++)
2319 		stmmac_start_rx_dma(priv, chan);
2320 
2321 	for (chan = 0; chan < tx_channels_count; chan++)
2322 		stmmac_start_tx_dma(priv, chan);
2323 }
2324 
2325 /**
2326  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2327  * @priv: driver private structure
2328  * Description:
2329  * This stops the RX and TX DMA channels
2330  */
2331 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2332 {
2333 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2334 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2335 	u32 chan = 0;
2336 
2337 	for (chan = 0; chan < rx_channels_count; chan++)
2338 		stmmac_stop_rx_dma(priv, chan);
2339 
2340 	for (chan = 0; chan < tx_channels_count; chan++)
2341 		stmmac_stop_tx_dma(priv, chan);
2342 }
2343 
2344 /**
2345  *  stmmac_dma_operation_mode - HW DMA operation mode
2346  *  @priv: driver private structure
2347  *  Description: it is used for configuring the DMA operation mode register in
2348  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2349  */
2350 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2351 {
2352 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2353 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2354 	int rxfifosz = priv->plat->rx_fifo_size;
2355 	int txfifosz = priv->plat->tx_fifo_size;
2356 	u32 txmode = 0;
2357 	u32 rxmode = 0;
2358 	u32 chan = 0;
2359 	u8 qmode = 0;
2360 
2361 	if (rxfifosz == 0)
2362 		rxfifosz = priv->dma_cap.rx_fifo_size;
2363 	if (txfifosz == 0)
2364 		txfifosz = priv->dma_cap.tx_fifo_size;
2365 
2366 	/* Adjust for real per queue fifo size */
2367 	rxfifosz /= rx_channels_count;
2368 	txfifosz /= tx_channels_count;
2369 
2370 	if (priv->plat->force_thresh_dma_mode) {
2371 		txmode = tc;
2372 		rxmode = tc;
2373 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2374 		/*
2375 		 * In case of GMAC, SF mode can be enabled
2376 		 * to perform the TX COE in HW. This depends on:
2377 		 * 1) TX COE if actually supported
2378 		 * 2) There is no bugged Jumbo frame support
2379 		 *    that needs to not insert csum in the TDES.
2380 		 */
2381 		txmode = SF_DMA_MODE;
2382 		rxmode = SF_DMA_MODE;
2383 		priv->xstats.threshold = SF_DMA_MODE;
2384 	} else {
2385 		txmode = tc;
2386 		rxmode = SF_DMA_MODE;
2387 	}
2388 
2389 	/* configure all channels */
2390 	for (chan = 0; chan < rx_channels_count; chan++) {
2391 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2392 		u32 buf_size;
2393 
2394 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2395 
2396 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2397 				rxfifosz, qmode);
2398 
2399 		if (rx_q->xsk_pool) {
2400 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2401 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2402 					      buf_size,
2403 					      chan);
2404 		} else {
2405 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2406 					      priv->dma_conf.dma_buf_sz,
2407 					      chan);
2408 		}
2409 	}
2410 
2411 	for (chan = 0; chan < tx_channels_count; chan++) {
2412 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2413 
2414 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2415 				txfifosz, qmode);
2416 	}
2417 }
2418 
2419 static void stmmac_xsk_request_timestamp(void *_priv)
2420 {
2421 	struct stmmac_metadata_request *meta_req = _priv;
2422 
2423 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2424 	*meta_req->set_ic = true;
2425 }
2426 
2427 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2428 {
2429 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2430 	struct stmmac_priv *priv = tx_compl->priv;
2431 	struct dma_desc *desc = tx_compl->desc;
2432 	bool found = false;
2433 	u64 ns = 0;
2434 
2435 	if (!priv->hwts_tx_en)
2436 		return 0;
2437 
2438 	/* check tx tstamp status */
2439 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2440 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2441 		found = true;
2442 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2443 		found = true;
2444 	}
2445 
2446 	if (found) {
2447 		ns -= priv->plat->cdc_error_adj;
2448 		return ns_to_ktime(ns);
2449 	}
2450 
2451 	return 0;
2452 }
2453 
2454 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2455 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2456 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2457 };
2458 
2459 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2460 {
2461 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2462 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2463 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2464 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2465 	unsigned int entry = tx_q->cur_tx;
2466 	struct dma_desc *tx_desc = NULL;
2467 	struct xdp_desc xdp_desc;
2468 	bool work_done = true;
2469 	u32 tx_set_ic_bit = 0;
2470 
2471 	/* Avoids TX time-out as we are sharing with slow path */
2472 	txq_trans_cond_update(nq);
2473 
2474 	budget = min(budget, stmmac_tx_avail(priv, queue));
2475 
2476 	while (budget-- > 0) {
2477 		struct stmmac_metadata_request meta_req;
2478 		struct xsk_tx_metadata *meta = NULL;
2479 		dma_addr_t dma_addr;
2480 		bool set_ic;
2481 
2482 		/* We are sharing with slow path and stop XSK TX desc submission when
2483 		 * available TX ring is less than threshold.
2484 		 */
2485 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2486 		    !netif_carrier_ok(priv->dev)) {
2487 			work_done = false;
2488 			break;
2489 		}
2490 
2491 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2492 			break;
2493 
2494 		if (priv->plat->est && priv->plat->est->enable &&
2495 		    priv->plat->est->max_sdu[queue] &&
2496 		    xdp_desc.len > priv->plat->est->max_sdu[queue]) {
2497 			priv->xstats.max_sdu_txq_drop[queue]++;
2498 			continue;
2499 		}
2500 
2501 		if (likely(priv->extend_desc))
2502 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2503 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2504 			tx_desc = &tx_q->dma_entx[entry].basic;
2505 		else
2506 			tx_desc = tx_q->dma_tx + entry;
2507 
2508 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2509 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2510 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2511 
2512 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2513 
2514 		/* To return XDP buffer to XSK pool, we simple call
2515 		 * xsk_tx_completed(), so we don't need to fill up
2516 		 * 'buf' and 'xdpf'.
2517 		 */
2518 		tx_q->tx_skbuff_dma[entry].buf = 0;
2519 		tx_q->xdpf[entry] = NULL;
2520 
2521 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2522 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2523 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2524 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2525 
2526 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2527 
2528 		tx_q->tx_count_frames++;
2529 
2530 		if (!priv->tx_coal_frames[queue])
2531 			set_ic = false;
2532 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2533 			set_ic = true;
2534 		else
2535 			set_ic = false;
2536 
2537 		meta_req.priv = priv;
2538 		meta_req.tx_desc = tx_desc;
2539 		meta_req.set_ic = &set_ic;
2540 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2541 					&meta_req);
2542 		if (set_ic) {
2543 			tx_q->tx_count_frames = 0;
2544 			stmmac_set_tx_ic(priv, tx_desc);
2545 			tx_set_ic_bit++;
2546 		}
2547 
2548 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2549 				       true, priv->mode, true, true,
2550 				       xdp_desc.len);
2551 
2552 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2553 
2554 		xsk_tx_metadata_to_compl(meta,
2555 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2556 
2557 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2558 		entry = tx_q->cur_tx;
2559 	}
2560 	u64_stats_update_begin(&txq_stats->napi_syncp);
2561 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2562 	u64_stats_update_end(&txq_stats->napi_syncp);
2563 
2564 	if (tx_desc) {
2565 		stmmac_flush_tx_descriptors(priv, queue);
2566 		xsk_tx_release(pool);
2567 	}
2568 
2569 	/* Return true if all of the 3 conditions are met
2570 	 *  a) TX Budget is still available
2571 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2572 	 *     pending XSK TX for transmission)
2573 	 */
2574 	return !!budget && work_done;
2575 }
2576 
2577 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2578 {
2579 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2580 		tc += 64;
2581 
2582 		if (priv->plat->force_thresh_dma_mode)
2583 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2584 		else
2585 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2586 						      chan);
2587 
2588 		priv->xstats.threshold = tc;
2589 	}
2590 }
2591 
2592 /**
2593  * stmmac_tx_clean - to manage the transmission completion
2594  * @priv: driver private structure
2595  * @budget: napi budget limiting this functions packet handling
2596  * @queue: TX queue index
2597  * @pending_packets: signal to arm the TX coal timer
2598  * Description: it reclaims the transmit resources after transmission completes.
2599  * If some packets still needs to be handled, due to TX coalesce, set
2600  * pending_packets to true to make NAPI arm the TX coal timer.
2601  */
2602 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2603 			   bool *pending_packets)
2604 {
2605 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2606 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2607 	unsigned int bytes_compl = 0, pkts_compl = 0;
2608 	unsigned int entry, xmits = 0, count = 0;
2609 	u32 tx_packets = 0, tx_errors = 0;
2610 
2611 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2612 
2613 	tx_q->xsk_frames_done = 0;
2614 
2615 	entry = tx_q->dirty_tx;
2616 
2617 	/* Try to clean all TX complete frame in 1 shot */
2618 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2619 		struct xdp_frame *xdpf;
2620 		struct sk_buff *skb;
2621 		struct dma_desc *p;
2622 		int status;
2623 
2624 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2625 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2626 			xdpf = tx_q->xdpf[entry];
2627 			skb = NULL;
2628 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2629 			xdpf = NULL;
2630 			skb = tx_q->tx_skbuff[entry];
2631 		} else {
2632 			xdpf = NULL;
2633 			skb = NULL;
2634 		}
2635 
2636 		if (priv->extend_desc)
2637 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2638 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2639 			p = &tx_q->dma_entx[entry].basic;
2640 		else
2641 			p = tx_q->dma_tx + entry;
2642 
2643 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2644 		/* Check if the descriptor is owned by the DMA */
2645 		if (unlikely(status & tx_dma_own))
2646 			break;
2647 
2648 		count++;
2649 
2650 		/* Make sure descriptor fields are read after reading
2651 		 * the own bit.
2652 		 */
2653 		dma_rmb();
2654 
2655 		/* Just consider the last segment and ...*/
2656 		if (likely(!(status & tx_not_ls))) {
2657 			/* ... verify the status error condition */
2658 			if (unlikely(status & tx_err)) {
2659 				tx_errors++;
2660 				if (unlikely(status & tx_err_bump_tc))
2661 					stmmac_bump_dma_threshold(priv, queue);
2662 			} else {
2663 				tx_packets++;
2664 			}
2665 			if (skb) {
2666 				stmmac_get_tx_hwtstamp(priv, p, skb);
2667 			} else if (tx_q->xsk_pool &&
2668 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2669 				struct stmmac_xsk_tx_complete tx_compl = {
2670 					.priv = priv,
2671 					.desc = p,
2672 				};
2673 
2674 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2675 							 &stmmac_xsk_tx_metadata_ops,
2676 							 &tx_compl);
2677 			}
2678 		}
2679 
2680 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2681 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2682 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2683 				dma_unmap_page(priv->device,
2684 					       tx_q->tx_skbuff_dma[entry].buf,
2685 					       tx_q->tx_skbuff_dma[entry].len,
2686 					       DMA_TO_DEVICE);
2687 			else
2688 				dma_unmap_single(priv->device,
2689 						 tx_q->tx_skbuff_dma[entry].buf,
2690 						 tx_q->tx_skbuff_dma[entry].len,
2691 						 DMA_TO_DEVICE);
2692 			tx_q->tx_skbuff_dma[entry].buf = 0;
2693 			tx_q->tx_skbuff_dma[entry].len = 0;
2694 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2695 		}
2696 
2697 		stmmac_clean_desc3(priv, tx_q, p);
2698 
2699 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2700 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2701 
2702 		if (xdpf &&
2703 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2704 			xdp_return_frame_rx_napi(xdpf);
2705 			tx_q->xdpf[entry] = NULL;
2706 		}
2707 
2708 		if (xdpf &&
2709 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2710 			xdp_return_frame(xdpf);
2711 			tx_q->xdpf[entry] = NULL;
2712 		}
2713 
2714 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2715 			tx_q->xsk_frames_done++;
2716 
2717 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2718 			if (likely(skb)) {
2719 				pkts_compl++;
2720 				bytes_compl += skb->len;
2721 				dev_consume_skb_any(skb);
2722 				tx_q->tx_skbuff[entry] = NULL;
2723 			}
2724 		}
2725 
2726 		stmmac_release_tx_desc(priv, p, priv->mode);
2727 
2728 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2729 	}
2730 	tx_q->dirty_tx = entry;
2731 
2732 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2733 				  pkts_compl, bytes_compl);
2734 
2735 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2736 								queue))) &&
2737 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2738 
2739 		netif_dbg(priv, tx_done, priv->dev,
2740 			  "%s: restart transmit\n", __func__);
2741 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2742 	}
2743 
2744 	if (tx_q->xsk_pool) {
2745 		bool work_done;
2746 
2747 		if (tx_q->xsk_frames_done)
2748 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2749 
2750 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2751 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2752 
2753 		/* For XSK TX, we try to send as many as possible.
2754 		 * If XSK work done (XSK TX desc empty and budget still
2755 		 * available), return "budget - 1" to reenable TX IRQ.
2756 		 * Else, return "budget" to make NAPI continue polling.
2757 		 */
2758 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2759 					       STMMAC_XSK_TX_BUDGET_MAX);
2760 		if (work_done)
2761 			xmits = budget - 1;
2762 		else
2763 			xmits = budget;
2764 	}
2765 
2766 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2767 	    priv->eee_sw_timer_en) {
2768 		if (stmmac_enable_eee_mode(priv))
2769 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2770 	}
2771 
2772 	/* We still have pending packets, let's call for a new scheduling */
2773 	if (tx_q->dirty_tx != tx_q->cur_tx)
2774 		*pending_packets = true;
2775 
2776 	u64_stats_update_begin(&txq_stats->napi_syncp);
2777 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2778 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2779 	u64_stats_inc(&txq_stats->napi.tx_clean);
2780 	u64_stats_update_end(&txq_stats->napi_syncp);
2781 
2782 	priv->xstats.tx_errors += tx_errors;
2783 
2784 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2785 
2786 	/* Combine decisions from TX clean and XSK TX */
2787 	return max(count, xmits);
2788 }
2789 
2790 /**
2791  * stmmac_tx_err - to manage the tx error
2792  * @priv: driver private structure
2793  * @chan: channel index
2794  * Description: it cleans the descriptors and restarts the transmission
2795  * in case of transmission errors.
2796  */
2797 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2798 {
2799 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2800 
2801 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2802 
2803 	stmmac_stop_tx_dma(priv, chan);
2804 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2805 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2806 	stmmac_reset_tx_queue(priv, chan);
2807 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2808 			    tx_q->dma_tx_phy, chan);
2809 	stmmac_start_tx_dma(priv, chan);
2810 
2811 	priv->xstats.tx_errors++;
2812 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2813 }
2814 
2815 /**
2816  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2817  *  @priv: driver private structure
2818  *  @txmode: TX operating mode
2819  *  @rxmode: RX operating mode
2820  *  @chan: channel index
2821  *  Description: it is used for configuring of the DMA operation mode in
2822  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2823  *  mode.
2824  */
2825 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2826 					  u32 rxmode, u32 chan)
2827 {
2828 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2829 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2830 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2831 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2832 	int rxfifosz = priv->plat->rx_fifo_size;
2833 	int txfifosz = priv->plat->tx_fifo_size;
2834 
2835 	if (rxfifosz == 0)
2836 		rxfifosz = priv->dma_cap.rx_fifo_size;
2837 	if (txfifosz == 0)
2838 		txfifosz = priv->dma_cap.tx_fifo_size;
2839 
2840 	/* Adjust for real per queue fifo size */
2841 	rxfifosz /= rx_channels_count;
2842 	txfifosz /= tx_channels_count;
2843 
2844 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2845 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2846 }
2847 
2848 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2849 {
2850 	int ret;
2851 
2852 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2853 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2854 	if (ret && (ret != -EINVAL)) {
2855 		stmmac_global_err(priv);
2856 		return true;
2857 	}
2858 
2859 	return false;
2860 }
2861 
2862 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2863 {
2864 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2865 						 &priv->xstats, chan, dir);
2866 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2867 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2868 	struct stmmac_channel *ch = &priv->channel[chan];
2869 	struct napi_struct *rx_napi;
2870 	struct napi_struct *tx_napi;
2871 	unsigned long flags;
2872 
2873 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2874 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2875 
2876 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2877 		if (napi_schedule_prep(rx_napi)) {
2878 			spin_lock_irqsave(&ch->lock, flags);
2879 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2880 			spin_unlock_irqrestore(&ch->lock, flags);
2881 			__napi_schedule(rx_napi);
2882 		}
2883 	}
2884 
2885 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2886 		if (napi_schedule_prep(tx_napi)) {
2887 			spin_lock_irqsave(&ch->lock, flags);
2888 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2889 			spin_unlock_irqrestore(&ch->lock, flags);
2890 			__napi_schedule(tx_napi);
2891 		}
2892 	}
2893 
2894 	return status;
2895 }
2896 
2897 /**
2898  * stmmac_dma_interrupt - DMA ISR
2899  * @priv: driver private structure
2900  * Description: this is the DMA ISR. It is called by the main ISR.
2901  * It calls the dwmac dma routine and schedule poll method in case of some
2902  * work can be done.
2903  */
2904 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2905 {
2906 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2907 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2908 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2909 				tx_channel_count : rx_channel_count;
2910 	u32 chan;
2911 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2912 
2913 	/* Make sure we never check beyond our status buffer. */
2914 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2915 		channels_to_check = ARRAY_SIZE(status);
2916 
2917 	for (chan = 0; chan < channels_to_check; chan++)
2918 		status[chan] = stmmac_napi_check(priv, chan,
2919 						 DMA_DIR_RXTX);
2920 
2921 	for (chan = 0; chan < tx_channel_count; chan++) {
2922 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2923 			/* Try to bump up the dma threshold on this failure */
2924 			stmmac_bump_dma_threshold(priv, chan);
2925 		} else if (unlikely(status[chan] == tx_hard_error)) {
2926 			stmmac_tx_err(priv, chan);
2927 		}
2928 	}
2929 }
2930 
2931 /**
2932  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2933  * @priv: driver private structure
2934  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2935  */
2936 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2937 {
2938 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2939 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2940 
2941 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2942 
2943 	if (priv->dma_cap.rmon) {
2944 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2945 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2946 	} else
2947 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2948 }
2949 
2950 /**
2951  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2952  * @priv: driver private structure
2953  * Description:
2954  *  new GMAC chip generations have a new register to indicate the
2955  *  presence of the optional feature/functions.
2956  *  This can be also used to override the value passed through the
2957  *  platform and necessary for old MAC10/100 and GMAC chips.
2958  */
2959 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2960 {
2961 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2962 }
2963 
2964 /**
2965  * stmmac_check_ether_addr - check if the MAC addr is valid
2966  * @priv: driver private structure
2967  * Description:
2968  * it is to verify if the MAC address is valid, in case of failures it
2969  * generates a random MAC address
2970  */
2971 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2972 {
2973 	u8 addr[ETH_ALEN];
2974 
2975 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2976 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2977 		if (is_valid_ether_addr(addr))
2978 			eth_hw_addr_set(priv->dev, addr);
2979 		else
2980 			eth_hw_addr_random(priv->dev);
2981 		dev_info(priv->device, "device MAC address %pM\n",
2982 			 priv->dev->dev_addr);
2983 	}
2984 }
2985 
2986 /**
2987  * stmmac_init_dma_engine - DMA init.
2988  * @priv: driver private structure
2989  * Description:
2990  * It inits the DMA invoking the specific MAC/GMAC callback.
2991  * Some DMA parameters can be passed from the platform;
2992  * in case of these are not passed a default is kept for the MAC or GMAC.
2993  */
2994 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2995 {
2996 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2997 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2998 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2999 	struct stmmac_rx_queue *rx_q;
3000 	struct stmmac_tx_queue *tx_q;
3001 	u32 chan = 0;
3002 	int atds = 0;
3003 	int ret = 0;
3004 
3005 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3006 		dev_err(priv->device, "Invalid DMA configuration\n");
3007 		return -EINVAL;
3008 	}
3009 
3010 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3011 		atds = 1;
3012 
3013 	ret = stmmac_reset(priv, priv->ioaddr);
3014 	if (ret) {
3015 		dev_err(priv->device, "Failed to reset the dma\n");
3016 		return ret;
3017 	}
3018 
3019 	/* DMA Configuration */
3020 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
3021 
3022 	if (priv->plat->axi)
3023 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3024 
3025 	/* DMA CSR Channel configuration */
3026 	for (chan = 0; chan < dma_csr_ch; chan++) {
3027 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3028 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3029 	}
3030 
3031 	/* DMA RX Channel Configuration */
3032 	for (chan = 0; chan < rx_channels_count; chan++) {
3033 		rx_q = &priv->dma_conf.rx_queue[chan];
3034 
3035 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3036 				    rx_q->dma_rx_phy, chan);
3037 
3038 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3039 				     (rx_q->buf_alloc_num *
3040 				      sizeof(struct dma_desc));
3041 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3042 				       rx_q->rx_tail_addr, chan);
3043 	}
3044 
3045 	/* DMA TX Channel Configuration */
3046 	for (chan = 0; chan < tx_channels_count; chan++) {
3047 		tx_q = &priv->dma_conf.tx_queue[chan];
3048 
3049 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3050 				    tx_q->dma_tx_phy, chan);
3051 
3052 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3053 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3054 				       tx_q->tx_tail_addr, chan);
3055 	}
3056 
3057 	return ret;
3058 }
3059 
3060 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3061 {
3062 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3063 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3064 	struct stmmac_channel *ch;
3065 	struct napi_struct *napi;
3066 
3067 	if (!tx_coal_timer)
3068 		return;
3069 
3070 	ch = &priv->channel[tx_q->queue_index];
3071 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3072 
3073 	/* Arm timer only if napi is not already scheduled.
3074 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3075 	 * again in the next scheduled napi.
3076 	 */
3077 	if (unlikely(!napi_is_scheduled(napi)))
3078 		hrtimer_start(&tx_q->txtimer,
3079 			      STMMAC_COAL_TIMER(tx_coal_timer),
3080 			      HRTIMER_MODE_REL);
3081 	else
3082 		hrtimer_try_to_cancel(&tx_q->txtimer);
3083 }
3084 
3085 /**
3086  * stmmac_tx_timer - mitigation sw timer for tx.
3087  * @t: data pointer
3088  * Description:
3089  * This is the timer handler to directly invoke the stmmac_tx_clean.
3090  */
3091 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3092 {
3093 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3094 	struct stmmac_priv *priv = tx_q->priv_data;
3095 	struct stmmac_channel *ch;
3096 	struct napi_struct *napi;
3097 
3098 	ch = &priv->channel[tx_q->queue_index];
3099 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3100 
3101 	if (likely(napi_schedule_prep(napi))) {
3102 		unsigned long flags;
3103 
3104 		spin_lock_irqsave(&ch->lock, flags);
3105 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3106 		spin_unlock_irqrestore(&ch->lock, flags);
3107 		__napi_schedule(napi);
3108 	}
3109 
3110 	return HRTIMER_NORESTART;
3111 }
3112 
3113 /**
3114  * stmmac_init_coalesce - init mitigation options.
3115  * @priv: driver private structure
3116  * Description:
3117  * This inits the coalesce parameters: i.e. timer rate,
3118  * timer handler and default threshold used for enabling the
3119  * interrupt on completion bit.
3120  */
3121 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3122 {
3123 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3124 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3125 	u32 chan;
3126 
3127 	for (chan = 0; chan < tx_channel_count; chan++) {
3128 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3129 
3130 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3131 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3132 
3133 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3134 		tx_q->txtimer.function = stmmac_tx_timer;
3135 	}
3136 
3137 	for (chan = 0; chan < rx_channel_count; chan++)
3138 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3139 }
3140 
3141 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3142 {
3143 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3144 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3145 	u32 chan;
3146 
3147 	/* set TX ring length */
3148 	for (chan = 0; chan < tx_channels_count; chan++)
3149 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3150 				       (priv->dma_conf.dma_tx_size - 1), chan);
3151 
3152 	/* set RX ring length */
3153 	for (chan = 0; chan < rx_channels_count; chan++)
3154 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3155 				       (priv->dma_conf.dma_rx_size - 1), chan);
3156 }
3157 
3158 /**
3159  *  stmmac_set_tx_queue_weight - Set TX queue weight
3160  *  @priv: driver private structure
3161  *  Description: It is used for setting TX queues weight
3162  */
3163 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3164 {
3165 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3166 	u32 weight;
3167 	u32 queue;
3168 
3169 	for (queue = 0; queue < tx_queues_count; queue++) {
3170 		weight = priv->plat->tx_queues_cfg[queue].weight;
3171 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3172 	}
3173 }
3174 
3175 /**
3176  *  stmmac_configure_cbs - Configure CBS in TX queue
3177  *  @priv: driver private structure
3178  *  Description: It is used for configuring CBS in AVB TX queues
3179  */
3180 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3181 {
3182 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3183 	u32 mode_to_use;
3184 	u32 queue;
3185 
3186 	/* queue 0 is reserved for legacy traffic */
3187 	for (queue = 1; queue < tx_queues_count; queue++) {
3188 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3189 		if (mode_to_use == MTL_QUEUE_DCB)
3190 			continue;
3191 
3192 		stmmac_config_cbs(priv, priv->hw,
3193 				priv->plat->tx_queues_cfg[queue].send_slope,
3194 				priv->plat->tx_queues_cfg[queue].idle_slope,
3195 				priv->plat->tx_queues_cfg[queue].high_credit,
3196 				priv->plat->tx_queues_cfg[queue].low_credit,
3197 				queue);
3198 	}
3199 }
3200 
3201 /**
3202  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3203  *  @priv: driver private structure
3204  *  Description: It is used for mapping RX queues to RX dma channels
3205  */
3206 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3207 {
3208 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3209 	u32 queue;
3210 	u32 chan;
3211 
3212 	for (queue = 0; queue < rx_queues_count; queue++) {
3213 		chan = priv->plat->rx_queues_cfg[queue].chan;
3214 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3215 	}
3216 }
3217 
3218 /**
3219  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3220  *  @priv: driver private structure
3221  *  Description: It is used for configuring the RX Queue Priority
3222  */
3223 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3224 {
3225 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3226 	u32 queue;
3227 	u32 prio;
3228 
3229 	for (queue = 0; queue < rx_queues_count; queue++) {
3230 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3231 			continue;
3232 
3233 		prio = priv->plat->rx_queues_cfg[queue].prio;
3234 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3235 	}
3236 }
3237 
3238 /**
3239  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3240  *  @priv: driver private structure
3241  *  Description: It is used for configuring the TX Queue Priority
3242  */
3243 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3244 {
3245 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3246 	u32 queue;
3247 	u32 prio;
3248 
3249 	for (queue = 0; queue < tx_queues_count; queue++) {
3250 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3251 			continue;
3252 
3253 		prio = priv->plat->tx_queues_cfg[queue].prio;
3254 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3255 	}
3256 }
3257 
3258 /**
3259  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3260  *  @priv: driver private structure
3261  *  Description: It is used for configuring the RX queue routing
3262  */
3263 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3264 {
3265 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3266 	u32 queue;
3267 	u8 packet;
3268 
3269 	for (queue = 0; queue < rx_queues_count; queue++) {
3270 		/* no specific packet type routing specified for the queue */
3271 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3272 			continue;
3273 
3274 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3275 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3276 	}
3277 }
3278 
3279 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3280 {
3281 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3282 		priv->rss.enable = false;
3283 		return;
3284 	}
3285 
3286 	if (priv->dev->features & NETIF_F_RXHASH)
3287 		priv->rss.enable = true;
3288 	else
3289 		priv->rss.enable = false;
3290 
3291 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3292 			     priv->plat->rx_queues_to_use);
3293 }
3294 
3295 /**
3296  *  stmmac_mtl_configuration - Configure MTL
3297  *  @priv: driver private structure
3298  *  Description: It is used for configurring MTL
3299  */
3300 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3301 {
3302 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3303 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3304 
3305 	if (tx_queues_count > 1)
3306 		stmmac_set_tx_queue_weight(priv);
3307 
3308 	/* Configure MTL RX algorithms */
3309 	if (rx_queues_count > 1)
3310 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3311 				priv->plat->rx_sched_algorithm);
3312 
3313 	/* Configure MTL TX algorithms */
3314 	if (tx_queues_count > 1)
3315 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3316 				priv->plat->tx_sched_algorithm);
3317 
3318 	/* Configure CBS in AVB TX queues */
3319 	if (tx_queues_count > 1)
3320 		stmmac_configure_cbs(priv);
3321 
3322 	/* Map RX MTL to DMA channels */
3323 	stmmac_rx_queue_dma_chan_map(priv);
3324 
3325 	/* Enable MAC RX Queues */
3326 	stmmac_mac_enable_rx_queues(priv);
3327 
3328 	/* Set RX priorities */
3329 	if (rx_queues_count > 1)
3330 		stmmac_mac_config_rx_queues_prio(priv);
3331 
3332 	/* Set TX priorities */
3333 	if (tx_queues_count > 1)
3334 		stmmac_mac_config_tx_queues_prio(priv);
3335 
3336 	/* Set RX routing */
3337 	if (rx_queues_count > 1)
3338 		stmmac_mac_config_rx_queues_routing(priv);
3339 
3340 	/* Receive Side Scaling */
3341 	if (rx_queues_count > 1)
3342 		stmmac_mac_config_rss(priv);
3343 }
3344 
3345 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3346 {
3347 	if (priv->dma_cap.asp) {
3348 		netdev_info(priv->dev, "Enabling Safety Features\n");
3349 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3350 					  priv->plat->safety_feat_cfg);
3351 	} else {
3352 		netdev_info(priv->dev, "No Safety Features support found\n");
3353 	}
3354 }
3355 
3356 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3357 {
3358 	char *name;
3359 
3360 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3361 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3362 
3363 	name = priv->wq_name;
3364 	sprintf(name, "%s-fpe", priv->dev->name);
3365 
3366 	priv->fpe_wq = create_singlethread_workqueue(name);
3367 	if (!priv->fpe_wq) {
3368 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3369 
3370 		return -ENOMEM;
3371 	}
3372 	netdev_info(priv->dev, "FPE workqueue start");
3373 
3374 	return 0;
3375 }
3376 
3377 /**
3378  * stmmac_hw_setup - setup mac in a usable state.
3379  *  @dev : pointer to the device structure.
3380  *  @ptp_register: register PTP if set
3381  *  Description:
3382  *  this is the main function to setup the HW in a usable state because the
3383  *  dma engine is reset, the core registers are configured (e.g. AXI,
3384  *  Checksum features, timers). The DMA is ready to start receiving and
3385  *  transmitting.
3386  *  Return value:
3387  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3388  *  file on failure.
3389  */
3390 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3391 {
3392 	struct stmmac_priv *priv = netdev_priv(dev);
3393 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3394 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3395 	bool sph_en;
3396 	u32 chan;
3397 	int ret;
3398 
3399 	/* Make sure RX clock is enabled */
3400 	if (priv->hw->phylink_pcs)
3401 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3402 
3403 	/* DMA initialization and SW reset */
3404 	ret = stmmac_init_dma_engine(priv);
3405 	if (ret < 0) {
3406 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3407 			   __func__);
3408 		return ret;
3409 	}
3410 
3411 	/* Copy the MAC addr into the HW  */
3412 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3413 
3414 	/* PS and related bits will be programmed according to the speed */
3415 	if (priv->hw->pcs) {
3416 		int speed = priv->plat->mac_port_sel_speed;
3417 
3418 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3419 		    (speed == SPEED_1000)) {
3420 			priv->hw->ps = speed;
3421 		} else {
3422 			dev_warn(priv->device, "invalid port speed\n");
3423 			priv->hw->ps = 0;
3424 		}
3425 	}
3426 
3427 	/* Initialize the MAC Core */
3428 	stmmac_core_init(priv, priv->hw, dev);
3429 
3430 	/* Initialize MTL*/
3431 	stmmac_mtl_configuration(priv);
3432 
3433 	/* Initialize Safety Features */
3434 	stmmac_safety_feat_configuration(priv);
3435 
3436 	ret = stmmac_rx_ipc(priv, priv->hw);
3437 	if (!ret) {
3438 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3439 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3440 		priv->hw->rx_csum = 0;
3441 	}
3442 
3443 	/* Enable the MAC Rx/Tx */
3444 	stmmac_mac_set(priv, priv->ioaddr, true);
3445 
3446 	/* Set the HW DMA mode and the COE */
3447 	stmmac_dma_operation_mode(priv);
3448 
3449 	stmmac_mmc_setup(priv);
3450 
3451 	if (ptp_register) {
3452 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3453 		if (ret < 0)
3454 			netdev_warn(priv->dev,
3455 				    "failed to enable PTP reference clock: %pe\n",
3456 				    ERR_PTR(ret));
3457 	}
3458 
3459 	ret = stmmac_init_ptp(priv);
3460 	if (ret == -EOPNOTSUPP)
3461 		netdev_info(priv->dev, "PTP not supported by HW\n");
3462 	else if (ret)
3463 		netdev_warn(priv->dev, "PTP init failed\n");
3464 	else if (ptp_register)
3465 		stmmac_ptp_register(priv);
3466 
3467 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3468 
3469 	/* Convert the timer from msec to usec */
3470 	if (!priv->tx_lpi_timer)
3471 		priv->tx_lpi_timer = eee_timer * 1000;
3472 
3473 	if (priv->use_riwt) {
3474 		u32 queue;
3475 
3476 		for (queue = 0; queue < rx_cnt; queue++) {
3477 			if (!priv->rx_riwt[queue])
3478 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3479 
3480 			stmmac_rx_watchdog(priv, priv->ioaddr,
3481 					   priv->rx_riwt[queue], queue);
3482 		}
3483 	}
3484 
3485 	if (priv->hw->pcs)
3486 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3487 
3488 	/* set TX and RX rings length */
3489 	stmmac_set_rings_length(priv);
3490 
3491 	/* Enable TSO */
3492 	if (priv->tso) {
3493 		for (chan = 0; chan < tx_cnt; chan++) {
3494 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3495 
3496 			/* TSO and TBS cannot co-exist */
3497 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3498 				continue;
3499 
3500 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3501 		}
3502 	}
3503 
3504 	/* Enable Split Header */
3505 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3506 	for (chan = 0; chan < rx_cnt; chan++)
3507 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3508 
3509 
3510 	/* VLAN Tag Insertion */
3511 	if (priv->dma_cap.vlins)
3512 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3513 
3514 	/* TBS */
3515 	for (chan = 0; chan < tx_cnt; chan++) {
3516 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3517 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3518 
3519 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3520 	}
3521 
3522 	/* Configure real RX and TX queues */
3523 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3524 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3525 
3526 	/* Start the ball rolling... */
3527 	stmmac_start_all_dma(priv);
3528 
3529 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3530 
3531 	if (priv->dma_cap.fpesel) {
3532 		stmmac_fpe_start_wq(priv);
3533 
3534 		if (priv->plat->fpe_cfg->enable)
3535 			stmmac_fpe_handshake(priv, true);
3536 	}
3537 
3538 	return 0;
3539 }
3540 
3541 static void stmmac_hw_teardown(struct net_device *dev)
3542 {
3543 	struct stmmac_priv *priv = netdev_priv(dev);
3544 
3545 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3546 }
3547 
3548 static void stmmac_free_irq(struct net_device *dev,
3549 			    enum request_irq_err irq_err, int irq_idx)
3550 {
3551 	struct stmmac_priv *priv = netdev_priv(dev);
3552 	int j;
3553 
3554 	switch (irq_err) {
3555 	case REQ_IRQ_ERR_ALL:
3556 		irq_idx = priv->plat->tx_queues_to_use;
3557 		fallthrough;
3558 	case REQ_IRQ_ERR_TX:
3559 		for (j = irq_idx - 1; j >= 0; j--) {
3560 			if (priv->tx_irq[j] > 0) {
3561 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3562 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3563 			}
3564 		}
3565 		irq_idx = priv->plat->rx_queues_to_use;
3566 		fallthrough;
3567 	case REQ_IRQ_ERR_RX:
3568 		for (j = irq_idx - 1; j >= 0; j--) {
3569 			if (priv->rx_irq[j] > 0) {
3570 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3571 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3572 			}
3573 		}
3574 
3575 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3576 			free_irq(priv->sfty_ue_irq, dev);
3577 		fallthrough;
3578 	case REQ_IRQ_ERR_SFTY_UE:
3579 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3580 			free_irq(priv->sfty_ce_irq, dev);
3581 		fallthrough;
3582 	case REQ_IRQ_ERR_SFTY_CE:
3583 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3584 			free_irq(priv->lpi_irq, dev);
3585 		fallthrough;
3586 	case REQ_IRQ_ERR_LPI:
3587 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3588 			free_irq(priv->wol_irq, dev);
3589 		fallthrough;
3590 	case REQ_IRQ_ERR_SFTY:
3591 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3592 			free_irq(priv->sfty_irq, dev);
3593 		fallthrough;
3594 	case REQ_IRQ_ERR_WOL:
3595 		free_irq(dev->irq, dev);
3596 		fallthrough;
3597 	case REQ_IRQ_ERR_MAC:
3598 	case REQ_IRQ_ERR_NO:
3599 		/* If MAC IRQ request error, no more IRQ to free */
3600 		break;
3601 	}
3602 }
3603 
3604 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3605 {
3606 	struct stmmac_priv *priv = netdev_priv(dev);
3607 	enum request_irq_err irq_err;
3608 	cpumask_t cpu_mask;
3609 	int irq_idx = 0;
3610 	char *int_name;
3611 	int ret;
3612 	int i;
3613 
3614 	/* For common interrupt */
3615 	int_name = priv->int_name_mac;
3616 	sprintf(int_name, "%s:%s", dev->name, "mac");
3617 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3618 			  0, int_name, dev);
3619 	if (unlikely(ret < 0)) {
3620 		netdev_err(priv->dev,
3621 			   "%s: alloc mac MSI %d (error: %d)\n",
3622 			   __func__, dev->irq, ret);
3623 		irq_err = REQ_IRQ_ERR_MAC;
3624 		goto irq_error;
3625 	}
3626 
3627 	/* Request the Wake IRQ in case of another line
3628 	 * is used for WoL
3629 	 */
3630 	priv->wol_irq_disabled = true;
3631 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3632 		int_name = priv->int_name_wol;
3633 		sprintf(int_name, "%s:%s", dev->name, "wol");
3634 		ret = request_irq(priv->wol_irq,
3635 				  stmmac_mac_interrupt,
3636 				  0, int_name, dev);
3637 		if (unlikely(ret < 0)) {
3638 			netdev_err(priv->dev,
3639 				   "%s: alloc wol MSI %d (error: %d)\n",
3640 				   __func__, priv->wol_irq, ret);
3641 			irq_err = REQ_IRQ_ERR_WOL;
3642 			goto irq_error;
3643 		}
3644 	}
3645 
3646 	/* Request the LPI IRQ in case of another line
3647 	 * is used for LPI
3648 	 */
3649 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3650 		int_name = priv->int_name_lpi;
3651 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3652 		ret = request_irq(priv->lpi_irq,
3653 				  stmmac_mac_interrupt,
3654 				  0, int_name, dev);
3655 		if (unlikely(ret < 0)) {
3656 			netdev_err(priv->dev,
3657 				   "%s: alloc lpi MSI %d (error: %d)\n",
3658 				   __func__, priv->lpi_irq, ret);
3659 			irq_err = REQ_IRQ_ERR_LPI;
3660 			goto irq_error;
3661 		}
3662 	}
3663 
3664 	/* Request the common Safety Feature Correctible/Uncorrectible
3665 	 * Error line in case of another line is used
3666 	 */
3667 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3668 		int_name = priv->int_name_sfty;
3669 		sprintf(int_name, "%s:%s", dev->name, "safety");
3670 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3671 				  0, int_name, dev);
3672 		if (unlikely(ret < 0)) {
3673 			netdev_err(priv->dev,
3674 				   "%s: alloc sfty MSI %d (error: %d)\n",
3675 				   __func__, priv->sfty_irq, ret);
3676 			irq_err = REQ_IRQ_ERR_SFTY;
3677 			goto irq_error;
3678 		}
3679 	}
3680 
3681 	/* Request the Safety Feature Correctible Error line in
3682 	 * case of another line is used
3683 	 */
3684 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3685 		int_name = priv->int_name_sfty_ce;
3686 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3687 		ret = request_irq(priv->sfty_ce_irq,
3688 				  stmmac_safety_interrupt,
3689 				  0, int_name, dev);
3690 		if (unlikely(ret < 0)) {
3691 			netdev_err(priv->dev,
3692 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3693 				   __func__, priv->sfty_ce_irq, ret);
3694 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3695 			goto irq_error;
3696 		}
3697 	}
3698 
3699 	/* Request the Safety Feature Uncorrectible Error line in
3700 	 * case of another line is used
3701 	 */
3702 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3703 		int_name = priv->int_name_sfty_ue;
3704 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3705 		ret = request_irq(priv->sfty_ue_irq,
3706 				  stmmac_safety_interrupt,
3707 				  0, int_name, dev);
3708 		if (unlikely(ret < 0)) {
3709 			netdev_err(priv->dev,
3710 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3711 				   __func__, priv->sfty_ue_irq, ret);
3712 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3713 			goto irq_error;
3714 		}
3715 	}
3716 
3717 	/* Request Rx MSI irq */
3718 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3719 		if (i >= MTL_MAX_RX_QUEUES)
3720 			break;
3721 		if (priv->rx_irq[i] == 0)
3722 			continue;
3723 
3724 		int_name = priv->int_name_rx_irq[i];
3725 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3726 		ret = request_irq(priv->rx_irq[i],
3727 				  stmmac_msi_intr_rx,
3728 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3729 		if (unlikely(ret < 0)) {
3730 			netdev_err(priv->dev,
3731 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3732 				   __func__, i, priv->rx_irq[i], ret);
3733 			irq_err = REQ_IRQ_ERR_RX;
3734 			irq_idx = i;
3735 			goto irq_error;
3736 		}
3737 		cpumask_clear(&cpu_mask);
3738 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3739 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3740 	}
3741 
3742 	/* Request Tx MSI irq */
3743 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3744 		if (i >= MTL_MAX_TX_QUEUES)
3745 			break;
3746 		if (priv->tx_irq[i] == 0)
3747 			continue;
3748 
3749 		int_name = priv->int_name_tx_irq[i];
3750 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3751 		ret = request_irq(priv->tx_irq[i],
3752 				  stmmac_msi_intr_tx,
3753 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3754 		if (unlikely(ret < 0)) {
3755 			netdev_err(priv->dev,
3756 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3757 				   __func__, i, priv->tx_irq[i], ret);
3758 			irq_err = REQ_IRQ_ERR_TX;
3759 			irq_idx = i;
3760 			goto irq_error;
3761 		}
3762 		cpumask_clear(&cpu_mask);
3763 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3764 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3765 	}
3766 
3767 	return 0;
3768 
3769 irq_error:
3770 	stmmac_free_irq(dev, irq_err, irq_idx);
3771 	return ret;
3772 }
3773 
3774 static int stmmac_request_irq_single(struct net_device *dev)
3775 {
3776 	struct stmmac_priv *priv = netdev_priv(dev);
3777 	enum request_irq_err irq_err;
3778 	int ret;
3779 
3780 	ret = request_irq(dev->irq, stmmac_interrupt,
3781 			  IRQF_SHARED, dev->name, dev);
3782 	if (unlikely(ret < 0)) {
3783 		netdev_err(priv->dev,
3784 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3785 			   __func__, dev->irq, ret);
3786 		irq_err = REQ_IRQ_ERR_MAC;
3787 		goto irq_error;
3788 	}
3789 
3790 	/* Request the Wake IRQ in case of another line
3791 	 * is used for WoL
3792 	 */
3793 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3794 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3795 				  IRQF_SHARED, dev->name, dev);
3796 		if (unlikely(ret < 0)) {
3797 			netdev_err(priv->dev,
3798 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3799 				   __func__, priv->wol_irq, ret);
3800 			irq_err = REQ_IRQ_ERR_WOL;
3801 			goto irq_error;
3802 		}
3803 	}
3804 
3805 	/* Request the IRQ lines */
3806 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3807 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3808 				  IRQF_SHARED, dev->name, dev);
3809 		if (unlikely(ret < 0)) {
3810 			netdev_err(priv->dev,
3811 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3812 				   __func__, priv->lpi_irq, ret);
3813 			irq_err = REQ_IRQ_ERR_LPI;
3814 			goto irq_error;
3815 		}
3816 	}
3817 
3818 	/* Request the common Safety Feature Correctible/Uncorrectible
3819 	 * Error line in case of another line is used
3820 	 */
3821 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3822 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3823 				  IRQF_SHARED, dev->name, dev);
3824 		if (unlikely(ret < 0)) {
3825 			netdev_err(priv->dev,
3826 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3827 				   __func__, priv->sfty_irq, ret);
3828 			irq_err = REQ_IRQ_ERR_SFTY;
3829 			goto irq_error;
3830 		}
3831 	}
3832 
3833 	return 0;
3834 
3835 irq_error:
3836 	stmmac_free_irq(dev, irq_err, 0);
3837 	return ret;
3838 }
3839 
3840 static int stmmac_request_irq(struct net_device *dev)
3841 {
3842 	struct stmmac_priv *priv = netdev_priv(dev);
3843 	int ret;
3844 
3845 	/* Request the IRQ lines */
3846 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3847 		ret = stmmac_request_irq_multi_msi(dev);
3848 	else
3849 		ret = stmmac_request_irq_single(dev);
3850 
3851 	return ret;
3852 }
3853 
3854 /**
3855  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3856  *  @priv: driver private structure
3857  *  @mtu: MTU to setup the dma queue and buf with
3858  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3859  *  Allocate the Tx/Rx DMA queue and init them.
3860  *  Return value:
3861  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3862  */
3863 static struct stmmac_dma_conf *
3864 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3865 {
3866 	struct stmmac_dma_conf *dma_conf;
3867 	int chan, bfsize, ret;
3868 
3869 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3870 	if (!dma_conf) {
3871 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3872 			   __func__);
3873 		return ERR_PTR(-ENOMEM);
3874 	}
3875 
3876 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3877 	if (bfsize < 0)
3878 		bfsize = 0;
3879 
3880 	if (bfsize < BUF_SIZE_16KiB)
3881 		bfsize = stmmac_set_bfsize(mtu, 0);
3882 
3883 	dma_conf->dma_buf_sz = bfsize;
3884 	/* Chose the tx/rx size from the already defined one in the
3885 	 * priv struct. (if defined)
3886 	 */
3887 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3888 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3889 
3890 	if (!dma_conf->dma_tx_size)
3891 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3892 	if (!dma_conf->dma_rx_size)
3893 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3894 
3895 	/* Earlier check for TBS */
3896 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3897 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3898 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3899 
3900 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3901 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3902 	}
3903 
3904 	ret = alloc_dma_desc_resources(priv, dma_conf);
3905 	if (ret < 0) {
3906 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3907 			   __func__);
3908 		goto alloc_error;
3909 	}
3910 
3911 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3912 	if (ret < 0) {
3913 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3914 			   __func__);
3915 		goto init_error;
3916 	}
3917 
3918 	return dma_conf;
3919 
3920 init_error:
3921 	free_dma_desc_resources(priv, dma_conf);
3922 alloc_error:
3923 	kfree(dma_conf);
3924 	return ERR_PTR(ret);
3925 }
3926 
3927 /**
3928  *  __stmmac_open - open entry point of the driver
3929  *  @dev : pointer to the device structure.
3930  *  @dma_conf :  structure to take the dma data
3931  *  Description:
3932  *  This function is the open entry point of the driver.
3933  *  Return value:
3934  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3935  *  file on failure.
3936  */
3937 static int __stmmac_open(struct net_device *dev,
3938 			 struct stmmac_dma_conf *dma_conf)
3939 {
3940 	struct stmmac_priv *priv = netdev_priv(dev);
3941 	int mode = priv->plat->phy_interface;
3942 	u32 chan;
3943 	int ret;
3944 
3945 	ret = pm_runtime_resume_and_get(priv->device);
3946 	if (ret < 0)
3947 		return ret;
3948 
3949 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3950 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3951 	    (!priv->hw->xpcs ||
3952 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3953 		ret = stmmac_init_phy(dev);
3954 		if (ret) {
3955 			netdev_err(priv->dev,
3956 				   "%s: Cannot attach to PHY (error: %d)\n",
3957 				   __func__, ret);
3958 			goto init_phy_error;
3959 		}
3960 	}
3961 
3962 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3963 
3964 	buf_sz = dma_conf->dma_buf_sz;
3965 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3966 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3967 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3968 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3969 
3970 	stmmac_reset_queues_param(priv);
3971 
3972 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3973 	    priv->plat->serdes_powerup) {
3974 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3975 		if (ret < 0) {
3976 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3977 				   __func__);
3978 			goto init_error;
3979 		}
3980 	}
3981 
3982 	ret = stmmac_hw_setup(dev, true);
3983 	if (ret < 0) {
3984 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3985 		goto init_error;
3986 	}
3987 
3988 	stmmac_init_coalesce(priv);
3989 
3990 	phylink_start(priv->phylink);
3991 	/* We may have called phylink_speed_down before */
3992 	phylink_speed_up(priv->phylink);
3993 
3994 	ret = stmmac_request_irq(dev);
3995 	if (ret)
3996 		goto irq_error;
3997 
3998 	stmmac_enable_all_queues(priv);
3999 	netif_tx_start_all_queues(priv->dev);
4000 	stmmac_enable_all_dma_irq(priv);
4001 
4002 	return 0;
4003 
4004 irq_error:
4005 	phylink_stop(priv->phylink);
4006 
4007 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4008 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4009 
4010 	stmmac_hw_teardown(dev);
4011 init_error:
4012 	phylink_disconnect_phy(priv->phylink);
4013 init_phy_error:
4014 	pm_runtime_put(priv->device);
4015 	return ret;
4016 }
4017 
4018 static int stmmac_open(struct net_device *dev)
4019 {
4020 	struct stmmac_priv *priv = netdev_priv(dev);
4021 	struct stmmac_dma_conf *dma_conf;
4022 	int ret;
4023 
4024 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4025 	if (IS_ERR(dma_conf))
4026 		return PTR_ERR(dma_conf);
4027 
4028 	ret = __stmmac_open(dev, dma_conf);
4029 	if (ret)
4030 		free_dma_desc_resources(priv, dma_conf);
4031 
4032 	kfree(dma_conf);
4033 	return ret;
4034 }
4035 
4036 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
4037 {
4038 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
4039 
4040 	if (priv->fpe_wq) {
4041 		destroy_workqueue(priv->fpe_wq);
4042 		priv->fpe_wq = NULL;
4043 	}
4044 
4045 	netdev_info(priv->dev, "FPE workqueue stop");
4046 }
4047 
4048 /**
4049  *  stmmac_release - close entry point of the driver
4050  *  @dev : device pointer.
4051  *  Description:
4052  *  This is the stop entry point of the driver.
4053  */
4054 static int stmmac_release(struct net_device *dev)
4055 {
4056 	struct stmmac_priv *priv = netdev_priv(dev);
4057 	u32 chan;
4058 
4059 	if (device_may_wakeup(priv->device))
4060 		phylink_speed_down(priv->phylink, false);
4061 	/* Stop and disconnect the PHY */
4062 	phylink_stop(priv->phylink);
4063 	phylink_disconnect_phy(priv->phylink);
4064 
4065 	stmmac_disable_all_queues(priv);
4066 
4067 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4068 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4069 
4070 	netif_tx_disable(dev);
4071 
4072 	/* Free the IRQ lines */
4073 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4074 
4075 	if (priv->eee_enabled) {
4076 		priv->tx_path_in_lpi_mode = false;
4077 		del_timer_sync(&priv->eee_ctrl_timer);
4078 	}
4079 
4080 	/* Stop TX/RX DMA and clear the descriptors */
4081 	stmmac_stop_all_dma(priv);
4082 
4083 	/* Release and free the Rx/Tx resources */
4084 	free_dma_desc_resources(priv, &priv->dma_conf);
4085 
4086 	/* Disable the MAC Rx/Tx */
4087 	stmmac_mac_set(priv, priv->ioaddr, false);
4088 
4089 	/* Powerdown Serdes if there is */
4090 	if (priv->plat->serdes_powerdown)
4091 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4092 
4093 	netif_carrier_off(dev);
4094 
4095 	stmmac_release_ptp(priv);
4096 
4097 	pm_runtime_put(priv->device);
4098 
4099 	if (priv->dma_cap.fpesel)
4100 		stmmac_fpe_stop_wq(priv);
4101 
4102 	return 0;
4103 }
4104 
4105 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4106 			       struct stmmac_tx_queue *tx_q)
4107 {
4108 	u16 tag = 0x0, inner_tag = 0x0;
4109 	u32 inner_type = 0x0;
4110 	struct dma_desc *p;
4111 
4112 	if (!priv->dma_cap.vlins)
4113 		return false;
4114 	if (!skb_vlan_tag_present(skb))
4115 		return false;
4116 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4117 		inner_tag = skb_vlan_tag_get(skb);
4118 		inner_type = STMMAC_VLAN_INSERT;
4119 	}
4120 
4121 	tag = skb_vlan_tag_get(skb);
4122 
4123 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4124 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4125 	else
4126 		p = &tx_q->dma_tx[tx_q->cur_tx];
4127 
4128 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4129 		return false;
4130 
4131 	stmmac_set_tx_owner(priv, p);
4132 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4133 	return true;
4134 }
4135 
4136 /**
4137  *  stmmac_tso_allocator - close entry point of the driver
4138  *  @priv: driver private structure
4139  *  @des: buffer start address
4140  *  @total_len: total length to fill in descriptors
4141  *  @last_segment: condition for the last descriptor
4142  *  @queue: TX queue index
4143  *  Description:
4144  *  This function fills descriptor and request new descriptors according to
4145  *  buffer length to fill
4146  */
4147 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4148 				 int total_len, bool last_segment, u32 queue)
4149 {
4150 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4151 	struct dma_desc *desc;
4152 	u32 buff_size;
4153 	int tmp_len;
4154 
4155 	tmp_len = total_len;
4156 
4157 	while (tmp_len > 0) {
4158 		dma_addr_t curr_addr;
4159 
4160 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4161 						priv->dma_conf.dma_tx_size);
4162 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4163 
4164 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4165 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4166 		else
4167 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4168 
4169 		curr_addr = des + (total_len - tmp_len);
4170 		if (priv->dma_cap.addr64 <= 32)
4171 			desc->des0 = cpu_to_le32(curr_addr);
4172 		else
4173 			stmmac_set_desc_addr(priv, desc, curr_addr);
4174 
4175 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4176 			    TSO_MAX_BUFF_SIZE : tmp_len;
4177 
4178 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4179 				0, 1,
4180 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4181 				0, 0);
4182 
4183 		tmp_len -= TSO_MAX_BUFF_SIZE;
4184 	}
4185 }
4186 
4187 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4188 {
4189 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4190 	int desc_size;
4191 
4192 	if (likely(priv->extend_desc))
4193 		desc_size = sizeof(struct dma_extended_desc);
4194 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4195 		desc_size = sizeof(struct dma_edesc);
4196 	else
4197 		desc_size = sizeof(struct dma_desc);
4198 
4199 	/* The own bit must be the latest setting done when prepare the
4200 	 * descriptor and then barrier is needed to make sure that
4201 	 * all is coherent before granting the DMA engine.
4202 	 */
4203 	wmb();
4204 
4205 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4206 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4207 }
4208 
4209 /**
4210  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4211  *  @skb : the socket buffer
4212  *  @dev : device pointer
4213  *  Description: this is the transmit function that is called on TSO frames
4214  *  (support available on GMAC4 and newer chips).
4215  *  Diagram below show the ring programming in case of TSO frames:
4216  *
4217  *  First Descriptor
4218  *   --------
4219  *   | DES0 |---> buffer1 = L2/L3/L4 header
4220  *   | DES1 |---> TCP Payload (can continue on next descr...)
4221  *   | DES2 |---> buffer 1 and 2 len
4222  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4223  *   --------
4224  *	|
4225  *     ...
4226  *	|
4227  *   --------
4228  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4229  *   | DES1 | --|
4230  *   | DES2 | --> buffer 1 and 2 len
4231  *   | DES3 |
4232  *   --------
4233  *
4234  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4235  */
4236 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4237 {
4238 	struct dma_desc *desc, *first, *mss_desc = NULL;
4239 	struct stmmac_priv *priv = netdev_priv(dev);
4240 	int nfrags = skb_shinfo(skb)->nr_frags;
4241 	u32 queue = skb_get_queue_mapping(skb);
4242 	unsigned int first_entry, tx_packets;
4243 	struct stmmac_txq_stats *txq_stats;
4244 	int tmp_pay_len = 0, first_tx;
4245 	struct stmmac_tx_queue *tx_q;
4246 	bool has_vlan, set_ic;
4247 	u8 proto_hdr_len, hdr;
4248 	u32 pay_len, mss;
4249 	dma_addr_t des;
4250 	int i;
4251 
4252 	tx_q = &priv->dma_conf.tx_queue[queue];
4253 	txq_stats = &priv->xstats.txq_stats[queue];
4254 	first_tx = tx_q->cur_tx;
4255 
4256 	/* Compute header lengths */
4257 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4258 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4259 		hdr = sizeof(struct udphdr);
4260 	} else {
4261 		proto_hdr_len = skb_tcp_all_headers(skb);
4262 		hdr = tcp_hdrlen(skb);
4263 	}
4264 
4265 	/* Desc availability based on threshold should be enough safe */
4266 	if (unlikely(stmmac_tx_avail(priv, queue) <
4267 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4268 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4269 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4270 								queue));
4271 			/* This is a hard error, log it. */
4272 			netdev_err(priv->dev,
4273 				   "%s: Tx Ring full when queue awake\n",
4274 				   __func__);
4275 		}
4276 		return NETDEV_TX_BUSY;
4277 	}
4278 
4279 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4280 
4281 	mss = skb_shinfo(skb)->gso_size;
4282 
4283 	/* set new MSS value if needed */
4284 	if (mss != tx_q->mss) {
4285 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4286 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4287 		else
4288 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4289 
4290 		stmmac_set_mss(priv, mss_desc, mss);
4291 		tx_q->mss = mss;
4292 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4293 						priv->dma_conf.dma_tx_size);
4294 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4295 	}
4296 
4297 	if (netif_msg_tx_queued(priv)) {
4298 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4299 			__func__, hdr, proto_hdr_len, pay_len, mss);
4300 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4301 			skb->data_len);
4302 	}
4303 
4304 	/* Check if VLAN can be inserted by HW */
4305 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4306 
4307 	first_entry = tx_q->cur_tx;
4308 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4309 
4310 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4311 		desc = &tx_q->dma_entx[first_entry].basic;
4312 	else
4313 		desc = &tx_q->dma_tx[first_entry];
4314 	first = desc;
4315 
4316 	if (has_vlan)
4317 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4318 
4319 	/* first descriptor: fill Headers on Buf1 */
4320 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4321 			     DMA_TO_DEVICE);
4322 	if (dma_mapping_error(priv->device, des))
4323 		goto dma_map_err;
4324 
4325 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4326 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4327 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4328 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4329 
4330 	if (priv->dma_cap.addr64 <= 32) {
4331 		first->des0 = cpu_to_le32(des);
4332 
4333 		/* Fill start of payload in buff2 of first descriptor */
4334 		if (pay_len)
4335 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4336 
4337 		/* If needed take extra descriptors to fill the remaining payload */
4338 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4339 	} else {
4340 		stmmac_set_desc_addr(priv, first, des);
4341 		tmp_pay_len = pay_len;
4342 		des += proto_hdr_len;
4343 		pay_len = 0;
4344 	}
4345 
4346 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4347 
4348 	/* Prepare fragments */
4349 	for (i = 0; i < nfrags; i++) {
4350 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4351 
4352 		des = skb_frag_dma_map(priv->device, frag, 0,
4353 				       skb_frag_size(frag),
4354 				       DMA_TO_DEVICE);
4355 		if (dma_mapping_error(priv->device, des))
4356 			goto dma_map_err;
4357 
4358 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4359 				     (i == nfrags - 1), queue);
4360 
4361 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4362 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4363 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4364 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4365 	}
4366 
4367 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4368 
4369 	/* Only the last descriptor gets to point to the skb. */
4370 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4371 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4372 
4373 	/* Manage tx mitigation */
4374 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4375 	tx_q->tx_count_frames += tx_packets;
4376 
4377 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4378 		set_ic = true;
4379 	else if (!priv->tx_coal_frames[queue])
4380 		set_ic = false;
4381 	else if (tx_packets > priv->tx_coal_frames[queue])
4382 		set_ic = true;
4383 	else if ((tx_q->tx_count_frames %
4384 		  priv->tx_coal_frames[queue]) < tx_packets)
4385 		set_ic = true;
4386 	else
4387 		set_ic = false;
4388 
4389 	if (set_ic) {
4390 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4391 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4392 		else
4393 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4394 
4395 		tx_q->tx_count_frames = 0;
4396 		stmmac_set_tx_ic(priv, desc);
4397 	}
4398 
4399 	/* We've used all descriptors we need for this skb, however,
4400 	 * advance cur_tx so that it references a fresh descriptor.
4401 	 * ndo_start_xmit will fill this descriptor the next time it's
4402 	 * called and stmmac_tx_clean may clean up to this descriptor.
4403 	 */
4404 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4405 
4406 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4407 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4408 			  __func__);
4409 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4410 	}
4411 
4412 	u64_stats_update_begin(&txq_stats->q_syncp);
4413 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4414 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4415 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4416 	if (set_ic)
4417 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4418 	u64_stats_update_end(&txq_stats->q_syncp);
4419 
4420 	if (priv->sarc_type)
4421 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4422 
4423 	skb_tx_timestamp(skb);
4424 
4425 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4426 		     priv->hwts_tx_en)) {
4427 		/* declare that device is doing timestamping */
4428 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4429 		stmmac_enable_tx_timestamp(priv, first);
4430 	}
4431 
4432 	/* Complete the first descriptor before granting the DMA */
4433 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4434 			proto_hdr_len,
4435 			pay_len,
4436 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4437 			hdr / 4, (skb->len - proto_hdr_len));
4438 
4439 	/* If context desc is used to change MSS */
4440 	if (mss_desc) {
4441 		/* Make sure that first descriptor has been completely
4442 		 * written, including its own bit. This is because MSS is
4443 		 * actually before first descriptor, so we need to make
4444 		 * sure that MSS's own bit is the last thing written.
4445 		 */
4446 		dma_wmb();
4447 		stmmac_set_tx_owner(priv, mss_desc);
4448 	}
4449 
4450 	if (netif_msg_pktdata(priv)) {
4451 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4452 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4453 			tx_q->cur_tx, first, nfrags);
4454 		pr_info(">>> frame to be transmitted: ");
4455 		print_pkt(skb->data, skb_headlen(skb));
4456 	}
4457 
4458 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4459 
4460 	stmmac_flush_tx_descriptors(priv, queue);
4461 	stmmac_tx_timer_arm(priv, queue);
4462 
4463 	return NETDEV_TX_OK;
4464 
4465 dma_map_err:
4466 	dev_err(priv->device, "Tx dma map failed\n");
4467 	dev_kfree_skb(skb);
4468 	priv->xstats.tx_dropped++;
4469 	return NETDEV_TX_OK;
4470 }
4471 
4472 /**
4473  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4474  * @skb: socket buffer to check
4475  *
4476  * Check if a packet has an ethertype that will trigger the IP header checks
4477  * and IP/TCP checksum engine of the stmmac core.
4478  *
4479  * Return: true if the ethertype can trigger the checksum engine, false
4480  * otherwise
4481  */
4482 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4483 {
4484 	int depth = 0;
4485 	__be16 proto;
4486 
4487 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4488 				    &depth);
4489 
4490 	return (depth <= ETH_HLEN) &&
4491 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4492 }
4493 
4494 /**
4495  *  stmmac_xmit - Tx entry point of the driver
4496  *  @skb : the socket buffer
4497  *  @dev : device pointer
4498  *  Description : this is the tx entry point of the driver.
4499  *  It programs the chain or the ring and supports oversized frames
4500  *  and SG feature.
4501  */
4502 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4503 {
4504 	unsigned int first_entry, tx_packets, enh_desc;
4505 	struct stmmac_priv *priv = netdev_priv(dev);
4506 	unsigned int nopaged_len = skb_headlen(skb);
4507 	int i, csum_insertion = 0, is_jumbo = 0;
4508 	u32 queue = skb_get_queue_mapping(skb);
4509 	int nfrags = skb_shinfo(skb)->nr_frags;
4510 	int gso = skb_shinfo(skb)->gso_type;
4511 	struct stmmac_txq_stats *txq_stats;
4512 	struct dma_edesc *tbs_desc = NULL;
4513 	struct dma_desc *desc, *first;
4514 	struct stmmac_tx_queue *tx_q;
4515 	bool has_vlan, set_ic;
4516 	int entry, first_tx;
4517 	dma_addr_t des;
4518 
4519 	tx_q = &priv->dma_conf.tx_queue[queue];
4520 	txq_stats = &priv->xstats.txq_stats[queue];
4521 	first_tx = tx_q->cur_tx;
4522 
4523 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4524 		stmmac_disable_eee_mode(priv);
4525 
4526 	/* Manage oversized TCP frames for GMAC4 device */
4527 	if (skb_is_gso(skb) && priv->tso) {
4528 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4529 			return stmmac_tso_xmit(skb, dev);
4530 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4531 			return stmmac_tso_xmit(skb, dev);
4532 	}
4533 
4534 	if (priv->plat->est && priv->plat->est->enable &&
4535 	    priv->plat->est->max_sdu[queue] &&
4536 	    skb->len > priv->plat->est->max_sdu[queue]){
4537 		priv->xstats.max_sdu_txq_drop[queue]++;
4538 		goto max_sdu_err;
4539 	}
4540 
4541 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4542 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4543 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4544 								queue));
4545 			/* This is a hard error, log it. */
4546 			netdev_err(priv->dev,
4547 				   "%s: Tx Ring full when queue awake\n",
4548 				   __func__);
4549 		}
4550 		return NETDEV_TX_BUSY;
4551 	}
4552 
4553 	/* Check if VLAN can be inserted by HW */
4554 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4555 
4556 	entry = tx_q->cur_tx;
4557 	first_entry = entry;
4558 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4559 
4560 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4561 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4562 	 * queues. In that case, checksum offloading for those queues that don't
4563 	 * support tx coe needs to fallback to software checksum calculation.
4564 	 *
4565 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4566 	 * also have to be checksummed in software.
4567 	 */
4568 	if (csum_insertion &&
4569 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4570 	     !stmmac_has_ip_ethertype(skb))) {
4571 		if (unlikely(skb_checksum_help(skb)))
4572 			goto dma_map_err;
4573 		csum_insertion = !csum_insertion;
4574 	}
4575 
4576 	if (likely(priv->extend_desc))
4577 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4578 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4579 		desc = &tx_q->dma_entx[entry].basic;
4580 	else
4581 		desc = tx_q->dma_tx + entry;
4582 
4583 	first = desc;
4584 
4585 	if (has_vlan)
4586 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4587 
4588 	enh_desc = priv->plat->enh_desc;
4589 	/* To program the descriptors according to the size of the frame */
4590 	if (enh_desc)
4591 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4592 
4593 	if (unlikely(is_jumbo)) {
4594 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4595 		if (unlikely(entry < 0) && (entry != -EINVAL))
4596 			goto dma_map_err;
4597 	}
4598 
4599 	for (i = 0; i < nfrags; i++) {
4600 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4601 		int len = skb_frag_size(frag);
4602 		bool last_segment = (i == (nfrags - 1));
4603 
4604 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4605 		WARN_ON(tx_q->tx_skbuff[entry]);
4606 
4607 		if (likely(priv->extend_desc))
4608 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4609 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4610 			desc = &tx_q->dma_entx[entry].basic;
4611 		else
4612 			desc = tx_q->dma_tx + entry;
4613 
4614 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4615 				       DMA_TO_DEVICE);
4616 		if (dma_mapping_error(priv->device, des))
4617 			goto dma_map_err; /* should reuse desc w/o issues */
4618 
4619 		tx_q->tx_skbuff_dma[entry].buf = des;
4620 
4621 		stmmac_set_desc_addr(priv, desc, des);
4622 
4623 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4624 		tx_q->tx_skbuff_dma[entry].len = len;
4625 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4626 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4627 
4628 		/* Prepare the descriptor and set the own bit too */
4629 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4630 				priv->mode, 1, last_segment, skb->len);
4631 	}
4632 
4633 	/* Only the last descriptor gets to point to the skb. */
4634 	tx_q->tx_skbuff[entry] = skb;
4635 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4636 
4637 	/* According to the coalesce parameter the IC bit for the latest
4638 	 * segment is reset and the timer re-started to clean the tx status.
4639 	 * This approach takes care about the fragments: desc is the first
4640 	 * element in case of no SG.
4641 	 */
4642 	tx_packets = (entry + 1) - first_tx;
4643 	tx_q->tx_count_frames += tx_packets;
4644 
4645 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4646 		set_ic = true;
4647 	else if (!priv->tx_coal_frames[queue])
4648 		set_ic = false;
4649 	else if (tx_packets > priv->tx_coal_frames[queue])
4650 		set_ic = true;
4651 	else if ((tx_q->tx_count_frames %
4652 		  priv->tx_coal_frames[queue]) < tx_packets)
4653 		set_ic = true;
4654 	else
4655 		set_ic = false;
4656 
4657 	if (set_ic) {
4658 		if (likely(priv->extend_desc))
4659 			desc = &tx_q->dma_etx[entry].basic;
4660 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4661 			desc = &tx_q->dma_entx[entry].basic;
4662 		else
4663 			desc = &tx_q->dma_tx[entry];
4664 
4665 		tx_q->tx_count_frames = 0;
4666 		stmmac_set_tx_ic(priv, desc);
4667 	}
4668 
4669 	/* We've used all descriptors we need for this skb, however,
4670 	 * advance cur_tx so that it references a fresh descriptor.
4671 	 * ndo_start_xmit will fill this descriptor the next time it's
4672 	 * called and stmmac_tx_clean may clean up to this descriptor.
4673 	 */
4674 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4675 	tx_q->cur_tx = entry;
4676 
4677 	if (netif_msg_pktdata(priv)) {
4678 		netdev_dbg(priv->dev,
4679 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4680 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4681 			   entry, first, nfrags);
4682 
4683 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4684 		print_pkt(skb->data, skb->len);
4685 	}
4686 
4687 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4688 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4689 			  __func__);
4690 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4691 	}
4692 
4693 	u64_stats_update_begin(&txq_stats->q_syncp);
4694 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4695 	if (set_ic)
4696 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4697 	u64_stats_update_end(&txq_stats->q_syncp);
4698 
4699 	if (priv->sarc_type)
4700 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4701 
4702 	skb_tx_timestamp(skb);
4703 
4704 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4705 	 * problems because all the descriptors are actually ready to be
4706 	 * passed to the DMA engine.
4707 	 */
4708 	if (likely(!is_jumbo)) {
4709 		bool last_segment = (nfrags == 0);
4710 
4711 		des = dma_map_single(priv->device, skb->data,
4712 				     nopaged_len, DMA_TO_DEVICE);
4713 		if (dma_mapping_error(priv->device, des))
4714 			goto dma_map_err;
4715 
4716 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4717 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4718 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4719 
4720 		stmmac_set_desc_addr(priv, first, des);
4721 
4722 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4723 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4724 
4725 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4726 			     priv->hwts_tx_en)) {
4727 			/* declare that device is doing timestamping */
4728 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4729 			stmmac_enable_tx_timestamp(priv, first);
4730 		}
4731 
4732 		/* Prepare the first descriptor setting the OWN bit too */
4733 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4734 				csum_insertion, priv->mode, 0, last_segment,
4735 				skb->len);
4736 	}
4737 
4738 	if (tx_q->tbs & STMMAC_TBS_EN) {
4739 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4740 
4741 		tbs_desc = &tx_q->dma_entx[first_entry];
4742 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4743 	}
4744 
4745 	stmmac_set_tx_owner(priv, first);
4746 
4747 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4748 
4749 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4750 
4751 	stmmac_flush_tx_descriptors(priv, queue);
4752 	stmmac_tx_timer_arm(priv, queue);
4753 
4754 	return NETDEV_TX_OK;
4755 
4756 dma_map_err:
4757 	netdev_err(priv->dev, "Tx DMA map failed\n");
4758 max_sdu_err:
4759 	dev_kfree_skb(skb);
4760 	priv->xstats.tx_dropped++;
4761 	return NETDEV_TX_OK;
4762 }
4763 
4764 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4765 {
4766 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4767 	__be16 vlan_proto = veth->h_vlan_proto;
4768 	u16 vlanid;
4769 
4770 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4771 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4772 	    (vlan_proto == htons(ETH_P_8021AD) &&
4773 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4774 		/* pop the vlan tag */
4775 		vlanid = ntohs(veth->h_vlan_TCI);
4776 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4777 		skb_pull(skb, VLAN_HLEN);
4778 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4779 	}
4780 }
4781 
4782 /**
4783  * stmmac_rx_refill - refill used skb preallocated buffers
4784  * @priv: driver private structure
4785  * @queue: RX queue index
4786  * Description : this is to reallocate the skb for the reception process
4787  * that is based on zero-copy.
4788  */
4789 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4790 {
4791 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4792 	int dirty = stmmac_rx_dirty(priv, queue);
4793 	unsigned int entry = rx_q->dirty_rx;
4794 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4795 
4796 	if (priv->dma_cap.host_dma_width <= 32)
4797 		gfp |= GFP_DMA32;
4798 
4799 	while (dirty-- > 0) {
4800 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4801 		struct dma_desc *p;
4802 		bool use_rx_wd;
4803 
4804 		if (priv->extend_desc)
4805 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4806 		else
4807 			p = rx_q->dma_rx + entry;
4808 
4809 		if (!buf->page) {
4810 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4811 			if (!buf->page)
4812 				break;
4813 		}
4814 
4815 		if (priv->sph && !buf->sec_page) {
4816 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4817 			if (!buf->sec_page)
4818 				break;
4819 
4820 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4821 		}
4822 
4823 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4824 
4825 		stmmac_set_desc_addr(priv, p, buf->addr);
4826 		if (priv->sph)
4827 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4828 		else
4829 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4830 		stmmac_refill_desc3(priv, rx_q, p);
4831 
4832 		rx_q->rx_count_frames++;
4833 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4834 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4835 			rx_q->rx_count_frames = 0;
4836 
4837 		use_rx_wd = !priv->rx_coal_frames[queue];
4838 		use_rx_wd |= rx_q->rx_count_frames > 0;
4839 		if (!priv->use_riwt)
4840 			use_rx_wd = false;
4841 
4842 		dma_wmb();
4843 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4844 
4845 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4846 	}
4847 	rx_q->dirty_rx = entry;
4848 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4849 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4850 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4851 }
4852 
4853 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4854 				       struct dma_desc *p,
4855 				       int status, unsigned int len)
4856 {
4857 	unsigned int plen = 0, hlen = 0;
4858 	int coe = priv->hw->rx_csum;
4859 
4860 	/* Not first descriptor, buffer is always zero */
4861 	if (priv->sph && len)
4862 		return 0;
4863 
4864 	/* First descriptor, get split header length */
4865 	stmmac_get_rx_header_len(priv, p, &hlen);
4866 	if (priv->sph && hlen) {
4867 		priv->xstats.rx_split_hdr_pkt_n++;
4868 		return hlen;
4869 	}
4870 
4871 	/* First descriptor, not last descriptor and not split header */
4872 	if (status & rx_not_ls)
4873 		return priv->dma_conf.dma_buf_sz;
4874 
4875 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4876 
4877 	/* First descriptor and last descriptor and not split header */
4878 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4879 }
4880 
4881 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4882 				       struct dma_desc *p,
4883 				       int status, unsigned int len)
4884 {
4885 	int coe = priv->hw->rx_csum;
4886 	unsigned int plen = 0;
4887 
4888 	/* Not split header, buffer is not available */
4889 	if (!priv->sph)
4890 		return 0;
4891 
4892 	/* Not last descriptor */
4893 	if (status & rx_not_ls)
4894 		return priv->dma_conf.dma_buf_sz;
4895 
4896 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4897 
4898 	/* Last descriptor */
4899 	return plen - len;
4900 }
4901 
4902 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4903 				struct xdp_frame *xdpf, bool dma_map)
4904 {
4905 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4906 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4907 	unsigned int entry = tx_q->cur_tx;
4908 	struct dma_desc *tx_desc;
4909 	dma_addr_t dma_addr;
4910 	bool set_ic;
4911 
4912 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4913 		return STMMAC_XDP_CONSUMED;
4914 
4915 	if (priv->plat->est && priv->plat->est->enable &&
4916 	    priv->plat->est->max_sdu[queue] &&
4917 	    xdpf->len > priv->plat->est->max_sdu[queue]) {
4918 		priv->xstats.max_sdu_txq_drop[queue]++;
4919 		return STMMAC_XDP_CONSUMED;
4920 	}
4921 
4922 	if (likely(priv->extend_desc))
4923 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4924 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4925 		tx_desc = &tx_q->dma_entx[entry].basic;
4926 	else
4927 		tx_desc = tx_q->dma_tx + entry;
4928 
4929 	if (dma_map) {
4930 		dma_addr = dma_map_single(priv->device, xdpf->data,
4931 					  xdpf->len, DMA_TO_DEVICE);
4932 		if (dma_mapping_error(priv->device, dma_addr))
4933 			return STMMAC_XDP_CONSUMED;
4934 
4935 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4936 	} else {
4937 		struct page *page = virt_to_page(xdpf->data);
4938 
4939 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4940 			   xdpf->headroom;
4941 		dma_sync_single_for_device(priv->device, dma_addr,
4942 					   xdpf->len, DMA_BIDIRECTIONAL);
4943 
4944 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4945 	}
4946 
4947 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4948 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4949 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4950 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4951 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4952 
4953 	tx_q->xdpf[entry] = xdpf;
4954 
4955 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4956 
4957 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4958 			       true, priv->mode, true, true,
4959 			       xdpf->len);
4960 
4961 	tx_q->tx_count_frames++;
4962 
4963 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4964 		set_ic = true;
4965 	else
4966 		set_ic = false;
4967 
4968 	if (set_ic) {
4969 		tx_q->tx_count_frames = 0;
4970 		stmmac_set_tx_ic(priv, tx_desc);
4971 		u64_stats_update_begin(&txq_stats->q_syncp);
4972 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4973 		u64_stats_update_end(&txq_stats->q_syncp);
4974 	}
4975 
4976 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4977 
4978 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4979 	tx_q->cur_tx = entry;
4980 
4981 	return STMMAC_XDP_TX;
4982 }
4983 
4984 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4985 				   int cpu)
4986 {
4987 	int index = cpu;
4988 
4989 	if (unlikely(index < 0))
4990 		index = 0;
4991 
4992 	while (index >= priv->plat->tx_queues_to_use)
4993 		index -= priv->plat->tx_queues_to_use;
4994 
4995 	return index;
4996 }
4997 
4998 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4999 				struct xdp_buff *xdp)
5000 {
5001 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5002 	int cpu = smp_processor_id();
5003 	struct netdev_queue *nq;
5004 	int queue;
5005 	int res;
5006 
5007 	if (unlikely(!xdpf))
5008 		return STMMAC_XDP_CONSUMED;
5009 
5010 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5011 	nq = netdev_get_tx_queue(priv->dev, queue);
5012 
5013 	__netif_tx_lock(nq, cpu);
5014 	/* Avoids TX time-out as we are sharing with slow path */
5015 	txq_trans_cond_update(nq);
5016 
5017 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5018 	if (res == STMMAC_XDP_TX)
5019 		stmmac_flush_tx_descriptors(priv, queue);
5020 
5021 	__netif_tx_unlock(nq);
5022 
5023 	return res;
5024 }
5025 
5026 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5027 				 struct bpf_prog *prog,
5028 				 struct xdp_buff *xdp)
5029 {
5030 	u32 act;
5031 	int res;
5032 
5033 	act = bpf_prog_run_xdp(prog, xdp);
5034 	switch (act) {
5035 	case XDP_PASS:
5036 		res = STMMAC_XDP_PASS;
5037 		break;
5038 	case XDP_TX:
5039 		res = stmmac_xdp_xmit_back(priv, xdp);
5040 		break;
5041 	case XDP_REDIRECT:
5042 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5043 			res = STMMAC_XDP_CONSUMED;
5044 		else
5045 			res = STMMAC_XDP_REDIRECT;
5046 		break;
5047 	default:
5048 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5049 		fallthrough;
5050 	case XDP_ABORTED:
5051 		trace_xdp_exception(priv->dev, prog, act);
5052 		fallthrough;
5053 	case XDP_DROP:
5054 		res = STMMAC_XDP_CONSUMED;
5055 		break;
5056 	}
5057 
5058 	return res;
5059 }
5060 
5061 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5062 					   struct xdp_buff *xdp)
5063 {
5064 	struct bpf_prog *prog;
5065 	int res;
5066 
5067 	prog = READ_ONCE(priv->xdp_prog);
5068 	if (!prog) {
5069 		res = STMMAC_XDP_PASS;
5070 		goto out;
5071 	}
5072 
5073 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5074 out:
5075 	return ERR_PTR(-res);
5076 }
5077 
5078 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5079 				   int xdp_status)
5080 {
5081 	int cpu = smp_processor_id();
5082 	int queue;
5083 
5084 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5085 
5086 	if (xdp_status & STMMAC_XDP_TX)
5087 		stmmac_tx_timer_arm(priv, queue);
5088 
5089 	if (xdp_status & STMMAC_XDP_REDIRECT)
5090 		xdp_do_flush();
5091 }
5092 
5093 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5094 					       struct xdp_buff *xdp)
5095 {
5096 	unsigned int metasize = xdp->data - xdp->data_meta;
5097 	unsigned int datasize = xdp->data_end - xdp->data;
5098 	struct sk_buff *skb;
5099 
5100 	skb = napi_alloc_skb(&ch->rxtx_napi,
5101 			     xdp->data_end - xdp->data_hard_start);
5102 	if (unlikely(!skb))
5103 		return NULL;
5104 
5105 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5106 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5107 	if (metasize)
5108 		skb_metadata_set(skb, metasize);
5109 
5110 	return skb;
5111 }
5112 
5113 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5114 				   struct dma_desc *p, struct dma_desc *np,
5115 				   struct xdp_buff *xdp)
5116 {
5117 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5118 	struct stmmac_channel *ch = &priv->channel[queue];
5119 	unsigned int len = xdp->data_end - xdp->data;
5120 	enum pkt_hash_types hash_type;
5121 	int coe = priv->hw->rx_csum;
5122 	struct sk_buff *skb;
5123 	u32 hash;
5124 
5125 	skb = stmmac_construct_skb_zc(ch, xdp);
5126 	if (!skb) {
5127 		priv->xstats.rx_dropped++;
5128 		return;
5129 	}
5130 
5131 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5132 	if (priv->hw->hw_vlan_en)
5133 		/* MAC level stripping. */
5134 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5135 	else
5136 		/* Driver level stripping. */
5137 		stmmac_rx_vlan(priv->dev, skb);
5138 	skb->protocol = eth_type_trans(skb, priv->dev);
5139 
5140 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5141 		skb_checksum_none_assert(skb);
5142 	else
5143 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5144 
5145 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5146 		skb_set_hash(skb, hash, hash_type);
5147 
5148 	skb_record_rx_queue(skb, queue);
5149 	napi_gro_receive(&ch->rxtx_napi, skb);
5150 
5151 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5152 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5153 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5154 	u64_stats_update_end(&rxq_stats->napi_syncp);
5155 }
5156 
5157 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5158 {
5159 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5160 	unsigned int entry = rx_q->dirty_rx;
5161 	struct dma_desc *rx_desc = NULL;
5162 	bool ret = true;
5163 
5164 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5165 
5166 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5167 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5168 		dma_addr_t dma_addr;
5169 		bool use_rx_wd;
5170 
5171 		if (!buf->xdp) {
5172 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5173 			if (!buf->xdp) {
5174 				ret = false;
5175 				break;
5176 			}
5177 		}
5178 
5179 		if (priv->extend_desc)
5180 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5181 		else
5182 			rx_desc = rx_q->dma_rx + entry;
5183 
5184 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5185 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5186 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5187 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5188 
5189 		rx_q->rx_count_frames++;
5190 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5191 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5192 			rx_q->rx_count_frames = 0;
5193 
5194 		use_rx_wd = !priv->rx_coal_frames[queue];
5195 		use_rx_wd |= rx_q->rx_count_frames > 0;
5196 		if (!priv->use_riwt)
5197 			use_rx_wd = false;
5198 
5199 		dma_wmb();
5200 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5201 
5202 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5203 	}
5204 
5205 	if (rx_desc) {
5206 		rx_q->dirty_rx = entry;
5207 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5208 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5209 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5210 	}
5211 
5212 	return ret;
5213 }
5214 
5215 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5216 {
5217 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5218 	 * to represent incoming packet, whereas cb field in the same structure
5219 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5220 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5221 	 */
5222 	return (struct stmmac_xdp_buff *)xdp;
5223 }
5224 
5225 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5226 {
5227 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5228 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5229 	unsigned int count = 0, error = 0, len = 0;
5230 	int dirty = stmmac_rx_dirty(priv, queue);
5231 	unsigned int next_entry = rx_q->cur_rx;
5232 	u32 rx_errors = 0, rx_dropped = 0;
5233 	unsigned int desc_size;
5234 	struct bpf_prog *prog;
5235 	bool failure = false;
5236 	int xdp_status = 0;
5237 	int status = 0;
5238 
5239 	if (netif_msg_rx_status(priv)) {
5240 		void *rx_head;
5241 
5242 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5243 		if (priv->extend_desc) {
5244 			rx_head = (void *)rx_q->dma_erx;
5245 			desc_size = sizeof(struct dma_extended_desc);
5246 		} else {
5247 			rx_head = (void *)rx_q->dma_rx;
5248 			desc_size = sizeof(struct dma_desc);
5249 		}
5250 
5251 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5252 				    rx_q->dma_rx_phy, desc_size);
5253 	}
5254 	while (count < limit) {
5255 		struct stmmac_rx_buffer *buf;
5256 		struct stmmac_xdp_buff *ctx;
5257 		unsigned int buf1_len = 0;
5258 		struct dma_desc *np, *p;
5259 		int entry;
5260 		int res;
5261 
5262 		if (!count && rx_q->state_saved) {
5263 			error = rx_q->state.error;
5264 			len = rx_q->state.len;
5265 		} else {
5266 			rx_q->state_saved = false;
5267 			error = 0;
5268 			len = 0;
5269 		}
5270 
5271 		if (count >= limit)
5272 			break;
5273 
5274 read_again:
5275 		buf1_len = 0;
5276 		entry = next_entry;
5277 		buf = &rx_q->buf_pool[entry];
5278 
5279 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5280 			failure = failure ||
5281 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5282 			dirty = 0;
5283 		}
5284 
5285 		if (priv->extend_desc)
5286 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5287 		else
5288 			p = rx_q->dma_rx + entry;
5289 
5290 		/* read the status of the incoming frame */
5291 		status = stmmac_rx_status(priv, &priv->xstats, p);
5292 		/* check if managed by the DMA otherwise go ahead */
5293 		if (unlikely(status & dma_own))
5294 			break;
5295 
5296 		/* Prefetch the next RX descriptor */
5297 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5298 						priv->dma_conf.dma_rx_size);
5299 		next_entry = rx_q->cur_rx;
5300 
5301 		if (priv->extend_desc)
5302 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5303 		else
5304 			np = rx_q->dma_rx + next_entry;
5305 
5306 		prefetch(np);
5307 
5308 		/* Ensure a valid XSK buffer before proceed */
5309 		if (!buf->xdp)
5310 			break;
5311 
5312 		if (priv->extend_desc)
5313 			stmmac_rx_extended_status(priv, &priv->xstats,
5314 						  rx_q->dma_erx + entry);
5315 		if (unlikely(status == discard_frame)) {
5316 			xsk_buff_free(buf->xdp);
5317 			buf->xdp = NULL;
5318 			dirty++;
5319 			error = 1;
5320 			if (!priv->hwts_rx_en)
5321 				rx_errors++;
5322 		}
5323 
5324 		if (unlikely(error && (status & rx_not_ls)))
5325 			goto read_again;
5326 		if (unlikely(error)) {
5327 			count++;
5328 			continue;
5329 		}
5330 
5331 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5332 		if (likely(status & rx_not_ls)) {
5333 			xsk_buff_free(buf->xdp);
5334 			buf->xdp = NULL;
5335 			dirty++;
5336 			count++;
5337 			goto read_again;
5338 		}
5339 
5340 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5341 		ctx->priv = priv;
5342 		ctx->desc = p;
5343 		ctx->ndesc = np;
5344 
5345 		/* XDP ZC Frame only support primary buffers for now */
5346 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5347 		len += buf1_len;
5348 
5349 		/* ACS is disabled; strip manually. */
5350 		if (likely(!(status & rx_not_ls))) {
5351 			buf1_len -= ETH_FCS_LEN;
5352 			len -= ETH_FCS_LEN;
5353 		}
5354 
5355 		/* RX buffer is good and fit into a XSK pool buffer */
5356 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5357 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5358 
5359 		prog = READ_ONCE(priv->xdp_prog);
5360 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5361 
5362 		switch (res) {
5363 		case STMMAC_XDP_PASS:
5364 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5365 			xsk_buff_free(buf->xdp);
5366 			break;
5367 		case STMMAC_XDP_CONSUMED:
5368 			xsk_buff_free(buf->xdp);
5369 			rx_dropped++;
5370 			break;
5371 		case STMMAC_XDP_TX:
5372 		case STMMAC_XDP_REDIRECT:
5373 			xdp_status |= res;
5374 			break;
5375 		}
5376 
5377 		buf->xdp = NULL;
5378 		dirty++;
5379 		count++;
5380 	}
5381 
5382 	if (status & rx_not_ls) {
5383 		rx_q->state_saved = true;
5384 		rx_q->state.error = error;
5385 		rx_q->state.len = len;
5386 	}
5387 
5388 	stmmac_finalize_xdp_rx(priv, xdp_status);
5389 
5390 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5391 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5392 	u64_stats_update_end(&rxq_stats->napi_syncp);
5393 
5394 	priv->xstats.rx_dropped += rx_dropped;
5395 	priv->xstats.rx_errors += rx_errors;
5396 
5397 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5398 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5399 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5400 		else
5401 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5402 
5403 		return (int)count;
5404 	}
5405 
5406 	return failure ? limit : (int)count;
5407 }
5408 
5409 /**
5410  * stmmac_rx - manage the receive process
5411  * @priv: driver private structure
5412  * @limit: napi bugget
5413  * @queue: RX queue index.
5414  * Description :  this the function called by the napi poll method.
5415  * It gets all the frames inside the ring.
5416  */
5417 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5418 {
5419 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5420 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5421 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5422 	struct stmmac_channel *ch = &priv->channel[queue];
5423 	unsigned int count = 0, error = 0, len = 0;
5424 	int status = 0, coe = priv->hw->rx_csum;
5425 	unsigned int next_entry = rx_q->cur_rx;
5426 	enum dma_data_direction dma_dir;
5427 	unsigned int desc_size;
5428 	struct sk_buff *skb = NULL;
5429 	struct stmmac_xdp_buff ctx;
5430 	int xdp_status = 0;
5431 	int buf_sz;
5432 
5433 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5434 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5435 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5436 
5437 	if (netif_msg_rx_status(priv)) {
5438 		void *rx_head;
5439 
5440 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5441 		if (priv->extend_desc) {
5442 			rx_head = (void *)rx_q->dma_erx;
5443 			desc_size = sizeof(struct dma_extended_desc);
5444 		} else {
5445 			rx_head = (void *)rx_q->dma_rx;
5446 			desc_size = sizeof(struct dma_desc);
5447 		}
5448 
5449 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5450 				    rx_q->dma_rx_phy, desc_size);
5451 	}
5452 	while (count < limit) {
5453 		unsigned int buf1_len = 0, buf2_len = 0;
5454 		enum pkt_hash_types hash_type;
5455 		struct stmmac_rx_buffer *buf;
5456 		struct dma_desc *np, *p;
5457 		int entry;
5458 		u32 hash;
5459 
5460 		if (!count && rx_q->state_saved) {
5461 			skb = rx_q->state.skb;
5462 			error = rx_q->state.error;
5463 			len = rx_q->state.len;
5464 		} else {
5465 			rx_q->state_saved = false;
5466 			skb = NULL;
5467 			error = 0;
5468 			len = 0;
5469 		}
5470 
5471 read_again:
5472 		if (count >= limit)
5473 			break;
5474 
5475 		buf1_len = 0;
5476 		buf2_len = 0;
5477 		entry = next_entry;
5478 		buf = &rx_q->buf_pool[entry];
5479 
5480 		if (priv->extend_desc)
5481 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5482 		else
5483 			p = rx_q->dma_rx + entry;
5484 
5485 		/* read the status of the incoming frame */
5486 		status = stmmac_rx_status(priv, &priv->xstats, p);
5487 		/* check if managed by the DMA otherwise go ahead */
5488 		if (unlikely(status & dma_own))
5489 			break;
5490 
5491 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5492 						priv->dma_conf.dma_rx_size);
5493 		next_entry = rx_q->cur_rx;
5494 
5495 		if (priv->extend_desc)
5496 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5497 		else
5498 			np = rx_q->dma_rx + next_entry;
5499 
5500 		prefetch(np);
5501 
5502 		if (priv->extend_desc)
5503 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5504 		if (unlikely(status == discard_frame)) {
5505 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5506 			buf->page = NULL;
5507 			error = 1;
5508 			if (!priv->hwts_rx_en)
5509 				rx_errors++;
5510 		}
5511 
5512 		if (unlikely(error && (status & rx_not_ls)))
5513 			goto read_again;
5514 		if (unlikely(error)) {
5515 			dev_kfree_skb(skb);
5516 			skb = NULL;
5517 			count++;
5518 			continue;
5519 		}
5520 
5521 		/* Buffer is good. Go on. */
5522 
5523 		prefetch(page_address(buf->page) + buf->page_offset);
5524 		if (buf->sec_page)
5525 			prefetch(page_address(buf->sec_page));
5526 
5527 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5528 		len += buf1_len;
5529 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5530 		len += buf2_len;
5531 
5532 		/* ACS is disabled; strip manually. */
5533 		if (likely(!(status & rx_not_ls))) {
5534 			if (buf2_len) {
5535 				buf2_len -= ETH_FCS_LEN;
5536 				len -= ETH_FCS_LEN;
5537 			} else if (buf1_len) {
5538 				buf1_len -= ETH_FCS_LEN;
5539 				len -= ETH_FCS_LEN;
5540 			}
5541 		}
5542 
5543 		if (!skb) {
5544 			unsigned int pre_len, sync_len;
5545 
5546 			dma_sync_single_for_cpu(priv->device, buf->addr,
5547 						buf1_len, dma_dir);
5548 
5549 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5550 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5551 					 buf->page_offset, buf1_len, true);
5552 
5553 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5554 				  buf->page_offset;
5555 
5556 			ctx.priv = priv;
5557 			ctx.desc = p;
5558 			ctx.ndesc = np;
5559 
5560 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5561 			/* Due xdp_adjust_tail: DMA sync for_device
5562 			 * cover max len CPU touch
5563 			 */
5564 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5565 				   buf->page_offset;
5566 			sync_len = max(sync_len, pre_len);
5567 
5568 			/* For Not XDP_PASS verdict */
5569 			if (IS_ERR(skb)) {
5570 				unsigned int xdp_res = -PTR_ERR(skb);
5571 
5572 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5573 					page_pool_put_page(rx_q->page_pool,
5574 							   virt_to_head_page(ctx.xdp.data),
5575 							   sync_len, true);
5576 					buf->page = NULL;
5577 					rx_dropped++;
5578 
5579 					/* Clear skb as it was set as
5580 					 * status by XDP program.
5581 					 */
5582 					skb = NULL;
5583 
5584 					if (unlikely((status & rx_not_ls)))
5585 						goto read_again;
5586 
5587 					count++;
5588 					continue;
5589 				} else if (xdp_res & (STMMAC_XDP_TX |
5590 						      STMMAC_XDP_REDIRECT)) {
5591 					xdp_status |= xdp_res;
5592 					buf->page = NULL;
5593 					skb = NULL;
5594 					count++;
5595 					continue;
5596 				}
5597 			}
5598 		}
5599 
5600 		if (!skb) {
5601 			/* XDP program may expand or reduce tail */
5602 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5603 
5604 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5605 			if (!skb) {
5606 				rx_dropped++;
5607 				count++;
5608 				goto drain_data;
5609 			}
5610 
5611 			/* XDP program may adjust header */
5612 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5613 			skb_put(skb, buf1_len);
5614 
5615 			/* Data payload copied into SKB, page ready for recycle */
5616 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5617 			buf->page = NULL;
5618 		} else if (buf1_len) {
5619 			dma_sync_single_for_cpu(priv->device, buf->addr,
5620 						buf1_len, dma_dir);
5621 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5622 					buf->page, buf->page_offset, buf1_len,
5623 					priv->dma_conf.dma_buf_sz);
5624 
5625 			/* Data payload appended into SKB */
5626 			skb_mark_for_recycle(skb);
5627 			buf->page = NULL;
5628 		}
5629 
5630 		if (buf2_len) {
5631 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5632 						buf2_len, dma_dir);
5633 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5634 					buf->sec_page, 0, buf2_len,
5635 					priv->dma_conf.dma_buf_sz);
5636 
5637 			/* Data payload appended into SKB */
5638 			skb_mark_for_recycle(skb);
5639 			buf->sec_page = NULL;
5640 		}
5641 
5642 drain_data:
5643 		if (likely(status & rx_not_ls))
5644 			goto read_again;
5645 		if (!skb)
5646 			continue;
5647 
5648 		/* Got entire packet into SKB. Finish it. */
5649 
5650 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5651 
5652 		if (priv->hw->hw_vlan_en)
5653 			/* MAC level stripping. */
5654 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5655 		else
5656 			/* Driver level stripping. */
5657 			stmmac_rx_vlan(priv->dev, skb);
5658 
5659 		skb->protocol = eth_type_trans(skb, priv->dev);
5660 
5661 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5662 			skb_checksum_none_assert(skb);
5663 		else
5664 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5665 
5666 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5667 			skb_set_hash(skb, hash, hash_type);
5668 
5669 		skb_record_rx_queue(skb, queue);
5670 		napi_gro_receive(&ch->rx_napi, skb);
5671 		skb = NULL;
5672 
5673 		rx_packets++;
5674 		rx_bytes += len;
5675 		count++;
5676 	}
5677 
5678 	if (status & rx_not_ls || skb) {
5679 		rx_q->state_saved = true;
5680 		rx_q->state.skb = skb;
5681 		rx_q->state.error = error;
5682 		rx_q->state.len = len;
5683 	}
5684 
5685 	stmmac_finalize_xdp_rx(priv, xdp_status);
5686 
5687 	stmmac_rx_refill(priv, queue);
5688 
5689 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5690 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5691 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5692 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5693 	u64_stats_update_end(&rxq_stats->napi_syncp);
5694 
5695 	priv->xstats.rx_dropped += rx_dropped;
5696 	priv->xstats.rx_errors += rx_errors;
5697 
5698 	return count;
5699 }
5700 
5701 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5702 {
5703 	struct stmmac_channel *ch =
5704 		container_of(napi, struct stmmac_channel, rx_napi);
5705 	struct stmmac_priv *priv = ch->priv_data;
5706 	struct stmmac_rxq_stats *rxq_stats;
5707 	u32 chan = ch->index;
5708 	int work_done;
5709 
5710 	rxq_stats = &priv->xstats.rxq_stats[chan];
5711 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5712 	u64_stats_inc(&rxq_stats->napi.poll);
5713 	u64_stats_update_end(&rxq_stats->napi_syncp);
5714 
5715 	work_done = stmmac_rx(priv, budget, chan);
5716 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5717 		unsigned long flags;
5718 
5719 		spin_lock_irqsave(&ch->lock, flags);
5720 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5721 		spin_unlock_irqrestore(&ch->lock, flags);
5722 	}
5723 
5724 	return work_done;
5725 }
5726 
5727 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5728 {
5729 	struct stmmac_channel *ch =
5730 		container_of(napi, struct stmmac_channel, tx_napi);
5731 	struct stmmac_priv *priv = ch->priv_data;
5732 	struct stmmac_txq_stats *txq_stats;
5733 	bool pending_packets = false;
5734 	u32 chan = ch->index;
5735 	int work_done;
5736 
5737 	txq_stats = &priv->xstats.txq_stats[chan];
5738 	u64_stats_update_begin(&txq_stats->napi_syncp);
5739 	u64_stats_inc(&txq_stats->napi.poll);
5740 	u64_stats_update_end(&txq_stats->napi_syncp);
5741 
5742 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5743 	work_done = min(work_done, budget);
5744 
5745 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5746 		unsigned long flags;
5747 
5748 		spin_lock_irqsave(&ch->lock, flags);
5749 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5750 		spin_unlock_irqrestore(&ch->lock, flags);
5751 	}
5752 
5753 	/* TX still have packet to handle, check if we need to arm tx timer */
5754 	if (pending_packets)
5755 		stmmac_tx_timer_arm(priv, chan);
5756 
5757 	return work_done;
5758 }
5759 
5760 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5761 {
5762 	struct stmmac_channel *ch =
5763 		container_of(napi, struct stmmac_channel, rxtx_napi);
5764 	struct stmmac_priv *priv = ch->priv_data;
5765 	bool tx_pending_packets = false;
5766 	int rx_done, tx_done, rxtx_done;
5767 	struct stmmac_rxq_stats *rxq_stats;
5768 	struct stmmac_txq_stats *txq_stats;
5769 	u32 chan = ch->index;
5770 
5771 	rxq_stats = &priv->xstats.rxq_stats[chan];
5772 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5773 	u64_stats_inc(&rxq_stats->napi.poll);
5774 	u64_stats_update_end(&rxq_stats->napi_syncp);
5775 
5776 	txq_stats = &priv->xstats.txq_stats[chan];
5777 	u64_stats_update_begin(&txq_stats->napi_syncp);
5778 	u64_stats_inc(&txq_stats->napi.poll);
5779 	u64_stats_update_end(&txq_stats->napi_syncp);
5780 
5781 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5782 	tx_done = min(tx_done, budget);
5783 
5784 	rx_done = stmmac_rx_zc(priv, budget, chan);
5785 
5786 	rxtx_done = max(tx_done, rx_done);
5787 
5788 	/* If either TX or RX work is not complete, return budget
5789 	 * and keep pooling
5790 	 */
5791 	if (rxtx_done >= budget)
5792 		return budget;
5793 
5794 	/* all work done, exit the polling mode */
5795 	if (napi_complete_done(napi, rxtx_done)) {
5796 		unsigned long flags;
5797 
5798 		spin_lock_irqsave(&ch->lock, flags);
5799 		/* Both RX and TX work done are compelte,
5800 		 * so enable both RX & TX IRQs.
5801 		 */
5802 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5803 		spin_unlock_irqrestore(&ch->lock, flags);
5804 	}
5805 
5806 	/* TX still have packet to handle, check if we need to arm tx timer */
5807 	if (tx_pending_packets)
5808 		stmmac_tx_timer_arm(priv, chan);
5809 
5810 	return min(rxtx_done, budget - 1);
5811 }
5812 
5813 /**
5814  *  stmmac_tx_timeout
5815  *  @dev : Pointer to net device structure
5816  *  @txqueue: the index of the hanging transmit queue
5817  *  Description: this function is called when a packet transmission fails to
5818  *   complete within a reasonable time. The driver will mark the error in the
5819  *   netdev structure and arrange for the device to be reset to a sane state
5820  *   in order to transmit a new packet.
5821  */
5822 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5823 {
5824 	struct stmmac_priv *priv = netdev_priv(dev);
5825 
5826 	stmmac_global_err(priv);
5827 }
5828 
5829 /**
5830  *  stmmac_set_rx_mode - entry point for multicast addressing
5831  *  @dev : pointer to the device structure
5832  *  Description:
5833  *  This function is a driver entry point which gets called by the kernel
5834  *  whenever multicast addresses must be enabled/disabled.
5835  *  Return value:
5836  *  void.
5837  */
5838 static void stmmac_set_rx_mode(struct net_device *dev)
5839 {
5840 	struct stmmac_priv *priv = netdev_priv(dev);
5841 
5842 	stmmac_set_filter(priv, priv->hw, dev);
5843 }
5844 
5845 /**
5846  *  stmmac_change_mtu - entry point to change MTU size for the device.
5847  *  @dev : device pointer.
5848  *  @new_mtu : the new MTU size for the device.
5849  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5850  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5851  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5852  *  Return value:
5853  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5854  *  file on failure.
5855  */
5856 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5857 {
5858 	struct stmmac_priv *priv = netdev_priv(dev);
5859 	int txfifosz = priv->plat->tx_fifo_size;
5860 	struct stmmac_dma_conf *dma_conf;
5861 	const int mtu = new_mtu;
5862 	int ret;
5863 
5864 	if (txfifosz == 0)
5865 		txfifosz = priv->dma_cap.tx_fifo_size;
5866 
5867 	txfifosz /= priv->plat->tx_queues_to_use;
5868 
5869 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5870 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5871 		return -EINVAL;
5872 	}
5873 
5874 	new_mtu = STMMAC_ALIGN(new_mtu);
5875 
5876 	/* If condition true, FIFO is too small or MTU too large */
5877 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5878 		return -EINVAL;
5879 
5880 	if (netif_running(dev)) {
5881 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5882 		/* Try to allocate the new DMA conf with the new mtu */
5883 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5884 		if (IS_ERR(dma_conf)) {
5885 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5886 				   mtu);
5887 			return PTR_ERR(dma_conf);
5888 		}
5889 
5890 		stmmac_release(dev);
5891 
5892 		ret = __stmmac_open(dev, dma_conf);
5893 		if (ret) {
5894 			free_dma_desc_resources(priv, dma_conf);
5895 			kfree(dma_conf);
5896 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5897 			return ret;
5898 		}
5899 
5900 		kfree(dma_conf);
5901 
5902 		stmmac_set_rx_mode(dev);
5903 	}
5904 
5905 	dev->mtu = mtu;
5906 	netdev_update_features(dev);
5907 
5908 	return 0;
5909 }
5910 
5911 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5912 					     netdev_features_t features)
5913 {
5914 	struct stmmac_priv *priv = netdev_priv(dev);
5915 
5916 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5917 		features &= ~NETIF_F_RXCSUM;
5918 
5919 	if (!priv->plat->tx_coe)
5920 		features &= ~NETIF_F_CSUM_MASK;
5921 
5922 	/* Some GMAC devices have a bugged Jumbo frame support that
5923 	 * needs to have the Tx COE disabled for oversized frames
5924 	 * (due to limited buffer sizes). In this case we disable
5925 	 * the TX csum insertion in the TDES and not use SF.
5926 	 */
5927 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5928 		features &= ~NETIF_F_CSUM_MASK;
5929 
5930 	/* Disable tso if asked by ethtool */
5931 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5932 		if (features & NETIF_F_TSO)
5933 			priv->tso = true;
5934 		else
5935 			priv->tso = false;
5936 	}
5937 
5938 	return features;
5939 }
5940 
5941 static int stmmac_set_features(struct net_device *netdev,
5942 			       netdev_features_t features)
5943 {
5944 	struct stmmac_priv *priv = netdev_priv(netdev);
5945 
5946 	/* Keep the COE Type in case of csum is supporting */
5947 	if (features & NETIF_F_RXCSUM)
5948 		priv->hw->rx_csum = priv->plat->rx_coe;
5949 	else
5950 		priv->hw->rx_csum = 0;
5951 	/* No check needed because rx_coe has been set before and it will be
5952 	 * fixed in case of issue.
5953 	 */
5954 	stmmac_rx_ipc(priv, priv->hw);
5955 
5956 	if (priv->sph_cap) {
5957 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5958 		u32 chan;
5959 
5960 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5961 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5962 	}
5963 
5964 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5965 		priv->hw->hw_vlan_en = true;
5966 	else
5967 		priv->hw->hw_vlan_en = false;
5968 
5969 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5970 
5971 	return 0;
5972 }
5973 
5974 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5975 {
5976 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5977 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5978 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5979 	bool *hs_enable = &fpe_cfg->hs_enable;
5980 
5981 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5982 		return;
5983 
5984 	/* If LP has sent verify mPacket, LP is FPE capable */
5985 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5986 		if (*lp_state < FPE_STATE_CAPABLE)
5987 			*lp_state = FPE_STATE_CAPABLE;
5988 
5989 		/* If user has requested FPE enable, quickly response */
5990 		if (*hs_enable)
5991 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5992 						fpe_cfg,
5993 						MPACKET_RESPONSE);
5994 	}
5995 
5996 	/* If Local has sent verify mPacket, Local is FPE capable */
5997 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5998 		if (*lo_state < FPE_STATE_CAPABLE)
5999 			*lo_state = FPE_STATE_CAPABLE;
6000 	}
6001 
6002 	/* If LP has sent response mPacket, LP is entering FPE ON */
6003 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
6004 		*lp_state = FPE_STATE_ENTERING_ON;
6005 
6006 	/* If Local has sent response mPacket, Local is entering FPE ON */
6007 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
6008 		*lo_state = FPE_STATE_ENTERING_ON;
6009 
6010 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
6011 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
6012 	    priv->fpe_wq) {
6013 		queue_work(priv->fpe_wq, &priv->fpe_task);
6014 	}
6015 }
6016 
6017 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6018 {
6019 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6020 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6021 	u32 queues_count;
6022 	u32 queue;
6023 	bool xmac;
6024 
6025 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6026 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6027 
6028 	if (priv->irq_wake)
6029 		pm_wakeup_event(priv->device, 0);
6030 
6031 	if (priv->dma_cap.estsel)
6032 		stmmac_est_irq_status(priv, priv, priv->dev,
6033 				      &priv->xstats, tx_cnt);
6034 
6035 	if (priv->dma_cap.fpesel) {
6036 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
6037 						   priv->dev);
6038 
6039 		stmmac_fpe_event_status(priv, status);
6040 	}
6041 
6042 	/* To handle GMAC own interrupts */
6043 	if ((priv->plat->has_gmac) || xmac) {
6044 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6045 
6046 		if (unlikely(status)) {
6047 			/* For LPI we need to save the tx status */
6048 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6049 				priv->tx_path_in_lpi_mode = true;
6050 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6051 				priv->tx_path_in_lpi_mode = false;
6052 		}
6053 
6054 		for (queue = 0; queue < queues_count; queue++)
6055 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6056 
6057 		/* PCS link status */
6058 		if (priv->hw->pcs &&
6059 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6060 			if (priv->xstats.pcs_link)
6061 				netif_carrier_on(priv->dev);
6062 			else
6063 				netif_carrier_off(priv->dev);
6064 		}
6065 
6066 		stmmac_timestamp_interrupt(priv, priv);
6067 	}
6068 }
6069 
6070 /**
6071  *  stmmac_interrupt - main ISR
6072  *  @irq: interrupt number.
6073  *  @dev_id: to pass the net device pointer.
6074  *  Description: this is the main driver interrupt service routine.
6075  *  It can call:
6076  *  o DMA service routine (to manage incoming frame reception and transmission
6077  *    status)
6078  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6079  *    interrupts.
6080  */
6081 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6082 {
6083 	struct net_device *dev = (struct net_device *)dev_id;
6084 	struct stmmac_priv *priv = netdev_priv(dev);
6085 
6086 	/* Check if adapter is up */
6087 	if (test_bit(STMMAC_DOWN, &priv->state))
6088 		return IRQ_HANDLED;
6089 
6090 	/* Check ASP error if it isn't delivered via an individual IRQ */
6091 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6092 		return IRQ_HANDLED;
6093 
6094 	/* To handle Common interrupts */
6095 	stmmac_common_interrupt(priv);
6096 
6097 	/* To handle DMA interrupts */
6098 	stmmac_dma_interrupt(priv);
6099 
6100 	return IRQ_HANDLED;
6101 }
6102 
6103 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6104 {
6105 	struct net_device *dev = (struct net_device *)dev_id;
6106 	struct stmmac_priv *priv = netdev_priv(dev);
6107 
6108 	/* Check if adapter is up */
6109 	if (test_bit(STMMAC_DOWN, &priv->state))
6110 		return IRQ_HANDLED;
6111 
6112 	/* To handle Common interrupts */
6113 	stmmac_common_interrupt(priv);
6114 
6115 	return IRQ_HANDLED;
6116 }
6117 
6118 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6119 {
6120 	struct net_device *dev = (struct net_device *)dev_id;
6121 	struct stmmac_priv *priv = netdev_priv(dev);
6122 
6123 	/* Check if adapter is up */
6124 	if (test_bit(STMMAC_DOWN, &priv->state))
6125 		return IRQ_HANDLED;
6126 
6127 	/* Check if a fatal error happened */
6128 	stmmac_safety_feat_interrupt(priv);
6129 
6130 	return IRQ_HANDLED;
6131 }
6132 
6133 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6134 {
6135 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6136 	struct stmmac_dma_conf *dma_conf;
6137 	int chan = tx_q->queue_index;
6138 	struct stmmac_priv *priv;
6139 	int status;
6140 
6141 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6142 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6143 
6144 	/* Check if adapter is up */
6145 	if (test_bit(STMMAC_DOWN, &priv->state))
6146 		return IRQ_HANDLED;
6147 
6148 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6149 
6150 	if (unlikely(status & tx_hard_error_bump_tc)) {
6151 		/* Try to bump up the dma threshold on this failure */
6152 		stmmac_bump_dma_threshold(priv, chan);
6153 	} else if (unlikely(status == tx_hard_error)) {
6154 		stmmac_tx_err(priv, chan);
6155 	}
6156 
6157 	return IRQ_HANDLED;
6158 }
6159 
6160 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6161 {
6162 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6163 	struct stmmac_dma_conf *dma_conf;
6164 	int chan = rx_q->queue_index;
6165 	struct stmmac_priv *priv;
6166 
6167 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6168 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6169 
6170 	/* Check if adapter is up */
6171 	if (test_bit(STMMAC_DOWN, &priv->state))
6172 		return IRQ_HANDLED;
6173 
6174 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6175 
6176 	return IRQ_HANDLED;
6177 }
6178 
6179 /**
6180  *  stmmac_ioctl - Entry point for the Ioctl
6181  *  @dev: Device pointer.
6182  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6183  *  a proprietary structure used to pass information to the driver.
6184  *  @cmd: IOCTL command
6185  *  Description:
6186  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6187  */
6188 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6189 {
6190 	struct stmmac_priv *priv = netdev_priv (dev);
6191 	int ret = -EOPNOTSUPP;
6192 
6193 	if (!netif_running(dev))
6194 		return -EINVAL;
6195 
6196 	switch (cmd) {
6197 	case SIOCGMIIPHY:
6198 	case SIOCGMIIREG:
6199 	case SIOCSMIIREG:
6200 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6201 		break;
6202 	case SIOCSHWTSTAMP:
6203 		ret = stmmac_hwtstamp_set(dev, rq);
6204 		break;
6205 	case SIOCGHWTSTAMP:
6206 		ret = stmmac_hwtstamp_get(dev, rq);
6207 		break;
6208 	default:
6209 		break;
6210 	}
6211 
6212 	return ret;
6213 }
6214 
6215 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6216 				    void *cb_priv)
6217 {
6218 	struct stmmac_priv *priv = cb_priv;
6219 	int ret = -EOPNOTSUPP;
6220 
6221 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6222 		return ret;
6223 
6224 	__stmmac_disable_all_queues(priv);
6225 
6226 	switch (type) {
6227 	case TC_SETUP_CLSU32:
6228 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6229 		break;
6230 	case TC_SETUP_CLSFLOWER:
6231 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6232 		break;
6233 	default:
6234 		break;
6235 	}
6236 
6237 	stmmac_enable_all_queues(priv);
6238 	return ret;
6239 }
6240 
6241 static LIST_HEAD(stmmac_block_cb_list);
6242 
6243 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6244 			   void *type_data)
6245 {
6246 	struct stmmac_priv *priv = netdev_priv(ndev);
6247 
6248 	switch (type) {
6249 	case TC_QUERY_CAPS:
6250 		return stmmac_tc_query_caps(priv, priv, type_data);
6251 	case TC_SETUP_BLOCK:
6252 		return flow_block_cb_setup_simple(type_data,
6253 						  &stmmac_block_cb_list,
6254 						  stmmac_setup_tc_block_cb,
6255 						  priv, priv, true);
6256 	case TC_SETUP_QDISC_CBS:
6257 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6258 	case TC_SETUP_QDISC_TAPRIO:
6259 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6260 	case TC_SETUP_QDISC_ETF:
6261 		return stmmac_tc_setup_etf(priv, priv, type_data);
6262 	default:
6263 		return -EOPNOTSUPP;
6264 	}
6265 }
6266 
6267 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6268 			       struct net_device *sb_dev)
6269 {
6270 	int gso = skb_shinfo(skb)->gso_type;
6271 
6272 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6273 		/*
6274 		 * There is no way to determine the number of TSO/USO
6275 		 * capable Queues. Let's use always the Queue 0
6276 		 * because if TSO/USO is supported then at least this
6277 		 * one will be capable.
6278 		 */
6279 		return 0;
6280 	}
6281 
6282 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6283 }
6284 
6285 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6286 {
6287 	struct stmmac_priv *priv = netdev_priv(ndev);
6288 	int ret = 0;
6289 
6290 	ret = pm_runtime_resume_and_get(priv->device);
6291 	if (ret < 0)
6292 		return ret;
6293 
6294 	ret = eth_mac_addr(ndev, addr);
6295 	if (ret)
6296 		goto set_mac_error;
6297 
6298 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6299 
6300 set_mac_error:
6301 	pm_runtime_put(priv->device);
6302 
6303 	return ret;
6304 }
6305 
6306 #ifdef CONFIG_DEBUG_FS
6307 static struct dentry *stmmac_fs_dir;
6308 
6309 static void sysfs_display_ring(void *head, int size, int extend_desc,
6310 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6311 {
6312 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6313 	struct dma_desc *p = (struct dma_desc *)head;
6314 	unsigned int desc_size;
6315 	dma_addr_t dma_addr;
6316 	int i;
6317 
6318 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6319 	for (i = 0; i < size; i++) {
6320 		dma_addr = dma_phy_addr + i * desc_size;
6321 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6322 				i, &dma_addr,
6323 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6324 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6325 		if (extend_desc)
6326 			p = &(++ep)->basic;
6327 		else
6328 			p++;
6329 	}
6330 }
6331 
6332 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6333 {
6334 	struct net_device *dev = seq->private;
6335 	struct stmmac_priv *priv = netdev_priv(dev);
6336 	u32 rx_count = priv->plat->rx_queues_to_use;
6337 	u32 tx_count = priv->plat->tx_queues_to_use;
6338 	u32 queue;
6339 
6340 	if ((dev->flags & IFF_UP) == 0)
6341 		return 0;
6342 
6343 	for (queue = 0; queue < rx_count; queue++) {
6344 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6345 
6346 		seq_printf(seq, "RX Queue %d:\n", queue);
6347 
6348 		if (priv->extend_desc) {
6349 			seq_printf(seq, "Extended descriptor ring:\n");
6350 			sysfs_display_ring((void *)rx_q->dma_erx,
6351 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6352 		} else {
6353 			seq_printf(seq, "Descriptor ring:\n");
6354 			sysfs_display_ring((void *)rx_q->dma_rx,
6355 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6356 		}
6357 	}
6358 
6359 	for (queue = 0; queue < tx_count; queue++) {
6360 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6361 
6362 		seq_printf(seq, "TX Queue %d:\n", queue);
6363 
6364 		if (priv->extend_desc) {
6365 			seq_printf(seq, "Extended descriptor ring:\n");
6366 			sysfs_display_ring((void *)tx_q->dma_etx,
6367 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6368 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6369 			seq_printf(seq, "Descriptor ring:\n");
6370 			sysfs_display_ring((void *)tx_q->dma_tx,
6371 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6372 		}
6373 	}
6374 
6375 	return 0;
6376 }
6377 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6378 
6379 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6380 {
6381 	static const char * const dwxgmac_timestamp_source[] = {
6382 		"None",
6383 		"Internal",
6384 		"External",
6385 		"Both",
6386 	};
6387 	static const char * const dwxgmac_safety_feature_desc[] = {
6388 		"No",
6389 		"All Safety Features with ECC and Parity",
6390 		"All Safety Features without ECC or Parity",
6391 		"All Safety Features with Parity Only",
6392 		"ECC Only",
6393 		"UNDEFINED",
6394 		"UNDEFINED",
6395 		"UNDEFINED",
6396 	};
6397 	struct net_device *dev = seq->private;
6398 	struct stmmac_priv *priv = netdev_priv(dev);
6399 
6400 	if (!priv->hw_cap_support) {
6401 		seq_printf(seq, "DMA HW features not supported\n");
6402 		return 0;
6403 	}
6404 
6405 	seq_printf(seq, "==============================\n");
6406 	seq_printf(seq, "\tDMA HW features\n");
6407 	seq_printf(seq, "==============================\n");
6408 
6409 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6410 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6411 	seq_printf(seq, "\t1000 Mbps: %s\n",
6412 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6413 	seq_printf(seq, "\tHalf duplex: %s\n",
6414 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6415 	if (priv->plat->has_xgmac) {
6416 		seq_printf(seq,
6417 			   "\tNumber of Additional MAC address registers: %d\n",
6418 			   priv->dma_cap.multi_addr);
6419 	} else {
6420 		seq_printf(seq, "\tHash Filter: %s\n",
6421 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6422 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6423 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6424 	}
6425 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6426 		   (priv->dma_cap.pcs) ? "Y" : "N");
6427 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6428 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6429 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6430 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6431 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6432 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6433 	seq_printf(seq, "\tRMON module: %s\n",
6434 		   (priv->dma_cap.rmon) ? "Y" : "N");
6435 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6436 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6437 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6438 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6439 	if (priv->plat->has_xgmac)
6440 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6441 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6442 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6443 		   (priv->dma_cap.eee) ? "Y" : "N");
6444 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6445 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6446 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6447 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6448 	    priv->plat->has_xgmac) {
6449 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6450 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6451 	} else {
6452 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6453 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6454 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6455 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6456 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6457 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6458 	}
6459 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6460 		   priv->dma_cap.number_rx_channel);
6461 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6462 		   priv->dma_cap.number_tx_channel);
6463 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6464 		   priv->dma_cap.number_rx_queues);
6465 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6466 		   priv->dma_cap.number_tx_queues);
6467 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6468 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6469 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6470 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6471 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6472 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6473 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6474 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6475 		   priv->dma_cap.pps_out_num);
6476 	seq_printf(seq, "\tSafety Features: %s\n",
6477 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6478 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6479 		   priv->dma_cap.frpsel ? "Y" : "N");
6480 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6481 		   priv->dma_cap.host_dma_width);
6482 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6483 		   priv->dma_cap.rssen ? "Y" : "N");
6484 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6485 		   priv->dma_cap.vlhash ? "Y" : "N");
6486 	seq_printf(seq, "\tSplit Header: %s\n",
6487 		   priv->dma_cap.sphen ? "Y" : "N");
6488 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6489 		   priv->dma_cap.vlins ? "Y" : "N");
6490 	seq_printf(seq, "\tDouble VLAN: %s\n",
6491 		   priv->dma_cap.dvlan ? "Y" : "N");
6492 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6493 		   priv->dma_cap.l3l4fnum);
6494 	seq_printf(seq, "\tARP Offloading: %s\n",
6495 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6496 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6497 		   priv->dma_cap.estsel ? "Y" : "N");
6498 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6499 		   priv->dma_cap.fpesel ? "Y" : "N");
6500 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6501 		   priv->dma_cap.tbssel ? "Y" : "N");
6502 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6503 		   priv->dma_cap.tbs_ch_num);
6504 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6505 		   priv->dma_cap.sgfsel ? "Y" : "N");
6506 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6507 		   BIT(priv->dma_cap.ttsfd) >> 1);
6508 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6509 		   priv->dma_cap.numtc);
6510 	seq_printf(seq, "\tDCB Feature: %s\n",
6511 		   priv->dma_cap.dcben ? "Y" : "N");
6512 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6513 		   priv->dma_cap.advthword ? "Y" : "N");
6514 	seq_printf(seq, "\tPTP Offload: %s\n",
6515 		   priv->dma_cap.ptoen ? "Y" : "N");
6516 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6517 		   priv->dma_cap.osten ? "Y" : "N");
6518 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6519 		   priv->dma_cap.pfcen ? "Y" : "N");
6520 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6521 		   BIT(priv->dma_cap.frpes) << 6);
6522 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6523 		   BIT(priv->dma_cap.frpbs) << 6);
6524 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6525 		   priv->dma_cap.frppipe_num);
6526 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6527 		   priv->dma_cap.nrvf_num ?
6528 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6529 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6530 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6531 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6532 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6533 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6534 		   priv->dma_cap.cbtisel ? "Y" : "N");
6535 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6536 		   priv->dma_cap.aux_snapshot_n);
6537 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6538 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6539 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6540 		   priv->dma_cap.edma ? "Y" : "N");
6541 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6542 		   priv->dma_cap.ediffc ? "Y" : "N");
6543 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6544 		   priv->dma_cap.vxn ? "Y" : "N");
6545 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6546 		   priv->dma_cap.dbgmem ? "Y" : "N");
6547 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6548 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6549 	return 0;
6550 }
6551 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6552 
6553 /* Use network device events to rename debugfs file entries.
6554  */
6555 static int stmmac_device_event(struct notifier_block *unused,
6556 			       unsigned long event, void *ptr)
6557 {
6558 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6559 	struct stmmac_priv *priv = netdev_priv(dev);
6560 
6561 	if (dev->netdev_ops != &stmmac_netdev_ops)
6562 		goto done;
6563 
6564 	switch (event) {
6565 	case NETDEV_CHANGENAME:
6566 		if (priv->dbgfs_dir)
6567 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6568 							 priv->dbgfs_dir,
6569 							 stmmac_fs_dir,
6570 							 dev->name);
6571 		break;
6572 	}
6573 done:
6574 	return NOTIFY_DONE;
6575 }
6576 
6577 static struct notifier_block stmmac_notifier = {
6578 	.notifier_call = stmmac_device_event,
6579 };
6580 
6581 static void stmmac_init_fs(struct net_device *dev)
6582 {
6583 	struct stmmac_priv *priv = netdev_priv(dev);
6584 
6585 	rtnl_lock();
6586 
6587 	/* Create per netdev entries */
6588 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6589 
6590 	/* Entry to report DMA RX/TX rings */
6591 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6592 			    &stmmac_rings_status_fops);
6593 
6594 	/* Entry to report the DMA HW features */
6595 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6596 			    &stmmac_dma_cap_fops);
6597 
6598 	rtnl_unlock();
6599 }
6600 
6601 static void stmmac_exit_fs(struct net_device *dev)
6602 {
6603 	struct stmmac_priv *priv = netdev_priv(dev);
6604 
6605 	debugfs_remove_recursive(priv->dbgfs_dir);
6606 }
6607 #endif /* CONFIG_DEBUG_FS */
6608 
6609 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6610 {
6611 	unsigned char *data = (unsigned char *)&vid_le;
6612 	unsigned char data_byte = 0;
6613 	u32 crc = ~0x0;
6614 	u32 temp = 0;
6615 	int i, bits;
6616 
6617 	bits = get_bitmask_order(VLAN_VID_MASK);
6618 	for (i = 0; i < bits; i++) {
6619 		if ((i % 8) == 0)
6620 			data_byte = data[i / 8];
6621 
6622 		temp = ((crc & 1) ^ data_byte) & 1;
6623 		crc >>= 1;
6624 		data_byte >>= 1;
6625 
6626 		if (temp)
6627 			crc ^= 0xedb88320;
6628 	}
6629 
6630 	return crc;
6631 }
6632 
6633 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6634 {
6635 	u32 crc, hash = 0;
6636 	__le16 pmatch = 0;
6637 	int count = 0;
6638 	u16 vid = 0;
6639 
6640 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6641 		__le16 vid_le = cpu_to_le16(vid);
6642 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6643 		hash |= (1 << crc);
6644 		count++;
6645 	}
6646 
6647 	if (!priv->dma_cap.vlhash) {
6648 		if (count > 2) /* VID = 0 always passes filter */
6649 			return -EOPNOTSUPP;
6650 
6651 		pmatch = cpu_to_le16(vid);
6652 		hash = 0;
6653 	}
6654 
6655 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6656 }
6657 
6658 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6659 {
6660 	struct stmmac_priv *priv = netdev_priv(ndev);
6661 	bool is_double = false;
6662 	int ret;
6663 
6664 	ret = pm_runtime_resume_and_get(priv->device);
6665 	if (ret < 0)
6666 		return ret;
6667 
6668 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6669 		is_double = true;
6670 
6671 	set_bit(vid, priv->active_vlans);
6672 	ret = stmmac_vlan_update(priv, is_double);
6673 	if (ret) {
6674 		clear_bit(vid, priv->active_vlans);
6675 		goto err_pm_put;
6676 	}
6677 
6678 	if (priv->hw->num_vlan) {
6679 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6680 		if (ret)
6681 			goto err_pm_put;
6682 	}
6683 err_pm_put:
6684 	pm_runtime_put(priv->device);
6685 
6686 	return ret;
6687 }
6688 
6689 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6690 {
6691 	struct stmmac_priv *priv = netdev_priv(ndev);
6692 	bool is_double = false;
6693 	int ret;
6694 
6695 	ret = pm_runtime_resume_and_get(priv->device);
6696 	if (ret < 0)
6697 		return ret;
6698 
6699 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6700 		is_double = true;
6701 
6702 	clear_bit(vid, priv->active_vlans);
6703 
6704 	if (priv->hw->num_vlan) {
6705 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6706 		if (ret)
6707 			goto del_vlan_error;
6708 	}
6709 
6710 	ret = stmmac_vlan_update(priv, is_double);
6711 
6712 del_vlan_error:
6713 	pm_runtime_put(priv->device);
6714 
6715 	return ret;
6716 }
6717 
6718 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6719 {
6720 	struct stmmac_priv *priv = netdev_priv(dev);
6721 
6722 	switch (bpf->command) {
6723 	case XDP_SETUP_PROG:
6724 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6725 	case XDP_SETUP_XSK_POOL:
6726 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6727 					     bpf->xsk.queue_id);
6728 	default:
6729 		return -EOPNOTSUPP;
6730 	}
6731 }
6732 
6733 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6734 			   struct xdp_frame **frames, u32 flags)
6735 {
6736 	struct stmmac_priv *priv = netdev_priv(dev);
6737 	int cpu = smp_processor_id();
6738 	struct netdev_queue *nq;
6739 	int i, nxmit = 0;
6740 	int queue;
6741 
6742 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6743 		return -ENETDOWN;
6744 
6745 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6746 		return -EINVAL;
6747 
6748 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6749 	nq = netdev_get_tx_queue(priv->dev, queue);
6750 
6751 	__netif_tx_lock(nq, cpu);
6752 	/* Avoids TX time-out as we are sharing with slow path */
6753 	txq_trans_cond_update(nq);
6754 
6755 	for (i = 0; i < num_frames; i++) {
6756 		int res;
6757 
6758 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6759 		if (res == STMMAC_XDP_CONSUMED)
6760 			break;
6761 
6762 		nxmit++;
6763 	}
6764 
6765 	if (flags & XDP_XMIT_FLUSH) {
6766 		stmmac_flush_tx_descriptors(priv, queue);
6767 		stmmac_tx_timer_arm(priv, queue);
6768 	}
6769 
6770 	__netif_tx_unlock(nq);
6771 
6772 	return nxmit;
6773 }
6774 
6775 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6776 {
6777 	struct stmmac_channel *ch = &priv->channel[queue];
6778 	unsigned long flags;
6779 
6780 	spin_lock_irqsave(&ch->lock, flags);
6781 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6782 	spin_unlock_irqrestore(&ch->lock, flags);
6783 
6784 	stmmac_stop_rx_dma(priv, queue);
6785 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6786 }
6787 
6788 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6789 {
6790 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6791 	struct stmmac_channel *ch = &priv->channel[queue];
6792 	unsigned long flags;
6793 	u32 buf_size;
6794 	int ret;
6795 
6796 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6797 	if (ret) {
6798 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6799 		return;
6800 	}
6801 
6802 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6803 	if (ret) {
6804 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6805 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6806 		return;
6807 	}
6808 
6809 	stmmac_reset_rx_queue(priv, queue);
6810 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6811 
6812 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6813 			    rx_q->dma_rx_phy, rx_q->queue_index);
6814 
6815 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6816 			     sizeof(struct dma_desc));
6817 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6818 			       rx_q->rx_tail_addr, rx_q->queue_index);
6819 
6820 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6821 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6822 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6823 				      buf_size,
6824 				      rx_q->queue_index);
6825 	} else {
6826 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6827 				      priv->dma_conf.dma_buf_sz,
6828 				      rx_q->queue_index);
6829 	}
6830 
6831 	stmmac_start_rx_dma(priv, queue);
6832 
6833 	spin_lock_irqsave(&ch->lock, flags);
6834 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6835 	spin_unlock_irqrestore(&ch->lock, flags);
6836 }
6837 
6838 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6839 {
6840 	struct stmmac_channel *ch = &priv->channel[queue];
6841 	unsigned long flags;
6842 
6843 	spin_lock_irqsave(&ch->lock, flags);
6844 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6845 	spin_unlock_irqrestore(&ch->lock, flags);
6846 
6847 	stmmac_stop_tx_dma(priv, queue);
6848 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6849 }
6850 
6851 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6852 {
6853 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6854 	struct stmmac_channel *ch = &priv->channel[queue];
6855 	unsigned long flags;
6856 	int ret;
6857 
6858 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6859 	if (ret) {
6860 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6861 		return;
6862 	}
6863 
6864 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6865 	if (ret) {
6866 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6867 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6868 		return;
6869 	}
6870 
6871 	stmmac_reset_tx_queue(priv, queue);
6872 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6873 
6874 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6875 			    tx_q->dma_tx_phy, tx_q->queue_index);
6876 
6877 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6878 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6879 
6880 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6881 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6882 			       tx_q->tx_tail_addr, tx_q->queue_index);
6883 
6884 	stmmac_start_tx_dma(priv, queue);
6885 
6886 	spin_lock_irqsave(&ch->lock, flags);
6887 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6888 	spin_unlock_irqrestore(&ch->lock, flags);
6889 }
6890 
6891 void stmmac_xdp_release(struct net_device *dev)
6892 {
6893 	struct stmmac_priv *priv = netdev_priv(dev);
6894 	u32 chan;
6895 
6896 	/* Ensure tx function is not running */
6897 	netif_tx_disable(dev);
6898 
6899 	/* Disable NAPI process */
6900 	stmmac_disable_all_queues(priv);
6901 
6902 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6903 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6904 
6905 	/* Free the IRQ lines */
6906 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6907 
6908 	/* Stop TX/RX DMA channels */
6909 	stmmac_stop_all_dma(priv);
6910 
6911 	/* Release and free the Rx/Tx resources */
6912 	free_dma_desc_resources(priv, &priv->dma_conf);
6913 
6914 	/* Disable the MAC Rx/Tx */
6915 	stmmac_mac_set(priv, priv->ioaddr, false);
6916 
6917 	/* set trans_start so we don't get spurious
6918 	 * watchdogs during reset
6919 	 */
6920 	netif_trans_update(dev);
6921 	netif_carrier_off(dev);
6922 }
6923 
6924 int stmmac_xdp_open(struct net_device *dev)
6925 {
6926 	struct stmmac_priv *priv = netdev_priv(dev);
6927 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6928 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6929 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6930 	struct stmmac_rx_queue *rx_q;
6931 	struct stmmac_tx_queue *tx_q;
6932 	u32 buf_size;
6933 	bool sph_en;
6934 	u32 chan;
6935 	int ret;
6936 
6937 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6938 	if (ret < 0) {
6939 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6940 			   __func__);
6941 		goto dma_desc_error;
6942 	}
6943 
6944 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6945 	if (ret < 0) {
6946 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6947 			   __func__);
6948 		goto init_error;
6949 	}
6950 
6951 	stmmac_reset_queues_param(priv);
6952 
6953 	/* DMA CSR Channel configuration */
6954 	for (chan = 0; chan < dma_csr_ch; chan++) {
6955 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6956 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6957 	}
6958 
6959 	/* Adjust Split header */
6960 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6961 
6962 	/* DMA RX Channel Configuration */
6963 	for (chan = 0; chan < rx_cnt; chan++) {
6964 		rx_q = &priv->dma_conf.rx_queue[chan];
6965 
6966 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6967 				    rx_q->dma_rx_phy, chan);
6968 
6969 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6970 				     (rx_q->buf_alloc_num *
6971 				      sizeof(struct dma_desc));
6972 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6973 				       rx_q->rx_tail_addr, chan);
6974 
6975 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6976 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6977 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6978 					      buf_size,
6979 					      rx_q->queue_index);
6980 		} else {
6981 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6982 					      priv->dma_conf.dma_buf_sz,
6983 					      rx_q->queue_index);
6984 		}
6985 
6986 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6987 	}
6988 
6989 	/* DMA TX Channel Configuration */
6990 	for (chan = 0; chan < tx_cnt; chan++) {
6991 		tx_q = &priv->dma_conf.tx_queue[chan];
6992 
6993 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6994 				    tx_q->dma_tx_phy, chan);
6995 
6996 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6997 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6998 				       tx_q->tx_tail_addr, chan);
6999 
7000 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7001 		tx_q->txtimer.function = stmmac_tx_timer;
7002 	}
7003 
7004 	/* Enable the MAC Rx/Tx */
7005 	stmmac_mac_set(priv, priv->ioaddr, true);
7006 
7007 	/* Start Rx & Tx DMA Channels */
7008 	stmmac_start_all_dma(priv);
7009 
7010 	ret = stmmac_request_irq(dev);
7011 	if (ret)
7012 		goto irq_error;
7013 
7014 	/* Enable NAPI process*/
7015 	stmmac_enable_all_queues(priv);
7016 	netif_carrier_on(dev);
7017 	netif_tx_start_all_queues(dev);
7018 	stmmac_enable_all_dma_irq(priv);
7019 
7020 	return 0;
7021 
7022 irq_error:
7023 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7024 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7025 
7026 	stmmac_hw_teardown(dev);
7027 init_error:
7028 	free_dma_desc_resources(priv, &priv->dma_conf);
7029 dma_desc_error:
7030 	return ret;
7031 }
7032 
7033 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7034 {
7035 	struct stmmac_priv *priv = netdev_priv(dev);
7036 	struct stmmac_rx_queue *rx_q;
7037 	struct stmmac_tx_queue *tx_q;
7038 	struct stmmac_channel *ch;
7039 
7040 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7041 	    !netif_carrier_ok(priv->dev))
7042 		return -ENETDOWN;
7043 
7044 	if (!stmmac_xdp_is_enabled(priv))
7045 		return -EINVAL;
7046 
7047 	if (queue >= priv->plat->rx_queues_to_use ||
7048 	    queue >= priv->plat->tx_queues_to_use)
7049 		return -EINVAL;
7050 
7051 	rx_q = &priv->dma_conf.rx_queue[queue];
7052 	tx_q = &priv->dma_conf.tx_queue[queue];
7053 	ch = &priv->channel[queue];
7054 
7055 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7056 		return -EINVAL;
7057 
7058 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7059 		/* EQoS does not have per-DMA channel SW interrupt,
7060 		 * so we schedule RX Napi straight-away.
7061 		 */
7062 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7063 			__napi_schedule(&ch->rxtx_napi);
7064 	}
7065 
7066 	return 0;
7067 }
7068 
7069 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7070 {
7071 	struct stmmac_priv *priv = netdev_priv(dev);
7072 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7073 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7074 	unsigned int start;
7075 	int q;
7076 
7077 	for (q = 0; q < tx_cnt; q++) {
7078 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7079 		u64 tx_packets;
7080 		u64 tx_bytes;
7081 
7082 		do {
7083 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7084 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7085 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7086 		do {
7087 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7088 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7089 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7090 
7091 		stats->tx_packets += tx_packets;
7092 		stats->tx_bytes += tx_bytes;
7093 	}
7094 
7095 	for (q = 0; q < rx_cnt; q++) {
7096 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7097 		u64 rx_packets;
7098 		u64 rx_bytes;
7099 
7100 		do {
7101 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7102 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7103 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7104 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7105 
7106 		stats->rx_packets += rx_packets;
7107 		stats->rx_bytes += rx_bytes;
7108 	}
7109 
7110 	stats->rx_dropped = priv->xstats.rx_dropped;
7111 	stats->rx_errors = priv->xstats.rx_errors;
7112 	stats->tx_dropped = priv->xstats.tx_dropped;
7113 	stats->tx_errors = priv->xstats.tx_errors;
7114 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7115 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7116 	stats->rx_length_errors = priv->xstats.rx_length;
7117 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7118 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7119 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7120 }
7121 
7122 static const struct net_device_ops stmmac_netdev_ops = {
7123 	.ndo_open = stmmac_open,
7124 	.ndo_start_xmit = stmmac_xmit,
7125 	.ndo_stop = stmmac_release,
7126 	.ndo_change_mtu = stmmac_change_mtu,
7127 	.ndo_fix_features = stmmac_fix_features,
7128 	.ndo_set_features = stmmac_set_features,
7129 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7130 	.ndo_tx_timeout = stmmac_tx_timeout,
7131 	.ndo_eth_ioctl = stmmac_ioctl,
7132 	.ndo_get_stats64 = stmmac_get_stats64,
7133 	.ndo_setup_tc = stmmac_setup_tc,
7134 	.ndo_select_queue = stmmac_select_queue,
7135 	.ndo_set_mac_address = stmmac_set_mac_address,
7136 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7137 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7138 	.ndo_bpf = stmmac_bpf,
7139 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7140 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7141 };
7142 
7143 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7144 {
7145 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7146 		return;
7147 	if (test_bit(STMMAC_DOWN, &priv->state))
7148 		return;
7149 
7150 	netdev_err(priv->dev, "Reset adapter.\n");
7151 
7152 	rtnl_lock();
7153 	netif_trans_update(priv->dev);
7154 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7155 		usleep_range(1000, 2000);
7156 
7157 	set_bit(STMMAC_DOWN, &priv->state);
7158 	dev_close(priv->dev);
7159 	dev_open(priv->dev, NULL);
7160 	clear_bit(STMMAC_DOWN, &priv->state);
7161 	clear_bit(STMMAC_RESETING, &priv->state);
7162 	rtnl_unlock();
7163 }
7164 
7165 static void stmmac_service_task(struct work_struct *work)
7166 {
7167 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7168 			service_task);
7169 
7170 	stmmac_reset_subtask(priv);
7171 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7172 }
7173 
7174 /**
7175  *  stmmac_hw_init - Init the MAC device
7176  *  @priv: driver private structure
7177  *  Description: this function is to configure the MAC device according to
7178  *  some platform parameters or the HW capability register. It prepares the
7179  *  driver to use either ring or chain modes and to setup either enhanced or
7180  *  normal descriptors.
7181  */
7182 static int stmmac_hw_init(struct stmmac_priv *priv)
7183 {
7184 	int ret;
7185 
7186 	/* dwmac-sun8i only work in chain mode */
7187 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7188 		chain_mode = 1;
7189 	priv->chain_mode = chain_mode;
7190 
7191 	/* Initialize HW Interface */
7192 	ret = stmmac_hwif_init(priv);
7193 	if (ret)
7194 		return ret;
7195 
7196 	/* Get the HW capability (new GMAC newer than 3.50a) */
7197 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7198 	if (priv->hw_cap_support) {
7199 		dev_info(priv->device, "DMA HW capability register supported\n");
7200 
7201 		/* We can override some gmac/dma configuration fields: e.g.
7202 		 * enh_desc, tx_coe (e.g. that are passed through the
7203 		 * platform) with the values from the HW capability
7204 		 * register (if supported).
7205 		 */
7206 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7207 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7208 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7209 		priv->hw->pmt = priv->plat->pmt;
7210 		if (priv->dma_cap.hash_tb_sz) {
7211 			priv->hw->multicast_filter_bins =
7212 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7213 			priv->hw->mcast_bits_log2 =
7214 					ilog2(priv->hw->multicast_filter_bins);
7215 		}
7216 
7217 		/* TXCOE doesn't work in thresh DMA mode */
7218 		if (priv->plat->force_thresh_dma_mode)
7219 			priv->plat->tx_coe = 0;
7220 		else
7221 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7222 
7223 		/* In case of GMAC4 rx_coe is from HW cap register. */
7224 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7225 
7226 		if (priv->dma_cap.rx_coe_type2)
7227 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7228 		else if (priv->dma_cap.rx_coe_type1)
7229 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7230 
7231 	} else {
7232 		dev_info(priv->device, "No HW DMA feature register supported\n");
7233 	}
7234 
7235 	if (priv->plat->rx_coe) {
7236 		priv->hw->rx_csum = priv->plat->rx_coe;
7237 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7238 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7239 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7240 	}
7241 	if (priv->plat->tx_coe)
7242 		dev_info(priv->device, "TX Checksum insertion supported\n");
7243 
7244 	if (priv->plat->pmt) {
7245 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7246 		device_set_wakeup_capable(priv->device, 1);
7247 	}
7248 
7249 	if (priv->dma_cap.tsoen)
7250 		dev_info(priv->device, "TSO supported\n");
7251 
7252 	priv->hw->vlan_fail_q_en =
7253 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7254 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7255 
7256 	/* Run HW quirks, if any */
7257 	if (priv->hwif_quirks) {
7258 		ret = priv->hwif_quirks(priv);
7259 		if (ret)
7260 			return ret;
7261 	}
7262 
7263 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7264 	 * In some case, for example on bugged HW this feature
7265 	 * has to be disable and this can be done by passing the
7266 	 * riwt_off field from the platform.
7267 	 */
7268 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7269 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7270 		priv->use_riwt = 1;
7271 		dev_info(priv->device,
7272 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7273 	}
7274 
7275 	return 0;
7276 }
7277 
7278 static void stmmac_napi_add(struct net_device *dev)
7279 {
7280 	struct stmmac_priv *priv = netdev_priv(dev);
7281 	u32 queue, maxq;
7282 
7283 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7284 
7285 	for (queue = 0; queue < maxq; queue++) {
7286 		struct stmmac_channel *ch = &priv->channel[queue];
7287 
7288 		ch->priv_data = priv;
7289 		ch->index = queue;
7290 		spin_lock_init(&ch->lock);
7291 
7292 		if (queue < priv->plat->rx_queues_to_use) {
7293 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7294 		}
7295 		if (queue < priv->plat->tx_queues_to_use) {
7296 			netif_napi_add_tx(dev, &ch->tx_napi,
7297 					  stmmac_napi_poll_tx);
7298 		}
7299 		if (queue < priv->plat->rx_queues_to_use &&
7300 		    queue < priv->plat->tx_queues_to_use) {
7301 			netif_napi_add(dev, &ch->rxtx_napi,
7302 				       stmmac_napi_poll_rxtx);
7303 		}
7304 	}
7305 }
7306 
7307 static void stmmac_napi_del(struct net_device *dev)
7308 {
7309 	struct stmmac_priv *priv = netdev_priv(dev);
7310 	u32 queue, maxq;
7311 
7312 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7313 
7314 	for (queue = 0; queue < maxq; queue++) {
7315 		struct stmmac_channel *ch = &priv->channel[queue];
7316 
7317 		if (queue < priv->plat->rx_queues_to_use)
7318 			netif_napi_del(&ch->rx_napi);
7319 		if (queue < priv->plat->tx_queues_to_use)
7320 			netif_napi_del(&ch->tx_napi);
7321 		if (queue < priv->plat->rx_queues_to_use &&
7322 		    queue < priv->plat->tx_queues_to_use) {
7323 			netif_napi_del(&ch->rxtx_napi);
7324 		}
7325 	}
7326 }
7327 
7328 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7329 {
7330 	struct stmmac_priv *priv = netdev_priv(dev);
7331 	int ret = 0, i;
7332 	int max_speed;
7333 
7334 	if (netif_running(dev))
7335 		stmmac_release(dev);
7336 
7337 	stmmac_napi_del(dev);
7338 
7339 	priv->plat->rx_queues_to_use = rx_cnt;
7340 	priv->plat->tx_queues_to_use = tx_cnt;
7341 	if (!netif_is_rxfh_configured(dev))
7342 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7343 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7344 									rx_cnt);
7345 
7346 	stmmac_mac_phylink_get_caps(priv);
7347 
7348 	priv->phylink_config.mac_capabilities = priv->hw->link.caps;
7349 
7350 	max_speed = priv->plat->max_speed;
7351 	if (max_speed)
7352 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
7353 
7354 	stmmac_napi_add(dev);
7355 
7356 	if (netif_running(dev))
7357 		ret = stmmac_open(dev);
7358 
7359 	return ret;
7360 }
7361 
7362 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7363 {
7364 	struct stmmac_priv *priv = netdev_priv(dev);
7365 	int ret = 0;
7366 
7367 	if (netif_running(dev))
7368 		stmmac_release(dev);
7369 
7370 	priv->dma_conf.dma_rx_size = rx_size;
7371 	priv->dma_conf.dma_tx_size = tx_size;
7372 
7373 	if (netif_running(dev))
7374 		ret = stmmac_open(dev);
7375 
7376 	return ret;
7377 }
7378 
7379 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7380 static void stmmac_fpe_lp_task(struct work_struct *work)
7381 {
7382 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7383 						fpe_task);
7384 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7385 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7386 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7387 	bool *hs_enable = &fpe_cfg->hs_enable;
7388 	bool *enable = &fpe_cfg->enable;
7389 	int retries = 20;
7390 
7391 	while (retries-- > 0) {
7392 		/* Bail out immediately if FPE handshake is OFF */
7393 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7394 			break;
7395 
7396 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7397 		    *lp_state == FPE_STATE_ENTERING_ON) {
7398 			stmmac_fpe_configure(priv, priv->ioaddr,
7399 					     fpe_cfg,
7400 					     priv->plat->tx_queues_to_use,
7401 					     priv->plat->rx_queues_to_use,
7402 					     *enable);
7403 
7404 			netdev_info(priv->dev, "configured FPE\n");
7405 
7406 			*lo_state = FPE_STATE_ON;
7407 			*lp_state = FPE_STATE_ON;
7408 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7409 			break;
7410 		}
7411 
7412 		if ((*lo_state == FPE_STATE_CAPABLE ||
7413 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7414 		     *lp_state != FPE_STATE_ON) {
7415 			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
7416 				    *lo_state, *lp_state);
7417 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7418 						fpe_cfg,
7419 						MPACKET_VERIFY);
7420 		}
7421 		/* Sleep then retry */
7422 		msleep(500);
7423 	}
7424 
7425 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7426 }
7427 
7428 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7429 {
7430 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7431 		if (enable) {
7432 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7433 						priv->plat->fpe_cfg,
7434 						MPACKET_VERIFY);
7435 		} else {
7436 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7437 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7438 		}
7439 
7440 		priv->plat->fpe_cfg->hs_enable = enable;
7441 	}
7442 }
7443 
7444 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7445 {
7446 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7447 	struct dma_desc *desc_contains_ts = ctx->desc;
7448 	struct stmmac_priv *priv = ctx->priv;
7449 	struct dma_desc *ndesc = ctx->ndesc;
7450 	struct dma_desc *desc = ctx->desc;
7451 	u64 ns = 0;
7452 
7453 	if (!priv->hwts_rx_en)
7454 		return -ENODATA;
7455 
7456 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7457 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7458 		desc_contains_ts = ndesc;
7459 
7460 	/* Check if timestamp is available */
7461 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7462 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7463 		ns -= priv->plat->cdc_error_adj;
7464 		*timestamp = ns_to_ktime(ns);
7465 		return 0;
7466 	}
7467 
7468 	return -ENODATA;
7469 }
7470 
7471 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7472 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7473 };
7474 
7475 /**
7476  * stmmac_dvr_probe
7477  * @device: device pointer
7478  * @plat_dat: platform data pointer
7479  * @res: stmmac resource pointer
7480  * Description: this is the main probe function used to
7481  * call the alloc_etherdev, allocate the priv structure.
7482  * Return:
7483  * returns 0 on success, otherwise errno.
7484  */
7485 int stmmac_dvr_probe(struct device *device,
7486 		     struct plat_stmmacenet_data *plat_dat,
7487 		     struct stmmac_resources *res)
7488 {
7489 	struct net_device *ndev = NULL;
7490 	struct stmmac_priv *priv;
7491 	u32 rxq;
7492 	int i, ret = 0;
7493 
7494 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7495 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7496 	if (!ndev)
7497 		return -ENOMEM;
7498 
7499 	SET_NETDEV_DEV(ndev, device);
7500 
7501 	priv = netdev_priv(ndev);
7502 	priv->device = device;
7503 	priv->dev = ndev;
7504 
7505 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7506 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7507 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7508 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7509 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7510 	}
7511 
7512 	priv->xstats.pcpu_stats =
7513 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7514 	if (!priv->xstats.pcpu_stats)
7515 		return -ENOMEM;
7516 
7517 	stmmac_set_ethtool_ops(ndev);
7518 	priv->pause = pause;
7519 	priv->plat = plat_dat;
7520 	priv->ioaddr = res->addr;
7521 	priv->dev->base_addr = (unsigned long)res->addr;
7522 	priv->plat->dma_cfg->multi_msi_en =
7523 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7524 
7525 	priv->dev->irq = res->irq;
7526 	priv->wol_irq = res->wol_irq;
7527 	priv->lpi_irq = res->lpi_irq;
7528 	priv->sfty_irq = res->sfty_irq;
7529 	priv->sfty_ce_irq = res->sfty_ce_irq;
7530 	priv->sfty_ue_irq = res->sfty_ue_irq;
7531 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7532 		priv->rx_irq[i] = res->rx_irq[i];
7533 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7534 		priv->tx_irq[i] = res->tx_irq[i];
7535 
7536 	if (!is_zero_ether_addr(res->mac))
7537 		eth_hw_addr_set(priv->dev, res->mac);
7538 
7539 	dev_set_drvdata(device, priv->dev);
7540 
7541 	/* Verify driver arguments */
7542 	stmmac_verify_args();
7543 
7544 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7545 	if (!priv->af_xdp_zc_qps)
7546 		return -ENOMEM;
7547 
7548 	/* Allocate workqueue */
7549 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7550 	if (!priv->wq) {
7551 		dev_err(priv->device, "failed to create workqueue\n");
7552 		ret = -ENOMEM;
7553 		goto error_wq_init;
7554 	}
7555 
7556 	INIT_WORK(&priv->service_task, stmmac_service_task);
7557 
7558 	/* Initialize Link Partner FPE workqueue */
7559 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7560 
7561 	/* Override with kernel parameters if supplied XXX CRS XXX
7562 	 * this needs to have multiple instances
7563 	 */
7564 	if ((phyaddr >= 0) && (phyaddr <= 31))
7565 		priv->plat->phy_addr = phyaddr;
7566 
7567 	if (priv->plat->stmmac_rst) {
7568 		ret = reset_control_assert(priv->plat->stmmac_rst);
7569 		reset_control_deassert(priv->plat->stmmac_rst);
7570 		/* Some reset controllers have only reset callback instead of
7571 		 * assert + deassert callbacks pair.
7572 		 */
7573 		if (ret == -ENOTSUPP)
7574 			reset_control_reset(priv->plat->stmmac_rst);
7575 	}
7576 
7577 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7578 	if (ret == -ENOTSUPP)
7579 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7580 			ERR_PTR(ret));
7581 
7582 	/* Wait a bit for the reset to take effect */
7583 	udelay(10);
7584 
7585 	/* Init MAC and get the capabilities */
7586 	ret = stmmac_hw_init(priv);
7587 	if (ret)
7588 		goto error_hw_init;
7589 
7590 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7591 	 */
7592 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7593 		priv->plat->dma_cfg->dche = false;
7594 
7595 	stmmac_check_ether_addr(priv);
7596 
7597 	ndev->netdev_ops = &stmmac_netdev_ops;
7598 
7599 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7600 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7601 
7602 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7603 			    NETIF_F_RXCSUM;
7604 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7605 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7606 
7607 	ret = stmmac_tc_init(priv, priv);
7608 	if (!ret) {
7609 		ndev->hw_features |= NETIF_F_HW_TC;
7610 	}
7611 
7612 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7613 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7614 		if (priv->plat->has_gmac4)
7615 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7616 		priv->tso = true;
7617 		dev_info(priv->device, "TSO feature enabled\n");
7618 	}
7619 
7620 	if (priv->dma_cap.sphen &&
7621 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7622 		ndev->hw_features |= NETIF_F_GRO;
7623 		priv->sph_cap = true;
7624 		priv->sph = priv->sph_cap;
7625 		dev_info(priv->device, "SPH feature enabled\n");
7626 	}
7627 
7628 	/* Ideally our host DMA address width is the same as for the
7629 	 * device. However, it may differ and then we have to use our
7630 	 * host DMA width for allocation and the device DMA width for
7631 	 * register handling.
7632 	 */
7633 	if (priv->plat->host_dma_width)
7634 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7635 	else
7636 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7637 
7638 	if (priv->dma_cap.host_dma_width) {
7639 		ret = dma_set_mask_and_coherent(device,
7640 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7641 		if (!ret) {
7642 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7643 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7644 
7645 			/*
7646 			 * If more than 32 bits can be addressed, make sure to
7647 			 * enable enhanced addressing mode.
7648 			 */
7649 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7650 				priv->plat->dma_cfg->eame = true;
7651 		} else {
7652 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7653 			if (ret) {
7654 				dev_err(priv->device, "Failed to set DMA Mask\n");
7655 				goto error_hw_init;
7656 			}
7657 
7658 			priv->dma_cap.host_dma_width = 32;
7659 		}
7660 	}
7661 
7662 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7663 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7664 #ifdef STMMAC_VLAN_TAG_USED
7665 	/* Both mac100 and gmac support receive VLAN tag detection */
7666 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7667 	ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7668 	priv->hw->hw_vlan_en = true;
7669 
7670 	if (priv->dma_cap.vlhash) {
7671 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7672 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7673 	}
7674 	if (priv->dma_cap.vlins) {
7675 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7676 		if (priv->dma_cap.dvlan)
7677 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7678 	}
7679 #endif
7680 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7681 
7682 	priv->xstats.threshold = tc;
7683 
7684 	/* Initialize RSS */
7685 	rxq = priv->plat->rx_queues_to_use;
7686 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7687 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7688 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7689 
7690 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7691 		ndev->features |= NETIF_F_RXHASH;
7692 
7693 	ndev->vlan_features |= ndev->features;
7694 	/* TSO doesn't work on VLANs yet */
7695 	ndev->vlan_features &= ~NETIF_F_TSO;
7696 
7697 	/* MTU range: 46 - hw-specific max */
7698 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7699 	if (priv->plat->has_xgmac)
7700 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7701 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7702 		ndev->max_mtu = JUMBO_LEN;
7703 	else
7704 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7705 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7706 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7707 	 */
7708 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7709 	    (priv->plat->maxmtu >= ndev->min_mtu))
7710 		ndev->max_mtu = priv->plat->maxmtu;
7711 	else if (priv->plat->maxmtu < ndev->min_mtu)
7712 		dev_warn(priv->device,
7713 			 "%s: warning: maxmtu having invalid value (%d)\n",
7714 			 __func__, priv->plat->maxmtu);
7715 
7716 	if (flow_ctrl)
7717 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7718 
7719 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7720 
7721 	/* Setup channels NAPI */
7722 	stmmac_napi_add(ndev);
7723 
7724 	mutex_init(&priv->lock);
7725 
7726 	/* If a specific clk_csr value is passed from the platform
7727 	 * this means that the CSR Clock Range selection cannot be
7728 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7729 	 * set the MDC clock dynamically according to the csr actual
7730 	 * clock input.
7731 	 */
7732 	if (priv->plat->clk_csr >= 0)
7733 		priv->clk_csr = priv->plat->clk_csr;
7734 	else
7735 		stmmac_clk_csr_set(priv);
7736 
7737 	stmmac_check_pcs_mode(priv);
7738 
7739 	pm_runtime_get_noresume(device);
7740 	pm_runtime_set_active(device);
7741 	if (!pm_runtime_enabled(device))
7742 		pm_runtime_enable(device);
7743 
7744 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7745 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7746 		/* MDIO bus Registration */
7747 		ret = stmmac_mdio_register(ndev);
7748 		if (ret < 0) {
7749 			dev_err_probe(priv->device, ret,
7750 				      "%s: MDIO bus (id: %d) registration failed\n",
7751 				      __func__, priv->plat->bus_id);
7752 			goto error_mdio_register;
7753 		}
7754 	}
7755 
7756 	if (priv->plat->speed_mode_2500)
7757 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7758 
7759 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7760 		ret = stmmac_xpcs_setup(priv->mii);
7761 		if (ret)
7762 			goto error_xpcs_setup;
7763 	}
7764 
7765 	ret = stmmac_phy_setup(priv);
7766 	if (ret) {
7767 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7768 		goto error_phy_setup;
7769 	}
7770 
7771 	ret = register_netdev(ndev);
7772 	if (ret) {
7773 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7774 			__func__, ret);
7775 		goto error_netdev_register;
7776 	}
7777 
7778 #ifdef CONFIG_DEBUG_FS
7779 	stmmac_init_fs(ndev);
7780 #endif
7781 
7782 	if (priv->plat->dump_debug_regs)
7783 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7784 
7785 	/* Let pm_runtime_put() disable the clocks.
7786 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7787 	 */
7788 	pm_runtime_put(device);
7789 
7790 	return ret;
7791 
7792 error_netdev_register:
7793 	phylink_destroy(priv->phylink);
7794 error_xpcs_setup:
7795 error_phy_setup:
7796 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7797 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7798 		stmmac_mdio_unregister(ndev);
7799 error_mdio_register:
7800 	stmmac_napi_del(ndev);
7801 error_hw_init:
7802 	destroy_workqueue(priv->wq);
7803 error_wq_init:
7804 	bitmap_free(priv->af_xdp_zc_qps);
7805 
7806 	return ret;
7807 }
7808 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7809 
7810 /**
7811  * stmmac_dvr_remove
7812  * @dev: device pointer
7813  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7814  * changes the link status, releases the DMA descriptor rings.
7815  */
7816 void stmmac_dvr_remove(struct device *dev)
7817 {
7818 	struct net_device *ndev = dev_get_drvdata(dev);
7819 	struct stmmac_priv *priv = netdev_priv(ndev);
7820 
7821 	netdev_info(priv->dev, "%s: removing driver", __func__);
7822 
7823 	pm_runtime_get_sync(dev);
7824 
7825 	stmmac_stop_all_dma(priv);
7826 	stmmac_mac_set(priv, priv->ioaddr, false);
7827 	netif_carrier_off(ndev);
7828 	unregister_netdev(ndev);
7829 
7830 #ifdef CONFIG_DEBUG_FS
7831 	stmmac_exit_fs(ndev);
7832 #endif
7833 	phylink_destroy(priv->phylink);
7834 	if (priv->plat->stmmac_rst)
7835 		reset_control_assert(priv->plat->stmmac_rst);
7836 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7837 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7838 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7839 		stmmac_mdio_unregister(ndev);
7840 	destroy_workqueue(priv->wq);
7841 	mutex_destroy(&priv->lock);
7842 	bitmap_free(priv->af_xdp_zc_qps);
7843 
7844 	pm_runtime_disable(dev);
7845 	pm_runtime_put_noidle(dev);
7846 }
7847 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7848 
7849 /**
7850  * stmmac_suspend - suspend callback
7851  * @dev: device pointer
7852  * Description: this is the function to suspend the device and it is called
7853  * by the platform driver to stop the network queue, release the resources,
7854  * program the PMT register (for WoL), clean and release driver resources.
7855  */
7856 int stmmac_suspend(struct device *dev)
7857 {
7858 	struct net_device *ndev = dev_get_drvdata(dev);
7859 	struct stmmac_priv *priv = netdev_priv(ndev);
7860 	u32 chan;
7861 
7862 	if (!ndev || !netif_running(ndev))
7863 		return 0;
7864 
7865 	mutex_lock(&priv->lock);
7866 
7867 	netif_device_detach(ndev);
7868 
7869 	stmmac_disable_all_queues(priv);
7870 
7871 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7872 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7873 
7874 	if (priv->eee_enabled) {
7875 		priv->tx_path_in_lpi_mode = false;
7876 		del_timer_sync(&priv->eee_ctrl_timer);
7877 	}
7878 
7879 	/* Stop TX/RX DMA */
7880 	stmmac_stop_all_dma(priv);
7881 
7882 	if (priv->plat->serdes_powerdown)
7883 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7884 
7885 	/* Enable Power down mode by programming the PMT regs */
7886 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7887 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7888 		priv->irq_wake = 1;
7889 	} else {
7890 		stmmac_mac_set(priv, priv->ioaddr, false);
7891 		pinctrl_pm_select_sleep_state(priv->device);
7892 	}
7893 
7894 	mutex_unlock(&priv->lock);
7895 
7896 	rtnl_lock();
7897 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7898 		phylink_suspend(priv->phylink, true);
7899 	} else {
7900 		if (device_may_wakeup(priv->device))
7901 			phylink_speed_down(priv->phylink, false);
7902 		phylink_suspend(priv->phylink, false);
7903 	}
7904 	rtnl_unlock();
7905 
7906 	if (priv->dma_cap.fpesel) {
7907 		/* Disable FPE */
7908 		stmmac_fpe_configure(priv, priv->ioaddr,
7909 				     priv->plat->fpe_cfg,
7910 				     priv->plat->tx_queues_to_use,
7911 				     priv->plat->rx_queues_to_use, false);
7912 
7913 		stmmac_fpe_handshake(priv, false);
7914 		stmmac_fpe_stop_wq(priv);
7915 	}
7916 
7917 	priv->speed = SPEED_UNKNOWN;
7918 	return 0;
7919 }
7920 EXPORT_SYMBOL_GPL(stmmac_suspend);
7921 
7922 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7923 {
7924 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7925 
7926 	rx_q->cur_rx = 0;
7927 	rx_q->dirty_rx = 0;
7928 }
7929 
7930 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7931 {
7932 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7933 
7934 	tx_q->cur_tx = 0;
7935 	tx_q->dirty_tx = 0;
7936 	tx_q->mss = 0;
7937 
7938 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7939 }
7940 
7941 /**
7942  * stmmac_reset_queues_param - reset queue parameters
7943  * @priv: device pointer
7944  */
7945 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7946 {
7947 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7948 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7949 	u32 queue;
7950 
7951 	for (queue = 0; queue < rx_cnt; queue++)
7952 		stmmac_reset_rx_queue(priv, queue);
7953 
7954 	for (queue = 0; queue < tx_cnt; queue++)
7955 		stmmac_reset_tx_queue(priv, queue);
7956 }
7957 
7958 /**
7959  * stmmac_resume - resume callback
7960  * @dev: device pointer
7961  * Description: when resume this function is invoked to setup the DMA and CORE
7962  * in a usable state.
7963  */
7964 int stmmac_resume(struct device *dev)
7965 {
7966 	struct net_device *ndev = dev_get_drvdata(dev);
7967 	struct stmmac_priv *priv = netdev_priv(ndev);
7968 	int ret;
7969 
7970 	if (!netif_running(ndev))
7971 		return 0;
7972 
7973 	/* Power Down bit, into the PM register, is cleared
7974 	 * automatically as soon as a magic packet or a Wake-up frame
7975 	 * is received. Anyway, it's better to manually clear
7976 	 * this bit because it can generate problems while resuming
7977 	 * from another devices (e.g. serial console).
7978 	 */
7979 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7980 		mutex_lock(&priv->lock);
7981 		stmmac_pmt(priv, priv->hw, 0);
7982 		mutex_unlock(&priv->lock);
7983 		priv->irq_wake = 0;
7984 	} else {
7985 		pinctrl_pm_select_default_state(priv->device);
7986 		/* reset the phy so that it's ready */
7987 		if (priv->mii)
7988 			stmmac_mdio_reset(priv->mii);
7989 	}
7990 
7991 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7992 	    priv->plat->serdes_powerup) {
7993 		ret = priv->plat->serdes_powerup(ndev,
7994 						 priv->plat->bsp_priv);
7995 
7996 		if (ret < 0)
7997 			return ret;
7998 	}
7999 
8000 	rtnl_lock();
8001 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
8002 		phylink_resume(priv->phylink);
8003 	} else {
8004 		phylink_resume(priv->phylink);
8005 		if (device_may_wakeup(priv->device))
8006 			phylink_speed_up(priv->phylink);
8007 	}
8008 	rtnl_unlock();
8009 
8010 	rtnl_lock();
8011 	mutex_lock(&priv->lock);
8012 
8013 	stmmac_reset_queues_param(priv);
8014 
8015 	stmmac_free_tx_skbufs(priv);
8016 	stmmac_clear_descriptors(priv, &priv->dma_conf);
8017 
8018 	stmmac_hw_setup(ndev, false);
8019 	stmmac_init_coalesce(priv);
8020 	stmmac_set_rx_mode(ndev);
8021 
8022 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8023 
8024 	stmmac_enable_all_queues(priv);
8025 	stmmac_enable_all_dma_irq(priv);
8026 
8027 	mutex_unlock(&priv->lock);
8028 	rtnl_unlock();
8029 
8030 	netif_device_attach(ndev);
8031 
8032 	return 0;
8033 }
8034 EXPORT_SYMBOL_GPL(stmmac_resume);
8035 
8036 #ifndef MODULE
8037 static int __init stmmac_cmdline_opt(char *str)
8038 {
8039 	char *opt;
8040 
8041 	if (!str || !*str)
8042 		return 1;
8043 	while ((opt = strsep(&str, ",")) != NULL) {
8044 		if (!strncmp(opt, "debug:", 6)) {
8045 			if (kstrtoint(opt + 6, 0, &debug))
8046 				goto err;
8047 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8048 			if (kstrtoint(opt + 8, 0, &phyaddr))
8049 				goto err;
8050 		} else if (!strncmp(opt, "buf_sz:", 7)) {
8051 			if (kstrtoint(opt + 7, 0, &buf_sz))
8052 				goto err;
8053 		} else if (!strncmp(opt, "tc:", 3)) {
8054 			if (kstrtoint(opt + 3, 0, &tc))
8055 				goto err;
8056 		} else if (!strncmp(opt, "watchdog:", 9)) {
8057 			if (kstrtoint(opt + 9, 0, &watchdog))
8058 				goto err;
8059 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8060 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8061 				goto err;
8062 		} else if (!strncmp(opt, "pause:", 6)) {
8063 			if (kstrtoint(opt + 6, 0, &pause))
8064 				goto err;
8065 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8066 			if (kstrtoint(opt + 10, 0, &eee_timer))
8067 				goto err;
8068 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8069 			if (kstrtoint(opt + 11, 0, &chain_mode))
8070 				goto err;
8071 		}
8072 	}
8073 	return 1;
8074 
8075 err:
8076 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8077 	return 1;
8078 }
8079 
8080 __setup("stmmaceth=", stmmac_cmdline_opt);
8081 #endif /* MODULE */
8082 
8083 static int __init stmmac_init(void)
8084 {
8085 #ifdef CONFIG_DEBUG_FS
8086 	/* Create debugfs main directory if it doesn't exist yet */
8087 	if (!stmmac_fs_dir)
8088 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8089 	register_netdevice_notifier(&stmmac_notifier);
8090 #endif
8091 
8092 	return 0;
8093 }
8094 
8095 static void __exit stmmac_exit(void)
8096 {
8097 #ifdef CONFIG_DEBUG_FS
8098 	unregister_netdevice_notifier(&stmmac_notifier);
8099 	debugfs_remove_recursive(stmmac_fs_dir);
8100 #endif
8101 }
8102 
8103 module_init(stmmac_init)
8104 module_exit(stmmac_exit)
8105 
8106 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8107 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8108 MODULE_LICENSE("GPL");
8109