xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 5c1672705a1a2389f5ad78e0fea6f08ed32d6f18)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoid non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allow user to force to use the chain instead of the ring
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and set a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Viceversa the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* Platform provided default clk_csr would be assumed valid
308 	 * for all other cases except for the below mentioned ones.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we can not estimate the proper divider as it is not known
311 	 * the frequency of clk_csr_i. So we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function is to verify and enter in LPI mode in case of
407  * EEE.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function is to exit and disable EEE in case of
433  * LPI state is true. This is called by the xmit.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then MAC Transmitter can be moved to LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enable the LPI state and start related
468  *  timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Using PCS we cannot dial with the phy registers at this stage
475 	 * so we do not support extra feature like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function will read timestamp from the descriptor & pass it to stack.
534  * and also perform some sanity checks.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function will read received packet's timestamp from the descriptor
577  * and pass it to stack. It also perform some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure, that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing(TX)
614  *  and incoming(RX) packets time stamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate -ve integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.AS1 any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.AS1, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.AS1, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure, that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtain the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * Will be rerun after resuming from suspend, case in which the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* calculate default added value:
858 	 * formula is :
859 	 * addend = (2^32)/freq_div_ratio;
860 	 * where, freq_div_ratio = 1e9ns/sec_inc
861 	 */
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
940 					 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	/* Refresh the MAC-specific capabilities */
945 	stmmac_mac_update_caps(priv);
946 
947 	config->mac_capabilities = priv->hw->link.caps;
948 
949 	if (priv->plat->max_speed)
950 		phylink_limit_mac_speed(config, priv->plat->max_speed);
951 
952 	return config->mac_capabilities;
953 }
954 
955 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
956 						 phy_interface_t interface)
957 {
958 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
959 
960 	if (priv->hw->xpcs)
961 		return &priv->hw->xpcs->pcs;
962 
963 	return priv->hw->phylink_pcs;
964 }
965 
966 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
967 			      const struct phylink_link_state *state)
968 {
969 	/* Nothing to do, xpcs_config() handles everything */
970 }
971 
972 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
973 {
974 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
975 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
976 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
977 	bool *hs_enable = &fpe_cfg->hs_enable;
978 
979 	if (is_up && *hs_enable) {
980 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
981 					MPACKET_VERIFY);
982 	} else {
983 		*lo_state = FPE_STATE_OFF;
984 		*lp_state = FPE_STATE_OFF;
985 	}
986 }
987 
988 static void stmmac_mac_link_down(struct phylink_config *config,
989 				 unsigned int mode, phy_interface_t interface)
990 {
991 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
992 
993 	stmmac_mac_set(priv, priv->ioaddr, false);
994 	priv->eee_active = false;
995 	priv->tx_lpi_enabled = false;
996 	priv->eee_enabled = stmmac_eee_init(priv);
997 	stmmac_set_eee_pls(priv, priv->hw, false);
998 
999 	if (priv->dma_cap.fpesel)
1000 		stmmac_fpe_link_state_handle(priv, false);
1001 }
1002 
1003 static void stmmac_mac_link_up(struct phylink_config *config,
1004 			       struct phy_device *phy,
1005 			       unsigned int mode, phy_interface_t interface,
1006 			       int speed, int duplex,
1007 			       bool tx_pause, bool rx_pause)
1008 {
1009 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1010 	u32 old_ctrl, ctrl;
1011 
1012 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1013 	    priv->plat->serdes_powerup)
1014 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1015 
1016 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1017 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1018 
1019 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1020 		switch (speed) {
1021 		case SPEED_10000:
1022 			ctrl |= priv->hw->link.xgmii.speed10000;
1023 			break;
1024 		case SPEED_5000:
1025 			ctrl |= priv->hw->link.xgmii.speed5000;
1026 			break;
1027 		case SPEED_2500:
1028 			ctrl |= priv->hw->link.xgmii.speed2500;
1029 			break;
1030 		default:
1031 			return;
1032 		}
1033 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1034 		switch (speed) {
1035 		case SPEED_100000:
1036 			ctrl |= priv->hw->link.xlgmii.speed100000;
1037 			break;
1038 		case SPEED_50000:
1039 			ctrl |= priv->hw->link.xlgmii.speed50000;
1040 			break;
1041 		case SPEED_40000:
1042 			ctrl |= priv->hw->link.xlgmii.speed40000;
1043 			break;
1044 		case SPEED_25000:
1045 			ctrl |= priv->hw->link.xlgmii.speed25000;
1046 			break;
1047 		case SPEED_10000:
1048 			ctrl |= priv->hw->link.xgmii.speed10000;
1049 			break;
1050 		case SPEED_2500:
1051 			ctrl |= priv->hw->link.speed2500;
1052 			break;
1053 		case SPEED_1000:
1054 			ctrl |= priv->hw->link.speed1000;
1055 			break;
1056 		default:
1057 			return;
1058 		}
1059 	} else {
1060 		switch (speed) {
1061 		case SPEED_2500:
1062 			ctrl |= priv->hw->link.speed2500;
1063 			break;
1064 		case SPEED_1000:
1065 			ctrl |= priv->hw->link.speed1000;
1066 			break;
1067 		case SPEED_100:
1068 			ctrl |= priv->hw->link.speed100;
1069 			break;
1070 		case SPEED_10:
1071 			ctrl |= priv->hw->link.speed10;
1072 			break;
1073 		default:
1074 			return;
1075 		}
1076 	}
1077 
1078 	priv->speed = speed;
1079 
1080 	if (priv->plat->fix_mac_speed)
1081 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1082 
1083 	if (!duplex)
1084 		ctrl &= ~priv->hw->link.duplex;
1085 	else
1086 		ctrl |= priv->hw->link.duplex;
1087 
1088 	/* Flow Control operation */
1089 	if (rx_pause && tx_pause)
1090 		priv->flow_ctrl = FLOW_AUTO;
1091 	else if (rx_pause && !tx_pause)
1092 		priv->flow_ctrl = FLOW_RX;
1093 	else if (!rx_pause && tx_pause)
1094 		priv->flow_ctrl = FLOW_TX;
1095 	else
1096 		priv->flow_ctrl = FLOW_OFF;
1097 
1098 	stmmac_mac_flow_ctrl(priv, duplex);
1099 
1100 	if (ctrl != old_ctrl)
1101 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1102 
1103 	stmmac_mac_set(priv, priv->ioaddr, true);
1104 	if (phy && priv->dma_cap.eee) {
1105 		priv->eee_active =
1106 			phy_init_eee(phy, !(priv->plat->flags &
1107 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1108 		priv->eee_enabled = stmmac_eee_init(priv);
1109 		priv->tx_lpi_enabled = priv->eee_enabled;
1110 		stmmac_set_eee_pls(priv, priv->hw, true);
1111 	}
1112 
1113 	if (priv->dma_cap.fpesel)
1114 		stmmac_fpe_link_state_handle(priv, true);
1115 
1116 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1117 		stmmac_hwtstamp_correct_latency(priv, priv);
1118 }
1119 
1120 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1121 	.mac_get_caps = stmmac_mac_get_caps,
1122 	.mac_select_pcs = stmmac_mac_select_pcs,
1123 	.mac_config = stmmac_mac_config,
1124 	.mac_link_down = stmmac_mac_link_down,
1125 	.mac_link_up = stmmac_mac_link_up,
1126 };
1127 
1128 /**
1129  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1130  * @priv: driver private structure
1131  * Description: this is to verify if the HW supports the PCS.
1132  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1133  * configured for the TBI, RTBI, or SGMII PHY interface.
1134  */
1135 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1136 {
1137 	int interface = priv->plat->mac_interface;
1138 
1139 	if (priv->dma_cap.pcs) {
1140 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1141 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1142 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1143 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1144 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1145 			priv->hw->pcs = STMMAC_PCS_RGMII;
1146 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1147 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1148 			priv->hw->pcs = STMMAC_PCS_SGMII;
1149 		}
1150 	}
1151 }
1152 
1153 /**
1154  * stmmac_init_phy - PHY initialization
1155  * @dev: net device structure
1156  * Description: it initializes the driver's PHY state, and attaches the PHY
1157  * to the mac driver.
1158  *  Return value:
1159  *  0 on success
1160  */
1161 static int stmmac_init_phy(struct net_device *dev)
1162 {
1163 	struct stmmac_priv *priv = netdev_priv(dev);
1164 	struct fwnode_handle *phy_fwnode;
1165 	struct fwnode_handle *fwnode;
1166 	int ret;
1167 
1168 	if (!phylink_expects_phy(priv->phylink))
1169 		return 0;
1170 
1171 	fwnode = priv->plat->port_node;
1172 	if (!fwnode)
1173 		fwnode = dev_fwnode(priv->device);
1174 
1175 	if (fwnode)
1176 		phy_fwnode = fwnode_get_phy_node(fwnode);
1177 	else
1178 		phy_fwnode = NULL;
1179 
1180 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1181 	 * manually parse it
1182 	 */
1183 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1184 		int addr = priv->plat->phy_addr;
1185 		struct phy_device *phydev;
1186 
1187 		if (addr < 0) {
1188 			netdev_err(priv->dev, "no phy found\n");
1189 			return -ENODEV;
1190 		}
1191 
1192 		phydev = mdiobus_get_phy(priv->mii, addr);
1193 		if (!phydev) {
1194 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1195 			return -ENODEV;
1196 		}
1197 
1198 		ret = phylink_connect_phy(priv->phylink, phydev);
1199 	} else {
1200 		fwnode_handle_put(phy_fwnode);
1201 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1202 	}
1203 
1204 	if (!priv->plat->pmt) {
1205 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1206 
1207 		phylink_ethtool_get_wol(priv->phylink, &wol);
1208 		device_set_wakeup_capable(priv->device, !!wol.supported);
1209 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1210 	}
1211 
1212 	return ret;
1213 }
1214 
1215 static int stmmac_phy_setup(struct stmmac_priv *priv)
1216 {
1217 	struct stmmac_mdio_bus_data *mdio_bus_data;
1218 	int mode = priv->plat->phy_interface;
1219 	struct fwnode_handle *fwnode;
1220 	struct phylink *phylink;
1221 
1222 	priv->phylink_config.dev = &priv->dev->dev;
1223 	priv->phylink_config.type = PHYLINK_NETDEV;
1224 	priv->phylink_config.mac_managed_pm = true;
1225 
1226 	/* Stmmac always requires an RX clock for hardware initialization */
1227 	priv->phylink_config.mac_requires_rxc = true;
1228 
1229 	mdio_bus_data = priv->plat->mdio_bus_data;
1230 	if (mdio_bus_data)
1231 		priv->phylink_config.ovr_an_inband =
1232 			mdio_bus_data->xpcs_an_inband;
1233 
1234 	/* Set the platform/firmware specified interface mode. Note, phylink
1235 	 * deals with the PHY interface mode, not the MAC interface mode.
1236 	 */
1237 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1238 
1239 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1240 	if (priv->hw->xpcs)
1241 		xpcs_get_interfaces(priv->hw->xpcs,
1242 				    priv->phylink_config.supported_interfaces);
1243 
1244 	fwnode = priv->plat->port_node;
1245 	if (!fwnode)
1246 		fwnode = dev_fwnode(priv->device);
1247 
1248 	phylink = phylink_create(&priv->phylink_config, fwnode,
1249 				 mode, &stmmac_phylink_mac_ops);
1250 	if (IS_ERR(phylink))
1251 		return PTR_ERR(phylink);
1252 
1253 	priv->phylink = phylink;
1254 	return 0;
1255 }
1256 
1257 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1258 				    struct stmmac_dma_conf *dma_conf)
1259 {
1260 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1261 	unsigned int desc_size;
1262 	void *head_rx;
1263 	u32 queue;
1264 
1265 	/* Display RX rings */
1266 	for (queue = 0; queue < rx_cnt; queue++) {
1267 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1268 
1269 		pr_info("\tRX Queue %u rings\n", queue);
1270 
1271 		if (priv->extend_desc) {
1272 			head_rx = (void *)rx_q->dma_erx;
1273 			desc_size = sizeof(struct dma_extended_desc);
1274 		} else {
1275 			head_rx = (void *)rx_q->dma_rx;
1276 			desc_size = sizeof(struct dma_desc);
1277 		}
1278 
1279 		/* Display RX ring */
1280 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1281 				    rx_q->dma_rx_phy, desc_size);
1282 	}
1283 }
1284 
1285 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1286 				    struct stmmac_dma_conf *dma_conf)
1287 {
1288 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1289 	unsigned int desc_size;
1290 	void *head_tx;
1291 	u32 queue;
1292 
1293 	/* Display TX rings */
1294 	for (queue = 0; queue < tx_cnt; queue++) {
1295 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1296 
1297 		pr_info("\tTX Queue %d rings\n", queue);
1298 
1299 		if (priv->extend_desc) {
1300 			head_tx = (void *)tx_q->dma_etx;
1301 			desc_size = sizeof(struct dma_extended_desc);
1302 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1303 			head_tx = (void *)tx_q->dma_entx;
1304 			desc_size = sizeof(struct dma_edesc);
1305 		} else {
1306 			head_tx = (void *)tx_q->dma_tx;
1307 			desc_size = sizeof(struct dma_desc);
1308 		}
1309 
1310 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1311 				    tx_q->dma_tx_phy, desc_size);
1312 	}
1313 }
1314 
1315 static void stmmac_display_rings(struct stmmac_priv *priv,
1316 				 struct stmmac_dma_conf *dma_conf)
1317 {
1318 	/* Display RX ring */
1319 	stmmac_display_rx_rings(priv, dma_conf);
1320 
1321 	/* Display TX ring */
1322 	stmmac_display_tx_rings(priv, dma_conf);
1323 }
1324 
1325 static int stmmac_set_bfsize(int mtu, int bufsize)
1326 {
1327 	int ret = bufsize;
1328 
1329 	if (mtu >= BUF_SIZE_8KiB)
1330 		ret = BUF_SIZE_16KiB;
1331 	else if (mtu >= BUF_SIZE_4KiB)
1332 		ret = BUF_SIZE_8KiB;
1333 	else if (mtu >= BUF_SIZE_2KiB)
1334 		ret = BUF_SIZE_4KiB;
1335 	else if (mtu > DEFAULT_BUFSIZE)
1336 		ret = BUF_SIZE_2KiB;
1337 	else
1338 		ret = DEFAULT_BUFSIZE;
1339 
1340 	return ret;
1341 }
1342 
1343 /**
1344  * stmmac_clear_rx_descriptors - clear RX descriptors
1345  * @priv: driver private structure
1346  * @dma_conf: structure to take the dma data
1347  * @queue: RX queue index
1348  * Description: this function is called to clear the RX descriptors
1349  * in case of both basic and extended descriptors are used.
1350  */
1351 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1352 					struct stmmac_dma_conf *dma_conf,
1353 					u32 queue)
1354 {
1355 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1356 	int i;
1357 
1358 	/* Clear the RX descriptors */
1359 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1360 		if (priv->extend_desc)
1361 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1362 					priv->use_riwt, priv->mode,
1363 					(i == dma_conf->dma_rx_size - 1),
1364 					dma_conf->dma_buf_sz);
1365 		else
1366 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1367 					priv->use_riwt, priv->mode,
1368 					(i == dma_conf->dma_rx_size - 1),
1369 					dma_conf->dma_buf_sz);
1370 }
1371 
1372 /**
1373  * stmmac_clear_tx_descriptors - clear tx descriptors
1374  * @priv: driver private structure
1375  * @dma_conf: structure to take the dma data
1376  * @queue: TX queue index.
1377  * Description: this function is called to clear the TX descriptors
1378  * in case of both basic and extended descriptors are used.
1379  */
1380 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1381 					struct stmmac_dma_conf *dma_conf,
1382 					u32 queue)
1383 {
1384 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1385 	int i;
1386 
1387 	/* Clear the TX descriptors */
1388 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1389 		int last = (i == (dma_conf->dma_tx_size - 1));
1390 		struct dma_desc *p;
1391 
1392 		if (priv->extend_desc)
1393 			p = &tx_q->dma_etx[i].basic;
1394 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1395 			p = &tx_q->dma_entx[i].basic;
1396 		else
1397 			p = &tx_q->dma_tx[i];
1398 
1399 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1400 	}
1401 }
1402 
1403 /**
1404  * stmmac_clear_descriptors - clear descriptors
1405  * @priv: driver private structure
1406  * @dma_conf: structure to take the dma data
1407  * Description: this function is called to clear the TX and RX descriptors
1408  * in case of both basic and extended descriptors are used.
1409  */
1410 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1411 				     struct stmmac_dma_conf *dma_conf)
1412 {
1413 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1414 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1415 	u32 queue;
1416 
1417 	/* Clear the RX descriptors */
1418 	for (queue = 0; queue < rx_queue_cnt; queue++)
1419 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1420 
1421 	/* Clear the TX descriptors */
1422 	for (queue = 0; queue < tx_queue_cnt; queue++)
1423 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1424 }
1425 
1426 /**
1427  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1428  * @priv: driver private structure
1429  * @dma_conf: structure to take the dma data
1430  * @p: descriptor pointer
1431  * @i: descriptor index
1432  * @flags: gfp flag
1433  * @queue: RX queue index
1434  * Description: this function is called to allocate a receive buffer, perform
1435  * the DMA mapping and init the descriptor.
1436  */
1437 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1438 				  struct stmmac_dma_conf *dma_conf,
1439 				  struct dma_desc *p,
1440 				  int i, gfp_t flags, u32 queue)
1441 {
1442 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1443 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1444 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1445 
1446 	if (priv->dma_cap.host_dma_width <= 32)
1447 		gfp |= GFP_DMA32;
1448 
1449 	if (!buf->page) {
1450 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1451 		if (!buf->page)
1452 			return -ENOMEM;
1453 		buf->page_offset = stmmac_rx_offset(priv);
1454 	}
1455 
1456 	if (priv->sph && !buf->sec_page) {
1457 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1458 		if (!buf->sec_page)
1459 			return -ENOMEM;
1460 
1461 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1462 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1463 	} else {
1464 		buf->sec_page = NULL;
1465 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1466 	}
1467 
1468 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1469 
1470 	stmmac_set_desc_addr(priv, p, buf->addr);
1471 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1472 		stmmac_init_desc3(priv, p);
1473 
1474 	return 0;
1475 }
1476 
1477 /**
1478  * stmmac_free_rx_buffer - free RX dma buffers
1479  * @priv: private structure
1480  * @rx_q: RX queue
1481  * @i: buffer index.
1482  */
1483 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1484 				  struct stmmac_rx_queue *rx_q,
1485 				  int i)
1486 {
1487 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1488 
1489 	if (buf->page)
1490 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1491 	buf->page = NULL;
1492 
1493 	if (buf->sec_page)
1494 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1495 	buf->sec_page = NULL;
1496 }
1497 
1498 /**
1499  * stmmac_free_tx_buffer - free RX dma buffers
1500  * @priv: private structure
1501  * @dma_conf: structure to take the dma data
1502  * @queue: RX queue index
1503  * @i: buffer index.
1504  */
1505 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1506 				  struct stmmac_dma_conf *dma_conf,
1507 				  u32 queue, int i)
1508 {
1509 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1510 
1511 	if (tx_q->tx_skbuff_dma[i].buf &&
1512 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1513 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1514 			dma_unmap_page(priv->device,
1515 				       tx_q->tx_skbuff_dma[i].buf,
1516 				       tx_q->tx_skbuff_dma[i].len,
1517 				       DMA_TO_DEVICE);
1518 		else
1519 			dma_unmap_single(priv->device,
1520 					 tx_q->tx_skbuff_dma[i].buf,
1521 					 tx_q->tx_skbuff_dma[i].len,
1522 					 DMA_TO_DEVICE);
1523 	}
1524 
1525 	if (tx_q->xdpf[i] &&
1526 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1527 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1528 		xdp_return_frame(tx_q->xdpf[i]);
1529 		tx_q->xdpf[i] = NULL;
1530 	}
1531 
1532 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1533 		tx_q->xsk_frames_done++;
1534 
1535 	if (tx_q->tx_skbuff[i] &&
1536 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1537 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1538 		tx_q->tx_skbuff[i] = NULL;
1539 	}
1540 
1541 	tx_q->tx_skbuff_dma[i].buf = 0;
1542 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1543 }
1544 
1545 /**
1546  * dma_free_rx_skbufs - free RX dma buffers
1547  * @priv: private structure
1548  * @dma_conf: structure to take the dma data
1549  * @queue: RX queue index
1550  */
1551 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1552 			       struct stmmac_dma_conf *dma_conf,
1553 			       u32 queue)
1554 {
1555 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1556 	int i;
1557 
1558 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1559 		stmmac_free_rx_buffer(priv, rx_q, i);
1560 }
1561 
1562 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1563 				   struct stmmac_dma_conf *dma_conf,
1564 				   u32 queue, gfp_t flags)
1565 {
1566 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1567 	int i;
1568 
1569 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1570 		struct dma_desc *p;
1571 		int ret;
1572 
1573 		if (priv->extend_desc)
1574 			p = &((rx_q->dma_erx + i)->basic);
1575 		else
1576 			p = rx_q->dma_rx + i;
1577 
1578 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1579 					     queue);
1580 		if (ret)
1581 			return ret;
1582 
1583 		rx_q->buf_alloc_num++;
1584 	}
1585 
1586 	return 0;
1587 }
1588 
1589 /**
1590  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1591  * @priv: private structure
1592  * @dma_conf: structure to take the dma data
1593  * @queue: RX queue index
1594  */
1595 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1596 				struct stmmac_dma_conf *dma_conf,
1597 				u32 queue)
1598 {
1599 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1600 	int i;
1601 
1602 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1603 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1604 
1605 		if (!buf->xdp)
1606 			continue;
1607 
1608 		xsk_buff_free(buf->xdp);
1609 		buf->xdp = NULL;
1610 	}
1611 }
1612 
1613 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1614 				      struct stmmac_dma_conf *dma_conf,
1615 				      u32 queue)
1616 {
1617 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1618 	int i;
1619 
1620 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1621 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1622 	 * use this macro to make sure no size violations.
1623 	 */
1624 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1625 
1626 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1627 		struct stmmac_rx_buffer *buf;
1628 		dma_addr_t dma_addr;
1629 		struct dma_desc *p;
1630 
1631 		if (priv->extend_desc)
1632 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1633 		else
1634 			p = rx_q->dma_rx + i;
1635 
1636 		buf = &rx_q->buf_pool[i];
1637 
1638 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1639 		if (!buf->xdp)
1640 			return -ENOMEM;
1641 
1642 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1643 		stmmac_set_desc_addr(priv, p, dma_addr);
1644 		rx_q->buf_alloc_num++;
1645 	}
1646 
1647 	return 0;
1648 }
1649 
1650 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1651 {
1652 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1653 		return NULL;
1654 
1655 	return xsk_get_pool_from_qid(priv->dev, queue);
1656 }
1657 
1658 /**
1659  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1660  * @priv: driver private structure
1661  * @dma_conf: structure to take the dma data
1662  * @queue: RX queue index
1663  * @flags: gfp flag.
1664  * Description: this function initializes the DMA RX descriptors
1665  * and allocates the socket buffers. It supports the chained and ring
1666  * modes.
1667  */
1668 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1669 				    struct stmmac_dma_conf *dma_conf,
1670 				    u32 queue, gfp_t flags)
1671 {
1672 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1673 	int ret;
1674 
1675 	netif_dbg(priv, probe, priv->dev,
1676 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1677 		  (u32)rx_q->dma_rx_phy);
1678 
1679 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1680 
1681 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1682 
1683 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1684 
1685 	if (rx_q->xsk_pool) {
1686 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1687 						   MEM_TYPE_XSK_BUFF_POOL,
1688 						   NULL));
1689 		netdev_info(priv->dev,
1690 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1691 			    rx_q->queue_index);
1692 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1693 	} else {
1694 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1695 						   MEM_TYPE_PAGE_POOL,
1696 						   rx_q->page_pool));
1697 		netdev_info(priv->dev,
1698 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1699 			    rx_q->queue_index);
1700 	}
1701 
1702 	if (rx_q->xsk_pool) {
1703 		/* RX XDP ZC buffer pool may not be populated, e.g.
1704 		 * xdpsock TX-only.
1705 		 */
1706 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1707 	} else {
1708 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1709 		if (ret < 0)
1710 			return -ENOMEM;
1711 	}
1712 
1713 	/* Setup the chained descriptor addresses */
1714 	if (priv->mode == STMMAC_CHAIN_MODE) {
1715 		if (priv->extend_desc)
1716 			stmmac_mode_init(priv, rx_q->dma_erx,
1717 					 rx_q->dma_rx_phy,
1718 					 dma_conf->dma_rx_size, 1);
1719 		else
1720 			stmmac_mode_init(priv, rx_q->dma_rx,
1721 					 rx_q->dma_rx_phy,
1722 					 dma_conf->dma_rx_size, 0);
1723 	}
1724 
1725 	return 0;
1726 }
1727 
1728 static int init_dma_rx_desc_rings(struct net_device *dev,
1729 				  struct stmmac_dma_conf *dma_conf,
1730 				  gfp_t flags)
1731 {
1732 	struct stmmac_priv *priv = netdev_priv(dev);
1733 	u32 rx_count = priv->plat->rx_queues_to_use;
1734 	int queue;
1735 	int ret;
1736 
1737 	/* RX INITIALIZATION */
1738 	netif_dbg(priv, probe, priv->dev,
1739 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1740 
1741 	for (queue = 0; queue < rx_count; queue++) {
1742 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1743 		if (ret)
1744 			goto err_init_rx_buffers;
1745 	}
1746 
1747 	return 0;
1748 
1749 err_init_rx_buffers:
1750 	while (queue >= 0) {
1751 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1752 
1753 		if (rx_q->xsk_pool)
1754 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1755 		else
1756 			dma_free_rx_skbufs(priv, dma_conf, queue);
1757 
1758 		rx_q->buf_alloc_num = 0;
1759 		rx_q->xsk_pool = NULL;
1760 
1761 		queue--;
1762 	}
1763 
1764 	return ret;
1765 }
1766 
1767 /**
1768  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1769  * @priv: driver private structure
1770  * @dma_conf: structure to take the dma data
1771  * @queue: TX queue index
1772  * Description: this function initializes the DMA TX descriptors
1773  * and allocates the socket buffers. It supports the chained and ring
1774  * modes.
1775  */
1776 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1777 				    struct stmmac_dma_conf *dma_conf,
1778 				    u32 queue)
1779 {
1780 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1781 	int i;
1782 
1783 	netif_dbg(priv, probe, priv->dev,
1784 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1785 		  (u32)tx_q->dma_tx_phy);
1786 
1787 	/* Setup the chained descriptor addresses */
1788 	if (priv->mode == STMMAC_CHAIN_MODE) {
1789 		if (priv->extend_desc)
1790 			stmmac_mode_init(priv, tx_q->dma_etx,
1791 					 tx_q->dma_tx_phy,
1792 					 dma_conf->dma_tx_size, 1);
1793 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1794 			stmmac_mode_init(priv, tx_q->dma_tx,
1795 					 tx_q->dma_tx_phy,
1796 					 dma_conf->dma_tx_size, 0);
1797 	}
1798 
1799 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1800 
1801 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1802 		struct dma_desc *p;
1803 
1804 		if (priv->extend_desc)
1805 			p = &((tx_q->dma_etx + i)->basic);
1806 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1807 			p = &((tx_q->dma_entx + i)->basic);
1808 		else
1809 			p = tx_q->dma_tx + i;
1810 
1811 		stmmac_clear_desc(priv, p);
1812 
1813 		tx_q->tx_skbuff_dma[i].buf = 0;
1814 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1815 		tx_q->tx_skbuff_dma[i].len = 0;
1816 		tx_q->tx_skbuff_dma[i].last_segment = false;
1817 		tx_q->tx_skbuff[i] = NULL;
1818 	}
1819 
1820 	return 0;
1821 }
1822 
1823 static int init_dma_tx_desc_rings(struct net_device *dev,
1824 				  struct stmmac_dma_conf *dma_conf)
1825 {
1826 	struct stmmac_priv *priv = netdev_priv(dev);
1827 	u32 tx_queue_cnt;
1828 	u32 queue;
1829 
1830 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1831 
1832 	for (queue = 0; queue < tx_queue_cnt; queue++)
1833 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1834 
1835 	return 0;
1836 }
1837 
1838 /**
1839  * init_dma_desc_rings - init the RX/TX descriptor rings
1840  * @dev: net device structure
1841  * @dma_conf: structure to take the dma data
1842  * @flags: gfp flag.
1843  * Description: this function initializes the DMA RX/TX descriptors
1844  * and allocates the socket buffers. It supports the chained and ring
1845  * modes.
1846  */
1847 static int init_dma_desc_rings(struct net_device *dev,
1848 			       struct stmmac_dma_conf *dma_conf,
1849 			       gfp_t flags)
1850 {
1851 	struct stmmac_priv *priv = netdev_priv(dev);
1852 	int ret;
1853 
1854 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1855 	if (ret)
1856 		return ret;
1857 
1858 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1859 
1860 	stmmac_clear_descriptors(priv, dma_conf);
1861 
1862 	if (netif_msg_hw(priv))
1863 		stmmac_display_rings(priv, dma_conf);
1864 
1865 	return ret;
1866 }
1867 
1868 /**
1869  * dma_free_tx_skbufs - free TX dma buffers
1870  * @priv: private structure
1871  * @dma_conf: structure to take the dma data
1872  * @queue: TX queue index
1873  */
1874 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1875 			       struct stmmac_dma_conf *dma_conf,
1876 			       u32 queue)
1877 {
1878 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1879 	int i;
1880 
1881 	tx_q->xsk_frames_done = 0;
1882 
1883 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1884 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1885 
1886 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1887 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1888 		tx_q->xsk_frames_done = 0;
1889 		tx_q->xsk_pool = NULL;
1890 	}
1891 }
1892 
1893 /**
1894  * stmmac_free_tx_skbufs - free TX skb buffers
1895  * @priv: private structure
1896  */
1897 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1898 {
1899 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1900 	u32 queue;
1901 
1902 	for (queue = 0; queue < tx_queue_cnt; queue++)
1903 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1904 }
1905 
1906 /**
1907  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1908  * @priv: private structure
1909  * @dma_conf: structure to take the dma data
1910  * @queue: RX queue index
1911  */
1912 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1913 					 struct stmmac_dma_conf *dma_conf,
1914 					 u32 queue)
1915 {
1916 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1917 
1918 	/* Release the DMA RX socket buffers */
1919 	if (rx_q->xsk_pool)
1920 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1921 	else
1922 		dma_free_rx_skbufs(priv, dma_conf, queue);
1923 
1924 	rx_q->buf_alloc_num = 0;
1925 	rx_q->xsk_pool = NULL;
1926 
1927 	/* Free DMA regions of consistent memory previously allocated */
1928 	if (!priv->extend_desc)
1929 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1930 				  sizeof(struct dma_desc),
1931 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1932 	else
1933 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1934 				  sizeof(struct dma_extended_desc),
1935 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1936 
1937 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1938 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1939 
1940 	kfree(rx_q->buf_pool);
1941 	if (rx_q->page_pool)
1942 		page_pool_destroy(rx_q->page_pool);
1943 }
1944 
1945 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1946 				       struct stmmac_dma_conf *dma_conf)
1947 {
1948 	u32 rx_count = priv->plat->rx_queues_to_use;
1949 	u32 queue;
1950 
1951 	/* Free RX queue resources */
1952 	for (queue = 0; queue < rx_count; queue++)
1953 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1954 }
1955 
1956 /**
1957  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1958  * @priv: private structure
1959  * @dma_conf: structure to take the dma data
1960  * @queue: TX queue index
1961  */
1962 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1963 					 struct stmmac_dma_conf *dma_conf,
1964 					 u32 queue)
1965 {
1966 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1967 	size_t size;
1968 	void *addr;
1969 
1970 	/* Release the DMA TX socket buffers */
1971 	dma_free_tx_skbufs(priv, dma_conf, queue);
1972 
1973 	if (priv->extend_desc) {
1974 		size = sizeof(struct dma_extended_desc);
1975 		addr = tx_q->dma_etx;
1976 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1977 		size = sizeof(struct dma_edesc);
1978 		addr = tx_q->dma_entx;
1979 	} else {
1980 		size = sizeof(struct dma_desc);
1981 		addr = tx_q->dma_tx;
1982 	}
1983 
1984 	size *= dma_conf->dma_tx_size;
1985 
1986 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1987 
1988 	kfree(tx_q->tx_skbuff_dma);
1989 	kfree(tx_q->tx_skbuff);
1990 }
1991 
1992 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1993 				       struct stmmac_dma_conf *dma_conf)
1994 {
1995 	u32 tx_count = priv->plat->tx_queues_to_use;
1996 	u32 queue;
1997 
1998 	/* Free TX queue resources */
1999 	for (queue = 0; queue < tx_count; queue++)
2000 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2001 }
2002 
2003 /**
2004  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2005  * @priv: private structure
2006  * @dma_conf: structure to take the dma data
2007  * @queue: RX queue index
2008  * Description: according to which descriptor can be used (extend or basic)
2009  * this function allocates the resources for TX and RX paths. In case of
2010  * reception, for example, it pre-allocated the RX socket buffer in order to
2011  * allow zero-copy mechanism.
2012  */
2013 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2014 					 struct stmmac_dma_conf *dma_conf,
2015 					 u32 queue)
2016 {
2017 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2018 	struct stmmac_channel *ch = &priv->channel[queue];
2019 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2020 	struct page_pool_params pp_params = { 0 };
2021 	unsigned int num_pages;
2022 	unsigned int napi_id;
2023 	int ret;
2024 
2025 	rx_q->queue_index = queue;
2026 	rx_q->priv_data = priv;
2027 
2028 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2029 	pp_params.pool_size = dma_conf->dma_rx_size;
2030 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2031 	pp_params.order = ilog2(num_pages);
2032 	pp_params.nid = dev_to_node(priv->device);
2033 	pp_params.dev = priv->device;
2034 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2035 	pp_params.offset = stmmac_rx_offset(priv);
2036 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2037 
2038 	rx_q->page_pool = page_pool_create(&pp_params);
2039 	if (IS_ERR(rx_q->page_pool)) {
2040 		ret = PTR_ERR(rx_q->page_pool);
2041 		rx_q->page_pool = NULL;
2042 		return ret;
2043 	}
2044 
2045 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2046 				 sizeof(*rx_q->buf_pool),
2047 				 GFP_KERNEL);
2048 	if (!rx_q->buf_pool)
2049 		return -ENOMEM;
2050 
2051 	if (priv->extend_desc) {
2052 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2053 						   dma_conf->dma_rx_size *
2054 						   sizeof(struct dma_extended_desc),
2055 						   &rx_q->dma_rx_phy,
2056 						   GFP_KERNEL);
2057 		if (!rx_q->dma_erx)
2058 			return -ENOMEM;
2059 
2060 	} else {
2061 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2062 						  dma_conf->dma_rx_size *
2063 						  sizeof(struct dma_desc),
2064 						  &rx_q->dma_rx_phy,
2065 						  GFP_KERNEL);
2066 		if (!rx_q->dma_rx)
2067 			return -ENOMEM;
2068 	}
2069 
2070 	if (stmmac_xdp_is_enabled(priv) &&
2071 	    test_bit(queue, priv->af_xdp_zc_qps))
2072 		napi_id = ch->rxtx_napi.napi_id;
2073 	else
2074 		napi_id = ch->rx_napi.napi_id;
2075 
2076 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2077 			       rx_q->queue_index,
2078 			       napi_id);
2079 	if (ret) {
2080 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2081 		return -EINVAL;
2082 	}
2083 
2084 	return 0;
2085 }
2086 
2087 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2088 				       struct stmmac_dma_conf *dma_conf)
2089 {
2090 	u32 rx_count = priv->plat->rx_queues_to_use;
2091 	u32 queue;
2092 	int ret;
2093 
2094 	/* RX queues buffers and DMA */
2095 	for (queue = 0; queue < rx_count; queue++) {
2096 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2097 		if (ret)
2098 			goto err_dma;
2099 	}
2100 
2101 	return 0;
2102 
2103 err_dma:
2104 	free_dma_rx_desc_resources(priv, dma_conf);
2105 
2106 	return ret;
2107 }
2108 
2109 /**
2110  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2111  * @priv: private structure
2112  * @dma_conf: structure to take the dma data
2113  * @queue: TX queue index
2114  * Description: according to which descriptor can be used (extend or basic)
2115  * this function allocates the resources for TX and RX paths. In case of
2116  * reception, for example, it pre-allocated the RX socket buffer in order to
2117  * allow zero-copy mechanism.
2118  */
2119 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2120 					 struct stmmac_dma_conf *dma_conf,
2121 					 u32 queue)
2122 {
2123 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2124 	size_t size;
2125 	void *addr;
2126 
2127 	tx_q->queue_index = queue;
2128 	tx_q->priv_data = priv;
2129 
2130 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2131 				      sizeof(*tx_q->tx_skbuff_dma),
2132 				      GFP_KERNEL);
2133 	if (!tx_q->tx_skbuff_dma)
2134 		return -ENOMEM;
2135 
2136 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2137 				  sizeof(struct sk_buff *),
2138 				  GFP_KERNEL);
2139 	if (!tx_q->tx_skbuff)
2140 		return -ENOMEM;
2141 
2142 	if (priv->extend_desc)
2143 		size = sizeof(struct dma_extended_desc);
2144 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2145 		size = sizeof(struct dma_edesc);
2146 	else
2147 		size = sizeof(struct dma_desc);
2148 
2149 	size *= dma_conf->dma_tx_size;
2150 
2151 	addr = dma_alloc_coherent(priv->device, size,
2152 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2153 	if (!addr)
2154 		return -ENOMEM;
2155 
2156 	if (priv->extend_desc)
2157 		tx_q->dma_etx = addr;
2158 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2159 		tx_q->dma_entx = addr;
2160 	else
2161 		tx_q->dma_tx = addr;
2162 
2163 	return 0;
2164 }
2165 
2166 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2167 				       struct stmmac_dma_conf *dma_conf)
2168 {
2169 	u32 tx_count = priv->plat->tx_queues_to_use;
2170 	u32 queue;
2171 	int ret;
2172 
2173 	/* TX queues buffers and DMA */
2174 	for (queue = 0; queue < tx_count; queue++) {
2175 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2176 		if (ret)
2177 			goto err_dma;
2178 	}
2179 
2180 	return 0;
2181 
2182 err_dma:
2183 	free_dma_tx_desc_resources(priv, dma_conf);
2184 	return ret;
2185 }
2186 
2187 /**
2188  * alloc_dma_desc_resources - alloc TX/RX resources.
2189  * @priv: private structure
2190  * @dma_conf: structure to take the dma data
2191  * Description: according to which descriptor can be used (extend or basic)
2192  * this function allocates the resources for TX and RX paths. In case of
2193  * reception, for example, it pre-allocated the RX socket buffer in order to
2194  * allow zero-copy mechanism.
2195  */
2196 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2197 				    struct stmmac_dma_conf *dma_conf)
2198 {
2199 	/* RX Allocation */
2200 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2201 
2202 	if (ret)
2203 		return ret;
2204 
2205 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2206 
2207 	return ret;
2208 }
2209 
2210 /**
2211  * free_dma_desc_resources - free dma desc resources
2212  * @priv: private structure
2213  * @dma_conf: structure to take the dma data
2214  */
2215 static void free_dma_desc_resources(struct stmmac_priv *priv,
2216 				    struct stmmac_dma_conf *dma_conf)
2217 {
2218 	/* Release the DMA TX socket buffers */
2219 	free_dma_tx_desc_resources(priv, dma_conf);
2220 
2221 	/* Release the DMA RX socket buffers later
2222 	 * to ensure all pending XDP_TX buffers are returned.
2223 	 */
2224 	free_dma_rx_desc_resources(priv, dma_conf);
2225 }
2226 
2227 /**
2228  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2229  *  @priv: driver private structure
2230  *  Description: It is used for enabling the rx queues in the MAC
2231  */
2232 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2233 {
2234 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2235 	int queue;
2236 	u8 mode;
2237 
2238 	for (queue = 0; queue < rx_queues_count; queue++) {
2239 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2240 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2241 	}
2242 }
2243 
2244 /**
2245  * stmmac_start_rx_dma - start RX DMA channel
2246  * @priv: driver private structure
2247  * @chan: RX channel index
2248  * Description:
2249  * This starts a RX DMA channel
2250  */
2251 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2252 {
2253 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2254 	stmmac_start_rx(priv, priv->ioaddr, chan);
2255 }
2256 
2257 /**
2258  * stmmac_start_tx_dma - start TX DMA channel
2259  * @priv: driver private structure
2260  * @chan: TX channel index
2261  * Description:
2262  * This starts a TX DMA channel
2263  */
2264 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2265 {
2266 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2267 	stmmac_start_tx(priv, priv->ioaddr, chan);
2268 }
2269 
2270 /**
2271  * stmmac_stop_rx_dma - stop RX DMA channel
2272  * @priv: driver private structure
2273  * @chan: RX channel index
2274  * Description:
2275  * This stops a RX DMA channel
2276  */
2277 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2278 {
2279 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2280 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2281 }
2282 
2283 /**
2284  * stmmac_stop_tx_dma - stop TX DMA channel
2285  * @priv: driver private structure
2286  * @chan: TX channel index
2287  * Description:
2288  * This stops a TX DMA channel
2289  */
2290 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2291 {
2292 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2293 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2294 }
2295 
2296 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2297 {
2298 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2299 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2300 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2301 	u32 chan;
2302 
2303 	for (chan = 0; chan < dma_csr_ch; chan++) {
2304 		struct stmmac_channel *ch = &priv->channel[chan];
2305 		unsigned long flags;
2306 
2307 		spin_lock_irqsave(&ch->lock, flags);
2308 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2309 		spin_unlock_irqrestore(&ch->lock, flags);
2310 	}
2311 }
2312 
2313 /**
2314  * stmmac_start_all_dma - start all RX and TX DMA channels
2315  * @priv: driver private structure
2316  * Description:
2317  * This starts all the RX and TX DMA channels
2318  */
2319 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2320 {
2321 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2322 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2323 	u32 chan = 0;
2324 
2325 	for (chan = 0; chan < rx_channels_count; chan++)
2326 		stmmac_start_rx_dma(priv, chan);
2327 
2328 	for (chan = 0; chan < tx_channels_count; chan++)
2329 		stmmac_start_tx_dma(priv, chan);
2330 }
2331 
2332 /**
2333  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2334  * @priv: driver private structure
2335  * Description:
2336  * This stops the RX and TX DMA channels
2337  */
2338 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2339 {
2340 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2341 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2342 	u32 chan = 0;
2343 
2344 	for (chan = 0; chan < rx_channels_count; chan++)
2345 		stmmac_stop_rx_dma(priv, chan);
2346 
2347 	for (chan = 0; chan < tx_channels_count; chan++)
2348 		stmmac_stop_tx_dma(priv, chan);
2349 }
2350 
2351 /**
2352  *  stmmac_dma_operation_mode - HW DMA operation mode
2353  *  @priv: driver private structure
2354  *  Description: it is used for configuring the DMA operation mode register in
2355  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2356  */
2357 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2358 {
2359 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2360 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2361 	int rxfifosz = priv->plat->rx_fifo_size;
2362 	int txfifosz = priv->plat->tx_fifo_size;
2363 	u32 txmode = 0;
2364 	u32 rxmode = 0;
2365 	u32 chan = 0;
2366 	u8 qmode = 0;
2367 
2368 	if (rxfifosz == 0)
2369 		rxfifosz = priv->dma_cap.rx_fifo_size;
2370 	if (txfifosz == 0)
2371 		txfifosz = priv->dma_cap.tx_fifo_size;
2372 
2373 	/* Adjust for real per queue fifo size */
2374 	rxfifosz /= rx_channels_count;
2375 	txfifosz /= tx_channels_count;
2376 
2377 	if (priv->plat->force_thresh_dma_mode) {
2378 		txmode = tc;
2379 		rxmode = tc;
2380 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2381 		/*
2382 		 * In case of GMAC, SF mode can be enabled
2383 		 * to perform the TX COE in HW. This depends on:
2384 		 * 1) TX COE if actually supported
2385 		 * 2) There is no bugged Jumbo frame support
2386 		 *    that needs to not insert csum in the TDES.
2387 		 */
2388 		txmode = SF_DMA_MODE;
2389 		rxmode = SF_DMA_MODE;
2390 		priv->xstats.threshold = SF_DMA_MODE;
2391 	} else {
2392 		txmode = tc;
2393 		rxmode = SF_DMA_MODE;
2394 	}
2395 
2396 	/* configure all channels */
2397 	for (chan = 0; chan < rx_channels_count; chan++) {
2398 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2399 		u32 buf_size;
2400 
2401 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2402 
2403 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2404 				rxfifosz, qmode);
2405 
2406 		if (rx_q->xsk_pool) {
2407 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2408 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2409 					      buf_size,
2410 					      chan);
2411 		} else {
2412 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2413 					      priv->dma_conf.dma_buf_sz,
2414 					      chan);
2415 		}
2416 	}
2417 
2418 	for (chan = 0; chan < tx_channels_count; chan++) {
2419 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2420 
2421 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2422 				txfifosz, qmode);
2423 	}
2424 }
2425 
2426 static void stmmac_xsk_request_timestamp(void *_priv)
2427 {
2428 	struct stmmac_metadata_request *meta_req = _priv;
2429 
2430 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2431 	*meta_req->set_ic = true;
2432 }
2433 
2434 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2435 {
2436 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2437 	struct stmmac_priv *priv = tx_compl->priv;
2438 	struct dma_desc *desc = tx_compl->desc;
2439 	bool found = false;
2440 	u64 ns = 0;
2441 
2442 	if (!priv->hwts_tx_en)
2443 		return 0;
2444 
2445 	/* check tx tstamp status */
2446 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2447 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2448 		found = true;
2449 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2450 		found = true;
2451 	}
2452 
2453 	if (found) {
2454 		ns -= priv->plat->cdc_error_adj;
2455 		return ns_to_ktime(ns);
2456 	}
2457 
2458 	return 0;
2459 }
2460 
2461 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2462 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2463 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2464 };
2465 
2466 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2467 {
2468 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2469 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2470 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2471 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2472 	unsigned int entry = tx_q->cur_tx;
2473 	struct dma_desc *tx_desc = NULL;
2474 	struct xdp_desc xdp_desc;
2475 	bool work_done = true;
2476 	u32 tx_set_ic_bit = 0;
2477 
2478 	/* Avoids TX time-out as we are sharing with slow path */
2479 	txq_trans_cond_update(nq);
2480 
2481 	budget = min(budget, stmmac_tx_avail(priv, queue));
2482 
2483 	while (budget-- > 0) {
2484 		struct stmmac_metadata_request meta_req;
2485 		struct xsk_tx_metadata *meta = NULL;
2486 		dma_addr_t dma_addr;
2487 		bool set_ic;
2488 
2489 		/* We are sharing with slow path and stop XSK TX desc submission when
2490 		 * available TX ring is less than threshold.
2491 		 */
2492 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2493 		    !netif_carrier_ok(priv->dev)) {
2494 			work_done = false;
2495 			break;
2496 		}
2497 
2498 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2499 			break;
2500 
2501 		if (priv->est && priv->est->enable &&
2502 		    priv->est->max_sdu[queue] &&
2503 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2504 			priv->xstats.max_sdu_txq_drop[queue]++;
2505 			continue;
2506 		}
2507 
2508 		if (likely(priv->extend_desc))
2509 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2510 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2511 			tx_desc = &tx_q->dma_entx[entry].basic;
2512 		else
2513 			tx_desc = tx_q->dma_tx + entry;
2514 
2515 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2516 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2517 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2518 
2519 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2520 
2521 		/* To return XDP buffer to XSK pool, we simple call
2522 		 * xsk_tx_completed(), so we don't need to fill up
2523 		 * 'buf' and 'xdpf'.
2524 		 */
2525 		tx_q->tx_skbuff_dma[entry].buf = 0;
2526 		tx_q->xdpf[entry] = NULL;
2527 
2528 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2529 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2530 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2531 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2532 
2533 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2534 
2535 		tx_q->tx_count_frames++;
2536 
2537 		if (!priv->tx_coal_frames[queue])
2538 			set_ic = false;
2539 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2540 			set_ic = true;
2541 		else
2542 			set_ic = false;
2543 
2544 		meta_req.priv = priv;
2545 		meta_req.tx_desc = tx_desc;
2546 		meta_req.set_ic = &set_ic;
2547 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2548 					&meta_req);
2549 		if (set_ic) {
2550 			tx_q->tx_count_frames = 0;
2551 			stmmac_set_tx_ic(priv, tx_desc);
2552 			tx_set_ic_bit++;
2553 		}
2554 
2555 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2556 				       true, priv->mode, true, true,
2557 				       xdp_desc.len);
2558 
2559 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2560 
2561 		xsk_tx_metadata_to_compl(meta,
2562 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2563 
2564 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2565 		entry = tx_q->cur_tx;
2566 	}
2567 	u64_stats_update_begin(&txq_stats->napi_syncp);
2568 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2569 	u64_stats_update_end(&txq_stats->napi_syncp);
2570 
2571 	if (tx_desc) {
2572 		stmmac_flush_tx_descriptors(priv, queue);
2573 		xsk_tx_release(pool);
2574 	}
2575 
2576 	/* Return true if all of the 3 conditions are met
2577 	 *  a) TX Budget is still available
2578 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2579 	 *     pending XSK TX for transmission)
2580 	 */
2581 	return !!budget && work_done;
2582 }
2583 
2584 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2585 {
2586 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2587 		tc += 64;
2588 
2589 		if (priv->plat->force_thresh_dma_mode)
2590 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2591 		else
2592 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2593 						      chan);
2594 
2595 		priv->xstats.threshold = tc;
2596 	}
2597 }
2598 
2599 /**
2600  * stmmac_tx_clean - to manage the transmission completion
2601  * @priv: driver private structure
2602  * @budget: napi budget limiting this functions packet handling
2603  * @queue: TX queue index
2604  * @pending_packets: signal to arm the TX coal timer
2605  * Description: it reclaims the transmit resources after transmission completes.
2606  * If some packets still needs to be handled, due to TX coalesce, set
2607  * pending_packets to true to make NAPI arm the TX coal timer.
2608  */
2609 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2610 			   bool *pending_packets)
2611 {
2612 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2613 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2614 	unsigned int bytes_compl = 0, pkts_compl = 0;
2615 	unsigned int entry, xmits = 0, count = 0;
2616 	u32 tx_packets = 0, tx_errors = 0;
2617 
2618 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2619 
2620 	tx_q->xsk_frames_done = 0;
2621 
2622 	entry = tx_q->dirty_tx;
2623 
2624 	/* Try to clean all TX complete frame in 1 shot */
2625 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2626 		struct xdp_frame *xdpf;
2627 		struct sk_buff *skb;
2628 		struct dma_desc *p;
2629 		int status;
2630 
2631 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2632 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2633 			xdpf = tx_q->xdpf[entry];
2634 			skb = NULL;
2635 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2636 			xdpf = NULL;
2637 			skb = tx_q->tx_skbuff[entry];
2638 		} else {
2639 			xdpf = NULL;
2640 			skb = NULL;
2641 		}
2642 
2643 		if (priv->extend_desc)
2644 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2645 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2646 			p = &tx_q->dma_entx[entry].basic;
2647 		else
2648 			p = tx_q->dma_tx + entry;
2649 
2650 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2651 		/* Check if the descriptor is owned by the DMA */
2652 		if (unlikely(status & tx_dma_own))
2653 			break;
2654 
2655 		count++;
2656 
2657 		/* Make sure descriptor fields are read after reading
2658 		 * the own bit.
2659 		 */
2660 		dma_rmb();
2661 
2662 		/* Just consider the last segment and ...*/
2663 		if (likely(!(status & tx_not_ls))) {
2664 			/* ... verify the status error condition */
2665 			if (unlikely(status & tx_err)) {
2666 				tx_errors++;
2667 				if (unlikely(status & tx_err_bump_tc))
2668 					stmmac_bump_dma_threshold(priv, queue);
2669 			} else {
2670 				tx_packets++;
2671 			}
2672 			if (skb) {
2673 				stmmac_get_tx_hwtstamp(priv, p, skb);
2674 			} else if (tx_q->xsk_pool &&
2675 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2676 				struct stmmac_xsk_tx_complete tx_compl = {
2677 					.priv = priv,
2678 					.desc = p,
2679 				};
2680 
2681 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2682 							 &stmmac_xsk_tx_metadata_ops,
2683 							 &tx_compl);
2684 			}
2685 		}
2686 
2687 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2688 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2689 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2690 				dma_unmap_page(priv->device,
2691 					       tx_q->tx_skbuff_dma[entry].buf,
2692 					       tx_q->tx_skbuff_dma[entry].len,
2693 					       DMA_TO_DEVICE);
2694 			else
2695 				dma_unmap_single(priv->device,
2696 						 tx_q->tx_skbuff_dma[entry].buf,
2697 						 tx_q->tx_skbuff_dma[entry].len,
2698 						 DMA_TO_DEVICE);
2699 			tx_q->tx_skbuff_dma[entry].buf = 0;
2700 			tx_q->tx_skbuff_dma[entry].len = 0;
2701 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2702 		}
2703 
2704 		stmmac_clean_desc3(priv, tx_q, p);
2705 
2706 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2707 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2708 
2709 		if (xdpf &&
2710 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2711 			xdp_return_frame_rx_napi(xdpf);
2712 			tx_q->xdpf[entry] = NULL;
2713 		}
2714 
2715 		if (xdpf &&
2716 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2717 			xdp_return_frame(xdpf);
2718 			tx_q->xdpf[entry] = NULL;
2719 		}
2720 
2721 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2722 			tx_q->xsk_frames_done++;
2723 
2724 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2725 			if (likely(skb)) {
2726 				pkts_compl++;
2727 				bytes_compl += skb->len;
2728 				dev_consume_skb_any(skb);
2729 				tx_q->tx_skbuff[entry] = NULL;
2730 			}
2731 		}
2732 
2733 		stmmac_release_tx_desc(priv, p, priv->mode);
2734 
2735 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2736 	}
2737 	tx_q->dirty_tx = entry;
2738 
2739 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2740 				  pkts_compl, bytes_compl);
2741 
2742 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2743 								queue))) &&
2744 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2745 
2746 		netif_dbg(priv, tx_done, priv->dev,
2747 			  "%s: restart transmit\n", __func__);
2748 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2749 	}
2750 
2751 	if (tx_q->xsk_pool) {
2752 		bool work_done;
2753 
2754 		if (tx_q->xsk_frames_done)
2755 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2756 
2757 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2758 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2759 
2760 		/* For XSK TX, we try to send as many as possible.
2761 		 * If XSK work done (XSK TX desc empty and budget still
2762 		 * available), return "budget - 1" to reenable TX IRQ.
2763 		 * Else, return "budget" to make NAPI continue polling.
2764 		 */
2765 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2766 					       STMMAC_XSK_TX_BUDGET_MAX);
2767 		if (work_done)
2768 			xmits = budget - 1;
2769 		else
2770 			xmits = budget;
2771 	}
2772 
2773 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2774 	    priv->eee_sw_timer_en) {
2775 		if (stmmac_enable_eee_mode(priv))
2776 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2777 	}
2778 
2779 	/* We still have pending packets, let's call for a new scheduling */
2780 	if (tx_q->dirty_tx != tx_q->cur_tx)
2781 		*pending_packets = true;
2782 
2783 	u64_stats_update_begin(&txq_stats->napi_syncp);
2784 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2785 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2786 	u64_stats_inc(&txq_stats->napi.tx_clean);
2787 	u64_stats_update_end(&txq_stats->napi_syncp);
2788 
2789 	priv->xstats.tx_errors += tx_errors;
2790 
2791 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2792 
2793 	/* Combine decisions from TX clean and XSK TX */
2794 	return max(count, xmits);
2795 }
2796 
2797 /**
2798  * stmmac_tx_err - to manage the tx error
2799  * @priv: driver private structure
2800  * @chan: channel index
2801  * Description: it cleans the descriptors and restarts the transmission
2802  * in case of transmission errors.
2803  */
2804 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2805 {
2806 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2807 
2808 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2809 
2810 	stmmac_stop_tx_dma(priv, chan);
2811 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2812 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2813 	stmmac_reset_tx_queue(priv, chan);
2814 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2815 			    tx_q->dma_tx_phy, chan);
2816 	stmmac_start_tx_dma(priv, chan);
2817 
2818 	priv->xstats.tx_errors++;
2819 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2820 }
2821 
2822 /**
2823  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2824  *  @priv: driver private structure
2825  *  @txmode: TX operating mode
2826  *  @rxmode: RX operating mode
2827  *  @chan: channel index
2828  *  Description: it is used for configuring of the DMA operation mode in
2829  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2830  *  mode.
2831  */
2832 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2833 					  u32 rxmode, u32 chan)
2834 {
2835 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2836 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2837 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2838 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2839 	int rxfifosz = priv->plat->rx_fifo_size;
2840 	int txfifosz = priv->plat->tx_fifo_size;
2841 
2842 	if (rxfifosz == 0)
2843 		rxfifosz = priv->dma_cap.rx_fifo_size;
2844 	if (txfifosz == 0)
2845 		txfifosz = priv->dma_cap.tx_fifo_size;
2846 
2847 	/* Adjust for real per queue fifo size */
2848 	rxfifosz /= rx_channels_count;
2849 	txfifosz /= tx_channels_count;
2850 
2851 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2852 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2853 }
2854 
2855 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2856 {
2857 	int ret;
2858 
2859 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2860 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2861 	if (ret && (ret != -EINVAL)) {
2862 		stmmac_global_err(priv);
2863 		return true;
2864 	}
2865 
2866 	return false;
2867 }
2868 
2869 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2870 {
2871 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2872 						 &priv->xstats, chan, dir);
2873 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2874 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2875 	struct stmmac_channel *ch = &priv->channel[chan];
2876 	struct napi_struct *rx_napi;
2877 	struct napi_struct *tx_napi;
2878 	unsigned long flags;
2879 
2880 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2881 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2882 
2883 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2884 		if (napi_schedule_prep(rx_napi)) {
2885 			spin_lock_irqsave(&ch->lock, flags);
2886 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2887 			spin_unlock_irqrestore(&ch->lock, flags);
2888 			__napi_schedule(rx_napi);
2889 		}
2890 	}
2891 
2892 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2893 		if (napi_schedule_prep(tx_napi)) {
2894 			spin_lock_irqsave(&ch->lock, flags);
2895 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2896 			spin_unlock_irqrestore(&ch->lock, flags);
2897 			__napi_schedule(tx_napi);
2898 		}
2899 	}
2900 
2901 	return status;
2902 }
2903 
2904 /**
2905  * stmmac_dma_interrupt - DMA ISR
2906  * @priv: driver private structure
2907  * Description: this is the DMA ISR. It is called by the main ISR.
2908  * It calls the dwmac dma routine and schedule poll method in case of some
2909  * work can be done.
2910  */
2911 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2912 {
2913 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2914 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2915 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2916 				tx_channel_count : rx_channel_count;
2917 	u32 chan;
2918 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2919 
2920 	/* Make sure we never check beyond our status buffer. */
2921 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2922 		channels_to_check = ARRAY_SIZE(status);
2923 
2924 	for (chan = 0; chan < channels_to_check; chan++)
2925 		status[chan] = stmmac_napi_check(priv, chan,
2926 						 DMA_DIR_RXTX);
2927 
2928 	for (chan = 0; chan < tx_channel_count; chan++) {
2929 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2930 			/* Try to bump up the dma threshold on this failure */
2931 			stmmac_bump_dma_threshold(priv, chan);
2932 		} else if (unlikely(status[chan] == tx_hard_error)) {
2933 			stmmac_tx_err(priv, chan);
2934 		}
2935 	}
2936 }
2937 
2938 /**
2939  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2940  * @priv: driver private structure
2941  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2942  */
2943 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2944 {
2945 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2946 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2947 
2948 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2949 
2950 	if (priv->dma_cap.rmon) {
2951 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2952 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2953 	} else
2954 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2955 }
2956 
2957 /**
2958  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2959  * @priv: driver private structure
2960  * Description:
2961  *  new GMAC chip generations have a new register to indicate the
2962  *  presence of the optional feature/functions.
2963  *  This can be also used to override the value passed through the
2964  *  platform and necessary for old MAC10/100 and GMAC chips.
2965  */
2966 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2967 {
2968 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2969 }
2970 
2971 /**
2972  * stmmac_check_ether_addr - check if the MAC addr is valid
2973  * @priv: driver private structure
2974  * Description:
2975  * it is to verify if the MAC address is valid, in case of failures it
2976  * generates a random MAC address
2977  */
2978 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2979 {
2980 	u8 addr[ETH_ALEN];
2981 
2982 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2983 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2984 		if (is_valid_ether_addr(addr))
2985 			eth_hw_addr_set(priv->dev, addr);
2986 		else
2987 			eth_hw_addr_random(priv->dev);
2988 		dev_info(priv->device, "device MAC address %pM\n",
2989 			 priv->dev->dev_addr);
2990 	}
2991 }
2992 
2993 /**
2994  * stmmac_init_dma_engine - DMA init.
2995  * @priv: driver private structure
2996  * Description:
2997  * It inits the DMA invoking the specific MAC/GMAC callback.
2998  * Some DMA parameters can be passed from the platform;
2999  * in case of these are not passed a default is kept for the MAC or GMAC.
3000  */
3001 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3002 {
3003 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3004 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3005 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3006 	struct stmmac_rx_queue *rx_q;
3007 	struct stmmac_tx_queue *tx_q;
3008 	u32 chan = 0;
3009 	int atds = 0;
3010 	int ret = 0;
3011 
3012 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3013 		dev_err(priv->device, "Invalid DMA configuration\n");
3014 		return -EINVAL;
3015 	}
3016 
3017 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3018 		atds = 1;
3019 
3020 	ret = stmmac_reset(priv, priv->ioaddr);
3021 	if (ret) {
3022 		dev_err(priv->device, "Failed to reset the dma\n");
3023 		return ret;
3024 	}
3025 
3026 	/* DMA Configuration */
3027 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
3028 
3029 	if (priv->plat->axi)
3030 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3031 
3032 	/* DMA CSR Channel configuration */
3033 	for (chan = 0; chan < dma_csr_ch; chan++) {
3034 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3035 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3036 	}
3037 
3038 	/* DMA RX Channel Configuration */
3039 	for (chan = 0; chan < rx_channels_count; chan++) {
3040 		rx_q = &priv->dma_conf.rx_queue[chan];
3041 
3042 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3043 				    rx_q->dma_rx_phy, chan);
3044 
3045 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3046 				     (rx_q->buf_alloc_num *
3047 				      sizeof(struct dma_desc));
3048 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3049 				       rx_q->rx_tail_addr, chan);
3050 	}
3051 
3052 	/* DMA TX Channel Configuration */
3053 	for (chan = 0; chan < tx_channels_count; chan++) {
3054 		tx_q = &priv->dma_conf.tx_queue[chan];
3055 
3056 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3057 				    tx_q->dma_tx_phy, chan);
3058 
3059 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3060 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3061 				       tx_q->tx_tail_addr, chan);
3062 	}
3063 
3064 	return ret;
3065 }
3066 
3067 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3068 {
3069 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3070 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3071 	struct stmmac_channel *ch;
3072 	struct napi_struct *napi;
3073 
3074 	if (!tx_coal_timer)
3075 		return;
3076 
3077 	ch = &priv->channel[tx_q->queue_index];
3078 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3079 
3080 	/* Arm timer only if napi is not already scheduled.
3081 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3082 	 * again in the next scheduled napi.
3083 	 */
3084 	if (unlikely(!napi_is_scheduled(napi)))
3085 		hrtimer_start(&tx_q->txtimer,
3086 			      STMMAC_COAL_TIMER(tx_coal_timer),
3087 			      HRTIMER_MODE_REL);
3088 	else
3089 		hrtimer_try_to_cancel(&tx_q->txtimer);
3090 }
3091 
3092 /**
3093  * stmmac_tx_timer - mitigation sw timer for tx.
3094  * @t: data pointer
3095  * Description:
3096  * This is the timer handler to directly invoke the stmmac_tx_clean.
3097  */
3098 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3099 {
3100 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3101 	struct stmmac_priv *priv = tx_q->priv_data;
3102 	struct stmmac_channel *ch;
3103 	struct napi_struct *napi;
3104 
3105 	ch = &priv->channel[tx_q->queue_index];
3106 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3107 
3108 	if (likely(napi_schedule_prep(napi))) {
3109 		unsigned long flags;
3110 
3111 		spin_lock_irqsave(&ch->lock, flags);
3112 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3113 		spin_unlock_irqrestore(&ch->lock, flags);
3114 		__napi_schedule(napi);
3115 	}
3116 
3117 	return HRTIMER_NORESTART;
3118 }
3119 
3120 /**
3121  * stmmac_init_coalesce - init mitigation options.
3122  * @priv: driver private structure
3123  * Description:
3124  * This inits the coalesce parameters: i.e. timer rate,
3125  * timer handler and default threshold used for enabling the
3126  * interrupt on completion bit.
3127  */
3128 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3129 {
3130 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3131 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3132 	u32 chan;
3133 
3134 	for (chan = 0; chan < tx_channel_count; chan++) {
3135 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3136 
3137 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3138 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3139 
3140 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3141 		tx_q->txtimer.function = stmmac_tx_timer;
3142 	}
3143 
3144 	for (chan = 0; chan < rx_channel_count; chan++)
3145 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3146 }
3147 
3148 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3149 {
3150 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3151 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3152 	u32 chan;
3153 
3154 	/* set TX ring length */
3155 	for (chan = 0; chan < tx_channels_count; chan++)
3156 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3157 				       (priv->dma_conf.dma_tx_size - 1), chan);
3158 
3159 	/* set RX ring length */
3160 	for (chan = 0; chan < rx_channels_count; chan++)
3161 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3162 				       (priv->dma_conf.dma_rx_size - 1), chan);
3163 }
3164 
3165 /**
3166  *  stmmac_set_tx_queue_weight - Set TX queue weight
3167  *  @priv: driver private structure
3168  *  Description: It is used for setting TX queues weight
3169  */
3170 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3171 {
3172 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3173 	u32 weight;
3174 	u32 queue;
3175 
3176 	for (queue = 0; queue < tx_queues_count; queue++) {
3177 		weight = priv->plat->tx_queues_cfg[queue].weight;
3178 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3179 	}
3180 }
3181 
3182 /**
3183  *  stmmac_configure_cbs - Configure CBS in TX queue
3184  *  @priv: driver private structure
3185  *  Description: It is used for configuring CBS in AVB TX queues
3186  */
3187 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3188 {
3189 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3190 	u32 mode_to_use;
3191 	u32 queue;
3192 
3193 	/* queue 0 is reserved for legacy traffic */
3194 	for (queue = 1; queue < tx_queues_count; queue++) {
3195 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3196 		if (mode_to_use == MTL_QUEUE_DCB)
3197 			continue;
3198 
3199 		stmmac_config_cbs(priv, priv->hw,
3200 				priv->plat->tx_queues_cfg[queue].send_slope,
3201 				priv->plat->tx_queues_cfg[queue].idle_slope,
3202 				priv->plat->tx_queues_cfg[queue].high_credit,
3203 				priv->plat->tx_queues_cfg[queue].low_credit,
3204 				queue);
3205 	}
3206 }
3207 
3208 /**
3209  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3210  *  @priv: driver private structure
3211  *  Description: It is used for mapping RX queues to RX dma channels
3212  */
3213 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3214 {
3215 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3216 	u32 queue;
3217 	u32 chan;
3218 
3219 	for (queue = 0; queue < rx_queues_count; queue++) {
3220 		chan = priv->plat->rx_queues_cfg[queue].chan;
3221 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3222 	}
3223 }
3224 
3225 /**
3226  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3227  *  @priv: driver private structure
3228  *  Description: It is used for configuring the RX Queue Priority
3229  */
3230 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3231 {
3232 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3233 	u32 queue;
3234 	u32 prio;
3235 
3236 	for (queue = 0; queue < rx_queues_count; queue++) {
3237 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3238 			continue;
3239 
3240 		prio = priv->plat->rx_queues_cfg[queue].prio;
3241 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3242 	}
3243 }
3244 
3245 /**
3246  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3247  *  @priv: driver private structure
3248  *  Description: It is used for configuring the TX Queue Priority
3249  */
3250 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3251 {
3252 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3253 	u32 queue;
3254 	u32 prio;
3255 
3256 	for (queue = 0; queue < tx_queues_count; queue++) {
3257 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3258 			continue;
3259 
3260 		prio = priv->plat->tx_queues_cfg[queue].prio;
3261 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3262 	}
3263 }
3264 
3265 /**
3266  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3267  *  @priv: driver private structure
3268  *  Description: It is used for configuring the RX queue routing
3269  */
3270 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3271 {
3272 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3273 	u32 queue;
3274 	u8 packet;
3275 
3276 	for (queue = 0; queue < rx_queues_count; queue++) {
3277 		/* no specific packet type routing specified for the queue */
3278 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3279 			continue;
3280 
3281 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3282 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3283 	}
3284 }
3285 
3286 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3287 {
3288 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3289 		priv->rss.enable = false;
3290 		return;
3291 	}
3292 
3293 	if (priv->dev->features & NETIF_F_RXHASH)
3294 		priv->rss.enable = true;
3295 	else
3296 		priv->rss.enable = false;
3297 
3298 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3299 			     priv->plat->rx_queues_to_use);
3300 }
3301 
3302 /**
3303  *  stmmac_mtl_configuration - Configure MTL
3304  *  @priv: driver private structure
3305  *  Description: It is used for configurring MTL
3306  */
3307 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3308 {
3309 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3310 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3311 
3312 	if (tx_queues_count > 1)
3313 		stmmac_set_tx_queue_weight(priv);
3314 
3315 	/* Configure MTL RX algorithms */
3316 	if (rx_queues_count > 1)
3317 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3318 				priv->plat->rx_sched_algorithm);
3319 
3320 	/* Configure MTL TX algorithms */
3321 	if (tx_queues_count > 1)
3322 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3323 				priv->plat->tx_sched_algorithm);
3324 
3325 	/* Configure CBS in AVB TX queues */
3326 	if (tx_queues_count > 1)
3327 		stmmac_configure_cbs(priv);
3328 
3329 	/* Map RX MTL to DMA channels */
3330 	stmmac_rx_queue_dma_chan_map(priv);
3331 
3332 	/* Enable MAC RX Queues */
3333 	stmmac_mac_enable_rx_queues(priv);
3334 
3335 	/* Set RX priorities */
3336 	if (rx_queues_count > 1)
3337 		stmmac_mac_config_rx_queues_prio(priv);
3338 
3339 	/* Set TX priorities */
3340 	if (tx_queues_count > 1)
3341 		stmmac_mac_config_tx_queues_prio(priv);
3342 
3343 	/* Set RX routing */
3344 	if (rx_queues_count > 1)
3345 		stmmac_mac_config_rx_queues_routing(priv);
3346 
3347 	/* Receive Side Scaling */
3348 	if (rx_queues_count > 1)
3349 		stmmac_mac_config_rss(priv);
3350 }
3351 
3352 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3353 {
3354 	if (priv->dma_cap.asp) {
3355 		netdev_info(priv->dev, "Enabling Safety Features\n");
3356 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3357 					  priv->plat->safety_feat_cfg);
3358 	} else {
3359 		netdev_info(priv->dev, "No Safety Features support found\n");
3360 	}
3361 }
3362 
3363 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3364 {
3365 	char *name;
3366 
3367 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3368 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3369 
3370 	name = priv->wq_name;
3371 	sprintf(name, "%s-fpe", priv->dev->name);
3372 
3373 	priv->fpe_wq = create_singlethread_workqueue(name);
3374 	if (!priv->fpe_wq) {
3375 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3376 
3377 		return -ENOMEM;
3378 	}
3379 	netdev_info(priv->dev, "FPE workqueue start");
3380 
3381 	return 0;
3382 }
3383 
3384 /**
3385  * stmmac_hw_setup - setup mac in a usable state.
3386  *  @dev : pointer to the device structure.
3387  *  @ptp_register: register PTP if set
3388  *  Description:
3389  *  this is the main function to setup the HW in a usable state because the
3390  *  dma engine is reset, the core registers are configured (e.g. AXI,
3391  *  Checksum features, timers). The DMA is ready to start receiving and
3392  *  transmitting.
3393  *  Return value:
3394  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3395  *  file on failure.
3396  */
3397 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3398 {
3399 	struct stmmac_priv *priv = netdev_priv(dev);
3400 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3401 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3402 	bool sph_en;
3403 	u32 chan;
3404 	int ret;
3405 
3406 	/* Make sure RX clock is enabled */
3407 	if (priv->hw->phylink_pcs)
3408 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3409 
3410 	/* DMA initialization and SW reset */
3411 	ret = stmmac_init_dma_engine(priv);
3412 	if (ret < 0) {
3413 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3414 			   __func__);
3415 		return ret;
3416 	}
3417 
3418 	/* Copy the MAC addr into the HW  */
3419 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3420 
3421 	/* PS and related bits will be programmed according to the speed */
3422 	if (priv->hw->pcs) {
3423 		int speed = priv->plat->mac_port_sel_speed;
3424 
3425 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3426 		    (speed == SPEED_1000)) {
3427 			priv->hw->ps = speed;
3428 		} else {
3429 			dev_warn(priv->device, "invalid port speed\n");
3430 			priv->hw->ps = 0;
3431 		}
3432 	}
3433 
3434 	/* Initialize the MAC Core */
3435 	stmmac_core_init(priv, priv->hw, dev);
3436 
3437 	/* Initialize MTL*/
3438 	stmmac_mtl_configuration(priv);
3439 
3440 	/* Initialize Safety Features */
3441 	stmmac_safety_feat_configuration(priv);
3442 
3443 	ret = stmmac_rx_ipc(priv, priv->hw);
3444 	if (!ret) {
3445 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3446 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3447 		priv->hw->rx_csum = 0;
3448 	}
3449 
3450 	/* Enable the MAC Rx/Tx */
3451 	stmmac_mac_set(priv, priv->ioaddr, true);
3452 
3453 	/* Set the HW DMA mode and the COE */
3454 	stmmac_dma_operation_mode(priv);
3455 
3456 	stmmac_mmc_setup(priv);
3457 
3458 	if (ptp_register) {
3459 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3460 		if (ret < 0)
3461 			netdev_warn(priv->dev,
3462 				    "failed to enable PTP reference clock: %pe\n",
3463 				    ERR_PTR(ret));
3464 	}
3465 
3466 	ret = stmmac_init_ptp(priv);
3467 	if (ret == -EOPNOTSUPP)
3468 		netdev_info(priv->dev, "PTP not supported by HW\n");
3469 	else if (ret)
3470 		netdev_warn(priv->dev, "PTP init failed\n");
3471 	else if (ptp_register)
3472 		stmmac_ptp_register(priv);
3473 
3474 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3475 
3476 	/* Convert the timer from msec to usec */
3477 	if (!priv->tx_lpi_timer)
3478 		priv->tx_lpi_timer = eee_timer * 1000;
3479 
3480 	if (priv->use_riwt) {
3481 		u32 queue;
3482 
3483 		for (queue = 0; queue < rx_cnt; queue++) {
3484 			if (!priv->rx_riwt[queue])
3485 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3486 
3487 			stmmac_rx_watchdog(priv, priv->ioaddr,
3488 					   priv->rx_riwt[queue], queue);
3489 		}
3490 	}
3491 
3492 	if (priv->hw->pcs)
3493 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3494 
3495 	/* set TX and RX rings length */
3496 	stmmac_set_rings_length(priv);
3497 
3498 	/* Enable TSO */
3499 	if (priv->tso) {
3500 		for (chan = 0; chan < tx_cnt; chan++) {
3501 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3502 
3503 			/* TSO and TBS cannot co-exist */
3504 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3505 				continue;
3506 
3507 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3508 		}
3509 	}
3510 
3511 	/* Enable Split Header */
3512 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3513 	for (chan = 0; chan < rx_cnt; chan++)
3514 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3515 
3516 
3517 	/* VLAN Tag Insertion */
3518 	if (priv->dma_cap.vlins)
3519 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3520 
3521 	/* TBS */
3522 	for (chan = 0; chan < tx_cnt; chan++) {
3523 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3524 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3525 
3526 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3527 	}
3528 
3529 	/* Configure real RX and TX queues */
3530 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3531 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3532 
3533 	/* Start the ball rolling... */
3534 	stmmac_start_all_dma(priv);
3535 
3536 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3537 
3538 	if (priv->dma_cap.fpesel) {
3539 		stmmac_fpe_start_wq(priv);
3540 
3541 		if (priv->plat->fpe_cfg->enable)
3542 			stmmac_fpe_handshake(priv, true);
3543 	}
3544 
3545 	return 0;
3546 }
3547 
3548 static void stmmac_hw_teardown(struct net_device *dev)
3549 {
3550 	struct stmmac_priv *priv = netdev_priv(dev);
3551 
3552 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3553 }
3554 
3555 static void stmmac_free_irq(struct net_device *dev,
3556 			    enum request_irq_err irq_err, int irq_idx)
3557 {
3558 	struct stmmac_priv *priv = netdev_priv(dev);
3559 	int j;
3560 
3561 	switch (irq_err) {
3562 	case REQ_IRQ_ERR_ALL:
3563 		irq_idx = priv->plat->tx_queues_to_use;
3564 		fallthrough;
3565 	case REQ_IRQ_ERR_TX:
3566 		for (j = irq_idx - 1; j >= 0; j--) {
3567 			if (priv->tx_irq[j] > 0) {
3568 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3569 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3570 			}
3571 		}
3572 		irq_idx = priv->plat->rx_queues_to_use;
3573 		fallthrough;
3574 	case REQ_IRQ_ERR_RX:
3575 		for (j = irq_idx - 1; j >= 0; j--) {
3576 			if (priv->rx_irq[j] > 0) {
3577 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3578 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3579 			}
3580 		}
3581 
3582 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3583 			free_irq(priv->sfty_ue_irq, dev);
3584 		fallthrough;
3585 	case REQ_IRQ_ERR_SFTY_UE:
3586 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3587 			free_irq(priv->sfty_ce_irq, dev);
3588 		fallthrough;
3589 	case REQ_IRQ_ERR_SFTY_CE:
3590 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3591 			free_irq(priv->lpi_irq, dev);
3592 		fallthrough;
3593 	case REQ_IRQ_ERR_LPI:
3594 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3595 			free_irq(priv->wol_irq, dev);
3596 		fallthrough;
3597 	case REQ_IRQ_ERR_SFTY:
3598 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3599 			free_irq(priv->sfty_irq, dev);
3600 		fallthrough;
3601 	case REQ_IRQ_ERR_WOL:
3602 		free_irq(dev->irq, dev);
3603 		fallthrough;
3604 	case REQ_IRQ_ERR_MAC:
3605 	case REQ_IRQ_ERR_NO:
3606 		/* If MAC IRQ request error, no more IRQ to free */
3607 		break;
3608 	}
3609 }
3610 
3611 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3612 {
3613 	struct stmmac_priv *priv = netdev_priv(dev);
3614 	enum request_irq_err irq_err;
3615 	cpumask_t cpu_mask;
3616 	int irq_idx = 0;
3617 	char *int_name;
3618 	int ret;
3619 	int i;
3620 
3621 	/* For common interrupt */
3622 	int_name = priv->int_name_mac;
3623 	sprintf(int_name, "%s:%s", dev->name, "mac");
3624 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3625 			  0, int_name, dev);
3626 	if (unlikely(ret < 0)) {
3627 		netdev_err(priv->dev,
3628 			   "%s: alloc mac MSI %d (error: %d)\n",
3629 			   __func__, dev->irq, ret);
3630 		irq_err = REQ_IRQ_ERR_MAC;
3631 		goto irq_error;
3632 	}
3633 
3634 	/* Request the Wake IRQ in case of another line
3635 	 * is used for WoL
3636 	 */
3637 	priv->wol_irq_disabled = true;
3638 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3639 		int_name = priv->int_name_wol;
3640 		sprintf(int_name, "%s:%s", dev->name, "wol");
3641 		ret = request_irq(priv->wol_irq,
3642 				  stmmac_mac_interrupt,
3643 				  0, int_name, dev);
3644 		if (unlikely(ret < 0)) {
3645 			netdev_err(priv->dev,
3646 				   "%s: alloc wol MSI %d (error: %d)\n",
3647 				   __func__, priv->wol_irq, ret);
3648 			irq_err = REQ_IRQ_ERR_WOL;
3649 			goto irq_error;
3650 		}
3651 	}
3652 
3653 	/* Request the LPI IRQ in case of another line
3654 	 * is used for LPI
3655 	 */
3656 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3657 		int_name = priv->int_name_lpi;
3658 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3659 		ret = request_irq(priv->lpi_irq,
3660 				  stmmac_mac_interrupt,
3661 				  0, int_name, dev);
3662 		if (unlikely(ret < 0)) {
3663 			netdev_err(priv->dev,
3664 				   "%s: alloc lpi MSI %d (error: %d)\n",
3665 				   __func__, priv->lpi_irq, ret);
3666 			irq_err = REQ_IRQ_ERR_LPI;
3667 			goto irq_error;
3668 		}
3669 	}
3670 
3671 	/* Request the common Safety Feature Correctible/Uncorrectible
3672 	 * Error line in case of another line is used
3673 	 */
3674 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3675 		int_name = priv->int_name_sfty;
3676 		sprintf(int_name, "%s:%s", dev->name, "safety");
3677 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3678 				  0, int_name, dev);
3679 		if (unlikely(ret < 0)) {
3680 			netdev_err(priv->dev,
3681 				   "%s: alloc sfty MSI %d (error: %d)\n",
3682 				   __func__, priv->sfty_irq, ret);
3683 			irq_err = REQ_IRQ_ERR_SFTY;
3684 			goto irq_error;
3685 		}
3686 	}
3687 
3688 	/* Request the Safety Feature Correctible Error line in
3689 	 * case of another line is used
3690 	 */
3691 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3692 		int_name = priv->int_name_sfty_ce;
3693 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3694 		ret = request_irq(priv->sfty_ce_irq,
3695 				  stmmac_safety_interrupt,
3696 				  0, int_name, dev);
3697 		if (unlikely(ret < 0)) {
3698 			netdev_err(priv->dev,
3699 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3700 				   __func__, priv->sfty_ce_irq, ret);
3701 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3702 			goto irq_error;
3703 		}
3704 	}
3705 
3706 	/* Request the Safety Feature Uncorrectible Error line in
3707 	 * case of another line is used
3708 	 */
3709 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3710 		int_name = priv->int_name_sfty_ue;
3711 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3712 		ret = request_irq(priv->sfty_ue_irq,
3713 				  stmmac_safety_interrupt,
3714 				  0, int_name, dev);
3715 		if (unlikely(ret < 0)) {
3716 			netdev_err(priv->dev,
3717 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3718 				   __func__, priv->sfty_ue_irq, ret);
3719 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3720 			goto irq_error;
3721 		}
3722 	}
3723 
3724 	/* Request Rx MSI irq */
3725 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3726 		if (i >= MTL_MAX_RX_QUEUES)
3727 			break;
3728 		if (priv->rx_irq[i] == 0)
3729 			continue;
3730 
3731 		int_name = priv->int_name_rx_irq[i];
3732 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3733 		ret = request_irq(priv->rx_irq[i],
3734 				  stmmac_msi_intr_rx,
3735 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3736 		if (unlikely(ret < 0)) {
3737 			netdev_err(priv->dev,
3738 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3739 				   __func__, i, priv->rx_irq[i], ret);
3740 			irq_err = REQ_IRQ_ERR_RX;
3741 			irq_idx = i;
3742 			goto irq_error;
3743 		}
3744 		cpumask_clear(&cpu_mask);
3745 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3746 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3747 	}
3748 
3749 	/* Request Tx MSI irq */
3750 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3751 		if (i >= MTL_MAX_TX_QUEUES)
3752 			break;
3753 		if (priv->tx_irq[i] == 0)
3754 			continue;
3755 
3756 		int_name = priv->int_name_tx_irq[i];
3757 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3758 		ret = request_irq(priv->tx_irq[i],
3759 				  stmmac_msi_intr_tx,
3760 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3761 		if (unlikely(ret < 0)) {
3762 			netdev_err(priv->dev,
3763 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3764 				   __func__, i, priv->tx_irq[i], ret);
3765 			irq_err = REQ_IRQ_ERR_TX;
3766 			irq_idx = i;
3767 			goto irq_error;
3768 		}
3769 		cpumask_clear(&cpu_mask);
3770 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3771 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3772 	}
3773 
3774 	return 0;
3775 
3776 irq_error:
3777 	stmmac_free_irq(dev, irq_err, irq_idx);
3778 	return ret;
3779 }
3780 
3781 static int stmmac_request_irq_single(struct net_device *dev)
3782 {
3783 	struct stmmac_priv *priv = netdev_priv(dev);
3784 	enum request_irq_err irq_err;
3785 	int ret;
3786 
3787 	ret = request_irq(dev->irq, stmmac_interrupt,
3788 			  IRQF_SHARED, dev->name, dev);
3789 	if (unlikely(ret < 0)) {
3790 		netdev_err(priv->dev,
3791 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3792 			   __func__, dev->irq, ret);
3793 		irq_err = REQ_IRQ_ERR_MAC;
3794 		goto irq_error;
3795 	}
3796 
3797 	/* Request the Wake IRQ in case of another line
3798 	 * is used for WoL
3799 	 */
3800 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3801 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3802 				  IRQF_SHARED, dev->name, dev);
3803 		if (unlikely(ret < 0)) {
3804 			netdev_err(priv->dev,
3805 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3806 				   __func__, priv->wol_irq, ret);
3807 			irq_err = REQ_IRQ_ERR_WOL;
3808 			goto irq_error;
3809 		}
3810 	}
3811 
3812 	/* Request the IRQ lines */
3813 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3814 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3815 				  IRQF_SHARED, dev->name, dev);
3816 		if (unlikely(ret < 0)) {
3817 			netdev_err(priv->dev,
3818 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3819 				   __func__, priv->lpi_irq, ret);
3820 			irq_err = REQ_IRQ_ERR_LPI;
3821 			goto irq_error;
3822 		}
3823 	}
3824 
3825 	/* Request the common Safety Feature Correctible/Uncorrectible
3826 	 * Error line in case of another line is used
3827 	 */
3828 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3829 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3830 				  IRQF_SHARED, dev->name, dev);
3831 		if (unlikely(ret < 0)) {
3832 			netdev_err(priv->dev,
3833 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3834 				   __func__, priv->sfty_irq, ret);
3835 			irq_err = REQ_IRQ_ERR_SFTY;
3836 			goto irq_error;
3837 		}
3838 	}
3839 
3840 	return 0;
3841 
3842 irq_error:
3843 	stmmac_free_irq(dev, irq_err, 0);
3844 	return ret;
3845 }
3846 
3847 static int stmmac_request_irq(struct net_device *dev)
3848 {
3849 	struct stmmac_priv *priv = netdev_priv(dev);
3850 	int ret;
3851 
3852 	/* Request the IRQ lines */
3853 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3854 		ret = stmmac_request_irq_multi_msi(dev);
3855 	else
3856 		ret = stmmac_request_irq_single(dev);
3857 
3858 	return ret;
3859 }
3860 
3861 /**
3862  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3863  *  @priv: driver private structure
3864  *  @mtu: MTU to setup the dma queue and buf with
3865  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3866  *  Allocate the Tx/Rx DMA queue and init them.
3867  *  Return value:
3868  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3869  */
3870 static struct stmmac_dma_conf *
3871 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3872 {
3873 	struct stmmac_dma_conf *dma_conf;
3874 	int chan, bfsize, ret;
3875 
3876 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3877 	if (!dma_conf) {
3878 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3879 			   __func__);
3880 		return ERR_PTR(-ENOMEM);
3881 	}
3882 
3883 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3884 	if (bfsize < 0)
3885 		bfsize = 0;
3886 
3887 	if (bfsize < BUF_SIZE_16KiB)
3888 		bfsize = stmmac_set_bfsize(mtu, 0);
3889 
3890 	dma_conf->dma_buf_sz = bfsize;
3891 	/* Chose the tx/rx size from the already defined one in the
3892 	 * priv struct. (if defined)
3893 	 */
3894 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3895 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3896 
3897 	if (!dma_conf->dma_tx_size)
3898 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3899 	if (!dma_conf->dma_rx_size)
3900 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3901 
3902 	/* Earlier check for TBS */
3903 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3904 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3905 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3906 
3907 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3908 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3909 	}
3910 
3911 	ret = alloc_dma_desc_resources(priv, dma_conf);
3912 	if (ret < 0) {
3913 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3914 			   __func__);
3915 		goto alloc_error;
3916 	}
3917 
3918 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3919 	if (ret < 0) {
3920 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3921 			   __func__);
3922 		goto init_error;
3923 	}
3924 
3925 	return dma_conf;
3926 
3927 init_error:
3928 	free_dma_desc_resources(priv, dma_conf);
3929 alloc_error:
3930 	kfree(dma_conf);
3931 	return ERR_PTR(ret);
3932 }
3933 
3934 /**
3935  *  __stmmac_open - open entry point of the driver
3936  *  @dev : pointer to the device structure.
3937  *  @dma_conf :  structure to take the dma data
3938  *  Description:
3939  *  This function is the open entry point of the driver.
3940  *  Return value:
3941  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3942  *  file on failure.
3943  */
3944 static int __stmmac_open(struct net_device *dev,
3945 			 struct stmmac_dma_conf *dma_conf)
3946 {
3947 	struct stmmac_priv *priv = netdev_priv(dev);
3948 	int mode = priv->plat->phy_interface;
3949 	u32 chan;
3950 	int ret;
3951 
3952 	ret = pm_runtime_resume_and_get(priv->device);
3953 	if (ret < 0)
3954 		return ret;
3955 
3956 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3957 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3958 	    (!priv->hw->xpcs ||
3959 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3960 		ret = stmmac_init_phy(dev);
3961 		if (ret) {
3962 			netdev_err(priv->dev,
3963 				   "%s: Cannot attach to PHY (error: %d)\n",
3964 				   __func__, ret);
3965 			goto init_phy_error;
3966 		}
3967 	}
3968 
3969 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3970 
3971 	buf_sz = dma_conf->dma_buf_sz;
3972 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3973 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3974 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3975 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3976 
3977 	stmmac_reset_queues_param(priv);
3978 
3979 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3980 	    priv->plat->serdes_powerup) {
3981 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3982 		if (ret < 0) {
3983 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3984 				   __func__);
3985 			goto init_error;
3986 		}
3987 	}
3988 
3989 	ret = stmmac_hw_setup(dev, true);
3990 	if (ret < 0) {
3991 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3992 		goto init_error;
3993 	}
3994 
3995 	stmmac_init_coalesce(priv);
3996 
3997 	phylink_start(priv->phylink);
3998 	/* We may have called phylink_speed_down before */
3999 	phylink_speed_up(priv->phylink);
4000 
4001 	ret = stmmac_request_irq(dev);
4002 	if (ret)
4003 		goto irq_error;
4004 
4005 	stmmac_enable_all_queues(priv);
4006 	netif_tx_start_all_queues(priv->dev);
4007 	stmmac_enable_all_dma_irq(priv);
4008 
4009 	return 0;
4010 
4011 irq_error:
4012 	phylink_stop(priv->phylink);
4013 
4014 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4015 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4016 
4017 	stmmac_hw_teardown(dev);
4018 init_error:
4019 	phylink_disconnect_phy(priv->phylink);
4020 init_phy_error:
4021 	pm_runtime_put(priv->device);
4022 	return ret;
4023 }
4024 
4025 static int stmmac_open(struct net_device *dev)
4026 {
4027 	struct stmmac_priv *priv = netdev_priv(dev);
4028 	struct stmmac_dma_conf *dma_conf;
4029 	int ret;
4030 
4031 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4032 	if (IS_ERR(dma_conf))
4033 		return PTR_ERR(dma_conf);
4034 
4035 	ret = __stmmac_open(dev, dma_conf);
4036 	if (ret)
4037 		free_dma_desc_resources(priv, dma_conf);
4038 
4039 	kfree(dma_conf);
4040 	return ret;
4041 }
4042 
4043 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
4044 {
4045 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
4046 
4047 	if (priv->fpe_wq) {
4048 		destroy_workqueue(priv->fpe_wq);
4049 		priv->fpe_wq = NULL;
4050 	}
4051 
4052 	netdev_info(priv->dev, "FPE workqueue stop");
4053 }
4054 
4055 /**
4056  *  stmmac_release - close entry point of the driver
4057  *  @dev : device pointer.
4058  *  Description:
4059  *  This is the stop entry point of the driver.
4060  */
4061 static int stmmac_release(struct net_device *dev)
4062 {
4063 	struct stmmac_priv *priv = netdev_priv(dev);
4064 	u32 chan;
4065 
4066 	if (device_may_wakeup(priv->device))
4067 		phylink_speed_down(priv->phylink, false);
4068 	/* Stop and disconnect the PHY */
4069 	phylink_stop(priv->phylink);
4070 	phylink_disconnect_phy(priv->phylink);
4071 
4072 	stmmac_disable_all_queues(priv);
4073 
4074 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4075 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4076 
4077 	netif_tx_disable(dev);
4078 
4079 	/* Free the IRQ lines */
4080 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4081 
4082 	if (priv->eee_enabled) {
4083 		priv->tx_path_in_lpi_mode = false;
4084 		del_timer_sync(&priv->eee_ctrl_timer);
4085 	}
4086 
4087 	/* Stop TX/RX DMA and clear the descriptors */
4088 	stmmac_stop_all_dma(priv);
4089 
4090 	/* Release and free the Rx/Tx resources */
4091 	free_dma_desc_resources(priv, &priv->dma_conf);
4092 
4093 	/* Disable the MAC Rx/Tx */
4094 	stmmac_mac_set(priv, priv->ioaddr, false);
4095 
4096 	/* Powerdown Serdes if there is */
4097 	if (priv->plat->serdes_powerdown)
4098 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4099 
4100 	netif_carrier_off(dev);
4101 
4102 	stmmac_release_ptp(priv);
4103 
4104 	pm_runtime_put(priv->device);
4105 
4106 	if (priv->dma_cap.fpesel)
4107 		stmmac_fpe_stop_wq(priv);
4108 
4109 	return 0;
4110 }
4111 
4112 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4113 			       struct stmmac_tx_queue *tx_q)
4114 {
4115 	u16 tag = 0x0, inner_tag = 0x0;
4116 	u32 inner_type = 0x0;
4117 	struct dma_desc *p;
4118 
4119 	if (!priv->dma_cap.vlins)
4120 		return false;
4121 	if (!skb_vlan_tag_present(skb))
4122 		return false;
4123 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4124 		inner_tag = skb_vlan_tag_get(skb);
4125 		inner_type = STMMAC_VLAN_INSERT;
4126 	}
4127 
4128 	tag = skb_vlan_tag_get(skb);
4129 
4130 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4131 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4132 	else
4133 		p = &tx_q->dma_tx[tx_q->cur_tx];
4134 
4135 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4136 		return false;
4137 
4138 	stmmac_set_tx_owner(priv, p);
4139 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4140 	return true;
4141 }
4142 
4143 /**
4144  *  stmmac_tso_allocator - close entry point of the driver
4145  *  @priv: driver private structure
4146  *  @des: buffer start address
4147  *  @total_len: total length to fill in descriptors
4148  *  @last_segment: condition for the last descriptor
4149  *  @queue: TX queue index
4150  *  Description:
4151  *  This function fills descriptor and request new descriptors according to
4152  *  buffer length to fill
4153  */
4154 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4155 				 int total_len, bool last_segment, u32 queue)
4156 {
4157 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4158 	struct dma_desc *desc;
4159 	u32 buff_size;
4160 	int tmp_len;
4161 
4162 	tmp_len = total_len;
4163 
4164 	while (tmp_len > 0) {
4165 		dma_addr_t curr_addr;
4166 
4167 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4168 						priv->dma_conf.dma_tx_size);
4169 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4170 
4171 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4172 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4173 		else
4174 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4175 
4176 		curr_addr = des + (total_len - tmp_len);
4177 		if (priv->dma_cap.addr64 <= 32)
4178 			desc->des0 = cpu_to_le32(curr_addr);
4179 		else
4180 			stmmac_set_desc_addr(priv, desc, curr_addr);
4181 
4182 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4183 			    TSO_MAX_BUFF_SIZE : tmp_len;
4184 
4185 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4186 				0, 1,
4187 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4188 				0, 0);
4189 
4190 		tmp_len -= TSO_MAX_BUFF_SIZE;
4191 	}
4192 }
4193 
4194 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4195 {
4196 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4197 	int desc_size;
4198 
4199 	if (likely(priv->extend_desc))
4200 		desc_size = sizeof(struct dma_extended_desc);
4201 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4202 		desc_size = sizeof(struct dma_edesc);
4203 	else
4204 		desc_size = sizeof(struct dma_desc);
4205 
4206 	/* The own bit must be the latest setting done when prepare the
4207 	 * descriptor and then barrier is needed to make sure that
4208 	 * all is coherent before granting the DMA engine.
4209 	 */
4210 	wmb();
4211 
4212 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4213 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4214 }
4215 
4216 /**
4217  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4218  *  @skb : the socket buffer
4219  *  @dev : device pointer
4220  *  Description: this is the transmit function that is called on TSO frames
4221  *  (support available on GMAC4 and newer chips).
4222  *  Diagram below show the ring programming in case of TSO frames:
4223  *
4224  *  First Descriptor
4225  *   --------
4226  *   | DES0 |---> buffer1 = L2/L3/L4 header
4227  *   | DES1 |---> TCP Payload (can continue on next descr...)
4228  *   | DES2 |---> buffer 1 and 2 len
4229  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4230  *   --------
4231  *	|
4232  *     ...
4233  *	|
4234  *   --------
4235  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4236  *   | DES1 | --|
4237  *   | DES2 | --> buffer 1 and 2 len
4238  *   | DES3 |
4239  *   --------
4240  *
4241  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4242  */
4243 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4244 {
4245 	struct dma_desc *desc, *first, *mss_desc = NULL;
4246 	struct stmmac_priv *priv = netdev_priv(dev);
4247 	int nfrags = skb_shinfo(skb)->nr_frags;
4248 	u32 queue = skb_get_queue_mapping(skb);
4249 	unsigned int first_entry, tx_packets;
4250 	struct stmmac_txq_stats *txq_stats;
4251 	int tmp_pay_len = 0, first_tx;
4252 	struct stmmac_tx_queue *tx_q;
4253 	bool has_vlan, set_ic;
4254 	u8 proto_hdr_len, hdr;
4255 	u32 pay_len, mss;
4256 	dma_addr_t des;
4257 	int i;
4258 
4259 	tx_q = &priv->dma_conf.tx_queue[queue];
4260 	txq_stats = &priv->xstats.txq_stats[queue];
4261 	first_tx = tx_q->cur_tx;
4262 
4263 	/* Compute header lengths */
4264 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4265 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4266 		hdr = sizeof(struct udphdr);
4267 	} else {
4268 		proto_hdr_len = skb_tcp_all_headers(skb);
4269 		hdr = tcp_hdrlen(skb);
4270 	}
4271 
4272 	/* Desc availability based on threshold should be enough safe */
4273 	if (unlikely(stmmac_tx_avail(priv, queue) <
4274 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4275 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4276 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4277 								queue));
4278 			/* This is a hard error, log it. */
4279 			netdev_err(priv->dev,
4280 				   "%s: Tx Ring full when queue awake\n",
4281 				   __func__);
4282 		}
4283 		return NETDEV_TX_BUSY;
4284 	}
4285 
4286 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4287 
4288 	mss = skb_shinfo(skb)->gso_size;
4289 
4290 	/* set new MSS value if needed */
4291 	if (mss != tx_q->mss) {
4292 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4293 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4294 		else
4295 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4296 
4297 		stmmac_set_mss(priv, mss_desc, mss);
4298 		tx_q->mss = mss;
4299 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4300 						priv->dma_conf.dma_tx_size);
4301 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4302 	}
4303 
4304 	if (netif_msg_tx_queued(priv)) {
4305 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4306 			__func__, hdr, proto_hdr_len, pay_len, mss);
4307 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4308 			skb->data_len);
4309 	}
4310 
4311 	/* Check if VLAN can be inserted by HW */
4312 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4313 
4314 	first_entry = tx_q->cur_tx;
4315 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4316 
4317 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4318 		desc = &tx_q->dma_entx[first_entry].basic;
4319 	else
4320 		desc = &tx_q->dma_tx[first_entry];
4321 	first = desc;
4322 
4323 	if (has_vlan)
4324 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4325 
4326 	/* first descriptor: fill Headers on Buf1 */
4327 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4328 			     DMA_TO_DEVICE);
4329 	if (dma_mapping_error(priv->device, des))
4330 		goto dma_map_err;
4331 
4332 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4333 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4334 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4335 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4336 
4337 	if (priv->dma_cap.addr64 <= 32) {
4338 		first->des0 = cpu_to_le32(des);
4339 
4340 		/* Fill start of payload in buff2 of first descriptor */
4341 		if (pay_len)
4342 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4343 
4344 		/* If needed take extra descriptors to fill the remaining payload */
4345 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4346 	} else {
4347 		stmmac_set_desc_addr(priv, first, des);
4348 		tmp_pay_len = pay_len;
4349 		des += proto_hdr_len;
4350 		pay_len = 0;
4351 	}
4352 
4353 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4354 
4355 	/* Prepare fragments */
4356 	for (i = 0; i < nfrags; i++) {
4357 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4358 
4359 		des = skb_frag_dma_map(priv->device, frag, 0,
4360 				       skb_frag_size(frag),
4361 				       DMA_TO_DEVICE);
4362 		if (dma_mapping_error(priv->device, des))
4363 			goto dma_map_err;
4364 
4365 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4366 				     (i == nfrags - 1), queue);
4367 
4368 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4369 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4370 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4371 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4372 	}
4373 
4374 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4375 
4376 	/* Only the last descriptor gets to point to the skb. */
4377 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4378 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4379 
4380 	/* Manage tx mitigation */
4381 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4382 	tx_q->tx_count_frames += tx_packets;
4383 
4384 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4385 		set_ic = true;
4386 	else if (!priv->tx_coal_frames[queue])
4387 		set_ic = false;
4388 	else if (tx_packets > priv->tx_coal_frames[queue])
4389 		set_ic = true;
4390 	else if ((tx_q->tx_count_frames %
4391 		  priv->tx_coal_frames[queue]) < tx_packets)
4392 		set_ic = true;
4393 	else
4394 		set_ic = false;
4395 
4396 	if (set_ic) {
4397 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4398 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4399 		else
4400 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4401 
4402 		tx_q->tx_count_frames = 0;
4403 		stmmac_set_tx_ic(priv, desc);
4404 	}
4405 
4406 	/* We've used all descriptors we need for this skb, however,
4407 	 * advance cur_tx so that it references a fresh descriptor.
4408 	 * ndo_start_xmit will fill this descriptor the next time it's
4409 	 * called and stmmac_tx_clean may clean up to this descriptor.
4410 	 */
4411 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4412 
4413 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4414 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4415 			  __func__);
4416 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4417 	}
4418 
4419 	u64_stats_update_begin(&txq_stats->q_syncp);
4420 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4421 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4422 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4423 	if (set_ic)
4424 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4425 	u64_stats_update_end(&txq_stats->q_syncp);
4426 
4427 	if (priv->sarc_type)
4428 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4429 
4430 	skb_tx_timestamp(skb);
4431 
4432 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4433 		     priv->hwts_tx_en)) {
4434 		/* declare that device is doing timestamping */
4435 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4436 		stmmac_enable_tx_timestamp(priv, first);
4437 	}
4438 
4439 	/* Complete the first descriptor before granting the DMA */
4440 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4441 			proto_hdr_len,
4442 			pay_len,
4443 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4444 			hdr / 4, (skb->len - proto_hdr_len));
4445 
4446 	/* If context desc is used to change MSS */
4447 	if (mss_desc) {
4448 		/* Make sure that first descriptor has been completely
4449 		 * written, including its own bit. This is because MSS is
4450 		 * actually before first descriptor, so we need to make
4451 		 * sure that MSS's own bit is the last thing written.
4452 		 */
4453 		dma_wmb();
4454 		stmmac_set_tx_owner(priv, mss_desc);
4455 	}
4456 
4457 	if (netif_msg_pktdata(priv)) {
4458 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4459 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4460 			tx_q->cur_tx, first, nfrags);
4461 		pr_info(">>> frame to be transmitted: ");
4462 		print_pkt(skb->data, skb_headlen(skb));
4463 	}
4464 
4465 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4466 
4467 	stmmac_flush_tx_descriptors(priv, queue);
4468 	stmmac_tx_timer_arm(priv, queue);
4469 
4470 	return NETDEV_TX_OK;
4471 
4472 dma_map_err:
4473 	dev_err(priv->device, "Tx dma map failed\n");
4474 	dev_kfree_skb(skb);
4475 	priv->xstats.tx_dropped++;
4476 	return NETDEV_TX_OK;
4477 }
4478 
4479 /**
4480  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4481  * @skb: socket buffer to check
4482  *
4483  * Check if a packet has an ethertype that will trigger the IP header checks
4484  * and IP/TCP checksum engine of the stmmac core.
4485  *
4486  * Return: true if the ethertype can trigger the checksum engine, false
4487  * otherwise
4488  */
4489 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4490 {
4491 	int depth = 0;
4492 	__be16 proto;
4493 
4494 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4495 				    &depth);
4496 
4497 	return (depth <= ETH_HLEN) &&
4498 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4499 }
4500 
4501 /**
4502  *  stmmac_xmit - Tx entry point of the driver
4503  *  @skb : the socket buffer
4504  *  @dev : device pointer
4505  *  Description : this is the tx entry point of the driver.
4506  *  It programs the chain or the ring and supports oversized frames
4507  *  and SG feature.
4508  */
4509 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4510 {
4511 	unsigned int first_entry, tx_packets, enh_desc;
4512 	struct stmmac_priv *priv = netdev_priv(dev);
4513 	unsigned int nopaged_len = skb_headlen(skb);
4514 	int i, csum_insertion = 0, is_jumbo = 0;
4515 	u32 queue = skb_get_queue_mapping(skb);
4516 	int nfrags = skb_shinfo(skb)->nr_frags;
4517 	int gso = skb_shinfo(skb)->gso_type;
4518 	struct stmmac_txq_stats *txq_stats;
4519 	struct dma_edesc *tbs_desc = NULL;
4520 	struct dma_desc *desc, *first;
4521 	struct stmmac_tx_queue *tx_q;
4522 	bool has_vlan, set_ic;
4523 	int entry, first_tx;
4524 	dma_addr_t des;
4525 
4526 	tx_q = &priv->dma_conf.tx_queue[queue];
4527 	txq_stats = &priv->xstats.txq_stats[queue];
4528 	first_tx = tx_q->cur_tx;
4529 
4530 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4531 		stmmac_disable_eee_mode(priv);
4532 
4533 	/* Manage oversized TCP frames for GMAC4 device */
4534 	if (skb_is_gso(skb) && priv->tso) {
4535 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4536 			return stmmac_tso_xmit(skb, dev);
4537 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4538 			return stmmac_tso_xmit(skb, dev);
4539 	}
4540 
4541 	if (priv->est && priv->est->enable &&
4542 	    priv->est->max_sdu[queue] &&
4543 	    skb->len > priv->est->max_sdu[queue]){
4544 		priv->xstats.max_sdu_txq_drop[queue]++;
4545 		goto max_sdu_err;
4546 	}
4547 
4548 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4549 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4550 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4551 								queue));
4552 			/* This is a hard error, log it. */
4553 			netdev_err(priv->dev,
4554 				   "%s: Tx Ring full when queue awake\n",
4555 				   __func__);
4556 		}
4557 		return NETDEV_TX_BUSY;
4558 	}
4559 
4560 	/* Check if VLAN can be inserted by HW */
4561 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4562 
4563 	entry = tx_q->cur_tx;
4564 	first_entry = entry;
4565 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4566 
4567 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4568 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4569 	 * queues. In that case, checksum offloading for those queues that don't
4570 	 * support tx coe needs to fallback to software checksum calculation.
4571 	 *
4572 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4573 	 * also have to be checksummed in software.
4574 	 */
4575 	if (csum_insertion &&
4576 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4577 	     !stmmac_has_ip_ethertype(skb))) {
4578 		if (unlikely(skb_checksum_help(skb)))
4579 			goto dma_map_err;
4580 		csum_insertion = !csum_insertion;
4581 	}
4582 
4583 	if (likely(priv->extend_desc))
4584 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4585 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4586 		desc = &tx_q->dma_entx[entry].basic;
4587 	else
4588 		desc = tx_q->dma_tx + entry;
4589 
4590 	first = desc;
4591 
4592 	if (has_vlan)
4593 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4594 
4595 	enh_desc = priv->plat->enh_desc;
4596 	/* To program the descriptors according to the size of the frame */
4597 	if (enh_desc)
4598 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4599 
4600 	if (unlikely(is_jumbo)) {
4601 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4602 		if (unlikely(entry < 0) && (entry != -EINVAL))
4603 			goto dma_map_err;
4604 	}
4605 
4606 	for (i = 0; i < nfrags; i++) {
4607 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4608 		int len = skb_frag_size(frag);
4609 		bool last_segment = (i == (nfrags - 1));
4610 
4611 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4612 		WARN_ON(tx_q->tx_skbuff[entry]);
4613 
4614 		if (likely(priv->extend_desc))
4615 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4616 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4617 			desc = &tx_q->dma_entx[entry].basic;
4618 		else
4619 			desc = tx_q->dma_tx + entry;
4620 
4621 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4622 				       DMA_TO_DEVICE);
4623 		if (dma_mapping_error(priv->device, des))
4624 			goto dma_map_err; /* should reuse desc w/o issues */
4625 
4626 		tx_q->tx_skbuff_dma[entry].buf = des;
4627 
4628 		stmmac_set_desc_addr(priv, desc, des);
4629 
4630 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4631 		tx_q->tx_skbuff_dma[entry].len = len;
4632 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4633 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4634 
4635 		/* Prepare the descriptor and set the own bit too */
4636 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4637 				priv->mode, 1, last_segment, skb->len);
4638 	}
4639 
4640 	/* Only the last descriptor gets to point to the skb. */
4641 	tx_q->tx_skbuff[entry] = skb;
4642 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4643 
4644 	/* According to the coalesce parameter the IC bit for the latest
4645 	 * segment is reset and the timer re-started to clean the tx status.
4646 	 * This approach takes care about the fragments: desc is the first
4647 	 * element in case of no SG.
4648 	 */
4649 	tx_packets = (entry + 1) - first_tx;
4650 	tx_q->tx_count_frames += tx_packets;
4651 
4652 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4653 		set_ic = true;
4654 	else if (!priv->tx_coal_frames[queue])
4655 		set_ic = false;
4656 	else if (tx_packets > priv->tx_coal_frames[queue])
4657 		set_ic = true;
4658 	else if ((tx_q->tx_count_frames %
4659 		  priv->tx_coal_frames[queue]) < tx_packets)
4660 		set_ic = true;
4661 	else
4662 		set_ic = false;
4663 
4664 	if (set_ic) {
4665 		if (likely(priv->extend_desc))
4666 			desc = &tx_q->dma_etx[entry].basic;
4667 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4668 			desc = &tx_q->dma_entx[entry].basic;
4669 		else
4670 			desc = &tx_q->dma_tx[entry];
4671 
4672 		tx_q->tx_count_frames = 0;
4673 		stmmac_set_tx_ic(priv, desc);
4674 	}
4675 
4676 	/* We've used all descriptors we need for this skb, however,
4677 	 * advance cur_tx so that it references a fresh descriptor.
4678 	 * ndo_start_xmit will fill this descriptor the next time it's
4679 	 * called and stmmac_tx_clean may clean up to this descriptor.
4680 	 */
4681 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4682 	tx_q->cur_tx = entry;
4683 
4684 	if (netif_msg_pktdata(priv)) {
4685 		netdev_dbg(priv->dev,
4686 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4687 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4688 			   entry, first, nfrags);
4689 
4690 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4691 		print_pkt(skb->data, skb->len);
4692 	}
4693 
4694 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4695 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4696 			  __func__);
4697 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4698 	}
4699 
4700 	u64_stats_update_begin(&txq_stats->q_syncp);
4701 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4702 	if (set_ic)
4703 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4704 	u64_stats_update_end(&txq_stats->q_syncp);
4705 
4706 	if (priv->sarc_type)
4707 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4708 
4709 	skb_tx_timestamp(skb);
4710 
4711 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4712 	 * problems because all the descriptors are actually ready to be
4713 	 * passed to the DMA engine.
4714 	 */
4715 	if (likely(!is_jumbo)) {
4716 		bool last_segment = (nfrags == 0);
4717 
4718 		des = dma_map_single(priv->device, skb->data,
4719 				     nopaged_len, DMA_TO_DEVICE);
4720 		if (dma_mapping_error(priv->device, des))
4721 			goto dma_map_err;
4722 
4723 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4724 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4725 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4726 
4727 		stmmac_set_desc_addr(priv, first, des);
4728 
4729 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4730 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4731 
4732 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4733 			     priv->hwts_tx_en)) {
4734 			/* declare that device is doing timestamping */
4735 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4736 			stmmac_enable_tx_timestamp(priv, first);
4737 		}
4738 
4739 		/* Prepare the first descriptor setting the OWN bit too */
4740 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4741 				csum_insertion, priv->mode, 0, last_segment,
4742 				skb->len);
4743 	}
4744 
4745 	if (tx_q->tbs & STMMAC_TBS_EN) {
4746 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4747 
4748 		tbs_desc = &tx_q->dma_entx[first_entry];
4749 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4750 	}
4751 
4752 	stmmac_set_tx_owner(priv, first);
4753 
4754 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4755 
4756 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4757 
4758 	stmmac_flush_tx_descriptors(priv, queue);
4759 	stmmac_tx_timer_arm(priv, queue);
4760 
4761 	return NETDEV_TX_OK;
4762 
4763 dma_map_err:
4764 	netdev_err(priv->dev, "Tx DMA map failed\n");
4765 max_sdu_err:
4766 	dev_kfree_skb(skb);
4767 	priv->xstats.tx_dropped++;
4768 	return NETDEV_TX_OK;
4769 }
4770 
4771 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4772 {
4773 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4774 	__be16 vlan_proto = veth->h_vlan_proto;
4775 	u16 vlanid;
4776 
4777 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4778 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4779 	    (vlan_proto == htons(ETH_P_8021AD) &&
4780 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4781 		/* pop the vlan tag */
4782 		vlanid = ntohs(veth->h_vlan_TCI);
4783 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4784 		skb_pull(skb, VLAN_HLEN);
4785 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4786 	}
4787 }
4788 
4789 /**
4790  * stmmac_rx_refill - refill used skb preallocated buffers
4791  * @priv: driver private structure
4792  * @queue: RX queue index
4793  * Description : this is to reallocate the skb for the reception process
4794  * that is based on zero-copy.
4795  */
4796 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4797 {
4798 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4799 	int dirty = stmmac_rx_dirty(priv, queue);
4800 	unsigned int entry = rx_q->dirty_rx;
4801 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4802 
4803 	if (priv->dma_cap.host_dma_width <= 32)
4804 		gfp |= GFP_DMA32;
4805 
4806 	while (dirty-- > 0) {
4807 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4808 		struct dma_desc *p;
4809 		bool use_rx_wd;
4810 
4811 		if (priv->extend_desc)
4812 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4813 		else
4814 			p = rx_q->dma_rx + entry;
4815 
4816 		if (!buf->page) {
4817 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4818 			if (!buf->page)
4819 				break;
4820 		}
4821 
4822 		if (priv->sph && !buf->sec_page) {
4823 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4824 			if (!buf->sec_page)
4825 				break;
4826 
4827 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4828 		}
4829 
4830 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4831 
4832 		stmmac_set_desc_addr(priv, p, buf->addr);
4833 		if (priv->sph)
4834 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4835 		else
4836 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4837 		stmmac_refill_desc3(priv, rx_q, p);
4838 
4839 		rx_q->rx_count_frames++;
4840 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4841 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4842 			rx_q->rx_count_frames = 0;
4843 
4844 		use_rx_wd = !priv->rx_coal_frames[queue];
4845 		use_rx_wd |= rx_q->rx_count_frames > 0;
4846 		if (!priv->use_riwt)
4847 			use_rx_wd = false;
4848 
4849 		dma_wmb();
4850 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4851 
4852 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4853 	}
4854 	rx_q->dirty_rx = entry;
4855 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4856 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4857 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4858 }
4859 
4860 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4861 				       struct dma_desc *p,
4862 				       int status, unsigned int len)
4863 {
4864 	unsigned int plen = 0, hlen = 0;
4865 	int coe = priv->hw->rx_csum;
4866 
4867 	/* Not first descriptor, buffer is always zero */
4868 	if (priv->sph && len)
4869 		return 0;
4870 
4871 	/* First descriptor, get split header length */
4872 	stmmac_get_rx_header_len(priv, p, &hlen);
4873 	if (priv->sph && hlen) {
4874 		priv->xstats.rx_split_hdr_pkt_n++;
4875 		return hlen;
4876 	}
4877 
4878 	/* First descriptor, not last descriptor and not split header */
4879 	if (status & rx_not_ls)
4880 		return priv->dma_conf.dma_buf_sz;
4881 
4882 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4883 
4884 	/* First descriptor and last descriptor and not split header */
4885 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4886 }
4887 
4888 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4889 				       struct dma_desc *p,
4890 				       int status, unsigned int len)
4891 {
4892 	int coe = priv->hw->rx_csum;
4893 	unsigned int plen = 0;
4894 
4895 	/* Not split header, buffer is not available */
4896 	if (!priv->sph)
4897 		return 0;
4898 
4899 	/* Not last descriptor */
4900 	if (status & rx_not_ls)
4901 		return priv->dma_conf.dma_buf_sz;
4902 
4903 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4904 
4905 	/* Last descriptor */
4906 	return plen - len;
4907 }
4908 
4909 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4910 				struct xdp_frame *xdpf, bool dma_map)
4911 {
4912 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4913 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4914 	unsigned int entry = tx_q->cur_tx;
4915 	struct dma_desc *tx_desc;
4916 	dma_addr_t dma_addr;
4917 	bool set_ic;
4918 
4919 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4920 		return STMMAC_XDP_CONSUMED;
4921 
4922 	if (priv->est && priv->est->enable &&
4923 	    priv->est->max_sdu[queue] &&
4924 	    xdpf->len > priv->est->max_sdu[queue]) {
4925 		priv->xstats.max_sdu_txq_drop[queue]++;
4926 		return STMMAC_XDP_CONSUMED;
4927 	}
4928 
4929 	if (likely(priv->extend_desc))
4930 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4931 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4932 		tx_desc = &tx_q->dma_entx[entry].basic;
4933 	else
4934 		tx_desc = tx_q->dma_tx + entry;
4935 
4936 	if (dma_map) {
4937 		dma_addr = dma_map_single(priv->device, xdpf->data,
4938 					  xdpf->len, DMA_TO_DEVICE);
4939 		if (dma_mapping_error(priv->device, dma_addr))
4940 			return STMMAC_XDP_CONSUMED;
4941 
4942 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4943 	} else {
4944 		struct page *page = virt_to_page(xdpf->data);
4945 
4946 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4947 			   xdpf->headroom;
4948 		dma_sync_single_for_device(priv->device, dma_addr,
4949 					   xdpf->len, DMA_BIDIRECTIONAL);
4950 
4951 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4952 	}
4953 
4954 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4955 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4956 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4957 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4958 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4959 
4960 	tx_q->xdpf[entry] = xdpf;
4961 
4962 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4963 
4964 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4965 			       true, priv->mode, true, true,
4966 			       xdpf->len);
4967 
4968 	tx_q->tx_count_frames++;
4969 
4970 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4971 		set_ic = true;
4972 	else
4973 		set_ic = false;
4974 
4975 	if (set_ic) {
4976 		tx_q->tx_count_frames = 0;
4977 		stmmac_set_tx_ic(priv, tx_desc);
4978 		u64_stats_update_begin(&txq_stats->q_syncp);
4979 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4980 		u64_stats_update_end(&txq_stats->q_syncp);
4981 	}
4982 
4983 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4984 
4985 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4986 	tx_q->cur_tx = entry;
4987 
4988 	return STMMAC_XDP_TX;
4989 }
4990 
4991 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4992 				   int cpu)
4993 {
4994 	int index = cpu;
4995 
4996 	if (unlikely(index < 0))
4997 		index = 0;
4998 
4999 	while (index >= priv->plat->tx_queues_to_use)
5000 		index -= priv->plat->tx_queues_to_use;
5001 
5002 	return index;
5003 }
5004 
5005 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5006 				struct xdp_buff *xdp)
5007 {
5008 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5009 	int cpu = smp_processor_id();
5010 	struct netdev_queue *nq;
5011 	int queue;
5012 	int res;
5013 
5014 	if (unlikely(!xdpf))
5015 		return STMMAC_XDP_CONSUMED;
5016 
5017 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5018 	nq = netdev_get_tx_queue(priv->dev, queue);
5019 
5020 	__netif_tx_lock(nq, cpu);
5021 	/* Avoids TX time-out as we are sharing with slow path */
5022 	txq_trans_cond_update(nq);
5023 
5024 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5025 	if (res == STMMAC_XDP_TX)
5026 		stmmac_flush_tx_descriptors(priv, queue);
5027 
5028 	__netif_tx_unlock(nq);
5029 
5030 	return res;
5031 }
5032 
5033 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5034 				 struct bpf_prog *prog,
5035 				 struct xdp_buff *xdp)
5036 {
5037 	u32 act;
5038 	int res;
5039 
5040 	act = bpf_prog_run_xdp(prog, xdp);
5041 	switch (act) {
5042 	case XDP_PASS:
5043 		res = STMMAC_XDP_PASS;
5044 		break;
5045 	case XDP_TX:
5046 		res = stmmac_xdp_xmit_back(priv, xdp);
5047 		break;
5048 	case XDP_REDIRECT:
5049 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5050 			res = STMMAC_XDP_CONSUMED;
5051 		else
5052 			res = STMMAC_XDP_REDIRECT;
5053 		break;
5054 	default:
5055 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5056 		fallthrough;
5057 	case XDP_ABORTED:
5058 		trace_xdp_exception(priv->dev, prog, act);
5059 		fallthrough;
5060 	case XDP_DROP:
5061 		res = STMMAC_XDP_CONSUMED;
5062 		break;
5063 	}
5064 
5065 	return res;
5066 }
5067 
5068 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5069 					   struct xdp_buff *xdp)
5070 {
5071 	struct bpf_prog *prog;
5072 	int res;
5073 
5074 	prog = READ_ONCE(priv->xdp_prog);
5075 	if (!prog) {
5076 		res = STMMAC_XDP_PASS;
5077 		goto out;
5078 	}
5079 
5080 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5081 out:
5082 	return ERR_PTR(-res);
5083 }
5084 
5085 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5086 				   int xdp_status)
5087 {
5088 	int cpu = smp_processor_id();
5089 	int queue;
5090 
5091 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5092 
5093 	if (xdp_status & STMMAC_XDP_TX)
5094 		stmmac_tx_timer_arm(priv, queue);
5095 
5096 	if (xdp_status & STMMAC_XDP_REDIRECT)
5097 		xdp_do_flush();
5098 }
5099 
5100 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5101 					       struct xdp_buff *xdp)
5102 {
5103 	unsigned int metasize = xdp->data - xdp->data_meta;
5104 	unsigned int datasize = xdp->data_end - xdp->data;
5105 	struct sk_buff *skb;
5106 
5107 	skb = napi_alloc_skb(&ch->rxtx_napi,
5108 			     xdp->data_end - xdp->data_hard_start);
5109 	if (unlikely(!skb))
5110 		return NULL;
5111 
5112 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5113 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5114 	if (metasize)
5115 		skb_metadata_set(skb, metasize);
5116 
5117 	return skb;
5118 }
5119 
5120 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5121 				   struct dma_desc *p, struct dma_desc *np,
5122 				   struct xdp_buff *xdp)
5123 {
5124 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5125 	struct stmmac_channel *ch = &priv->channel[queue];
5126 	unsigned int len = xdp->data_end - xdp->data;
5127 	enum pkt_hash_types hash_type;
5128 	int coe = priv->hw->rx_csum;
5129 	struct sk_buff *skb;
5130 	u32 hash;
5131 
5132 	skb = stmmac_construct_skb_zc(ch, xdp);
5133 	if (!skb) {
5134 		priv->xstats.rx_dropped++;
5135 		return;
5136 	}
5137 
5138 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5139 	if (priv->hw->hw_vlan_en)
5140 		/* MAC level stripping. */
5141 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5142 	else
5143 		/* Driver level stripping. */
5144 		stmmac_rx_vlan(priv->dev, skb);
5145 	skb->protocol = eth_type_trans(skb, priv->dev);
5146 
5147 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5148 		skb_checksum_none_assert(skb);
5149 	else
5150 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5151 
5152 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5153 		skb_set_hash(skb, hash, hash_type);
5154 
5155 	skb_record_rx_queue(skb, queue);
5156 	napi_gro_receive(&ch->rxtx_napi, skb);
5157 
5158 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5159 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5160 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5161 	u64_stats_update_end(&rxq_stats->napi_syncp);
5162 }
5163 
5164 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5165 {
5166 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5167 	unsigned int entry = rx_q->dirty_rx;
5168 	struct dma_desc *rx_desc = NULL;
5169 	bool ret = true;
5170 
5171 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5172 
5173 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5174 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5175 		dma_addr_t dma_addr;
5176 		bool use_rx_wd;
5177 
5178 		if (!buf->xdp) {
5179 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5180 			if (!buf->xdp) {
5181 				ret = false;
5182 				break;
5183 			}
5184 		}
5185 
5186 		if (priv->extend_desc)
5187 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5188 		else
5189 			rx_desc = rx_q->dma_rx + entry;
5190 
5191 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5192 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5193 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5194 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5195 
5196 		rx_q->rx_count_frames++;
5197 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5198 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5199 			rx_q->rx_count_frames = 0;
5200 
5201 		use_rx_wd = !priv->rx_coal_frames[queue];
5202 		use_rx_wd |= rx_q->rx_count_frames > 0;
5203 		if (!priv->use_riwt)
5204 			use_rx_wd = false;
5205 
5206 		dma_wmb();
5207 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5208 
5209 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5210 	}
5211 
5212 	if (rx_desc) {
5213 		rx_q->dirty_rx = entry;
5214 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5215 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5216 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5217 	}
5218 
5219 	return ret;
5220 }
5221 
5222 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5223 {
5224 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5225 	 * to represent incoming packet, whereas cb field in the same structure
5226 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5227 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5228 	 */
5229 	return (struct stmmac_xdp_buff *)xdp;
5230 }
5231 
5232 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5233 {
5234 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5235 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5236 	unsigned int count = 0, error = 0, len = 0;
5237 	int dirty = stmmac_rx_dirty(priv, queue);
5238 	unsigned int next_entry = rx_q->cur_rx;
5239 	u32 rx_errors = 0, rx_dropped = 0;
5240 	unsigned int desc_size;
5241 	struct bpf_prog *prog;
5242 	bool failure = false;
5243 	int xdp_status = 0;
5244 	int status = 0;
5245 
5246 	if (netif_msg_rx_status(priv)) {
5247 		void *rx_head;
5248 
5249 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5250 		if (priv->extend_desc) {
5251 			rx_head = (void *)rx_q->dma_erx;
5252 			desc_size = sizeof(struct dma_extended_desc);
5253 		} else {
5254 			rx_head = (void *)rx_q->dma_rx;
5255 			desc_size = sizeof(struct dma_desc);
5256 		}
5257 
5258 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5259 				    rx_q->dma_rx_phy, desc_size);
5260 	}
5261 	while (count < limit) {
5262 		struct stmmac_rx_buffer *buf;
5263 		struct stmmac_xdp_buff *ctx;
5264 		unsigned int buf1_len = 0;
5265 		struct dma_desc *np, *p;
5266 		int entry;
5267 		int res;
5268 
5269 		if (!count && rx_q->state_saved) {
5270 			error = rx_q->state.error;
5271 			len = rx_q->state.len;
5272 		} else {
5273 			rx_q->state_saved = false;
5274 			error = 0;
5275 			len = 0;
5276 		}
5277 
5278 		if (count >= limit)
5279 			break;
5280 
5281 read_again:
5282 		buf1_len = 0;
5283 		entry = next_entry;
5284 		buf = &rx_q->buf_pool[entry];
5285 
5286 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5287 			failure = failure ||
5288 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5289 			dirty = 0;
5290 		}
5291 
5292 		if (priv->extend_desc)
5293 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5294 		else
5295 			p = rx_q->dma_rx + entry;
5296 
5297 		/* read the status of the incoming frame */
5298 		status = stmmac_rx_status(priv, &priv->xstats, p);
5299 		/* check if managed by the DMA otherwise go ahead */
5300 		if (unlikely(status & dma_own))
5301 			break;
5302 
5303 		/* Prefetch the next RX descriptor */
5304 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5305 						priv->dma_conf.dma_rx_size);
5306 		next_entry = rx_q->cur_rx;
5307 
5308 		if (priv->extend_desc)
5309 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5310 		else
5311 			np = rx_q->dma_rx + next_entry;
5312 
5313 		prefetch(np);
5314 
5315 		/* Ensure a valid XSK buffer before proceed */
5316 		if (!buf->xdp)
5317 			break;
5318 
5319 		if (priv->extend_desc)
5320 			stmmac_rx_extended_status(priv, &priv->xstats,
5321 						  rx_q->dma_erx + entry);
5322 		if (unlikely(status == discard_frame)) {
5323 			xsk_buff_free(buf->xdp);
5324 			buf->xdp = NULL;
5325 			dirty++;
5326 			error = 1;
5327 			if (!priv->hwts_rx_en)
5328 				rx_errors++;
5329 		}
5330 
5331 		if (unlikely(error && (status & rx_not_ls)))
5332 			goto read_again;
5333 		if (unlikely(error)) {
5334 			count++;
5335 			continue;
5336 		}
5337 
5338 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5339 		if (likely(status & rx_not_ls)) {
5340 			xsk_buff_free(buf->xdp);
5341 			buf->xdp = NULL;
5342 			dirty++;
5343 			count++;
5344 			goto read_again;
5345 		}
5346 
5347 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5348 		ctx->priv = priv;
5349 		ctx->desc = p;
5350 		ctx->ndesc = np;
5351 
5352 		/* XDP ZC Frame only support primary buffers for now */
5353 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5354 		len += buf1_len;
5355 
5356 		/* ACS is disabled; strip manually. */
5357 		if (likely(!(status & rx_not_ls))) {
5358 			buf1_len -= ETH_FCS_LEN;
5359 			len -= ETH_FCS_LEN;
5360 		}
5361 
5362 		/* RX buffer is good and fit into a XSK pool buffer */
5363 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5364 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5365 
5366 		prog = READ_ONCE(priv->xdp_prog);
5367 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5368 
5369 		switch (res) {
5370 		case STMMAC_XDP_PASS:
5371 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5372 			xsk_buff_free(buf->xdp);
5373 			break;
5374 		case STMMAC_XDP_CONSUMED:
5375 			xsk_buff_free(buf->xdp);
5376 			rx_dropped++;
5377 			break;
5378 		case STMMAC_XDP_TX:
5379 		case STMMAC_XDP_REDIRECT:
5380 			xdp_status |= res;
5381 			break;
5382 		}
5383 
5384 		buf->xdp = NULL;
5385 		dirty++;
5386 		count++;
5387 	}
5388 
5389 	if (status & rx_not_ls) {
5390 		rx_q->state_saved = true;
5391 		rx_q->state.error = error;
5392 		rx_q->state.len = len;
5393 	}
5394 
5395 	stmmac_finalize_xdp_rx(priv, xdp_status);
5396 
5397 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5398 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5399 	u64_stats_update_end(&rxq_stats->napi_syncp);
5400 
5401 	priv->xstats.rx_dropped += rx_dropped;
5402 	priv->xstats.rx_errors += rx_errors;
5403 
5404 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5405 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5406 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5407 		else
5408 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5409 
5410 		return (int)count;
5411 	}
5412 
5413 	return failure ? limit : (int)count;
5414 }
5415 
5416 /**
5417  * stmmac_rx - manage the receive process
5418  * @priv: driver private structure
5419  * @limit: napi bugget
5420  * @queue: RX queue index.
5421  * Description :  this the function called by the napi poll method.
5422  * It gets all the frames inside the ring.
5423  */
5424 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5425 {
5426 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5427 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5428 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5429 	struct stmmac_channel *ch = &priv->channel[queue];
5430 	unsigned int count = 0, error = 0, len = 0;
5431 	int status = 0, coe = priv->hw->rx_csum;
5432 	unsigned int next_entry = rx_q->cur_rx;
5433 	enum dma_data_direction dma_dir;
5434 	unsigned int desc_size;
5435 	struct sk_buff *skb = NULL;
5436 	struct stmmac_xdp_buff ctx;
5437 	int xdp_status = 0;
5438 	int buf_sz;
5439 
5440 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5441 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5442 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5443 
5444 	if (netif_msg_rx_status(priv)) {
5445 		void *rx_head;
5446 
5447 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5448 		if (priv->extend_desc) {
5449 			rx_head = (void *)rx_q->dma_erx;
5450 			desc_size = sizeof(struct dma_extended_desc);
5451 		} else {
5452 			rx_head = (void *)rx_q->dma_rx;
5453 			desc_size = sizeof(struct dma_desc);
5454 		}
5455 
5456 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5457 				    rx_q->dma_rx_phy, desc_size);
5458 	}
5459 	while (count < limit) {
5460 		unsigned int buf1_len = 0, buf2_len = 0;
5461 		enum pkt_hash_types hash_type;
5462 		struct stmmac_rx_buffer *buf;
5463 		struct dma_desc *np, *p;
5464 		int entry;
5465 		u32 hash;
5466 
5467 		if (!count && rx_q->state_saved) {
5468 			skb = rx_q->state.skb;
5469 			error = rx_q->state.error;
5470 			len = rx_q->state.len;
5471 		} else {
5472 			rx_q->state_saved = false;
5473 			skb = NULL;
5474 			error = 0;
5475 			len = 0;
5476 		}
5477 
5478 read_again:
5479 		if (count >= limit)
5480 			break;
5481 
5482 		buf1_len = 0;
5483 		buf2_len = 0;
5484 		entry = next_entry;
5485 		buf = &rx_q->buf_pool[entry];
5486 
5487 		if (priv->extend_desc)
5488 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5489 		else
5490 			p = rx_q->dma_rx + entry;
5491 
5492 		/* read the status of the incoming frame */
5493 		status = stmmac_rx_status(priv, &priv->xstats, p);
5494 		/* check if managed by the DMA otherwise go ahead */
5495 		if (unlikely(status & dma_own))
5496 			break;
5497 
5498 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5499 						priv->dma_conf.dma_rx_size);
5500 		next_entry = rx_q->cur_rx;
5501 
5502 		if (priv->extend_desc)
5503 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5504 		else
5505 			np = rx_q->dma_rx + next_entry;
5506 
5507 		prefetch(np);
5508 
5509 		if (priv->extend_desc)
5510 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5511 		if (unlikely(status == discard_frame)) {
5512 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5513 			buf->page = NULL;
5514 			error = 1;
5515 			if (!priv->hwts_rx_en)
5516 				rx_errors++;
5517 		}
5518 
5519 		if (unlikely(error && (status & rx_not_ls)))
5520 			goto read_again;
5521 		if (unlikely(error)) {
5522 			dev_kfree_skb(skb);
5523 			skb = NULL;
5524 			count++;
5525 			continue;
5526 		}
5527 
5528 		/* Buffer is good. Go on. */
5529 
5530 		prefetch(page_address(buf->page) + buf->page_offset);
5531 		if (buf->sec_page)
5532 			prefetch(page_address(buf->sec_page));
5533 
5534 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5535 		len += buf1_len;
5536 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5537 		len += buf2_len;
5538 
5539 		/* ACS is disabled; strip manually. */
5540 		if (likely(!(status & rx_not_ls))) {
5541 			if (buf2_len) {
5542 				buf2_len -= ETH_FCS_LEN;
5543 				len -= ETH_FCS_LEN;
5544 			} else if (buf1_len) {
5545 				buf1_len -= ETH_FCS_LEN;
5546 				len -= ETH_FCS_LEN;
5547 			}
5548 		}
5549 
5550 		if (!skb) {
5551 			unsigned int pre_len, sync_len;
5552 
5553 			dma_sync_single_for_cpu(priv->device, buf->addr,
5554 						buf1_len, dma_dir);
5555 
5556 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5557 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5558 					 buf->page_offset, buf1_len, true);
5559 
5560 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5561 				  buf->page_offset;
5562 
5563 			ctx.priv = priv;
5564 			ctx.desc = p;
5565 			ctx.ndesc = np;
5566 
5567 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5568 			/* Due xdp_adjust_tail: DMA sync for_device
5569 			 * cover max len CPU touch
5570 			 */
5571 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5572 				   buf->page_offset;
5573 			sync_len = max(sync_len, pre_len);
5574 
5575 			/* For Not XDP_PASS verdict */
5576 			if (IS_ERR(skb)) {
5577 				unsigned int xdp_res = -PTR_ERR(skb);
5578 
5579 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5580 					page_pool_put_page(rx_q->page_pool,
5581 							   virt_to_head_page(ctx.xdp.data),
5582 							   sync_len, true);
5583 					buf->page = NULL;
5584 					rx_dropped++;
5585 
5586 					/* Clear skb as it was set as
5587 					 * status by XDP program.
5588 					 */
5589 					skb = NULL;
5590 
5591 					if (unlikely((status & rx_not_ls)))
5592 						goto read_again;
5593 
5594 					count++;
5595 					continue;
5596 				} else if (xdp_res & (STMMAC_XDP_TX |
5597 						      STMMAC_XDP_REDIRECT)) {
5598 					xdp_status |= xdp_res;
5599 					buf->page = NULL;
5600 					skb = NULL;
5601 					count++;
5602 					continue;
5603 				}
5604 			}
5605 		}
5606 
5607 		if (!skb) {
5608 			/* XDP program may expand or reduce tail */
5609 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5610 
5611 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5612 			if (!skb) {
5613 				rx_dropped++;
5614 				count++;
5615 				goto drain_data;
5616 			}
5617 
5618 			/* XDP program may adjust header */
5619 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5620 			skb_put(skb, buf1_len);
5621 
5622 			/* Data payload copied into SKB, page ready for recycle */
5623 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5624 			buf->page = NULL;
5625 		} else if (buf1_len) {
5626 			dma_sync_single_for_cpu(priv->device, buf->addr,
5627 						buf1_len, dma_dir);
5628 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5629 					buf->page, buf->page_offset, buf1_len,
5630 					priv->dma_conf.dma_buf_sz);
5631 
5632 			/* Data payload appended into SKB */
5633 			skb_mark_for_recycle(skb);
5634 			buf->page = NULL;
5635 		}
5636 
5637 		if (buf2_len) {
5638 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5639 						buf2_len, dma_dir);
5640 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5641 					buf->sec_page, 0, buf2_len,
5642 					priv->dma_conf.dma_buf_sz);
5643 
5644 			/* Data payload appended into SKB */
5645 			skb_mark_for_recycle(skb);
5646 			buf->sec_page = NULL;
5647 		}
5648 
5649 drain_data:
5650 		if (likely(status & rx_not_ls))
5651 			goto read_again;
5652 		if (!skb)
5653 			continue;
5654 
5655 		/* Got entire packet into SKB. Finish it. */
5656 
5657 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5658 
5659 		if (priv->hw->hw_vlan_en)
5660 			/* MAC level stripping. */
5661 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5662 		else
5663 			/* Driver level stripping. */
5664 			stmmac_rx_vlan(priv->dev, skb);
5665 
5666 		skb->protocol = eth_type_trans(skb, priv->dev);
5667 
5668 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5669 			skb_checksum_none_assert(skb);
5670 		else
5671 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5672 
5673 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5674 			skb_set_hash(skb, hash, hash_type);
5675 
5676 		skb_record_rx_queue(skb, queue);
5677 		napi_gro_receive(&ch->rx_napi, skb);
5678 		skb = NULL;
5679 
5680 		rx_packets++;
5681 		rx_bytes += len;
5682 		count++;
5683 	}
5684 
5685 	if (status & rx_not_ls || skb) {
5686 		rx_q->state_saved = true;
5687 		rx_q->state.skb = skb;
5688 		rx_q->state.error = error;
5689 		rx_q->state.len = len;
5690 	}
5691 
5692 	stmmac_finalize_xdp_rx(priv, xdp_status);
5693 
5694 	stmmac_rx_refill(priv, queue);
5695 
5696 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5697 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5698 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5699 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5700 	u64_stats_update_end(&rxq_stats->napi_syncp);
5701 
5702 	priv->xstats.rx_dropped += rx_dropped;
5703 	priv->xstats.rx_errors += rx_errors;
5704 
5705 	return count;
5706 }
5707 
5708 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5709 {
5710 	struct stmmac_channel *ch =
5711 		container_of(napi, struct stmmac_channel, rx_napi);
5712 	struct stmmac_priv *priv = ch->priv_data;
5713 	struct stmmac_rxq_stats *rxq_stats;
5714 	u32 chan = ch->index;
5715 	int work_done;
5716 
5717 	rxq_stats = &priv->xstats.rxq_stats[chan];
5718 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5719 	u64_stats_inc(&rxq_stats->napi.poll);
5720 	u64_stats_update_end(&rxq_stats->napi_syncp);
5721 
5722 	work_done = stmmac_rx(priv, budget, chan);
5723 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5724 		unsigned long flags;
5725 
5726 		spin_lock_irqsave(&ch->lock, flags);
5727 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5728 		spin_unlock_irqrestore(&ch->lock, flags);
5729 	}
5730 
5731 	return work_done;
5732 }
5733 
5734 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5735 {
5736 	struct stmmac_channel *ch =
5737 		container_of(napi, struct stmmac_channel, tx_napi);
5738 	struct stmmac_priv *priv = ch->priv_data;
5739 	struct stmmac_txq_stats *txq_stats;
5740 	bool pending_packets = false;
5741 	u32 chan = ch->index;
5742 	int work_done;
5743 
5744 	txq_stats = &priv->xstats.txq_stats[chan];
5745 	u64_stats_update_begin(&txq_stats->napi_syncp);
5746 	u64_stats_inc(&txq_stats->napi.poll);
5747 	u64_stats_update_end(&txq_stats->napi_syncp);
5748 
5749 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5750 	work_done = min(work_done, budget);
5751 
5752 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5753 		unsigned long flags;
5754 
5755 		spin_lock_irqsave(&ch->lock, flags);
5756 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5757 		spin_unlock_irqrestore(&ch->lock, flags);
5758 	}
5759 
5760 	/* TX still have packet to handle, check if we need to arm tx timer */
5761 	if (pending_packets)
5762 		stmmac_tx_timer_arm(priv, chan);
5763 
5764 	return work_done;
5765 }
5766 
5767 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5768 {
5769 	struct stmmac_channel *ch =
5770 		container_of(napi, struct stmmac_channel, rxtx_napi);
5771 	struct stmmac_priv *priv = ch->priv_data;
5772 	bool tx_pending_packets = false;
5773 	int rx_done, tx_done, rxtx_done;
5774 	struct stmmac_rxq_stats *rxq_stats;
5775 	struct stmmac_txq_stats *txq_stats;
5776 	u32 chan = ch->index;
5777 
5778 	rxq_stats = &priv->xstats.rxq_stats[chan];
5779 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5780 	u64_stats_inc(&rxq_stats->napi.poll);
5781 	u64_stats_update_end(&rxq_stats->napi_syncp);
5782 
5783 	txq_stats = &priv->xstats.txq_stats[chan];
5784 	u64_stats_update_begin(&txq_stats->napi_syncp);
5785 	u64_stats_inc(&txq_stats->napi.poll);
5786 	u64_stats_update_end(&txq_stats->napi_syncp);
5787 
5788 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5789 	tx_done = min(tx_done, budget);
5790 
5791 	rx_done = stmmac_rx_zc(priv, budget, chan);
5792 
5793 	rxtx_done = max(tx_done, rx_done);
5794 
5795 	/* If either TX or RX work is not complete, return budget
5796 	 * and keep pooling
5797 	 */
5798 	if (rxtx_done >= budget)
5799 		return budget;
5800 
5801 	/* all work done, exit the polling mode */
5802 	if (napi_complete_done(napi, rxtx_done)) {
5803 		unsigned long flags;
5804 
5805 		spin_lock_irqsave(&ch->lock, flags);
5806 		/* Both RX and TX work done are compelte,
5807 		 * so enable both RX & TX IRQs.
5808 		 */
5809 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5810 		spin_unlock_irqrestore(&ch->lock, flags);
5811 	}
5812 
5813 	/* TX still have packet to handle, check if we need to arm tx timer */
5814 	if (tx_pending_packets)
5815 		stmmac_tx_timer_arm(priv, chan);
5816 
5817 	return min(rxtx_done, budget - 1);
5818 }
5819 
5820 /**
5821  *  stmmac_tx_timeout
5822  *  @dev : Pointer to net device structure
5823  *  @txqueue: the index of the hanging transmit queue
5824  *  Description: this function is called when a packet transmission fails to
5825  *   complete within a reasonable time. The driver will mark the error in the
5826  *   netdev structure and arrange for the device to be reset to a sane state
5827  *   in order to transmit a new packet.
5828  */
5829 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5830 {
5831 	struct stmmac_priv *priv = netdev_priv(dev);
5832 
5833 	stmmac_global_err(priv);
5834 }
5835 
5836 /**
5837  *  stmmac_set_rx_mode - entry point for multicast addressing
5838  *  @dev : pointer to the device structure
5839  *  Description:
5840  *  This function is a driver entry point which gets called by the kernel
5841  *  whenever multicast addresses must be enabled/disabled.
5842  *  Return value:
5843  *  void.
5844  */
5845 static void stmmac_set_rx_mode(struct net_device *dev)
5846 {
5847 	struct stmmac_priv *priv = netdev_priv(dev);
5848 
5849 	stmmac_set_filter(priv, priv->hw, dev);
5850 }
5851 
5852 /**
5853  *  stmmac_change_mtu - entry point to change MTU size for the device.
5854  *  @dev : device pointer.
5855  *  @new_mtu : the new MTU size for the device.
5856  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5857  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5858  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5859  *  Return value:
5860  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5861  *  file on failure.
5862  */
5863 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5864 {
5865 	struct stmmac_priv *priv = netdev_priv(dev);
5866 	int txfifosz = priv->plat->tx_fifo_size;
5867 	struct stmmac_dma_conf *dma_conf;
5868 	const int mtu = new_mtu;
5869 	int ret;
5870 
5871 	if (txfifosz == 0)
5872 		txfifosz = priv->dma_cap.tx_fifo_size;
5873 
5874 	txfifosz /= priv->plat->tx_queues_to_use;
5875 
5876 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5877 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5878 		return -EINVAL;
5879 	}
5880 
5881 	new_mtu = STMMAC_ALIGN(new_mtu);
5882 
5883 	/* If condition true, FIFO is too small or MTU too large */
5884 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5885 		return -EINVAL;
5886 
5887 	if (netif_running(dev)) {
5888 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5889 		/* Try to allocate the new DMA conf with the new mtu */
5890 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5891 		if (IS_ERR(dma_conf)) {
5892 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5893 				   mtu);
5894 			return PTR_ERR(dma_conf);
5895 		}
5896 
5897 		stmmac_release(dev);
5898 
5899 		ret = __stmmac_open(dev, dma_conf);
5900 		if (ret) {
5901 			free_dma_desc_resources(priv, dma_conf);
5902 			kfree(dma_conf);
5903 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5904 			return ret;
5905 		}
5906 
5907 		kfree(dma_conf);
5908 
5909 		stmmac_set_rx_mode(dev);
5910 	}
5911 
5912 	WRITE_ONCE(dev->mtu, mtu);
5913 	netdev_update_features(dev);
5914 
5915 	return 0;
5916 }
5917 
5918 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5919 					     netdev_features_t features)
5920 {
5921 	struct stmmac_priv *priv = netdev_priv(dev);
5922 
5923 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5924 		features &= ~NETIF_F_RXCSUM;
5925 
5926 	if (!priv->plat->tx_coe)
5927 		features &= ~NETIF_F_CSUM_MASK;
5928 
5929 	/* Some GMAC devices have a bugged Jumbo frame support that
5930 	 * needs to have the Tx COE disabled for oversized frames
5931 	 * (due to limited buffer sizes). In this case we disable
5932 	 * the TX csum insertion in the TDES and not use SF.
5933 	 */
5934 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5935 		features &= ~NETIF_F_CSUM_MASK;
5936 
5937 	/* Disable tso if asked by ethtool */
5938 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5939 		if (features & NETIF_F_TSO)
5940 			priv->tso = true;
5941 		else
5942 			priv->tso = false;
5943 	}
5944 
5945 	return features;
5946 }
5947 
5948 static int stmmac_set_features(struct net_device *netdev,
5949 			       netdev_features_t features)
5950 {
5951 	struct stmmac_priv *priv = netdev_priv(netdev);
5952 
5953 	/* Keep the COE Type in case of csum is supporting */
5954 	if (features & NETIF_F_RXCSUM)
5955 		priv->hw->rx_csum = priv->plat->rx_coe;
5956 	else
5957 		priv->hw->rx_csum = 0;
5958 	/* No check needed because rx_coe has been set before and it will be
5959 	 * fixed in case of issue.
5960 	 */
5961 	stmmac_rx_ipc(priv, priv->hw);
5962 
5963 	if (priv->sph_cap) {
5964 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5965 		u32 chan;
5966 
5967 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5968 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5969 	}
5970 
5971 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5972 		priv->hw->hw_vlan_en = true;
5973 	else
5974 		priv->hw->hw_vlan_en = false;
5975 
5976 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5977 
5978 	return 0;
5979 }
5980 
5981 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5982 {
5983 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5984 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5985 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5986 	bool *hs_enable = &fpe_cfg->hs_enable;
5987 
5988 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5989 		return;
5990 
5991 	/* If LP has sent verify mPacket, LP is FPE capable */
5992 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5993 		if (*lp_state < FPE_STATE_CAPABLE)
5994 			*lp_state = FPE_STATE_CAPABLE;
5995 
5996 		/* If user has requested FPE enable, quickly response */
5997 		if (*hs_enable)
5998 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5999 						fpe_cfg,
6000 						MPACKET_RESPONSE);
6001 	}
6002 
6003 	/* If Local has sent verify mPacket, Local is FPE capable */
6004 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
6005 		if (*lo_state < FPE_STATE_CAPABLE)
6006 			*lo_state = FPE_STATE_CAPABLE;
6007 	}
6008 
6009 	/* If LP has sent response mPacket, LP is entering FPE ON */
6010 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
6011 		*lp_state = FPE_STATE_ENTERING_ON;
6012 
6013 	/* If Local has sent response mPacket, Local is entering FPE ON */
6014 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
6015 		*lo_state = FPE_STATE_ENTERING_ON;
6016 
6017 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
6018 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
6019 	    priv->fpe_wq) {
6020 		queue_work(priv->fpe_wq, &priv->fpe_task);
6021 	}
6022 }
6023 
6024 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6025 {
6026 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6027 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6028 	u32 queues_count;
6029 	u32 queue;
6030 	bool xmac;
6031 
6032 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6033 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6034 
6035 	if (priv->irq_wake)
6036 		pm_wakeup_event(priv->device, 0);
6037 
6038 	if (priv->dma_cap.estsel)
6039 		stmmac_est_irq_status(priv, priv, priv->dev,
6040 				      &priv->xstats, tx_cnt);
6041 
6042 	if (priv->dma_cap.fpesel) {
6043 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
6044 						   priv->dev);
6045 
6046 		stmmac_fpe_event_status(priv, status);
6047 	}
6048 
6049 	/* To handle GMAC own interrupts */
6050 	if ((priv->plat->has_gmac) || xmac) {
6051 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6052 
6053 		if (unlikely(status)) {
6054 			/* For LPI we need to save the tx status */
6055 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6056 				priv->tx_path_in_lpi_mode = true;
6057 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6058 				priv->tx_path_in_lpi_mode = false;
6059 		}
6060 
6061 		for (queue = 0; queue < queues_count; queue++)
6062 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6063 
6064 		/* PCS link status */
6065 		if (priv->hw->pcs &&
6066 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6067 			if (priv->xstats.pcs_link)
6068 				netif_carrier_on(priv->dev);
6069 			else
6070 				netif_carrier_off(priv->dev);
6071 		}
6072 
6073 		stmmac_timestamp_interrupt(priv, priv);
6074 	}
6075 }
6076 
6077 /**
6078  *  stmmac_interrupt - main ISR
6079  *  @irq: interrupt number.
6080  *  @dev_id: to pass the net device pointer.
6081  *  Description: this is the main driver interrupt service routine.
6082  *  It can call:
6083  *  o DMA service routine (to manage incoming frame reception and transmission
6084  *    status)
6085  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6086  *    interrupts.
6087  */
6088 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6089 {
6090 	struct net_device *dev = (struct net_device *)dev_id;
6091 	struct stmmac_priv *priv = netdev_priv(dev);
6092 
6093 	/* Check if adapter is up */
6094 	if (test_bit(STMMAC_DOWN, &priv->state))
6095 		return IRQ_HANDLED;
6096 
6097 	/* Check ASP error if it isn't delivered via an individual IRQ */
6098 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6099 		return IRQ_HANDLED;
6100 
6101 	/* To handle Common interrupts */
6102 	stmmac_common_interrupt(priv);
6103 
6104 	/* To handle DMA interrupts */
6105 	stmmac_dma_interrupt(priv);
6106 
6107 	return IRQ_HANDLED;
6108 }
6109 
6110 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6111 {
6112 	struct net_device *dev = (struct net_device *)dev_id;
6113 	struct stmmac_priv *priv = netdev_priv(dev);
6114 
6115 	/* Check if adapter is up */
6116 	if (test_bit(STMMAC_DOWN, &priv->state))
6117 		return IRQ_HANDLED;
6118 
6119 	/* To handle Common interrupts */
6120 	stmmac_common_interrupt(priv);
6121 
6122 	return IRQ_HANDLED;
6123 }
6124 
6125 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6126 {
6127 	struct net_device *dev = (struct net_device *)dev_id;
6128 	struct stmmac_priv *priv = netdev_priv(dev);
6129 
6130 	/* Check if adapter is up */
6131 	if (test_bit(STMMAC_DOWN, &priv->state))
6132 		return IRQ_HANDLED;
6133 
6134 	/* Check if a fatal error happened */
6135 	stmmac_safety_feat_interrupt(priv);
6136 
6137 	return IRQ_HANDLED;
6138 }
6139 
6140 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6141 {
6142 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6143 	struct stmmac_dma_conf *dma_conf;
6144 	int chan = tx_q->queue_index;
6145 	struct stmmac_priv *priv;
6146 	int status;
6147 
6148 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6149 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6150 
6151 	/* Check if adapter is up */
6152 	if (test_bit(STMMAC_DOWN, &priv->state))
6153 		return IRQ_HANDLED;
6154 
6155 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6156 
6157 	if (unlikely(status & tx_hard_error_bump_tc)) {
6158 		/* Try to bump up the dma threshold on this failure */
6159 		stmmac_bump_dma_threshold(priv, chan);
6160 	} else if (unlikely(status == tx_hard_error)) {
6161 		stmmac_tx_err(priv, chan);
6162 	}
6163 
6164 	return IRQ_HANDLED;
6165 }
6166 
6167 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6168 {
6169 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6170 	struct stmmac_dma_conf *dma_conf;
6171 	int chan = rx_q->queue_index;
6172 	struct stmmac_priv *priv;
6173 
6174 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6175 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6176 
6177 	/* Check if adapter is up */
6178 	if (test_bit(STMMAC_DOWN, &priv->state))
6179 		return IRQ_HANDLED;
6180 
6181 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6182 
6183 	return IRQ_HANDLED;
6184 }
6185 
6186 /**
6187  *  stmmac_ioctl - Entry point for the Ioctl
6188  *  @dev: Device pointer.
6189  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6190  *  a proprietary structure used to pass information to the driver.
6191  *  @cmd: IOCTL command
6192  *  Description:
6193  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6194  */
6195 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6196 {
6197 	struct stmmac_priv *priv = netdev_priv (dev);
6198 	int ret = -EOPNOTSUPP;
6199 
6200 	if (!netif_running(dev))
6201 		return -EINVAL;
6202 
6203 	switch (cmd) {
6204 	case SIOCGMIIPHY:
6205 	case SIOCGMIIREG:
6206 	case SIOCSMIIREG:
6207 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6208 		break;
6209 	case SIOCSHWTSTAMP:
6210 		ret = stmmac_hwtstamp_set(dev, rq);
6211 		break;
6212 	case SIOCGHWTSTAMP:
6213 		ret = stmmac_hwtstamp_get(dev, rq);
6214 		break;
6215 	default:
6216 		break;
6217 	}
6218 
6219 	return ret;
6220 }
6221 
6222 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6223 				    void *cb_priv)
6224 {
6225 	struct stmmac_priv *priv = cb_priv;
6226 	int ret = -EOPNOTSUPP;
6227 
6228 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6229 		return ret;
6230 
6231 	__stmmac_disable_all_queues(priv);
6232 
6233 	switch (type) {
6234 	case TC_SETUP_CLSU32:
6235 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6236 		break;
6237 	case TC_SETUP_CLSFLOWER:
6238 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6239 		break;
6240 	default:
6241 		break;
6242 	}
6243 
6244 	stmmac_enable_all_queues(priv);
6245 	return ret;
6246 }
6247 
6248 static LIST_HEAD(stmmac_block_cb_list);
6249 
6250 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6251 			   void *type_data)
6252 {
6253 	struct stmmac_priv *priv = netdev_priv(ndev);
6254 
6255 	switch (type) {
6256 	case TC_QUERY_CAPS:
6257 		return stmmac_tc_query_caps(priv, priv, type_data);
6258 	case TC_SETUP_BLOCK:
6259 		return flow_block_cb_setup_simple(type_data,
6260 						  &stmmac_block_cb_list,
6261 						  stmmac_setup_tc_block_cb,
6262 						  priv, priv, true);
6263 	case TC_SETUP_QDISC_CBS:
6264 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6265 	case TC_SETUP_QDISC_TAPRIO:
6266 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6267 	case TC_SETUP_QDISC_ETF:
6268 		return stmmac_tc_setup_etf(priv, priv, type_data);
6269 	default:
6270 		return -EOPNOTSUPP;
6271 	}
6272 }
6273 
6274 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6275 			       struct net_device *sb_dev)
6276 {
6277 	int gso = skb_shinfo(skb)->gso_type;
6278 
6279 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6280 		/*
6281 		 * There is no way to determine the number of TSO/USO
6282 		 * capable Queues. Let's use always the Queue 0
6283 		 * because if TSO/USO is supported then at least this
6284 		 * one will be capable.
6285 		 */
6286 		return 0;
6287 	}
6288 
6289 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6290 }
6291 
6292 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6293 {
6294 	struct stmmac_priv *priv = netdev_priv(ndev);
6295 	int ret = 0;
6296 
6297 	ret = pm_runtime_resume_and_get(priv->device);
6298 	if (ret < 0)
6299 		return ret;
6300 
6301 	ret = eth_mac_addr(ndev, addr);
6302 	if (ret)
6303 		goto set_mac_error;
6304 
6305 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6306 
6307 set_mac_error:
6308 	pm_runtime_put(priv->device);
6309 
6310 	return ret;
6311 }
6312 
6313 #ifdef CONFIG_DEBUG_FS
6314 static struct dentry *stmmac_fs_dir;
6315 
6316 static void sysfs_display_ring(void *head, int size, int extend_desc,
6317 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6318 {
6319 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6320 	struct dma_desc *p = (struct dma_desc *)head;
6321 	unsigned int desc_size;
6322 	dma_addr_t dma_addr;
6323 	int i;
6324 
6325 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6326 	for (i = 0; i < size; i++) {
6327 		dma_addr = dma_phy_addr + i * desc_size;
6328 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6329 				i, &dma_addr,
6330 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6331 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6332 		if (extend_desc)
6333 			p = &(++ep)->basic;
6334 		else
6335 			p++;
6336 	}
6337 }
6338 
6339 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6340 {
6341 	struct net_device *dev = seq->private;
6342 	struct stmmac_priv *priv = netdev_priv(dev);
6343 	u32 rx_count = priv->plat->rx_queues_to_use;
6344 	u32 tx_count = priv->plat->tx_queues_to_use;
6345 	u32 queue;
6346 
6347 	if ((dev->flags & IFF_UP) == 0)
6348 		return 0;
6349 
6350 	for (queue = 0; queue < rx_count; queue++) {
6351 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6352 
6353 		seq_printf(seq, "RX Queue %d:\n", queue);
6354 
6355 		if (priv->extend_desc) {
6356 			seq_printf(seq, "Extended descriptor ring:\n");
6357 			sysfs_display_ring((void *)rx_q->dma_erx,
6358 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6359 		} else {
6360 			seq_printf(seq, "Descriptor ring:\n");
6361 			sysfs_display_ring((void *)rx_q->dma_rx,
6362 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6363 		}
6364 	}
6365 
6366 	for (queue = 0; queue < tx_count; queue++) {
6367 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6368 
6369 		seq_printf(seq, "TX Queue %d:\n", queue);
6370 
6371 		if (priv->extend_desc) {
6372 			seq_printf(seq, "Extended descriptor ring:\n");
6373 			sysfs_display_ring((void *)tx_q->dma_etx,
6374 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6375 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6376 			seq_printf(seq, "Descriptor ring:\n");
6377 			sysfs_display_ring((void *)tx_q->dma_tx,
6378 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6379 		}
6380 	}
6381 
6382 	return 0;
6383 }
6384 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6385 
6386 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6387 {
6388 	static const char * const dwxgmac_timestamp_source[] = {
6389 		"None",
6390 		"Internal",
6391 		"External",
6392 		"Both",
6393 	};
6394 	static const char * const dwxgmac_safety_feature_desc[] = {
6395 		"No",
6396 		"All Safety Features with ECC and Parity",
6397 		"All Safety Features without ECC or Parity",
6398 		"All Safety Features with Parity Only",
6399 		"ECC Only",
6400 		"UNDEFINED",
6401 		"UNDEFINED",
6402 		"UNDEFINED",
6403 	};
6404 	struct net_device *dev = seq->private;
6405 	struct stmmac_priv *priv = netdev_priv(dev);
6406 
6407 	if (!priv->hw_cap_support) {
6408 		seq_printf(seq, "DMA HW features not supported\n");
6409 		return 0;
6410 	}
6411 
6412 	seq_printf(seq, "==============================\n");
6413 	seq_printf(seq, "\tDMA HW features\n");
6414 	seq_printf(seq, "==============================\n");
6415 
6416 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6417 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6418 	seq_printf(seq, "\t1000 Mbps: %s\n",
6419 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6420 	seq_printf(seq, "\tHalf duplex: %s\n",
6421 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6422 	if (priv->plat->has_xgmac) {
6423 		seq_printf(seq,
6424 			   "\tNumber of Additional MAC address registers: %d\n",
6425 			   priv->dma_cap.multi_addr);
6426 	} else {
6427 		seq_printf(seq, "\tHash Filter: %s\n",
6428 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6429 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6430 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6431 	}
6432 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6433 		   (priv->dma_cap.pcs) ? "Y" : "N");
6434 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6435 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6436 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6437 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6438 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6439 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6440 	seq_printf(seq, "\tRMON module: %s\n",
6441 		   (priv->dma_cap.rmon) ? "Y" : "N");
6442 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6443 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6444 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6445 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6446 	if (priv->plat->has_xgmac)
6447 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6448 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6449 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6450 		   (priv->dma_cap.eee) ? "Y" : "N");
6451 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6452 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6453 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6454 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6455 	    priv->plat->has_xgmac) {
6456 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6457 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6458 	} else {
6459 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6460 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6461 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6462 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6463 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6464 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6465 	}
6466 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6467 		   priv->dma_cap.number_rx_channel);
6468 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6469 		   priv->dma_cap.number_tx_channel);
6470 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6471 		   priv->dma_cap.number_rx_queues);
6472 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6473 		   priv->dma_cap.number_tx_queues);
6474 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6475 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6476 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6477 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6478 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6479 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6480 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6481 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6482 		   priv->dma_cap.pps_out_num);
6483 	seq_printf(seq, "\tSafety Features: %s\n",
6484 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6485 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6486 		   priv->dma_cap.frpsel ? "Y" : "N");
6487 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6488 		   priv->dma_cap.host_dma_width);
6489 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6490 		   priv->dma_cap.rssen ? "Y" : "N");
6491 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6492 		   priv->dma_cap.vlhash ? "Y" : "N");
6493 	seq_printf(seq, "\tSplit Header: %s\n",
6494 		   priv->dma_cap.sphen ? "Y" : "N");
6495 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6496 		   priv->dma_cap.vlins ? "Y" : "N");
6497 	seq_printf(seq, "\tDouble VLAN: %s\n",
6498 		   priv->dma_cap.dvlan ? "Y" : "N");
6499 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6500 		   priv->dma_cap.l3l4fnum);
6501 	seq_printf(seq, "\tARP Offloading: %s\n",
6502 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6503 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6504 		   priv->dma_cap.estsel ? "Y" : "N");
6505 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6506 		   priv->dma_cap.fpesel ? "Y" : "N");
6507 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6508 		   priv->dma_cap.tbssel ? "Y" : "N");
6509 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6510 		   priv->dma_cap.tbs_ch_num);
6511 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6512 		   priv->dma_cap.sgfsel ? "Y" : "N");
6513 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6514 		   BIT(priv->dma_cap.ttsfd) >> 1);
6515 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6516 		   priv->dma_cap.numtc);
6517 	seq_printf(seq, "\tDCB Feature: %s\n",
6518 		   priv->dma_cap.dcben ? "Y" : "N");
6519 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6520 		   priv->dma_cap.advthword ? "Y" : "N");
6521 	seq_printf(seq, "\tPTP Offload: %s\n",
6522 		   priv->dma_cap.ptoen ? "Y" : "N");
6523 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6524 		   priv->dma_cap.osten ? "Y" : "N");
6525 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6526 		   priv->dma_cap.pfcen ? "Y" : "N");
6527 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6528 		   BIT(priv->dma_cap.frpes) << 6);
6529 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6530 		   BIT(priv->dma_cap.frpbs) << 6);
6531 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6532 		   priv->dma_cap.frppipe_num);
6533 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6534 		   priv->dma_cap.nrvf_num ?
6535 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6536 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6537 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6538 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6539 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6540 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6541 		   priv->dma_cap.cbtisel ? "Y" : "N");
6542 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6543 		   priv->dma_cap.aux_snapshot_n);
6544 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6545 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6546 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6547 		   priv->dma_cap.edma ? "Y" : "N");
6548 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6549 		   priv->dma_cap.ediffc ? "Y" : "N");
6550 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6551 		   priv->dma_cap.vxn ? "Y" : "N");
6552 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6553 		   priv->dma_cap.dbgmem ? "Y" : "N");
6554 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6555 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6556 	return 0;
6557 }
6558 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6559 
6560 /* Use network device events to rename debugfs file entries.
6561  */
6562 static int stmmac_device_event(struct notifier_block *unused,
6563 			       unsigned long event, void *ptr)
6564 {
6565 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6566 	struct stmmac_priv *priv = netdev_priv(dev);
6567 
6568 	if (dev->netdev_ops != &stmmac_netdev_ops)
6569 		goto done;
6570 
6571 	switch (event) {
6572 	case NETDEV_CHANGENAME:
6573 		if (priv->dbgfs_dir)
6574 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6575 							 priv->dbgfs_dir,
6576 							 stmmac_fs_dir,
6577 							 dev->name);
6578 		break;
6579 	}
6580 done:
6581 	return NOTIFY_DONE;
6582 }
6583 
6584 static struct notifier_block stmmac_notifier = {
6585 	.notifier_call = stmmac_device_event,
6586 };
6587 
6588 static void stmmac_init_fs(struct net_device *dev)
6589 {
6590 	struct stmmac_priv *priv = netdev_priv(dev);
6591 
6592 	rtnl_lock();
6593 
6594 	/* Create per netdev entries */
6595 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6596 
6597 	/* Entry to report DMA RX/TX rings */
6598 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6599 			    &stmmac_rings_status_fops);
6600 
6601 	/* Entry to report the DMA HW features */
6602 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6603 			    &stmmac_dma_cap_fops);
6604 
6605 	rtnl_unlock();
6606 }
6607 
6608 static void stmmac_exit_fs(struct net_device *dev)
6609 {
6610 	struct stmmac_priv *priv = netdev_priv(dev);
6611 
6612 	debugfs_remove_recursive(priv->dbgfs_dir);
6613 }
6614 #endif /* CONFIG_DEBUG_FS */
6615 
6616 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6617 {
6618 	unsigned char *data = (unsigned char *)&vid_le;
6619 	unsigned char data_byte = 0;
6620 	u32 crc = ~0x0;
6621 	u32 temp = 0;
6622 	int i, bits;
6623 
6624 	bits = get_bitmask_order(VLAN_VID_MASK);
6625 	for (i = 0; i < bits; i++) {
6626 		if ((i % 8) == 0)
6627 			data_byte = data[i / 8];
6628 
6629 		temp = ((crc & 1) ^ data_byte) & 1;
6630 		crc >>= 1;
6631 		data_byte >>= 1;
6632 
6633 		if (temp)
6634 			crc ^= 0xedb88320;
6635 	}
6636 
6637 	return crc;
6638 }
6639 
6640 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6641 {
6642 	u32 crc, hash = 0;
6643 	__le16 pmatch = 0;
6644 	int count = 0;
6645 	u16 vid = 0;
6646 
6647 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6648 		__le16 vid_le = cpu_to_le16(vid);
6649 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6650 		hash |= (1 << crc);
6651 		count++;
6652 	}
6653 
6654 	if (!priv->dma_cap.vlhash) {
6655 		if (count > 2) /* VID = 0 always passes filter */
6656 			return -EOPNOTSUPP;
6657 
6658 		pmatch = cpu_to_le16(vid);
6659 		hash = 0;
6660 	}
6661 
6662 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6663 }
6664 
6665 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6666 {
6667 	struct stmmac_priv *priv = netdev_priv(ndev);
6668 	bool is_double = false;
6669 	int ret;
6670 
6671 	ret = pm_runtime_resume_and_get(priv->device);
6672 	if (ret < 0)
6673 		return ret;
6674 
6675 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6676 		is_double = true;
6677 
6678 	set_bit(vid, priv->active_vlans);
6679 	ret = stmmac_vlan_update(priv, is_double);
6680 	if (ret) {
6681 		clear_bit(vid, priv->active_vlans);
6682 		goto err_pm_put;
6683 	}
6684 
6685 	if (priv->hw->num_vlan) {
6686 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6687 		if (ret)
6688 			goto err_pm_put;
6689 	}
6690 err_pm_put:
6691 	pm_runtime_put(priv->device);
6692 
6693 	return ret;
6694 }
6695 
6696 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6697 {
6698 	struct stmmac_priv *priv = netdev_priv(ndev);
6699 	bool is_double = false;
6700 	int ret;
6701 
6702 	ret = pm_runtime_resume_and_get(priv->device);
6703 	if (ret < 0)
6704 		return ret;
6705 
6706 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6707 		is_double = true;
6708 
6709 	clear_bit(vid, priv->active_vlans);
6710 
6711 	if (priv->hw->num_vlan) {
6712 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6713 		if (ret)
6714 			goto del_vlan_error;
6715 	}
6716 
6717 	ret = stmmac_vlan_update(priv, is_double);
6718 
6719 del_vlan_error:
6720 	pm_runtime_put(priv->device);
6721 
6722 	return ret;
6723 }
6724 
6725 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6726 {
6727 	struct stmmac_priv *priv = netdev_priv(dev);
6728 
6729 	switch (bpf->command) {
6730 	case XDP_SETUP_PROG:
6731 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6732 	case XDP_SETUP_XSK_POOL:
6733 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6734 					     bpf->xsk.queue_id);
6735 	default:
6736 		return -EOPNOTSUPP;
6737 	}
6738 }
6739 
6740 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6741 			   struct xdp_frame **frames, u32 flags)
6742 {
6743 	struct stmmac_priv *priv = netdev_priv(dev);
6744 	int cpu = smp_processor_id();
6745 	struct netdev_queue *nq;
6746 	int i, nxmit = 0;
6747 	int queue;
6748 
6749 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6750 		return -ENETDOWN;
6751 
6752 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6753 		return -EINVAL;
6754 
6755 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6756 	nq = netdev_get_tx_queue(priv->dev, queue);
6757 
6758 	__netif_tx_lock(nq, cpu);
6759 	/* Avoids TX time-out as we are sharing with slow path */
6760 	txq_trans_cond_update(nq);
6761 
6762 	for (i = 0; i < num_frames; i++) {
6763 		int res;
6764 
6765 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6766 		if (res == STMMAC_XDP_CONSUMED)
6767 			break;
6768 
6769 		nxmit++;
6770 	}
6771 
6772 	if (flags & XDP_XMIT_FLUSH) {
6773 		stmmac_flush_tx_descriptors(priv, queue);
6774 		stmmac_tx_timer_arm(priv, queue);
6775 	}
6776 
6777 	__netif_tx_unlock(nq);
6778 
6779 	return nxmit;
6780 }
6781 
6782 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6783 {
6784 	struct stmmac_channel *ch = &priv->channel[queue];
6785 	unsigned long flags;
6786 
6787 	spin_lock_irqsave(&ch->lock, flags);
6788 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6789 	spin_unlock_irqrestore(&ch->lock, flags);
6790 
6791 	stmmac_stop_rx_dma(priv, queue);
6792 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6793 }
6794 
6795 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6796 {
6797 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6798 	struct stmmac_channel *ch = &priv->channel[queue];
6799 	unsigned long flags;
6800 	u32 buf_size;
6801 	int ret;
6802 
6803 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6804 	if (ret) {
6805 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6806 		return;
6807 	}
6808 
6809 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6810 	if (ret) {
6811 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6812 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6813 		return;
6814 	}
6815 
6816 	stmmac_reset_rx_queue(priv, queue);
6817 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6818 
6819 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6820 			    rx_q->dma_rx_phy, rx_q->queue_index);
6821 
6822 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6823 			     sizeof(struct dma_desc));
6824 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6825 			       rx_q->rx_tail_addr, rx_q->queue_index);
6826 
6827 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6828 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6829 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6830 				      buf_size,
6831 				      rx_q->queue_index);
6832 	} else {
6833 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6834 				      priv->dma_conf.dma_buf_sz,
6835 				      rx_q->queue_index);
6836 	}
6837 
6838 	stmmac_start_rx_dma(priv, queue);
6839 
6840 	spin_lock_irqsave(&ch->lock, flags);
6841 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6842 	spin_unlock_irqrestore(&ch->lock, flags);
6843 }
6844 
6845 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6846 {
6847 	struct stmmac_channel *ch = &priv->channel[queue];
6848 	unsigned long flags;
6849 
6850 	spin_lock_irqsave(&ch->lock, flags);
6851 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6852 	spin_unlock_irqrestore(&ch->lock, flags);
6853 
6854 	stmmac_stop_tx_dma(priv, queue);
6855 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6856 }
6857 
6858 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6859 {
6860 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6861 	struct stmmac_channel *ch = &priv->channel[queue];
6862 	unsigned long flags;
6863 	int ret;
6864 
6865 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6866 	if (ret) {
6867 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6868 		return;
6869 	}
6870 
6871 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6872 	if (ret) {
6873 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6874 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6875 		return;
6876 	}
6877 
6878 	stmmac_reset_tx_queue(priv, queue);
6879 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6880 
6881 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6882 			    tx_q->dma_tx_phy, tx_q->queue_index);
6883 
6884 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6885 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6886 
6887 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6888 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6889 			       tx_q->tx_tail_addr, tx_q->queue_index);
6890 
6891 	stmmac_start_tx_dma(priv, queue);
6892 
6893 	spin_lock_irqsave(&ch->lock, flags);
6894 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6895 	spin_unlock_irqrestore(&ch->lock, flags);
6896 }
6897 
6898 void stmmac_xdp_release(struct net_device *dev)
6899 {
6900 	struct stmmac_priv *priv = netdev_priv(dev);
6901 	u32 chan;
6902 
6903 	/* Ensure tx function is not running */
6904 	netif_tx_disable(dev);
6905 
6906 	/* Disable NAPI process */
6907 	stmmac_disable_all_queues(priv);
6908 
6909 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6910 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6911 
6912 	/* Free the IRQ lines */
6913 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6914 
6915 	/* Stop TX/RX DMA channels */
6916 	stmmac_stop_all_dma(priv);
6917 
6918 	/* Release and free the Rx/Tx resources */
6919 	free_dma_desc_resources(priv, &priv->dma_conf);
6920 
6921 	/* Disable the MAC Rx/Tx */
6922 	stmmac_mac_set(priv, priv->ioaddr, false);
6923 
6924 	/* set trans_start so we don't get spurious
6925 	 * watchdogs during reset
6926 	 */
6927 	netif_trans_update(dev);
6928 	netif_carrier_off(dev);
6929 }
6930 
6931 int stmmac_xdp_open(struct net_device *dev)
6932 {
6933 	struct stmmac_priv *priv = netdev_priv(dev);
6934 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6935 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6936 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6937 	struct stmmac_rx_queue *rx_q;
6938 	struct stmmac_tx_queue *tx_q;
6939 	u32 buf_size;
6940 	bool sph_en;
6941 	u32 chan;
6942 	int ret;
6943 
6944 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6945 	if (ret < 0) {
6946 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6947 			   __func__);
6948 		goto dma_desc_error;
6949 	}
6950 
6951 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6952 	if (ret < 0) {
6953 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6954 			   __func__);
6955 		goto init_error;
6956 	}
6957 
6958 	stmmac_reset_queues_param(priv);
6959 
6960 	/* DMA CSR Channel configuration */
6961 	for (chan = 0; chan < dma_csr_ch; chan++) {
6962 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6963 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6964 	}
6965 
6966 	/* Adjust Split header */
6967 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6968 
6969 	/* DMA RX Channel Configuration */
6970 	for (chan = 0; chan < rx_cnt; chan++) {
6971 		rx_q = &priv->dma_conf.rx_queue[chan];
6972 
6973 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6974 				    rx_q->dma_rx_phy, chan);
6975 
6976 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6977 				     (rx_q->buf_alloc_num *
6978 				      sizeof(struct dma_desc));
6979 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6980 				       rx_q->rx_tail_addr, chan);
6981 
6982 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6983 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6984 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6985 					      buf_size,
6986 					      rx_q->queue_index);
6987 		} else {
6988 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6989 					      priv->dma_conf.dma_buf_sz,
6990 					      rx_q->queue_index);
6991 		}
6992 
6993 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6994 	}
6995 
6996 	/* DMA TX Channel Configuration */
6997 	for (chan = 0; chan < tx_cnt; chan++) {
6998 		tx_q = &priv->dma_conf.tx_queue[chan];
6999 
7000 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7001 				    tx_q->dma_tx_phy, chan);
7002 
7003 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7004 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7005 				       tx_q->tx_tail_addr, chan);
7006 
7007 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7008 		tx_q->txtimer.function = stmmac_tx_timer;
7009 	}
7010 
7011 	/* Enable the MAC Rx/Tx */
7012 	stmmac_mac_set(priv, priv->ioaddr, true);
7013 
7014 	/* Start Rx & Tx DMA Channels */
7015 	stmmac_start_all_dma(priv);
7016 
7017 	ret = stmmac_request_irq(dev);
7018 	if (ret)
7019 		goto irq_error;
7020 
7021 	/* Enable NAPI process*/
7022 	stmmac_enable_all_queues(priv);
7023 	netif_carrier_on(dev);
7024 	netif_tx_start_all_queues(dev);
7025 	stmmac_enable_all_dma_irq(priv);
7026 
7027 	return 0;
7028 
7029 irq_error:
7030 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7031 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7032 
7033 	stmmac_hw_teardown(dev);
7034 init_error:
7035 	free_dma_desc_resources(priv, &priv->dma_conf);
7036 dma_desc_error:
7037 	return ret;
7038 }
7039 
7040 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7041 {
7042 	struct stmmac_priv *priv = netdev_priv(dev);
7043 	struct stmmac_rx_queue *rx_q;
7044 	struct stmmac_tx_queue *tx_q;
7045 	struct stmmac_channel *ch;
7046 
7047 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7048 	    !netif_carrier_ok(priv->dev))
7049 		return -ENETDOWN;
7050 
7051 	if (!stmmac_xdp_is_enabled(priv))
7052 		return -EINVAL;
7053 
7054 	if (queue >= priv->plat->rx_queues_to_use ||
7055 	    queue >= priv->plat->tx_queues_to_use)
7056 		return -EINVAL;
7057 
7058 	rx_q = &priv->dma_conf.rx_queue[queue];
7059 	tx_q = &priv->dma_conf.tx_queue[queue];
7060 	ch = &priv->channel[queue];
7061 
7062 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7063 		return -EINVAL;
7064 
7065 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7066 		/* EQoS does not have per-DMA channel SW interrupt,
7067 		 * so we schedule RX Napi straight-away.
7068 		 */
7069 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7070 			__napi_schedule(&ch->rxtx_napi);
7071 	}
7072 
7073 	return 0;
7074 }
7075 
7076 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7077 {
7078 	struct stmmac_priv *priv = netdev_priv(dev);
7079 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7080 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7081 	unsigned int start;
7082 	int q;
7083 
7084 	for (q = 0; q < tx_cnt; q++) {
7085 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7086 		u64 tx_packets;
7087 		u64 tx_bytes;
7088 
7089 		do {
7090 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7091 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7092 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7093 		do {
7094 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7095 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7096 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7097 
7098 		stats->tx_packets += tx_packets;
7099 		stats->tx_bytes += tx_bytes;
7100 	}
7101 
7102 	for (q = 0; q < rx_cnt; q++) {
7103 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7104 		u64 rx_packets;
7105 		u64 rx_bytes;
7106 
7107 		do {
7108 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7109 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7110 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7111 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7112 
7113 		stats->rx_packets += rx_packets;
7114 		stats->rx_bytes += rx_bytes;
7115 	}
7116 
7117 	stats->rx_dropped = priv->xstats.rx_dropped;
7118 	stats->rx_errors = priv->xstats.rx_errors;
7119 	stats->tx_dropped = priv->xstats.tx_dropped;
7120 	stats->tx_errors = priv->xstats.tx_errors;
7121 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7122 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7123 	stats->rx_length_errors = priv->xstats.rx_length;
7124 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7125 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7126 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7127 }
7128 
7129 static const struct net_device_ops stmmac_netdev_ops = {
7130 	.ndo_open = stmmac_open,
7131 	.ndo_start_xmit = stmmac_xmit,
7132 	.ndo_stop = stmmac_release,
7133 	.ndo_change_mtu = stmmac_change_mtu,
7134 	.ndo_fix_features = stmmac_fix_features,
7135 	.ndo_set_features = stmmac_set_features,
7136 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7137 	.ndo_tx_timeout = stmmac_tx_timeout,
7138 	.ndo_eth_ioctl = stmmac_ioctl,
7139 	.ndo_get_stats64 = stmmac_get_stats64,
7140 	.ndo_setup_tc = stmmac_setup_tc,
7141 	.ndo_select_queue = stmmac_select_queue,
7142 	.ndo_set_mac_address = stmmac_set_mac_address,
7143 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7144 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7145 	.ndo_bpf = stmmac_bpf,
7146 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7147 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7148 };
7149 
7150 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7151 {
7152 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7153 		return;
7154 	if (test_bit(STMMAC_DOWN, &priv->state))
7155 		return;
7156 
7157 	netdev_err(priv->dev, "Reset adapter.\n");
7158 
7159 	rtnl_lock();
7160 	netif_trans_update(priv->dev);
7161 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7162 		usleep_range(1000, 2000);
7163 
7164 	set_bit(STMMAC_DOWN, &priv->state);
7165 	dev_close(priv->dev);
7166 	dev_open(priv->dev, NULL);
7167 	clear_bit(STMMAC_DOWN, &priv->state);
7168 	clear_bit(STMMAC_RESETING, &priv->state);
7169 	rtnl_unlock();
7170 }
7171 
7172 static void stmmac_service_task(struct work_struct *work)
7173 {
7174 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7175 			service_task);
7176 
7177 	stmmac_reset_subtask(priv);
7178 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7179 }
7180 
7181 /**
7182  *  stmmac_hw_init - Init the MAC device
7183  *  @priv: driver private structure
7184  *  Description: this function is to configure the MAC device according to
7185  *  some platform parameters or the HW capability register. It prepares the
7186  *  driver to use either ring or chain modes and to setup either enhanced or
7187  *  normal descriptors.
7188  */
7189 static int stmmac_hw_init(struct stmmac_priv *priv)
7190 {
7191 	int ret;
7192 
7193 	/* dwmac-sun8i only work in chain mode */
7194 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7195 		chain_mode = 1;
7196 	priv->chain_mode = chain_mode;
7197 
7198 	/* Initialize HW Interface */
7199 	ret = stmmac_hwif_init(priv);
7200 	if (ret)
7201 		return ret;
7202 
7203 	/* Get the HW capability (new GMAC newer than 3.50a) */
7204 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7205 	if (priv->hw_cap_support) {
7206 		dev_info(priv->device, "DMA HW capability register supported\n");
7207 
7208 		/* We can override some gmac/dma configuration fields: e.g.
7209 		 * enh_desc, tx_coe (e.g. that are passed through the
7210 		 * platform) with the values from the HW capability
7211 		 * register (if supported).
7212 		 */
7213 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7214 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7215 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7216 		priv->hw->pmt = priv->plat->pmt;
7217 		if (priv->dma_cap.hash_tb_sz) {
7218 			priv->hw->multicast_filter_bins =
7219 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7220 			priv->hw->mcast_bits_log2 =
7221 					ilog2(priv->hw->multicast_filter_bins);
7222 		}
7223 
7224 		/* TXCOE doesn't work in thresh DMA mode */
7225 		if (priv->plat->force_thresh_dma_mode)
7226 			priv->plat->tx_coe = 0;
7227 		else
7228 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7229 
7230 		/* In case of GMAC4 rx_coe is from HW cap register. */
7231 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7232 
7233 		if (priv->dma_cap.rx_coe_type2)
7234 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7235 		else if (priv->dma_cap.rx_coe_type1)
7236 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7237 
7238 	} else {
7239 		dev_info(priv->device, "No HW DMA feature register supported\n");
7240 	}
7241 
7242 	if (priv->plat->rx_coe) {
7243 		priv->hw->rx_csum = priv->plat->rx_coe;
7244 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7245 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7246 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7247 	}
7248 	if (priv->plat->tx_coe)
7249 		dev_info(priv->device, "TX Checksum insertion supported\n");
7250 
7251 	if (priv->plat->pmt) {
7252 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7253 		device_set_wakeup_capable(priv->device, 1);
7254 	}
7255 
7256 	if (priv->dma_cap.tsoen)
7257 		dev_info(priv->device, "TSO supported\n");
7258 
7259 	priv->hw->vlan_fail_q_en =
7260 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7261 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7262 
7263 	/* Run HW quirks, if any */
7264 	if (priv->hwif_quirks) {
7265 		ret = priv->hwif_quirks(priv);
7266 		if (ret)
7267 			return ret;
7268 	}
7269 
7270 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7271 	 * In some case, for example on bugged HW this feature
7272 	 * has to be disable and this can be done by passing the
7273 	 * riwt_off field from the platform.
7274 	 */
7275 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7276 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7277 		priv->use_riwt = 1;
7278 		dev_info(priv->device,
7279 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7280 	}
7281 
7282 	return 0;
7283 }
7284 
7285 static void stmmac_napi_add(struct net_device *dev)
7286 {
7287 	struct stmmac_priv *priv = netdev_priv(dev);
7288 	u32 queue, maxq;
7289 
7290 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7291 
7292 	for (queue = 0; queue < maxq; queue++) {
7293 		struct stmmac_channel *ch = &priv->channel[queue];
7294 
7295 		ch->priv_data = priv;
7296 		ch->index = queue;
7297 		spin_lock_init(&ch->lock);
7298 
7299 		if (queue < priv->plat->rx_queues_to_use) {
7300 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7301 		}
7302 		if (queue < priv->plat->tx_queues_to_use) {
7303 			netif_napi_add_tx(dev, &ch->tx_napi,
7304 					  stmmac_napi_poll_tx);
7305 		}
7306 		if (queue < priv->plat->rx_queues_to_use &&
7307 		    queue < priv->plat->tx_queues_to_use) {
7308 			netif_napi_add(dev, &ch->rxtx_napi,
7309 				       stmmac_napi_poll_rxtx);
7310 		}
7311 	}
7312 }
7313 
7314 static void stmmac_napi_del(struct net_device *dev)
7315 {
7316 	struct stmmac_priv *priv = netdev_priv(dev);
7317 	u32 queue, maxq;
7318 
7319 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7320 
7321 	for (queue = 0; queue < maxq; queue++) {
7322 		struct stmmac_channel *ch = &priv->channel[queue];
7323 
7324 		if (queue < priv->plat->rx_queues_to_use)
7325 			netif_napi_del(&ch->rx_napi);
7326 		if (queue < priv->plat->tx_queues_to_use)
7327 			netif_napi_del(&ch->tx_napi);
7328 		if (queue < priv->plat->rx_queues_to_use &&
7329 		    queue < priv->plat->tx_queues_to_use) {
7330 			netif_napi_del(&ch->rxtx_napi);
7331 		}
7332 	}
7333 }
7334 
7335 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7336 {
7337 	struct stmmac_priv *priv = netdev_priv(dev);
7338 	int ret = 0, i;
7339 
7340 	if (netif_running(dev))
7341 		stmmac_release(dev);
7342 
7343 	stmmac_napi_del(dev);
7344 
7345 	priv->plat->rx_queues_to_use = rx_cnt;
7346 	priv->plat->tx_queues_to_use = tx_cnt;
7347 	if (!netif_is_rxfh_configured(dev))
7348 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7349 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7350 									rx_cnt);
7351 
7352 	stmmac_napi_add(dev);
7353 
7354 	if (netif_running(dev))
7355 		ret = stmmac_open(dev);
7356 
7357 	return ret;
7358 }
7359 
7360 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7361 {
7362 	struct stmmac_priv *priv = netdev_priv(dev);
7363 	int ret = 0;
7364 
7365 	if (netif_running(dev))
7366 		stmmac_release(dev);
7367 
7368 	priv->dma_conf.dma_rx_size = rx_size;
7369 	priv->dma_conf.dma_tx_size = tx_size;
7370 
7371 	if (netif_running(dev))
7372 		ret = stmmac_open(dev);
7373 
7374 	return ret;
7375 }
7376 
7377 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7378 static void stmmac_fpe_lp_task(struct work_struct *work)
7379 {
7380 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7381 						fpe_task);
7382 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7383 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7384 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7385 	bool *hs_enable = &fpe_cfg->hs_enable;
7386 	bool *enable = &fpe_cfg->enable;
7387 	int retries = 20;
7388 
7389 	while (retries-- > 0) {
7390 		/* Bail out immediately if FPE handshake is OFF */
7391 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7392 			break;
7393 
7394 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7395 		    *lp_state == FPE_STATE_ENTERING_ON) {
7396 			stmmac_fpe_configure(priv, priv->ioaddr,
7397 					     fpe_cfg,
7398 					     priv->plat->tx_queues_to_use,
7399 					     priv->plat->rx_queues_to_use,
7400 					     *enable);
7401 
7402 			netdev_info(priv->dev, "configured FPE\n");
7403 
7404 			*lo_state = FPE_STATE_ON;
7405 			*lp_state = FPE_STATE_ON;
7406 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7407 			break;
7408 		}
7409 
7410 		if ((*lo_state == FPE_STATE_CAPABLE ||
7411 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7412 		     *lp_state != FPE_STATE_ON) {
7413 			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
7414 				    *lo_state, *lp_state);
7415 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7416 						fpe_cfg,
7417 						MPACKET_VERIFY);
7418 		}
7419 		/* Sleep then retry */
7420 		msleep(500);
7421 	}
7422 
7423 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7424 }
7425 
7426 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7427 {
7428 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7429 		if (enable) {
7430 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7431 						priv->plat->fpe_cfg,
7432 						MPACKET_VERIFY);
7433 		} else {
7434 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7435 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7436 		}
7437 
7438 		priv->plat->fpe_cfg->hs_enable = enable;
7439 	}
7440 }
7441 
7442 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7443 {
7444 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7445 	struct dma_desc *desc_contains_ts = ctx->desc;
7446 	struct stmmac_priv *priv = ctx->priv;
7447 	struct dma_desc *ndesc = ctx->ndesc;
7448 	struct dma_desc *desc = ctx->desc;
7449 	u64 ns = 0;
7450 
7451 	if (!priv->hwts_rx_en)
7452 		return -ENODATA;
7453 
7454 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7455 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7456 		desc_contains_ts = ndesc;
7457 
7458 	/* Check if timestamp is available */
7459 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7460 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7461 		ns -= priv->plat->cdc_error_adj;
7462 		*timestamp = ns_to_ktime(ns);
7463 		return 0;
7464 	}
7465 
7466 	return -ENODATA;
7467 }
7468 
7469 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7470 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7471 };
7472 
7473 /**
7474  * stmmac_dvr_probe
7475  * @device: device pointer
7476  * @plat_dat: platform data pointer
7477  * @res: stmmac resource pointer
7478  * Description: this is the main probe function used to
7479  * call the alloc_etherdev, allocate the priv structure.
7480  * Return:
7481  * returns 0 on success, otherwise errno.
7482  */
7483 int stmmac_dvr_probe(struct device *device,
7484 		     struct plat_stmmacenet_data *plat_dat,
7485 		     struct stmmac_resources *res)
7486 {
7487 	struct net_device *ndev = NULL;
7488 	struct stmmac_priv *priv;
7489 	u32 rxq;
7490 	int i, ret = 0;
7491 
7492 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7493 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7494 	if (!ndev)
7495 		return -ENOMEM;
7496 
7497 	SET_NETDEV_DEV(ndev, device);
7498 
7499 	priv = netdev_priv(ndev);
7500 	priv->device = device;
7501 	priv->dev = ndev;
7502 
7503 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7504 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7505 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7506 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7507 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7508 	}
7509 
7510 	priv->xstats.pcpu_stats =
7511 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7512 	if (!priv->xstats.pcpu_stats)
7513 		return -ENOMEM;
7514 
7515 	stmmac_set_ethtool_ops(ndev);
7516 	priv->pause = pause;
7517 	priv->plat = plat_dat;
7518 	priv->ioaddr = res->addr;
7519 	priv->dev->base_addr = (unsigned long)res->addr;
7520 	priv->plat->dma_cfg->multi_msi_en =
7521 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7522 
7523 	priv->dev->irq = res->irq;
7524 	priv->wol_irq = res->wol_irq;
7525 	priv->lpi_irq = res->lpi_irq;
7526 	priv->sfty_irq = res->sfty_irq;
7527 	priv->sfty_ce_irq = res->sfty_ce_irq;
7528 	priv->sfty_ue_irq = res->sfty_ue_irq;
7529 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7530 		priv->rx_irq[i] = res->rx_irq[i];
7531 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7532 		priv->tx_irq[i] = res->tx_irq[i];
7533 
7534 	if (!is_zero_ether_addr(res->mac))
7535 		eth_hw_addr_set(priv->dev, res->mac);
7536 
7537 	dev_set_drvdata(device, priv->dev);
7538 
7539 	/* Verify driver arguments */
7540 	stmmac_verify_args();
7541 
7542 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7543 	if (!priv->af_xdp_zc_qps)
7544 		return -ENOMEM;
7545 
7546 	/* Allocate workqueue */
7547 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7548 	if (!priv->wq) {
7549 		dev_err(priv->device, "failed to create workqueue\n");
7550 		ret = -ENOMEM;
7551 		goto error_wq_init;
7552 	}
7553 
7554 	INIT_WORK(&priv->service_task, stmmac_service_task);
7555 
7556 	/* Initialize Link Partner FPE workqueue */
7557 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7558 
7559 	/* Override with kernel parameters if supplied XXX CRS XXX
7560 	 * this needs to have multiple instances
7561 	 */
7562 	if ((phyaddr >= 0) && (phyaddr <= 31))
7563 		priv->plat->phy_addr = phyaddr;
7564 
7565 	if (priv->plat->stmmac_rst) {
7566 		ret = reset_control_assert(priv->plat->stmmac_rst);
7567 		reset_control_deassert(priv->plat->stmmac_rst);
7568 		/* Some reset controllers have only reset callback instead of
7569 		 * assert + deassert callbacks pair.
7570 		 */
7571 		if (ret == -ENOTSUPP)
7572 			reset_control_reset(priv->plat->stmmac_rst);
7573 	}
7574 
7575 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7576 	if (ret == -ENOTSUPP)
7577 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7578 			ERR_PTR(ret));
7579 
7580 	/* Wait a bit for the reset to take effect */
7581 	udelay(10);
7582 
7583 	/* Init MAC and get the capabilities */
7584 	ret = stmmac_hw_init(priv);
7585 	if (ret)
7586 		goto error_hw_init;
7587 
7588 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7589 	 */
7590 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7591 		priv->plat->dma_cfg->dche = false;
7592 
7593 	stmmac_check_ether_addr(priv);
7594 
7595 	ndev->netdev_ops = &stmmac_netdev_ops;
7596 
7597 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7598 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7599 
7600 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7601 			    NETIF_F_RXCSUM;
7602 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7603 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7604 
7605 	ret = stmmac_tc_init(priv, priv);
7606 	if (!ret) {
7607 		ndev->hw_features |= NETIF_F_HW_TC;
7608 	}
7609 
7610 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7611 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7612 		if (priv->plat->has_gmac4)
7613 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7614 		priv->tso = true;
7615 		dev_info(priv->device, "TSO feature enabled\n");
7616 	}
7617 
7618 	if (priv->dma_cap.sphen &&
7619 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7620 		ndev->hw_features |= NETIF_F_GRO;
7621 		priv->sph_cap = true;
7622 		priv->sph = priv->sph_cap;
7623 		dev_info(priv->device, "SPH feature enabled\n");
7624 	}
7625 
7626 	/* Ideally our host DMA address width is the same as for the
7627 	 * device. However, it may differ and then we have to use our
7628 	 * host DMA width for allocation and the device DMA width for
7629 	 * register handling.
7630 	 */
7631 	if (priv->plat->host_dma_width)
7632 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7633 	else
7634 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7635 
7636 	if (priv->dma_cap.host_dma_width) {
7637 		ret = dma_set_mask_and_coherent(device,
7638 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7639 		if (!ret) {
7640 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7641 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7642 
7643 			/*
7644 			 * If more than 32 bits can be addressed, make sure to
7645 			 * enable enhanced addressing mode.
7646 			 */
7647 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7648 				priv->plat->dma_cfg->eame = true;
7649 		} else {
7650 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7651 			if (ret) {
7652 				dev_err(priv->device, "Failed to set DMA Mask\n");
7653 				goto error_hw_init;
7654 			}
7655 
7656 			priv->dma_cap.host_dma_width = 32;
7657 		}
7658 	}
7659 
7660 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7661 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7662 #ifdef STMMAC_VLAN_TAG_USED
7663 	/* Both mac100 and gmac support receive VLAN tag detection */
7664 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7665 	ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7666 	priv->hw->hw_vlan_en = true;
7667 
7668 	if (priv->dma_cap.vlhash) {
7669 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7670 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7671 	}
7672 	if (priv->dma_cap.vlins) {
7673 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7674 		if (priv->dma_cap.dvlan)
7675 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7676 	}
7677 #endif
7678 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7679 
7680 	priv->xstats.threshold = tc;
7681 
7682 	/* Initialize RSS */
7683 	rxq = priv->plat->rx_queues_to_use;
7684 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7685 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7686 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7687 
7688 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7689 		ndev->features |= NETIF_F_RXHASH;
7690 
7691 	ndev->vlan_features |= ndev->features;
7692 	/* TSO doesn't work on VLANs yet */
7693 	ndev->vlan_features &= ~NETIF_F_TSO;
7694 
7695 	/* MTU range: 46 - hw-specific max */
7696 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7697 	if (priv->plat->has_xgmac)
7698 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7699 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7700 		ndev->max_mtu = JUMBO_LEN;
7701 	else
7702 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7703 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7704 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7705 	 */
7706 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7707 	    (priv->plat->maxmtu >= ndev->min_mtu))
7708 		ndev->max_mtu = priv->plat->maxmtu;
7709 	else if (priv->plat->maxmtu < ndev->min_mtu)
7710 		dev_warn(priv->device,
7711 			 "%s: warning: maxmtu having invalid value (%d)\n",
7712 			 __func__, priv->plat->maxmtu);
7713 
7714 	if (flow_ctrl)
7715 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7716 
7717 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7718 
7719 	/* Setup channels NAPI */
7720 	stmmac_napi_add(ndev);
7721 
7722 	mutex_init(&priv->lock);
7723 
7724 	/* If a specific clk_csr value is passed from the platform
7725 	 * this means that the CSR Clock Range selection cannot be
7726 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7727 	 * set the MDC clock dynamically according to the csr actual
7728 	 * clock input.
7729 	 */
7730 	if (priv->plat->clk_csr >= 0)
7731 		priv->clk_csr = priv->plat->clk_csr;
7732 	else
7733 		stmmac_clk_csr_set(priv);
7734 
7735 	stmmac_check_pcs_mode(priv);
7736 
7737 	pm_runtime_get_noresume(device);
7738 	pm_runtime_set_active(device);
7739 	if (!pm_runtime_enabled(device))
7740 		pm_runtime_enable(device);
7741 
7742 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7743 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7744 		/* MDIO bus Registration */
7745 		ret = stmmac_mdio_register(ndev);
7746 		if (ret < 0) {
7747 			dev_err_probe(priv->device, ret,
7748 				      "%s: MDIO bus (id: %d) registration failed\n",
7749 				      __func__, priv->plat->bus_id);
7750 			goto error_mdio_register;
7751 		}
7752 	}
7753 
7754 	if (priv->plat->speed_mode_2500)
7755 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7756 
7757 	ret = stmmac_pcs_setup(ndev);
7758 	if (ret)
7759 		goto error_pcs_setup;
7760 
7761 	ret = stmmac_phy_setup(priv);
7762 	if (ret) {
7763 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7764 		goto error_phy_setup;
7765 	}
7766 
7767 	ret = register_netdev(ndev);
7768 	if (ret) {
7769 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7770 			__func__, ret);
7771 		goto error_netdev_register;
7772 	}
7773 
7774 #ifdef CONFIG_DEBUG_FS
7775 	stmmac_init_fs(ndev);
7776 #endif
7777 
7778 	if (priv->plat->dump_debug_regs)
7779 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7780 
7781 	/* Let pm_runtime_put() disable the clocks.
7782 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7783 	 */
7784 	pm_runtime_put(device);
7785 
7786 	return ret;
7787 
7788 error_netdev_register:
7789 	phylink_destroy(priv->phylink);
7790 error_phy_setup:
7791 	stmmac_pcs_clean(ndev);
7792 error_pcs_setup:
7793 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7794 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7795 		stmmac_mdio_unregister(ndev);
7796 error_mdio_register:
7797 	stmmac_napi_del(ndev);
7798 error_hw_init:
7799 	destroy_workqueue(priv->wq);
7800 error_wq_init:
7801 	bitmap_free(priv->af_xdp_zc_qps);
7802 
7803 	return ret;
7804 }
7805 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7806 
7807 /**
7808  * stmmac_dvr_remove
7809  * @dev: device pointer
7810  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7811  * changes the link status, releases the DMA descriptor rings.
7812  */
7813 void stmmac_dvr_remove(struct device *dev)
7814 {
7815 	struct net_device *ndev = dev_get_drvdata(dev);
7816 	struct stmmac_priv *priv = netdev_priv(ndev);
7817 
7818 	netdev_info(priv->dev, "%s: removing driver", __func__);
7819 
7820 	pm_runtime_get_sync(dev);
7821 
7822 	stmmac_stop_all_dma(priv);
7823 	stmmac_mac_set(priv, priv->ioaddr, false);
7824 	netif_carrier_off(ndev);
7825 	unregister_netdev(ndev);
7826 
7827 #ifdef CONFIG_DEBUG_FS
7828 	stmmac_exit_fs(ndev);
7829 #endif
7830 	phylink_destroy(priv->phylink);
7831 	if (priv->plat->stmmac_rst)
7832 		reset_control_assert(priv->plat->stmmac_rst);
7833 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7834 
7835 	stmmac_pcs_clean(ndev);
7836 
7837 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7838 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7839 		stmmac_mdio_unregister(ndev);
7840 	destroy_workqueue(priv->wq);
7841 	mutex_destroy(&priv->lock);
7842 	bitmap_free(priv->af_xdp_zc_qps);
7843 
7844 	pm_runtime_disable(dev);
7845 	pm_runtime_put_noidle(dev);
7846 }
7847 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7848 
7849 /**
7850  * stmmac_suspend - suspend callback
7851  * @dev: device pointer
7852  * Description: this is the function to suspend the device and it is called
7853  * by the platform driver to stop the network queue, release the resources,
7854  * program the PMT register (for WoL), clean and release driver resources.
7855  */
7856 int stmmac_suspend(struct device *dev)
7857 {
7858 	struct net_device *ndev = dev_get_drvdata(dev);
7859 	struct stmmac_priv *priv = netdev_priv(ndev);
7860 	u32 chan;
7861 
7862 	if (!ndev || !netif_running(ndev))
7863 		return 0;
7864 
7865 	mutex_lock(&priv->lock);
7866 
7867 	netif_device_detach(ndev);
7868 
7869 	stmmac_disable_all_queues(priv);
7870 
7871 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7872 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7873 
7874 	if (priv->eee_enabled) {
7875 		priv->tx_path_in_lpi_mode = false;
7876 		del_timer_sync(&priv->eee_ctrl_timer);
7877 	}
7878 
7879 	/* Stop TX/RX DMA */
7880 	stmmac_stop_all_dma(priv);
7881 
7882 	if (priv->plat->serdes_powerdown)
7883 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7884 
7885 	/* Enable Power down mode by programming the PMT regs */
7886 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7887 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7888 		priv->irq_wake = 1;
7889 	} else {
7890 		stmmac_mac_set(priv, priv->ioaddr, false);
7891 		pinctrl_pm_select_sleep_state(priv->device);
7892 	}
7893 
7894 	mutex_unlock(&priv->lock);
7895 
7896 	rtnl_lock();
7897 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7898 		phylink_suspend(priv->phylink, true);
7899 	} else {
7900 		if (device_may_wakeup(priv->device))
7901 			phylink_speed_down(priv->phylink, false);
7902 		phylink_suspend(priv->phylink, false);
7903 	}
7904 	rtnl_unlock();
7905 
7906 	if (priv->dma_cap.fpesel) {
7907 		/* Disable FPE */
7908 		stmmac_fpe_configure(priv, priv->ioaddr,
7909 				     priv->plat->fpe_cfg,
7910 				     priv->plat->tx_queues_to_use,
7911 				     priv->plat->rx_queues_to_use, false);
7912 
7913 		stmmac_fpe_handshake(priv, false);
7914 		stmmac_fpe_stop_wq(priv);
7915 	}
7916 
7917 	priv->speed = SPEED_UNKNOWN;
7918 	return 0;
7919 }
7920 EXPORT_SYMBOL_GPL(stmmac_suspend);
7921 
7922 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7923 {
7924 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7925 
7926 	rx_q->cur_rx = 0;
7927 	rx_q->dirty_rx = 0;
7928 }
7929 
7930 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7931 {
7932 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7933 
7934 	tx_q->cur_tx = 0;
7935 	tx_q->dirty_tx = 0;
7936 	tx_q->mss = 0;
7937 
7938 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7939 }
7940 
7941 /**
7942  * stmmac_reset_queues_param - reset queue parameters
7943  * @priv: device pointer
7944  */
7945 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7946 {
7947 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7948 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7949 	u32 queue;
7950 
7951 	for (queue = 0; queue < rx_cnt; queue++)
7952 		stmmac_reset_rx_queue(priv, queue);
7953 
7954 	for (queue = 0; queue < tx_cnt; queue++)
7955 		stmmac_reset_tx_queue(priv, queue);
7956 }
7957 
7958 /**
7959  * stmmac_resume - resume callback
7960  * @dev: device pointer
7961  * Description: when resume this function is invoked to setup the DMA and CORE
7962  * in a usable state.
7963  */
7964 int stmmac_resume(struct device *dev)
7965 {
7966 	struct net_device *ndev = dev_get_drvdata(dev);
7967 	struct stmmac_priv *priv = netdev_priv(ndev);
7968 	int ret;
7969 
7970 	if (!netif_running(ndev))
7971 		return 0;
7972 
7973 	/* Power Down bit, into the PM register, is cleared
7974 	 * automatically as soon as a magic packet or a Wake-up frame
7975 	 * is received. Anyway, it's better to manually clear
7976 	 * this bit because it can generate problems while resuming
7977 	 * from another devices (e.g. serial console).
7978 	 */
7979 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7980 		mutex_lock(&priv->lock);
7981 		stmmac_pmt(priv, priv->hw, 0);
7982 		mutex_unlock(&priv->lock);
7983 		priv->irq_wake = 0;
7984 	} else {
7985 		pinctrl_pm_select_default_state(priv->device);
7986 		/* reset the phy so that it's ready */
7987 		if (priv->mii)
7988 			stmmac_mdio_reset(priv->mii);
7989 	}
7990 
7991 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7992 	    priv->plat->serdes_powerup) {
7993 		ret = priv->plat->serdes_powerup(ndev,
7994 						 priv->plat->bsp_priv);
7995 
7996 		if (ret < 0)
7997 			return ret;
7998 	}
7999 
8000 	rtnl_lock();
8001 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
8002 		phylink_resume(priv->phylink);
8003 	} else {
8004 		phylink_resume(priv->phylink);
8005 		if (device_may_wakeup(priv->device))
8006 			phylink_speed_up(priv->phylink);
8007 	}
8008 	rtnl_unlock();
8009 
8010 	rtnl_lock();
8011 	mutex_lock(&priv->lock);
8012 
8013 	stmmac_reset_queues_param(priv);
8014 
8015 	stmmac_free_tx_skbufs(priv);
8016 	stmmac_clear_descriptors(priv, &priv->dma_conf);
8017 
8018 	stmmac_hw_setup(ndev, false);
8019 	stmmac_init_coalesce(priv);
8020 	stmmac_set_rx_mode(ndev);
8021 
8022 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8023 
8024 	stmmac_enable_all_queues(priv);
8025 	stmmac_enable_all_dma_irq(priv);
8026 
8027 	mutex_unlock(&priv->lock);
8028 	rtnl_unlock();
8029 
8030 	netif_device_attach(ndev);
8031 
8032 	return 0;
8033 }
8034 EXPORT_SYMBOL_GPL(stmmac_resume);
8035 
8036 #ifndef MODULE
8037 static int __init stmmac_cmdline_opt(char *str)
8038 {
8039 	char *opt;
8040 
8041 	if (!str || !*str)
8042 		return 1;
8043 	while ((opt = strsep(&str, ",")) != NULL) {
8044 		if (!strncmp(opt, "debug:", 6)) {
8045 			if (kstrtoint(opt + 6, 0, &debug))
8046 				goto err;
8047 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8048 			if (kstrtoint(opt + 8, 0, &phyaddr))
8049 				goto err;
8050 		} else if (!strncmp(opt, "buf_sz:", 7)) {
8051 			if (kstrtoint(opt + 7, 0, &buf_sz))
8052 				goto err;
8053 		} else if (!strncmp(opt, "tc:", 3)) {
8054 			if (kstrtoint(opt + 3, 0, &tc))
8055 				goto err;
8056 		} else if (!strncmp(opt, "watchdog:", 9)) {
8057 			if (kstrtoint(opt + 9, 0, &watchdog))
8058 				goto err;
8059 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8060 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8061 				goto err;
8062 		} else if (!strncmp(opt, "pause:", 6)) {
8063 			if (kstrtoint(opt + 6, 0, &pause))
8064 				goto err;
8065 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8066 			if (kstrtoint(opt + 10, 0, &eee_timer))
8067 				goto err;
8068 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8069 			if (kstrtoint(opt + 11, 0, &chain_mode))
8070 				goto err;
8071 		}
8072 	}
8073 	return 1;
8074 
8075 err:
8076 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8077 	return 1;
8078 }
8079 
8080 __setup("stmmaceth=", stmmac_cmdline_opt);
8081 #endif /* MODULE */
8082 
8083 static int __init stmmac_init(void)
8084 {
8085 #ifdef CONFIG_DEBUG_FS
8086 	/* Create debugfs main directory if it doesn't exist yet */
8087 	if (!stmmac_fs_dir)
8088 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8089 	register_netdevice_notifier(&stmmac_notifier);
8090 #endif
8091 
8092 	return 0;
8093 }
8094 
8095 static void __exit stmmac_exit(void)
8096 {
8097 #ifdef CONFIG_DEBUG_FS
8098 	unregister_netdevice_notifier(&stmmac_notifier);
8099 	debugfs_remove_recursive(stmmac_fs_dir);
8100 #endif
8101 }
8102 
8103 module_init(stmmac_init)
8104 module_exit(stmmac_exit)
8105 
8106 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8107 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8108 MODULE_LICENSE("GPL");
8109