xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision a3a02a52bcfcbcc4a637d4b68bf1bc391c9fad02)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allows the user to force the use of chain mode instead of ring mode
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
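/* Note: all of the module parameters above can also be set on the kernel
 * command line when the driver is built in, e.g. "stmmac.eee_timer=2000".
 */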
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
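/**
 * stmmac_bus_clks_config - enable or disable the bus clocks
 * @priv: driver private structure
 * @enabled: true to prepare/enable the clocks, false to disable/unprepare them
 * Description: gates the stmmac clock, the peripheral clock (pclk) and any
 * extra platform clocks handled through the clks_config() callback; on an
 * enable failure the clocks already enabled are rolled back.
 */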
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
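/**
 * __stmmac_disable_all_queues - Disable all NAPI contexts
 * @priv: driver private structure
 * Description: disables the RX/TX NAPI instances of every channel, or the
 * combined rxtx NAPI instance for queues running in AF_XDP zero-copy mode.
 */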
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
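/**
 * stmmac_global_err - handle a fatal/global error
 * @priv: driver private structure
 * Description: drops the carrier, marks that a reset is required and
 * schedules the service task to perform the recovery.
 */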
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Otherwise the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* The platform-provided default clk_csr is assumed valid
308 	 * for all cases except the ones handled below.
309 	 * For rates above the IEEE 802.3 specified frequency we
310 	 * cannot estimate the proper divider because the clk_csr_i
311 	 * frequency is not known, so in that case the default
312 	 * divider is left unchanged.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
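/**
 * stmmac_tx_avail - Get TX queue available space
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: returns the number of free descriptors in the ring, keeping
 * one slot unused so that cur_tx == dirty_tx only ever means "ring empty".
 */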
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter LPI mode
405  * @priv: driver private structure
406  * Description: verify that all TX queues are idle and, if so, enter LPI
407  * mode when EEE is enabled; returns -EBUSY if TX work is still pending.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: exit LPI state (if entered) and disable EEE. It is called
433  * from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then the MAC transmitter can be moved to the LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts the
468  *  related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Check if MAC core supports the EEE feature. */
475 	if (!priv->dma_cap.eee)
476 		return false;
477 
478 	mutex_lock(&priv->lock);
479 
480 	/* Check if it needs to be deactivated */
481 	if (!priv->eee_active) {
482 		if (priv->eee_enabled) {
483 			netdev_dbg(priv->dev, "disable EEE\n");
484 			stmmac_lpi_entry_timer_config(priv, 0);
485 			del_timer_sync(&priv->eee_ctrl_timer);
486 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
487 			if (priv->hw->xpcs)
488 				xpcs_config_eee(priv->hw->xpcs,
489 						priv->plat->mult_fact_100ns,
490 						false);
491 		}
492 		mutex_unlock(&priv->lock);
493 		return false;
494 	}
495 
496 	if (priv->eee_active && !priv->eee_enabled) {
497 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
498 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
499 				     eee_tw_timer);
500 		if (priv->hw->xpcs)
501 			xpcs_config_eee(priv->hw->xpcs,
502 					priv->plat->mult_fact_100ns,
503 					true);
504 	}
505 
506 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
507 		del_timer_sync(&priv->eee_ctrl_timer);
508 		priv->tx_path_in_lpi_mode = false;
509 		stmmac_lpi_entry_timer_config(priv, 1);
510 	} else {
511 		stmmac_lpi_entry_timer_config(priv, 0);
512 		mod_timer(&priv->eee_ctrl_timer,
513 			  STMMAC_LPI_T(priv->tx_lpi_timer));
514 	}
515 
516 	mutex_unlock(&priv->lock);
517 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
518 	return true;
519 }
520 
521 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
522  * @priv: driver private structure
523  * @p : descriptor pointer
524  * @skb : the socket buffer
525  * Description :
526  * This function reads the timestamp from the descriptor, performs some
527  * sanity checks and passes it to the stack.
528  */
529 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
530 				   struct dma_desc *p, struct sk_buff *skb)
531 {
532 	struct skb_shared_hwtstamps shhwtstamp;
533 	bool found = false;
534 	u64 ns = 0;
535 
536 	if (!priv->hwts_tx_en)
537 		return;
538 
539 	/* exit if skb doesn't support hw tstamp */
540 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
541 		return;
542 
543 	/* check tx tstamp status */
544 	if (stmmac_get_tx_timestamp_status(priv, p)) {
545 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
546 		found = true;
547 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
548 		found = true;
549 	}
550 
551 	if (found) {
552 		ns -= priv->plat->cdc_error_adj;
553 
554 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
555 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
556 
557 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
558 		/* pass tstamp to stack */
559 		skb_tstamp_tx(skb, &shhwtstamp);
560 	}
561 }
562 
563 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
564  * @priv: driver private structure
565  * @p : descriptor pointer
566  * @np : next descriptor pointer
567  * @skb : the socket buffer
568  * Description :
569  * This function will read received packet's timestamp from the descriptor
570  * and pass it to the stack. It also performs some sanity checks.
571  */
572 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
573 				   struct dma_desc *np, struct sk_buff *skb)
574 {
575 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
576 	struct dma_desc *desc = p;
577 	u64 ns = 0;
578 
579 	if (!priv->hwts_rx_en)
580 		return;
581 	/* For GMAC4, the valid timestamp is from CTX next desc. */
582 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
583 		desc = np;
584 
585 	/* Check if timestamp is available */
586 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
587 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
588 
589 		ns -= priv->plat->cdc_error_adj;
590 
591 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
592 		shhwtstamp = skb_hwtstamps(skb);
593 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
594 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
595 	} else  {
596 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
597 	}
598 }
599 
600 /**
601  *  stmmac_hwtstamp_set - control hardware timestamping.
602  *  @dev: device pointer.
603  *  @ifr: An IOCTL specific structure, that can contain a pointer to
604  *  a proprietary structure used to pass information to the driver.
605  *  Description:
606  *  This function configures the MAC to enable/disable both outgoing(TX)
607  *  and incoming(RX) packets time stamping based on user input.
608  *  Return Value:
609  *  0 on success and an appropriate negative error code on failure.
610  */
611 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
612 {
613 	struct stmmac_priv *priv = netdev_priv(dev);
614 	struct hwtstamp_config config;
615 	u32 ptp_v2 = 0;
616 	u32 tstamp_all = 0;
617 	u32 ptp_over_ipv4_udp = 0;
618 	u32 ptp_over_ipv6_udp = 0;
619 	u32 ptp_over_ethernet = 0;
620 	u32 snap_type_sel = 0;
621 	u32 ts_master_en = 0;
622 	u32 ts_event_en = 0;
623 
624 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
625 		netdev_alert(priv->dev, "No support for HW time stamping\n");
626 		priv->hwts_tx_en = 0;
627 		priv->hwts_rx_en = 0;
628 
629 		return -EOPNOTSUPP;
630 	}
631 
632 	if (copy_from_user(&config, ifr->ifr_data,
633 			   sizeof(config)))
634 		return -EFAULT;
635 
636 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
637 		   __func__, config.flags, config.tx_type, config.rx_filter);
638 
639 	if (config.tx_type != HWTSTAMP_TX_OFF &&
640 	    config.tx_type != HWTSTAMP_TX_ON)
641 		return -ERANGE;
642 
643 	if (priv->adv_ts) {
644 		switch (config.rx_filter) {
645 		case HWTSTAMP_FILTER_NONE:
646 			/* time stamp no incoming packet at all */
647 			config.rx_filter = HWTSTAMP_FILTER_NONE;
648 			break;
649 
650 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
651 			/* PTP v1, UDP, any kind of event packet */
652 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
653 			/* 'xmac' hardware can support Sync, Pdelay_Req and
654 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
655 			 * This leaves Delay_Req timestamps out.
656 			 * Enable all events *and* general purpose message
657 			 * timestamping
658 			 */
659 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
660 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 			break;
663 
664 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
665 			/* PTP v1, UDP, Sync packet */
666 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
667 			/* take time stamp for SYNC messages only */
668 			ts_event_en = PTP_TCR_TSEVNTENA;
669 
670 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
671 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
672 			break;
673 
674 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
675 			/* PTP v1, UDP, Delay_req packet */
676 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
677 			/* take time stamp for Delay_Req messages only */
678 			ts_master_en = PTP_TCR_TSMSTRENA;
679 			ts_event_en = PTP_TCR_TSEVNTENA;
680 
681 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
682 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
683 			break;
684 
685 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
686 			/* PTP v2, UDP, any kind of event packet */
687 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
688 			ptp_v2 = PTP_TCR_TSVER2ENA;
689 			/* take time stamp for all event messages */
690 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
691 
692 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
693 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
694 			break;
695 
696 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
697 			/* PTP v2, UDP, Sync packet */
698 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
699 			ptp_v2 = PTP_TCR_TSVER2ENA;
700 			/* take time stamp for SYNC messages only */
701 			ts_event_en = PTP_TCR_TSEVNTENA;
702 
703 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
704 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
705 			break;
706 
707 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
708 			/* PTP v2, UDP, Delay_req packet */
709 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
710 			ptp_v2 = PTP_TCR_TSVER2ENA;
711 			/* take time stamp for Delay_Req messages only */
712 			ts_master_en = PTP_TCR_TSMSTRENA;
713 			ts_event_en = PTP_TCR_TSEVNTENA;
714 
715 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
716 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
717 			break;
718 
719 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
720 			/* PTP v2/802.1AS, any layer, any kind of event packet */
721 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
722 			ptp_v2 = PTP_TCR_TSVER2ENA;
723 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
724 			if (priv->synopsys_id < DWMAC_CORE_4_10)
725 				ts_event_en = PTP_TCR_TSEVNTENA;
726 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
727 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
728 			ptp_over_ethernet = PTP_TCR_TSIPENA;
729 			break;
730 
731 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
732 			/* PTP v2/802.1AS, any layer, Sync packet */
733 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
734 			ptp_v2 = PTP_TCR_TSVER2ENA;
735 			/* take time stamp for SYNC messages only */
736 			ts_event_en = PTP_TCR_TSEVNTENA;
737 
738 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
739 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
740 			ptp_over_ethernet = PTP_TCR_TSIPENA;
741 			break;
742 
743 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
744 			/* PTP v2/802.1AS, any layer, Delay_req packet */
745 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
746 			ptp_v2 = PTP_TCR_TSVER2ENA;
747 			/* take time stamp for Delay_Req messages only */
748 			ts_master_en = PTP_TCR_TSMSTRENA;
749 			ts_event_en = PTP_TCR_TSEVNTENA;
750 
751 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
752 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
753 			ptp_over_ethernet = PTP_TCR_TSIPENA;
754 			break;
755 
756 		case HWTSTAMP_FILTER_NTP_ALL:
757 		case HWTSTAMP_FILTER_ALL:
758 			/* time stamp any incoming packet */
759 			config.rx_filter = HWTSTAMP_FILTER_ALL;
760 			tstamp_all = PTP_TCR_TSENALL;
761 			break;
762 
763 		default:
764 			return -ERANGE;
765 		}
766 	} else {
767 		switch (config.rx_filter) {
768 		case HWTSTAMP_FILTER_NONE:
769 			config.rx_filter = HWTSTAMP_FILTER_NONE;
770 			break;
771 		default:
772 			/* PTP v1, UDP, any kind of event packet */
773 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
774 			break;
775 		}
776 	}
777 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
778 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
779 
780 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
781 
782 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
783 		priv->systime_flags |= tstamp_all | ptp_v2 |
784 				       ptp_over_ethernet | ptp_over_ipv6_udp |
785 				       ptp_over_ipv4_udp | ts_event_en |
786 				       ts_master_en | snap_type_sel;
787 	}
788 
789 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
790 
791 	memcpy(&priv->tstamp_config, &config, sizeof(config));
792 
793 	return copy_to_user(ifr->ifr_data, &config,
794 			    sizeof(config)) ? -EFAULT : 0;
795 }
796 
797 /**
798  *  stmmac_hwtstamp_get - read hardware timestamping.
799  *  @dev: device pointer.
800  *  @ifr: An IOCTL specific structure, that can contain a pointer to
801  *  a proprietary structure used to pass information to the driver.
802  *  Description:
803  *  This function obtains the current hardware timestamping settings
804  *  as requested.
805  */
806 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
807 {
808 	struct stmmac_priv *priv = netdev_priv(dev);
809 	struct hwtstamp_config *config = &priv->tstamp_config;
810 
811 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
812 		return -EOPNOTSUPP;
813 
814 	return copy_to_user(ifr->ifr_data, config,
815 			    sizeof(*config)) ? -EFAULT : 0;
816 }
817 
818 /**
819  * stmmac_init_tstamp_counter - init hardware timestamping counter
820  * @priv: driver private structure
821  * @systime_flags: timestamping flags
822  * Description:
823  * Initialize hardware counter for packet timestamping.
824  * This is valid as long as the interface is open and not suspended.
825  * It is rerun after resuming from suspend, in which case the timestamping
826  * flags updated by stmmac_hwtstamp_set() also need to be restored.
827  */
828 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
829 {
830 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
831 	struct timespec64 now;
832 	u32 sec_inc = 0;
833 	u64 temp = 0;
834 
835 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
836 		return -EOPNOTSUPP;
837 
838 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
839 	priv->systime_flags = systime_flags;
840 
841 	/* program Sub Second Increment reg */
842 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
843 					   priv->plat->clk_ptp_rate,
844 					   xmac, &sec_inc);
845 	temp = div_u64(1000000000ULL, sec_inc);
846 
847 	/* Store sub second increment for later use */
848 	priv->sub_second_inc = sec_inc;
849 
850 	/* Calculate the default addend value. The formula is:
851 	 *   addend = (2^32) / freq_div_ratio
852 	 * where
853 	 *   freq_div_ratio = 1e9 ns / sec_inc
854 	 */
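	/* With the fine correction method the hardware adds sec_inc ns to the
	 * system time each time a 32-bit accumulator, incremented by 'addend'
	 * on every clk_ptp_ref cycle, overflows. The addend computed below
	 * makes that overflow rate (clk_ptp_rate * addend / 2^32) match the
	 * required 1e9 / sec_inc ticks per second.
	 */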
855 	temp = (u64)(temp << 32);
856 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
857 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
858 
859 	/* initialize system time */
860 	ktime_get_real_ts64(&now);
861 
862 	/* lower 32 bits of tv_sec are safe until y2106 */
863 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
864 
865 	return 0;
866 }
867 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
868 
869 /**
870  * stmmac_init_ptp - init PTP
871  * @priv: driver private structure
872  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
873  * This is done by looking at the HW cap. register.
874  * This function also registers the ptp driver.
875  */
876 static int stmmac_init_ptp(struct stmmac_priv *priv)
877 {
878 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
879 	int ret;
880 
881 	if (priv->plat->ptp_clk_freq_config)
882 		priv->plat->ptp_clk_freq_config(priv);
883 
884 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
885 	if (ret)
886 		return ret;
887 
888 	priv->adv_ts = 0;
889 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
890 	if (xmac && priv->dma_cap.atime_stamp)
891 		priv->adv_ts = 1;
892 	/* Dwmac 3.x core with extend_desc can support adv_ts */
893 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
894 		priv->adv_ts = 1;
895 
896 	if (priv->dma_cap.time_stamp)
897 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
898 
899 	if (priv->adv_ts)
900 		netdev_info(priv->dev,
901 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
902 
903 	priv->hwts_tx_en = 0;
904 	priv->hwts_rx_en = 0;
905 
906 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
907 		stmmac_hwtstamp_correct_latency(priv, priv);
908 
909 	return 0;
910 }
911 
912 static void stmmac_release_ptp(struct stmmac_priv *priv)
913 {
914 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
915 	stmmac_ptp_unregister(priv);
916 }
917 
918 /**
919  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
920  *  @priv: driver private structure
921  *  @duplex: duplex mode negotiated on the link
922  *  Description: It is used for configuring the flow control in all queues
923  */
924 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
925 {
926 	u32 tx_cnt = priv->plat->tx_queues_to_use;
927 
928 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
929 			priv->pause, tx_cnt);
930 }
931 
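/**
 * stmmac_mac_get_caps - phylink .mac_get_caps() callback
 * @config: phylink configuration embedded in the driver private structure
 * @interface: PHY interface mode being queried
 * Description: refreshes the MAC capabilities and, if the platform declares a
 * max_speed, clamps them accordingly before returning them to phylink.
 */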
932 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
933 					 phy_interface_t interface)
934 {
935 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
936 
937 	/* Refresh the MAC-specific capabilities */
938 	stmmac_mac_update_caps(priv);
939 
940 	config->mac_capabilities = priv->hw->link.caps;
941 
942 	if (priv->plat->max_speed)
943 		phylink_limit_mac_speed(config, priv->plat->max_speed);
944 
945 	return config->mac_capabilities;
946 }
947 
948 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
949 						 phy_interface_t interface)
950 {
951 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
952 	struct phylink_pcs *pcs;
953 
954 	if (priv->plat->select_pcs) {
955 		pcs = priv->plat->select_pcs(priv, interface);
956 		if (!IS_ERR(pcs))
957 			return pcs;
958 	}
959 
960 	return NULL;
961 }
962 
963 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
964 			      const struct phylink_link_state *state)
965 {
966 	/* Nothing to do, xpcs_config() handles everything */
967 }
968 
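/**
 * stmmac_fpe_link_state_handle - track link state changes for frame preemption
 * @priv: driver private structure
 * @is_up: new link state
 * Description: on link-up with the FPE handshake enabled, send a verify
 * mPacket to the link partner; on link-down, reset the local and link-partner
 * FPE states.
 */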
969 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
970 {
971 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
972 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
973 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
974 	bool *hs_enable = &fpe_cfg->hs_enable;
975 
976 	if (is_up && *hs_enable) {
977 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
978 					MPACKET_VERIFY);
979 	} else {
980 		*lo_state = FPE_STATE_OFF;
981 		*lp_state = FPE_STATE_OFF;
982 	}
983 }
984 
985 static void stmmac_mac_link_down(struct phylink_config *config,
986 				 unsigned int mode, phy_interface_t interface)
987 {
988 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
989 
990 	stmmac_mac_set(priv, priv->ioaddr, false);
991 	priv->eee_active = false;
992 	priv->tx_lpi_enabled = false;
993 	priv->eee_enabled = stmmac_eee_init(priv);
994 	stmmac_set_eee_pls(priv, priv->hw, false);
995 
996 	if (priv->dma_cap.fpesel)
997 		stmmac_fpe_link_state_handle(priv, false);
998 }
999 
1000 static void stmmac_mac_link_up(struct phylink_config *config,
1001 			       struct phy_device *phy,
1002 			       unsigned int mode, phy_interface_t interface,
1003 			       int speed, int duplex,
1004 			       bool tx_pause, bool rx_pause)
1005 {
1006 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1007 	u32 old_ctrl, ctrl;
1008 
1009 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1010 	    priv->plat->serdes_powerup)
1011 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1012 
1013 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1014 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1015 
1016 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1017 		switch (speed) {
1018 		case SPEED_10000:
1019 			ctrl |= priv->hw->link.xgmii.speed10000;
1020 			break;
1021 		case SPEED_5000:
1022 			ctrl |= priv->hw->link.xgmii.speed5000;
1023 			break;
1024 		case SPEED_2500:
1025 			ctrl |= priv->hw->link.xgmii.speed2500;
1026 			break;
1027 		default:
1028 			return;
1029 		}
1030 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1031 		switch (speed) {
1032 		case SPEED_100000:
1033 			ctrl |= priv->hw->link.xlgmii.speed100000;
1034 			break;
1035 		case SPEED_50000:
1036 			ctrl |= priv->hw->link.xlgmii.speed50000;
1037 			break;
1038 		case SPEED_40000:
1039 			ctrl |= priv->hw->link.xlgmii.speed40000;
1040 			break;
1041 		case SPEED_25000:
1042 			ctrl |= priv->hw->link.xlgmii.speed25000;
1043 			break;
1044 		case SPEED_10000:
1045 			ctrl |= priv->hw->link.xgmii.speed10000;
1046 			break;
1047 		case SPEED_2500:
1048 			ctrl |= priv->hw->link.speed2500;
1049 			break;
1050 		case SPEED_1000:
1051 			ctrl |= priv->hw->link.speed1000;
1052 			break;
1053 		default:
1054 			return;
1055 		}
1056 	} else {
1057 		switch (speed) {
1058 		case SPEED_2500:
1059 			ctrl |= priv->hw->link.speed2500;
1060 			break;
1061 		case SPEED_1000:
1062 			ctrl |= priv->hw->link.speed1000;
1063 			break;
1064 		case SPEED_100:
1065 			ctrl |= priv->hw->link.speed100;
1066 			break;
1067 		case SPEED_10:
1068 			ctrl |= priv->hw->link.speed10;
1069 			break;
1070 		default:
1071 			return;
1072 		}
1073 	}
1074 
1075 	priv->speed = speed;
1076 
1077 	if (priv->plat->fix_mac_speed)
1078 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1079 
1080 	if (!duplex)
1081 		ctrl &= ~priv->hw->link.duplex;
1082 	else
1083 		ctrl |= priv->hw->link.duplex;
1084 
1085 	/* Flow Control operation */
1086 	if (rx_pause && tx_pause)
1087 		priv->flow_ctrl = FLOW_AUTO;
1088 	else if (rx_pause && !tx_pause)
1089 		priv->flow_ctrl = FLOW_RX;
1090 	else if (!rx_pause && tx_pause)
1091 		priv->flow_ctrl = FLOW_TX;
1092 	else
1093 		priv->flow_ctrl = FLOW_OFF;
1094 
1095 	stmmac_mac_flow_ctrl(priv, duplex);
1096 
1097 	if (ctrl != old_ctrl)
1098 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1099 
1100 	stmmac_mac_set(priv, priv->ioaddr, true);
1101 	if (phy && priv->dma_cap.eee) {
1102 		priv->eee_active =
1103 			phy_init_eee(phy, !(priv->plat->flags &
1104 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1105 		priv->eee_enabled = stmmac_eee_init(priv);
1106 		priv->tx_lpi_enabled = priv->eee_enabled;
1107 		stmmac_set_eee_pls(priv, priv->hw, true);
1108 	}
1109 
1110 	if (priv->dma_cap.fpesel)
1111 		stmmac_fpe_link_state_handle(priv, true);
1112 
1113 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1114 		stmmac_hwtstamp_correct_latency(priv, priv);
1115 }
1116 
1117 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1118 	.mac_get_caps = stmmac_mac_get_caps,
1119 	.mac_select_pcs = stmmac_mac_select_pcs,
1120 	.mac_config = stmmac_mac_config,
1121 	.mac_link_down = stmmac_mac_link_down,
1122 	.mac_link_up = stmmac_mac_link_up,
1123 };
1124 
1125 /**
1126  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1127  * @priv: driver private structure
1128  * Description: this is to verify if the HW supports the Physical Coding
1129  * Sublayer (PCS), an interface that can be used when the MAC is configured
1130  * for the TBI, RTBI, or SGMII PHY interface.
1131  */
1132 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1133 {
1134 	int interface = priv->plat->mac_interface;
1135 
1136 	if (priv->dma_cap.pcs) {
1137 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1138 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1139 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1140 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1141 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1142 			priv->hw->pcs = STMMAC_PCS_RGMII;
1143 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1144 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1145 			priv->hw->pcs = STMMAC_PCS_SGMII;
1146 		}
1147 	}
1148 }
1149 
1150 /**
1151  * stmmac_init_phy - PHY initialization
1152  * @dev: net device structure
1153  * Description: it initializes the driver's PHY state, and attaches the PHY
1154  * to the MAC driver.
1155  *  Return value:
1156  *  0 on success, a negative error code otherwise
1157  */
1158 static int stmmac_init_phy(struct net_device *dev)
1159 {
1160 	struct stmmac_priv *priv = netdev_priv(dev);
1161 	struct fwnode_handle *phy_fwnode;
1162 	struct fwnode_handle *fwnode;
1163 	int ret;
1164 
1165 	if (!phylink_expects_phy(priv->phylink))
1166 		return 0;
1167 
1168 	fwnode = priv->plat->port_node;
1169 	if (!fwnode)
1170 		fwnode = dev_fwnode(priv->device);
1171 
1172 	if (fwnode)
1173 		phy_fwnode = fwnode_get_phy_node(fwnode);
1174 	else
1175 		phy_fwnode = NULL;
1176 
1177 	/* Some DT bindings do not set up the PHY handle. Let's try to
1178 	 * manually parse it
1179 	 */
1180 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1181 		int addr = priv->plat->phy_addr;
1182 		struct phy_device *phydev;
1183 
1184 		if (addr < 0) {
1185 			netdev_err(priv->dev, "no phy found\n");
1186 			return -ENODEV;
1187 		}
1188 
1189 		phydev = mdiobus_get_phy(priv->mii, addr);
1190 		if (!phydev) {
1191 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1192 			return -ENODEV;
1193 		}
1194 
1195 		ret = phylink_connect_phy(priv->phylink, phydev);
1196 	} else {
1197 		fwnode_handle_put(phy_fwnode);
1198 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1199 	}
1200 
1201 	if (!priv->plat->pmt) {
1202 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1203 
1204 		phylink_ethtool_get_wol(priv->phylink, &wol);
1205 		device_set_wakeup_capable(priv->device, !!wol.supported);
1206 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1207 	}
1208 
1209 	return ret;
1210 }
1211 
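/**
 * stmmac_phy_setup - create the phylink instance
 * @priv: driver private structure
 * Description: fills in the phylink configuration (supported interfaces,
 * in-band AN default, RX clock requirement) and creates the phylink instance
 * later used by stmmac_init_phy().
 */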
1212 static int stmmac_phy_setup(struct stmmac_priv *priv)
1213 {
1214 	struct stmmac_mdio_bus_data *mdio_bus_data;
1215 	int mode = priv->plat->phy_interface;
1216 	struct fwnode_handle *fwnode;
1217 	struct phylink *phylink;
1218 
1219 	priv->phylink_config.dev = &priv->dev->dev;
1220 	priv->phylink_config.type = PHYLINK_NETDEV;
1221 	priv->phylink_config.mac_managed_pm = true;
1222 
1223 	/* Stmmac always requires an RX clock for hardware initialization */
1224 	priv->phylink_config.mac_requires_rxc = true;
1225 
1226 	mdio_bus_data = priv->plat->mdio_bus_data;
1227 	if (mdio_bus_data)
1228 		priv->phylink_config.default_an_inband =
1229 			mdio_bus_data->default_an_inband;
1230 
1231 	/* Set the platform/firmware specified interface mode. Note, phylink
1232 	 * deals with the PHY interface mode, not the MAC interface mode.
1233 	 */
1234 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1235 
1236 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1237 	if (priv->hw->xpcs)
1238 		xpcs_get_interfaces(priv->hw->xpcs,
1239 				    priv->phylink_config.supported_interfaces);
1240 
1241 	fwnode = priv->plat->port_node;
1242 	if (!fwnode)
1243 		fwnode = dev_fwnode(priv->device);
1244 
1245 	phylink = phylink_create(&priv->phylink_config, fwnode,
1246 				 mode, &stmmac_phylink_mac_ops);
1247 	if (IS_ERR(phylink))
1248 		return PTR_ERR(phylink);
1249 
1250 	priv->phylink = phylink;
1251 	return 0;
1252 }
1253 
1254 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1255 				    struct stmmac_dma_conf *dma_conf)
1256 {
1257 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1258 	unsigned int desc_size;
1259 	void *head_rx;
1260 	u32 queue;
1261 
1262 	/* Display RX rings */
1263 	for (queue = 0; queue < rx_cnt; queue++) {
1264 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1265 
1266 		pr_info("\tRX Queue %u rings\n", queue);
1267 
1268 		if (priv->extend_desc) {
1269 			head_rx = (void *)rx_q->dma_erx;
1270 			desc_size = sizeof(struct dma_extended_desc);
1271 		} else {
1272 			head_rx = (void *)rx_q->dma_rx;
1273 			desc_size = sizeof(struct dma_desc);
1274 		}
1275 
1276 		/* Display RX ring */
1277 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1278 				    rx_q->dma_rx_phy, desc_size);
1279 	}
1280 }
1281 
1282 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1283 				    struct stmmac_dma_conf *dma_conf)
1284 {
1285 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1286 	unsigned int desc_size;
1287 	void *head_tx;
1288 	u32 queue;
1289 
1290 	/* Display TX rings */
1291 	for (queue = 0; queue < tx_cnt; queue++) {
1292 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1293 
1294 		pr_info("\tTX Queue %d rings\n", queue);
1295 
1296 		if (priv->extend_desc) {
1297 			head_tx = (void *)tx_q->dma_etx;
1298 			desc_size = sizeof(struct dma_extended_desc);
1299 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1300 			head_tx = (void *)tx_q->dma_entx;
1301 			desc_size = sizeof(struct dma_edesc);
1302 		} else {
1303 			head_tx = (void *)tx_q->dma_tx;
1304 			desc_size = sizeof(struct dma_desc);
1305 		}
1306 
1307 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1308 				    tx_q->dma_tx_phy, desc_size);
1309 	}
1310 }
1311 
1312 static void stmmac_display_rings(struct stmmac_priv *priv,
1313 				 struct stmmac_dma_conf *dma_conf)
1314 {
1315 	/* Display RX ring */
1316 	stmmac_display_rx_rings(priv, dma_conf);
1317 
1318 	/* Display TX ring */
1319 	stmmac_display_tx_rings(priv, dma_conf);
1320 }
1321 
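/**
 * stmmac_set_bfsize - select the RX DMA buffer size for a given MTU
 * @mtu: maximum transfer unit requested
 * @bufsize: current buffer size
 * Description: rounds the required buffer size up to the next supported size
 * (2KiB/4KiB/8KiB/16KiB) for MTUs larger than DEFAULT_BUFSIZE, otherwise
 * returns DEFAULT_BUFSIZE.
 */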
1322 static int stmmac_set_bfsize(int mtu, int bufsize)
1323 {
1324 	int ret = bufsize;
1325 
1326 	if (mtu >= BUF_SIZE_8KiB)
1327 		ret = BUF_SIZE_16KiB;
1328 	else if (mtu >= BUF_SIZE_4KiB)
1329 		ret = BUF_SIZE_8KiB;
1330 	else if (mtu >= BUF_SIZE_2KiB)
1331 		ret = BUF_SIZE_4KiB;
1332 	else if (mtu > DEFAULT_BUFSIZE)
1333 		ret = BUF_SIZE_2KiB;
1334 	else
1335 		ret = DEFAULT_BUFSIZE;
1336 
1337 	return ret;
1338 }
1339 
1340 /**
1341  * stmmac_clear_rx_descriptors - clear RX descriptors
1342  * @priv: driver private structure
1343  * @dma_conf: structure to take the dma data
1344  * @queue: RX queue index
1345  * Description: this function is called to clear the RX descriptors
1346  * whether basic or extended descriptors are in use.
1347  */
1348 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1349 					struct stmmac_dma_conf *dma_conf,
1350 					u32 queue)
1351 {
1352 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1353 	int i;
1354 
1355 	/* Clear the RX descriptors */
1356 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1357 		if (priv->extend_desc)
1358 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1359 					priv->use_riwt, priv->mode,
1360 					(i == dma_conf->dma_rx_size - 1),
1361 					dma_conf->dma_buf_sz);
1362 		else
1363 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1364 					priv->use_riwt, priv->mode,
1365 					(i == dma_conf->dma_rx_size - 1),
1366 					dma_conf->dma_buf_sz);
1367 }
1368 
1369 /**
1370  * stmmac_clear_tx_descriptors - clear tx descriptors
1371  * @priv: driver private structure
1372  * @dma_conf: structure to take the dma data
1373  * @queue: TX queue index.
1374  * Description: this function is called to clear the TX descriptors
1375  * whether basic or extended descriptors are in use.
1376  */
1377 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1378 					struct stmmac_dma_conf *dma_conf,
1379 					u32 queue)
1380 {
1381 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1382 	int i;
1383 
1384 	/* Clear the TX descriptors */
1385 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1386 		int last = (i == (dma_conf->dma_tx_size - 1));
1387 		struct dma_desc *p;
1388 
1389 		if (priv->extend_desc)
1390 			p = &tx_q->dma_etx[i].basic;
1391 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1392 			p = &tx_q->dma_entx[i].basic;
1393 		else
1394 			p = &tx_q->dma_tx[i];
1395 
1396 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1397 	}
1398 }
1399 
1400 /**
1401  * stmmac_clear_descriptors - clear descriptors
1402  * @priv: driver private structure
1403  * @dma_conf: structure to take the dma data
1404  * Description: this function is called to clear the TX and RX descriptors
1405  * whether basic or extended descriptors are in use.
1406  */
1407 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1408 				     struct stmmac_dma_conf *dma_conf)
1409 {
1410 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1411 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1412 	u32 queue;
1413 
1414 	/* Clear the RX descriptors */
1415 	for (queue = 0; queue < rx_queue_cnt; queue++)
1416 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1417 
1418 	/* Clear the TX descriptors */
1419 	for (queue = 0; queue < tx_queue_cnt; queue++)
1420 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1421 }
1422 
1423 /**
1424  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1425  * @priv: driver private structure
1426  * @dma_conf: structure to take the dma data
1427  * @p: descriptor pointer
1428  * @i: descriptor index
1429  * @flags: gfp flag
1430  * @queue: RX queue index
1431  * Description: this function is called to allocate a receive buffer, perform
1432  * the DMA mapping and init the descriptor.
1433  */
1434 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1435 				  struct stmmac_dma_conf *dma_conf,
1436 				  struct dma_desc *p,
1437 				  int i, gfp_t flags, u32 queue)
1438 {
1439 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1440 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1441 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1442 
1443 	if (priv->dma_cap.host_dma_width <= 32)
1444 		gfp |= GFP_DMA32;
1445 
1446 	if (!buf->page) {
1447 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1448 		if (!buf->page)
1449 			return -ENOMEM;
1450 		buf->page_offset = stmmac_rx_offset(priv);
1451 	}
1452 
1453 	if (priv->sph && !buf->sec_page) {
1454 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1455 		if (!buf->sec_page)
1456 			return -ENOMEM;
1457 
1458 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1459 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1460 	} else {
1461 		buf->sec_page = NULL;
1462 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1463 	}
1464 
1465 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1466 
1467 	stmmac_set_desc_addr(priv, p, buf->addr);
1468 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1469 		stmmac_init_desc3(priv, p);
1470 
1471 	return 0;
1472 }
1473 
1474 /**
1475  * stmmac_free_rx_buffer - free an RX dma buffer
1476  * @priv: private structure
1477  * @rx_q: RX queue
1478  * @i: buffer index.
1479  */
1480 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1481 				  struct stmmac_rx_queue *rx_q,
1482 				  int i)
1483 {
1484 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1485 
1486 	if (buf->page)
1487 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1488 	buf->page = NULL;
1489 
1490 	if (buf->sec_page)
1491 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1492 	buf->sec_page = NULL;
1493 }
1494 
1495 /**
1496  * stmmac_free_tx_buffer - free a TX dma buffer
1497  * @priv: private structure
1498  * @dma_conf: structure to take the dma data
1499  * @queue: TX queue index
1500  * @i: buffer index.
1501  */
1502 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1503 				  struct stmmac_dma_conf *dma_conf,
1504 				  u32 queue, int i)
1505 {
1506 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1507 
1508 	if (tx_q->tx_skbuff_dma[i].buf &&
1509 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1510 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1511 			dma_unmap_page(priv->device,
1512 				       tx_q->tx_skbuff_dma[i].buf,
1513 				       tx_q->tx_skbuff_dma[i].len,
1514 				       DMA_TO_DEVICE);
1515 		else
1516 			dma_unmap_single(priv->device,
1517 					 tx_q->tx_skbuff_dma[i].buf,
1518 					 tx_q->tx_skbuff_dma[i].len,
1519 					 DMA_TO_DEVICE);
1520 	}
1521 
1522 	if (tx_q->xdpf[i] &&
1523 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1524 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1525 		xdp_return_frame(tx_q->xdpf[i]);
1526 		tx_q->xdpf[i] = NULL;
1527 	}
1528 
1529 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1530 		tx_q->xsk_frames_done++;
1531 
1532 	if (tx_q->tx_skbuff[i] &&
1533 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1534 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1535 		tx_q->tx_skbuff[i] = NULL;
1536 	}
1537 
1538 	tx_q->tx_skbuff_dma[i].buf = 0;
1539 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1540 }
1541 
1542 /**
1543  * dma_free_rx_skbufs - free RX dma buffers
1544  * @priv: private structure
1545  * @dma_conf: structure to take the dma data
1546  * @queue: RX queue index
1547  */
1548 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1549 			       struct stmmac_dma_conf *dma_conf,
1550 			       u32 queue)
1551 {
1552 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1553 	int i;
1554 
1555 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1556 		stmmac_free_rx_buffer(priv, rx_q, i);
1557 }
1558 
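/**
 * stmmac_alloc_rx_buffers - allocate page pool buffers for a whole RX ring
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag
 * Description: fills every descriptor of the ring via stmmac_init_rx_buffers();
 * on failure the caller is expected to free the buffers already allocated.
 */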
1559 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1560 				   struct stmmac_dma_conf *dma_conf,
1561 				   u32 queue, gfp_t flags)
1562 {
1563 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564 	int i;
1565 
1566 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1567 		struct dma_desc *p;
1568 		int ret;
1569 
1570 		if (priv->extend_desc)
1571 			p = &((rx_q->dma_erx + i)->basic);
1572 		else
1573 			p = rx_q->dma_rx + i;
1574 
1575 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1576 					     queue);
1577 		if (ret)
1578 			return ret;
1579 
1580 		rx_q->buf_alloc_num++;
1581 	}
1582 
1583 	return 0;
1584 }
1585 
1586 /**
1587  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1588  * @priv: private structure
1589  * @dma_conf: structure to take the dma data
1590  * @queue: RX queue index
1591  */
1592 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1593 				struct stmmac_dma_conf *dma_conf,
1594 				u32 queue)
1595 {
1596 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1597 	int i;
1598 
1599 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1600 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1601 
1602 		if (!buf->xdp)
1603 			continue;
1604 
1605 		xsk_buff_free(buf->xdp);
1606 		buf->xdp = NULL;
1607 	}
1608 }
1609 
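/**
 * stmmac_alloc_rx_buffers_zc - allocate XSK (zero-copy) buffers for an RX ring
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: pulls buffers from the queue's XSK buffer pool and programs
 * their DMA addresses into the RX descriptors.
 */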
1610 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1611 				      struct stmmac_dma_conf *dma_conf,
1612 				      u32 queue)
1613 {
1614 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1615 	int i;
1616 
1617 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1618 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1619 	 * use this macro to make sure there are no size violations.
1620 	 */
1621 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1622 
1623 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1624 		struct stmmac_rx_buffer *buf;
1625 		dma_addr_t dma_addr;
1626 		struct dma_desc *p;
1627 
1628 		if (priv->extend_desc)
1629 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1630 		else
1631 			p = rx_q->dma_rx + i;
1632 
1633 		buf = &rx_q->buf_pool[i];
1634 
1635 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1636 		if (!buf->xdp)
1637 			return -ENOMEM;
1638 
1639 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1640 		stmmac_set_desc_addr(priv, p, dma_addr);
1641 		rx_q->buf_alloc_num++;
1642 	}
1643 
1644 	return 0;
1645 }
1646 
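/**
 * stmmac_get_xsk_pool - get the XSK buffer pool bound to a queue
 * @priv: driver private structure
 * @queue: queue index
 * Description: returns the XSK buffer pool registered for @queue when XDP is
 * enabled and the queue runs in zero-copy mode, NULL otherwise.
 */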
1647 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1648 {
1649 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1650 		return NULL;
1651 
1652 	return xsk_get_pool_from_qid(priv->dev, queue);
1653 }
1654 
1655 /**
1656  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1657  * @priv: driver private structure
1658  * @dma_conf: structure to take the dma data
1659  * @queue: RX queue index
1660  * @flags: gfp flag.
1661  * Description: this function initializes the DMA RX descriptors
1662  * and allocates the socket buffers. It supports the chained and ring
1663  * modes.
1664  */
1665 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1666 				    struct stmmac_dma_conf *dma_conf,
1667 				    u32 queue, gfp_t flags)
1668 {
1669 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1670 	int ret;
1671 
1672 	netif_dbg(priv, probe, priv->dev,
1673 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1674 		  (u32)rx_q->dma_rx_phy);
1675 
1676 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1677 
1678 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1679 
1680 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1681 
1682 	if (rx_q->xsk_pool) {
1683 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1684 						   MEM_TYPE_XSK_BUFF_POOL,
1685 						   NULL));
1686 		netdev_info(priv->dev,
1687 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1688 			    rx_q->queue_index);
1689 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1690 	} else {
1691 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1692 						   MEM_TYPE_PAGE_POOL,
1693 						   rx_q->page_pool));
1694 		netdev_info(priv->dev,
1695 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1696 			    rx_q->queue_index);
1697 	}
1698 
1699 	if (rx_q->xsk_pool) {
1700 		/* RX XDP ZC buffer pool may not be populated, e.g.
1701 		 * xdpsock TX-only.
1702 		 */
1703 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1704 	} else {
1705 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1706 		if (ret < 0)
1707 			return -ENOMEM;
1708 	}
1709 
1710 	/* Setup the chained descriptor addresses */
1711 	if (priv->mode == STMMAC_CHAIN_MODE) {
1712 		if (priv->extend_desc)
1713 			stmmac_mode_init(priv, rx_q->dma_erx,
1714 					 rx_q->dma_rx_phy,
1715 					 dma_conf->dma_rx_size, 1);
1716 		else
1717 			stmmac_mode_init(priv, rx_q->dma_rx,
1718 					 rx_q->dma_rx_phy,
1719 					 dma_conf->dma_rx_size, 0);
1720 	}
1721 
1722 	return 0;
1723 }
1724 
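/**
 * init_dma_rx_desc_rings - init the RX descriptor rings of all queues
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: initializes every RX queue and, on error, unwinds the queues
 * that were already populated.
 */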
1725 static int init_dma_rx_desc_rings(struct net_device *dev,
1726 				  struct stmmac_dma_conf *dma_conf,
1727 				  gfp_t flags)
1728 {
1729 	struct stmmac_priv *priv = netdev_priv(dev);
1730 	u32 rx_count = priv->plat->rx_queues_to_use;
1731 	int queue;
1732 	int ret;
1733 
1734 	/* RX INITIALIZATION */
1735 	netif_dbg(priv, probe, priv->dev,
1736 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1737 
1738 	for (queue = 0; queue < rx_count; queue++) {
1739 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1740 		if (ret)
1741 			goto err_init_rx_buffers;
1742 	}
1743 
1744 	return 0;
1745 
1746 err_init_rx_buffers:
1747 	while (queue >= 0) {
1748 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1749 
1750 		if (rx_q->xsk_pool)
1751 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1752 		else
1753 			dma_free_rx_skbufs(priv, dma_conf, queue);
1754 
1755 		rx_q->buf_alloc_num = 0;
1756 		rx_q->xsk_pool = NULL;
1757 
1758 		queue--;
1759 	}
1760 
1761 	return ret;
1762 }
1763 
1764 /**
1765  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1766  * @priv: driver private structure
1767  * @dma_conf: structure to take the dma data
1768  * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors and the
 * TX buffer bookkeeping. It supports the chained and ring modes.
1772  */
1773 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1774 				    struct stmmac_dma_conf *dma_conf,
1775 				    u32 queue)
1776 {
1777 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1778 	int i;
1779 
1780 	netif_dbg(priv, probe, priv->dev,
1781 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1782 		  (u32)tx_q->dma_tx_phy);
1783 
1784 	/* Setup the chained descriptor addresses */
1785 	if (priv->mode == STMMAC_CHAIN_MODE) {
1786 		if (priv->extend_desc)
1787 			stmmac_mode_init(priv, tx_q->dma_etx,
1788 					 tx_q->dma_tx_phy,
1789 					 dma_conf->dma_tx_size, 1);
1790 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1791 			stmmac_mode_init(priv, tx_q->dma_tx,
1792 					 tx_q->dma_tx_phy,
1793 					 dma_conf->dma_tx_size, 0);
1794 	}
1795 
1796 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1797 
1798 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1799 		struct dma_desc *p;
1800 
1801 		if (priv->extend_desc)
1802 			p = &((tx_q->dma_etx + i)->basic);
1803 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1804 			p = &((tx_q->dma_entx + i)->basic);
1805 		else
1806 			p = tx_q->dma_tx + i;
1807 
1808 		stmmac_clear_desc(priv, p);
1809 
1810 		tx_q->tx_skbuff_dma[i].buf = 0;
1811 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1812 		tx_q->tx_skbuff_dma[i].len = 0;
1813 		tx_q->tx_skbuff_dma[i].last_segment = false;
1814 		tx_q->tx_skbuff[i] = NULL;
1815 	}
1816 
1817 	return 0;
1818 }
1819 
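/* Initialize the TX descriptor rings of all TX queues */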
1820 static int init_dma_tx_desc_rings(struct net_device *dev,
1821 				  struct stmmac_dma_conf *dma_conf)
1822 {
1823 	struct stmmac_priv *priv = netdev_priv(dev);
1824 	u32 tx_queue_cnt;
1825 	u32 queue;
1826 
1827 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1828 
1829 	for (queue = 0; queue < tx_queue_cnt; queue++)
1830 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1831 
1832 	return 0;
1833 }
1834 
1835 /**
1836  * init_dma_desc_rings - init the RX/TX descriptor rings
1837  * @dev: net device structure
1838  * @dma_conf: structure to take the dma data
1839  * @flags: gfp flag.
1840  * Description: this function initializes the DMA RX/TX descriptors
1841  * and allocates the socket buffers. It supports the chained and ring
1842  * modes.
1843  */
1844 static int init_dma_desc_rings(struct net_device *dev,
1845 			       struct stmmac_dma_conf *dma_conf,
1846 			       gfp_t flags)
1847 {
1848 	struct stmmac_priv *priv = netdev_priv(dev);
1849 	int ret;
1850 
1851 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1852 	if (ret)
1853 		return ret;
1854 
1855 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1856 
1857 	stmmac_clear_descriptors(priv, dma_conf);
1858 
1859 	if (netif_msg_hw(priv))
1860 		stmmac_display_rings(priv, dma_conf);
1861 
1862 	return ret;
1863 }
1864 
1865 /**
1866  * dma_free_tx_skbufs - free TX dma buffers
1867  * @priv: private structure
1868  * @dma_conf: structure to take the dma data
1869  * @queue: TX queue index
1870  */
1871 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1872 			       struct stmmac_dma_conf *dma_conf,
1873 			       u32 queue)
1874 {
1875 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1876 	int i;
1877 
1878 	tx_q->xsk_frames_done = 0;
1879 
1880 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1881 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1882 
1883 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1884 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1885 		tx_q->xsk_frames_done = 0;
1886 		tx_q->xsk_pool = NULL;
1887 	}
1888 }
1889 
1890 /**
1891  * stmmac_free_tx_skbufs - free TX skb buffers
1892  * @priv: private structure
1893  */
1894 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1895 {
1896 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1897 	u32 queue;
1898 
1899 	for (queue = 0; queue < tx_queue_cnt; queue++)
1900 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1901 }
1902 
1903 /**
1904  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1905  * @priv: private structure
1906  * @dma_conf: structure to take the dma data
1907  * @queue: RX queue index
1908  */
1909 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1910 					 struct stmmac_dma_conf *dma_conf,
1911 					 u32 queue)
1912 {
1913 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1914 
1915 	/* Release the DMA RX socket buffers */
1916 	if (rx_q->xsk_pool)
1917 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1918 	else
1919 		dma_free_rx_skbufs(priv, dma_conf, queue);
1920 
1921 	rx_q->buf_alloc_num = 0;
1922 	rx_q->xsk_pool = NULL;
1923 
1924 	/* Free DMA regions of consistent memory previously allocated */
1925 	if (!priv->extend_desc)
1926 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1927 				  sizeof(struct dma_desc),
1928 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1929 	else
1930 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1931 				  sizeof(struct dma_extended_desc),
1932 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1933 
1934 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1935 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1936 
1937 	kfree(rx_q->buf_pool);
1938 	if (rx_q->page_pool)
1939 		page_pool_destroy(rx_q->page_pool);
1940 }
1941 
1942 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1943 				       struct stmmac_dma_conf *dma_conf)
1944 {
1945 	u32 rx_count = priv->plat->rx_queues_to_use;
1946 	u32 queue;
1947 
1948 	/* Free RX queue resources */
1949 	for (queue = 0; queue < rx_count; queue++)
1950 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1951 }
1952 
1953 /**
1954  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1955  * @priv: private structure
1956  * @dma_conf: structure to take the dma data
1957  * @queue: TX queue index
1958  */
1959 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1960 					 struct stmmac_dma_conf *dma_conf,
1961 					 u32 queue)
1962 {
1963 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1964 	size_t size;
1965 	void *addr;
1966 
1967 	/* Release the DMA TX socket buffers */
1968 	dma_free_tx_skbufs(priv, dma_conf, queue);
1969 
1970 	if (priv->extend_desc) {
1971 		size = sizeof(struct dma_extended_desc);
1972 		addr = tx_q->dma_etx;
1973 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1974 		size = sizeof(struct dma_edesc);
1975 		addr = tx_q->dma_entx;
1976 	} else {
1977 		size = sizeof(struct dma_desc);
1978 		addr = tx_q->dma_tx;
1979 	}
1980 
1981 	size *= dma_conf->dma_tx_size;
1982 
1983 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1984 
1985 	kfree(tx_q->tx_skbuff_dma);
1986 	kfree(tx_q->tx_skbuff);
1987 }
1988 
1989 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1990 				       struct stmmac_dma_conf *dma_conf)
1991 {
1992 	u32 tx_count = priv->plat->tx_queues_to_use;
1993 	u32 queue;
1994 
1995 	/* Free TX queue resources */
1996 	for (queue = 0; queue < tx_count; queue++)
1997 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1998 }
1999 
2000 /**
2001  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2002  * @priv: private structure
2003  * @dma_conf: structure to take the dma data
2004  * @queue: RX queue index
 * Description: according to which descriptor can be used (extended or basic),
 * this function allocates the resources for the RX path of the given queue:
 * the page pool backing the RX buffers, the buffer bookkeeping array, the
 * descriptor ring and the XDP RX queue info.
2009  */
2010 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2011 					 struct stmmac_dma_conf *dma_conf,
2012 					 u32 queue)
2013 {
2014 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2015 	struct stmmac_channel *ch = &priv->channel[queue];
2016 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2017 	struct page_pool_params pp_params = { 0 };
2018 	unsigned int num_pages;
2019 	unsigned int napi_id;
2020 	int ret;
2021 
2022 	rx_q->queue_index = queue;
2023 	rx_q->priv_data = priv;
2024 
2025 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2026 	pp_params.pool_size = dma_conf->dma_rx_size;
2027 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2028 	pp_params.order = ilog2(num_pages);
2029 	pp_params.nid = dev_to_node(priv->device);
2030 	pp_params.dev = priv->device;
2031 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2032 	pp_params.offset = stmmac_rx_offset(priv);
2033 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2034 
2035 	rx_q->page_pool = page_pool_create(&pp_params);
2036 	if (IS_ERR(rx_q->page_pool)) {
2037 		ret = PTR_ERR(rx_q->page_pool);
2038 		rx_q->page_pool = NULL;
2039 		return ret;
2040 	}
2041 
2042 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2043 				 sizeof(*rx_q->buf_pool),
2044 				 GFP_KERNEL);
2045 	if (!rx_q->buf_pool)
2046 		return -ENOMEM;
2047 
2048 	if (priv->extend_desc) {
2049 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2050 						   dma_conf->dma_rx_size *
2051 						   sizeof(struct dma_extended_desc),
2052 						   &rx_q->dma_rx_phy,
2053 						   GFP_KERNEL);
2054 		if (!rx_q->dma_erx)
2055 			return -ENOMEM;
2056 
2057 	} else {
2058 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2059 						  dma_conf->dma_rx_size *
2060 						  sizeof(struct dma_desc),
2061 						  &rx_q->dma_rx_phy,
2062 						  GFP_KERNEL);
2063 		if (!rx_q->dma_rx)
2064 			return -ENOMEM;
2065 	}
2066 
2067 	if (stmmac_xdp_is_enabled(priv) &&
2068 	    test_bit(queue, priv->af_xdp_zc_qps))
2069 		napi_id = ch->rxtx_napi.napi_id;
2070 	else
2071 		napi_id = ch->rx_napi.napi_id;
2072 
2073 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2074 			       rx_q->queue_index,
2075 			       napi_id);
2076 	if (ret) {
2077 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2078 		return -EINVAL;
2079 	}
2080 
2081 	return 0;
2082 }
2083 
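/* Allocate the RX descriptor resources of every RX queue, releasing them
 * again if any allocation fails.
 */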
2084 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2085 				       struct stmmac_dma_conf *dma_conf)
2086 {
2087 	u32 rx_count = priv->plat->rx_queues_to_use;
2088 	u32 queue;
2089 	int ret;
2090 
2091 	/* RX queues buffers and DMA */
2092 	for (queue = 0; queue < rx_count; queue++) {
2093 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2094 		if (ret)
2095 			goto err_dma;
2096 	}
2097 
2098 	return 0;
2099 
2100 err_dma:
2101 	free_dma_rx_desc_resources(priv, dma_conf);
2102 
2103 	return ret;
2104 }
2105 
2106 /**
2107  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2108  * @priv: private structure
2109  * @dma_conf: structure to take the dma data
2110  * @queue: TX queue index
 * Description: according to which descriptor can be used (extended or basic),
 * this function allocates the resources for the TX path of the given queue:
 * the buffer bookkeeping arrays and the descriptor ring.
2115  */
2116 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2117 					 struct stmmac_dma_conf *dma_conf,
2118 					 u32 queue)
2119 {
2120 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2121 	size_t size;
2122 	void *addr;
2123 
2124 	tx_q->queue_index = queue;
2125 	tx_q->priv_data = priv;
2126 
2127 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2128 				      sizeof(*tx_q->tx_skbuff_dma),
2129 				      GFP_KERNEL);
2130 	if (!tx_q->tx_skbuff_dma)
2131 		return -ENOMEM;
2132 
2133 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2134 				  sizeof(struct sk_buff *),
2135 				  GFP_KERNEL);
2136 	if (!tx_q->tx_skbuff)
2137 		return -ENOMEM;
2138 
2139 	if (priv->extend_desc)
2140 		size = sizeof(struct dma_extended_desc);
2141 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2142 		size = sizeof(struct dma_edesc);
2143 	else
2144 		size = sizeof(struct dma_desc);
2145 
2146 	size *= dma_conf->dma_tx_size;
2147 
2148 	addr = dma_alloc_coherent(priv->device, size,
2149 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2150 	if (!addr)
2151 		return -ENOMEM;
2152 
2153 	if (priv->extend_desc)
2154 		tx_q->dma_etx = addr;
2155 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2156 		tx_q->dma_entx = addr;
2157 	else
2158 		tx_q->dma_tx = addr;
2159 
2160 	return 0;
2161 }
2162 
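/* Allocate the TX descriptor resources of every TX queue, releasing them
 * again if any allocation fails.
 */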
2163 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2164 				       struct stmmac_dma_conf *dma_conf)
2165 {
2166 	u32 tx_count = priv->plat->tx_queues_to_use;
2167 	u32 queue;
2168 	int ret;
2169 
2170 	/* TX queues buffers and DMA */
2171 	for (queue = 0; queue < tx_count; queue++) {
2172 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2173 		if (ret)
2174 			goto err_dma;
2175 	}
2176 
2177 	return 0;
2178 
2179 err_dma:
2180 	free_dma_tx_desc_resources(priv, dma_conf);
2181 	return ret;
2182 }
2183 
2184 /**
2185  * alloc_dma_desc_resources - alloc TX/RX resources.
2186  * @priv: private structure
2187  * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extended or basic),
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX buffers in order to
 * allow a zero-copy mechanism.
2192  */
2193 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2194 				    struct stmmac_dma_conf *dma_conf)
2195 {
2196 	/* RX Allocation */
2197 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2198 
2199 	if (ret)
2200 		return ret;
2201 
2202 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2203 
2204 	return ret;
2205 }
2206 
2207 /**
2208  * free_dma_desc_resources - free dma desc resources
2209  * @priv: private structure
2210  * @dma_conf: structure to take the dma data
2211  */
2212 static void free_dma_desc_resources(struct stmmac_priv *priv,
2213 				    struct stmmac_dma_conf *dma_conf)
2214 {
2215 	/* Release the DMA TX socket buffers */
2216 	free_dma_tx_desc_resources(priv, dma_conf);
2217 
2218 	/* Release the DMA RX socket buffers later
2219 	 * to ensure all pending XDP_TX buffers are returned.
2220 	 */
2221 	free_dma_rx_desc_resources(priv, dma_conf);
2222 }
2223 
2224 /**
2225  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2226  *  @priv: driver private structure
2227  *  Description: It is used for enabling the rx queues in the MAC
2228  */
2229 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2230 {
2231 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2232 	int queue;
2233 	u8 mode;
2234 
2235 	for (queue = 0; queue < rx_queues_count; queue++) {
2236 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2237 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2238 	}
2239 }
2240 
2241 /**
2242  * stmmac_start_rx_dma - start RX DMA channel
2243  * @priv: driver private structure
2244  * @chan: RX channel index
2245  * Description:
 * This starts an RX DMA channel
2247  */
2248 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2249 {
2250 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2251 	stmmac_start_rx(priv, priv->ioaddr, chan);
2252 }
2253 
2254 /**
2255  * stmmac_start_tx_dma - start TX DMA channel
2256  * @priv: driver private structure
2257  * @chan: TX channel index
2258  * Description:
2259  * This starts a TX DMA channel
2260  */
2261 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2262 {
2263 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2264 	stmmac_start_tx(priv, priv->ioaddr, chan);
2265 }
2266 
2267 /**
2268  * stmmac_stop_rx_dma - stop RX DMA channel
2269  * @priv: driver private structure
2270  * @chan: RX channel index
2271  * Description:
2272  * This stops a RX DMA channel
 * This stops an RX DMA channel
2274 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2275 {
2276 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2277 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2278 }
2279 
2280 /**
2281  * stmmac_stop_tx_dma - stop TX DMA channel
2282  * @priv: driver private structure
2283  * @chan: TX channel index
2284  * Description:
2285  * This stops a TX DMA channel
2286  */
2287 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2288 {
2289 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2290 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2291 }
2292 
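/* Enable both the RX and TX DMA interrupts on every DMA CSR channel */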
2293 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2294 {
2295 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2296 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2297 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2298 	u32 chan;
2299 
2300 	for (chan = 0; chan < dma_csr_ch; chan++) {
2301 		struct stmmac_channel *ch = &priv->channel[chan];
2302 		unsigned long flags;
2303 
2304 		spin_lock_irqsave(&ch->lock, flags);
2305 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2306 		spin_unlock_irqrestore(&ch->lock, flags);
2307 	}
2308 }
2309 
2310 /**
2311  * stmmac_start_all_dma - start all RX and TX DMA channels
2312  * @priv: driver private structure
2313  * Description:
2314  * This starts all the RX and TX DMA channels
2315  */
2316 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2317 {
2318 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2319 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2320 	u32 chan = 0;
2321 
2322 	for (chan = 0; chan < rx_channels_count; chan++)
2323 		stmmac_start_rx_dma(priv, chan);
2324 
2325 	for (chan = 0; chan < tx_channels_count; chan++)
2326 		stmmac_start_tx_dma(priv, chan);
2327 }
2328 
2329 /**
2330  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2331  * @priv: driver private structure
2332  * Description:
2333  * This stops the RX and TX DMA channels
2334  */
2335 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2336 {
2337 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2338 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2339 	u32 chan = 0;
2340 
2341 	for (chan = 0; chan < rx_channels_count; chan++)
2342 		stmmac_stop_rx_dma(priv, chan);
2343 
2344 	for (chan = 0; chan < tx_channels_count; chan++)
2345 		stmmac_stop_tx_dma(priv, chan);
2346 }
2347 
2348 /**
2349  *  stmmac_dma_operation_mode - HW DMA operation mode
2350  *  @priv: driver private structure
2351  *  Description: it is used for configuring the DMA operation mode register in
2352  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2353  */
2354 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2355 {
2356 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2357 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2358 	int rxfifosz = priv->plat->rx_fifo_size;
2359 	int txfifosz = priv->plat->tx_fifo_size;
2360 	u32 txmode = 0;
2361 	u32 rxmode = 0;
2362 	u32 chan = 0;
2363 	u8 qmode = 0;
2364 
2365 	if (rxfifosz == 0)
2366 		rxfifosz = priv->dma_cap.rx_fifo_size;
2367 	if (txfifosz == 0)
2368 		txfifosz = priv->dma_cap.tx_fifo_size;
2369 
2370 	/* Adjust for real per queue fifo size */
2371 	rxfifosz /= rx_channels_count;
2372 	txfifosz /= tx_channels_count;
2373 
2374 	if (priv->plat->force_thresh_dma_mode) {
2375 		txmode = tc;
2376 		rxmode = tc;
2377 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2378 		/*
2379 		 * In case of GMAC, SF mode can be enabled
2380 		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported
		 * 2) there being no buggy Jumbo frame support
		 *    that requires not inserting the csum in the TDES.
2384 		 */
2385 		txmode = SF_DMA_MODE;
2386 		rxmode = SF_DMA_MODE;
2387 		priv->xstats.threshold = SF_DMA_MODE;
2388 	} else {
2389 		txmode = tc;
2390 		rxmode = SF_DMA_MODE;
2391 	}
2392 
2393 	/* configure all channels */
2394 	for (chan = 0; chan < rx_channels_count; chan++) {
2395 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2396 		u32 buf_size;
2397 
2398 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2399 
2400 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2401 				rxfifosz, qmode);
2402 
2403 		if (rx_q->xsk_pool) {
2404 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2405 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2406 					      buf_size,
2407 					      chan);
2408 		} else {
2409 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2410 					      priv->dma_conf.dma_buf_sz,
2411 					      chan);
2412 		}
2413 	}
2414 
2415 	for (chan = 0; chan < tx_channels_count; chan++) {
2416 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2417 
2418 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2419 				txfifosz, qmode);
2420 	}
2421 }
2422 
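/* XSK TX metadata op: request a hardware TX timestamp for the descriptor and
 * force the Interrupt-on-Completion bit so that the completion is processed.
 */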
2423 static void stmmac_xsk_request_timestamp(void *_priv)
2424 {
2425 	struct stmmac_metadata_request *meta_req = _priv;
2426 
2427 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2428 	*meta_req->set_ic = true;
2429 }
2430 
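/* XSK TX metadata op: fetch the TX timestamp from the descriptor (or from the
 * MAC as a fallback), apply the CDC error adjustment and return it, or return
 * 0 if TX timestamping is disabled or no timestamp is available.
 */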
2431 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2432 {
2433 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2434 	struct stmmac_priv *priv = tx_compl->priv;
2435 	struct dma_desc *desc = tx_compl->desc;
2436 	bool found = false;
2437 	u64 ns = 0;
2438 
2439 	if (!priv->hwts_tx_en)
2440 		return 0;
2441 
2442 	/* check tx tstamp status */
2443 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2444 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2445 		found = true;
2446 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2447 		found = true;
2448 	}
2449 
2450 	if (found) {
2451 		ns -= priv->plat->cdc_error_adj;
2452 		return ns_to_ktime(ns);
2453 	}
2454 
2455 	return 0;
2456 }
2457 
2458 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2459 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2460 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2461 };
2462 
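/* Transmit frames from the XSK TX ring of @queue in zero-copy mode, bounded
 * by @budget and by the space available in the hardware TX ring.
 */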
2463 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2464 {
2465 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2466 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2467 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2468 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2469 	unsigned int entry = tx_q->cur_tx;
2470 	struct dma_desc *tx_desc = NULL;
2471 	struct xdp_desc xdp_desc;
2472 	bool work_done = true;
2473 	u32 tx_set_ic_bit = 0;
2474 
2475 	/* Avoids TX time-out as we are sharing with slow path */
2476 	txq_trans_cond_update(nq);
2477 
2478 	budget = min(budget, stmmac_tx_avail(priv, queue));
2479 
2480 	while (budget-- > 0) {
2481 		struct stmmac_metadata_request meta_req;
2482 		struct xsk_tx_metadata *meta = NULL;
2483 		dma_addr_t dma_addr;
2484 		bool set_ic;
2485 
		/* We share the ring with the slow path, so stop XSK TX desc
		 * submission when the available TX ring space drops below the
		 * threshold.
		 */
2489 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2490 		    !netif_carrier_ok(priv->dev)) {
2491 			work_done = false;
2492 			break;
2493 		}
2494 
2495 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2496 			break;
2497 
2498 		if (priv->est && priv->est->enable &&
2499 		    priv->est->max_sdu[queue] &&
2500 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2501 			priv->xstats.max_sdu_txq_drop[queue]++;
2502 			continue;
2503 		}
2504 
2505 		if (likely(priv->extend_desc))
2506 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2507 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2508 			tx_desc = &tx_q->dma_entx[entry].basic;
2509 		else
2510 			tx_desc = tx_q->dma_tx + entry;
2511 
2512 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2513 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2514 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2515 
2516 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2517 
		/* To return the XDP buffer to the XSK pool, we simply call
		 * xsk_tx_completed(), so we don't need to fill up
		 * 'buf' and 'xdpf'.
		 */
2522 		tx_q->tx_skbuff_dma[entry].buf = 0;
2523 		tx_q->xdpf[entry] = NULL;
2524 
2525 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2526 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2527 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2528 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2529 
2530 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2531 
2532 		tx_q->tx_count_frames++;
2533 
2534 		if (!priv->tx_coal_frames[queue])
2535 			set_ic = false;
2536 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2537 			set_ic = true;
2538 		else
2539 			set_ic = false;
2540 
2541 		meta_req.priv = priv;
2542 		meta_req.tx_desc = tx_desc;
2543 		meta_req.set_ic = &set_ic;
2544 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2545 					&meta_req);
2546 		if (set_ic) {
2547 			tx_q->tx_count_frames = 0;
2548 			stmmac_set_tx_ic(priv, tx_desc);
2549 			tx_set_ic_bit++;
2550 		}
2551 
2552 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2553 				       true, priv->mode, true, true,
2554 				       xdp_desc.len);
2555 
2556 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2557 
2558 		xsk_tx_metadata_to_compl(meta,
2559 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2560 
2561 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2562 		entry = tx_q->cur_tx;
2563 	}
2564 	u64_stats_update_begin(&txq_stats->napi_syncp);
2565 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2566 	u64_stats_update_end(&txq_stats->napi_syncp);
2567 
2568 	if (tx_desc) {
2569 		stmmac_flush_tx_descriptors(priv, queue);
2570 		xsk_tx_release(pool);
2571 	}
2572 
	/* Return true if both of the following conditions are met:
	 *  a) TX budget is still available
	 *  b) work_done == true, i.e. the XSK TX desc peek found no more
	 *     pending XSK TX frames for transmission
	 */
2578 	return !!budget && work_done;
2579 }
2580 
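/* Bump the TX DMA threshold by 64 (up to 256) when the hardware reports a
 * threshold-related transmit error, to reduce the chance of TX underflow.
 */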
2581 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2582 {
2583 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2584 		tc += 64;
2585 
2586 		if (priv->plat->force_thresh_dma_mode)
2587 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2588 		else
2589 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2590 						      chan);
2591 
2592 		priv->xstats.threshold = tc;
2593 	}
2594 }
2595 
2596 /**
2597  * stmmac_tx_clean - to manage the transmission completion
2598  * @priv: driver private structure
2599  * @budget: napi budget limiting this functions packet handling
2600  * @queue: TX queue index
2601  * @pending_packets: signal to arm the TX coal timer
2602  * Description: it reclaims the transmit resources after transmission completes.
 * If some packets still need to be handled, due to TX coalescing, set
 * pending_packets to true to make NAPI arm the TX coal timer.
2605  */
2606 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2607 			   bool *pending_packets)
2608 {
2609 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2610 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2611 	unsigned int bytes_compl = 0, pkts_compl = 0;
2612 	unsigned int entry, xmits = 0, count = 0;
2613 	u32 tx_packets = 0, tx_errors = 0;
2614 
2615 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2616 
2617 	tx_q->xsk_frames_done = 0;
2618 
2619 	entry = tx_q->dirty_tx;
2620 
2621 	/* Try to clean all TX complete frame in 1 shot */
2622 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2623 		struct xdp_frame *xdpf;
2624 		struct sk_buff *skb;
2625 		struct dma_desc *p;
2626 		int status;
2627 
2628 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2629 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2630 			xdpf = tx_q->xdpf[entry];
2631 			skb = NULL;
2632 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2633 			xdpf = NULL;
2634 			skb = tx_q->tx_skbuff[entry];
2635 		} else {
2636 			xdpf = NULL;
2637 			skb = NULL;
2638 		}
2639 
2640 		if (priv->extend_desc)
2641 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2642 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2643 			p = &tx_q->dma_entx[entry].basic;
2644 		else
2645 			p = tx_q->dma_tx + entry;
2646 
2647 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2648 		/* Check if the descriptor is owned by the DMA */
2649 		if (unlikely(status & tx_dma_own))
2650 			break;
2651 
2652 		count++;
2653 
2654 		/* Make sure descriptor fields are read after reading
2655 		 * the own bit.
2656 		 */
2657 		dma_rmb();
2658 
2659 		/* Just consider the last segment and ...*/
2660 		if (likely(!(status & tx_not_ls))) {
2661 			/* ... verify the status error condition */
2662 			if (unlikely(status & tx_err)) {
2663 				tx_errors++;
2664 				if (unlikely(status & tx_err_bump_tc))
2665 					stmmac_bump_dma_threshold(priv, queue);
2666 			} else {
2667 				tx_packets++;
2668 			}
2669 			if (skb) {
2670 				stmmac_get_tx_hwtstamp(priv, p, skb);
2671 			} else if (tx_q->xsk_pool &&
2672 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2673 				struct stmmac_xsk_tx_complete tx_compl = {
2674 					.priv = priv,
2675 					.desc = p,
2676 				};
2677 
2678 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2679 							 &stmmac_xsk_tx_metadata_ops,
2680 							 &tx_compl);
2681 			}
2682 		}
2683 
2684 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2685 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2686 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2687 				dma_unmap_page(priv->device,
2688 					       tx_q->tx_skbuff_dma[entry].buf,
2689 					       tx_q->tx_skbuff_dma[entry].len,
2690 					       DMA_TO_DEVICE);
2691 			else
2692 				dma_unmap_single(priv->device,
2693 						 tx_q->tx_skbuff_dma[entry].buf,
2694 						 tx_q->tx_skbuff_dma[entry].len,
2695 						 DMA_TO_DEVICE);
2696 			tx_q->tx_skbuff_dma[entry].buf = 0;
2697 			tx_q->tx_skbuff_dma[entry].len = 0;
2698 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2699 		}
2700 
2701 		stmmac_clean_desc3(priv, tx_q, p);
2702 
2703 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2704 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2705 
2706 		if (xdpf &&
2707 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2708 			xdp_return_frame_rx_napi(xdpf);
2709 			tx_q->xdpf[entry] = NULL;
2710 		}
2711 
2712 		if (xdpf &&
2713 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2714 			xdp_return_frame(xdpf);
2715 			tx_q->xdpf[entry] = NULL;
2716 		}
2717 
2718 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2719 			tx_q->xsk_frames_done++;
2720 
2721 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2722 			if (likely(skb)) {
2723 				pkts_compl++;
2724 				bytes_compl += skb->len;
2725 				dev_consume_skb_any(skb);
2726 				tx_q->tx_skbuff[entry] = NULL;
2727 			}
2728 		}
2729 
2730 		stmmac_release_tx_desc(priv, p, priv->mode);
2731 
2732 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2733 	}
2734 	tx_q->dirty_tx = entry;
2735 
2736 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2737 				  pkts_compl, bytes_compl);
2738 
2739 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2740 								queue))) &&
2741 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2742 
2743 		netif_dbg(priv, tx_done, priv->dev,
2744 			  "%s: restart transmit\n", __func__);
2745 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2746 	}
2747 
2748 	if (tx_q->xsk_pool) {
2749 		bool work_done;
2750 
2751 		if (tx_q->xsk_frames_done)
2752 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2753 
2754 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2755 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2756 
		/* For XSK TX, we try to send as many frames as possible.
		 * If the XSK work is done (XSK TX desc empty and budget still
		 * available), return "budget - 1" to re-enable the TX IRQ.
2760 		 * Else, return "budget" to make NAPI continue polling.
2761 		 */
2762 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2763 					       STMMAC_XSK_TX_BUDGET_MAX);
2764 		if (work_done)
2765 			xmits = budget - 1;
2766 		else
2767 			xmits = budget;
2768 	}
2769 
2770 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2771 	    priv->eee_sw_timer_en) {
2772 		if (stmmac_enable_eee_mode(priv))
2773 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2774 	}
2775 
2776 	/* We still have pending packets, let's call for a new scheduling */
2777 	if (tx_q->dirty_tx != tx_q->cur_tx)
2778 		*pending_packets = true;
2779 
2780 	u64_stats_update_begin(&txq_stats->napi_syncp);
2781 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2782 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2783 	u64_stats_inc(&txq_stats->napi.tx_clean);
2784 	u64_stats_update_end(&txq_stats->napi_syncp);
2785 
2786 	priv->xstats.tx_errors += tx_errors;
2787 
2788 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2789 
2790 	/* Combine decisions from TX clean and XSK TX */
2791 	return max(count, xmits);
2792 }
2793 
2794 /**
2795  * stmmac_tx_err - to manage the tx error
2796  * @priv: driver private structure
2797  * @chan: channel index
2798  * Description: it cleans the descriptors and restarts the transmission
2799  * in case of transmission errors.
2800  */
2801 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2802 {
2803 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2804 
2805 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2806 
2807 	stmmac_stop_tx_dma(priv, chan);
2808 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2809 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2810 	stmmac_reset_tx_queue(priv, chan);
2811 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2812 			    tx_q->dma_tx_phy, chan);
2813 	stmmac_start_tx_dma(priv, chan);
2814 
2815 	priv->xstats.tx_errors++;
2816 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2817 }
2818 
2819 /**
2820  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2821  *  @priv: driver private structure
2822  *  @txmode: TX operating mode
2823  *  @rxmode: RX operating mode
2824  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
2826  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2827  *  mode.
2828  */
2829 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2830 					  u32 rxmode, u32 chan)
2831 {
2832 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2833 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2834 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2835 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2836 	int rxfifosz = priv->plat->rx_fifo_size;
2837 	int txfifosz = priv->plat->tx_fifo_size;
2838 
2839 	if (rxfifosz == 0)
2840 		rxfifosz = priv->dma_cap.rx_fifo_size;
2841 	if (txfifosz == 0)
2842 		txfifosz = priv->dma_cap.tx_fifo_size;
2843 
2844 	/* Adjust for real per queue fifo size */
2845 	rxfifosz /= rx_channels_count;
2846 	txfifosz /= tx_channels_count;
2847 
2848 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2849 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2850 }
2851 
2852 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2853 {
2854 	int ret;
2855 
2856 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2857 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2858 	if (ret && (ret != -EINVAL)) {
2859 		stmmac_global_err(priv);
2860 		return true;
2861 	}
2862 
2863 	return false;
2864 }
2865 
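/* Read the DMA interrupt status of @chan and, when RX and/or TX work is
 * pending, disable the corresponding DMA interrupts and schedule the matching
 * NAPI instance (the combined rxtx NAPI is used in AF_XDP zero-copy mode).
 */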
2866 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2867 {
2868 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2869 						 &priv->xstats, chan, dir);
2870 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2871 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2872 	struct stmmac_channel *ch = &priv->channel[chan];
2873 	struct napi_struct *rx_napi;
2874 	struct napi_struct *tx_napi;
2875 	unsigned long flags;
2876 
2877 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2878 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2879 
2880 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2881 		if (napi_schedule_prep(rx_napi)) {
2882 			spin_lock_irqsave(&ch->lock, flags);
2883 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2884 			spin_unlock_irqrestore(&ch->lock, flags);
2885 			__napi_schedule(rx_napi);
2886 		}
2887 	}
2888 
2889 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2890 		if (napi_schedule_prep(tx_napi)) {
2891 			spin_lock_irqsave(&ch->lock, flags);
2892 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2893 			spin_unlock_irqrestore(&ch->lock, flags);
2894 			__napi_schedule(tx_napi);
2895 		}
2896 	}
2897 
2898 	return status;
2899 }
2900 
2901 /**
2902  * stmmac_dma_interrupt - DMA ISR
2903  * @priv: driver private structure
2904  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method when some
 * work can be done.
2907  */
2908 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2909 {
2910 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2911 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2912 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2913 				tx_channel_count : rx_channel_count;
2914 	u32 chan;
2915 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2916 
2917 	/* Make sure we never check beyond our status buffer. */
2918 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2919 		channels_to_check = ARRAY_SIZE(status);
2920 
2921 	for (chan = 0; chan < channels_to_check; chan++)
2922 		status[chan] = stmmac_napi_check(priv, chan,
2923 						 DMA_DIR_RXTX);
2924 
2925 	for (chan = 0; chan < tx_channel_count; chan++) {
2926 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2927 			/* Try to bump up the dma threshold on this failure */
2928 			stmmac_bump_dma_threshold(priv, chan);
2929 		} else if (unlikely(status[chan] == tx_hard_error)) {
2930 			stmmac_tx_err(priv, chan);
2931 		}
2932 	}
2933 }
2934 
2935 /**
2936  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2937  * @priv: driver private structure
 * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2939  */
2940 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2941 {
2942 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2943 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2944 
2945 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2946 
2947 	if (priv->dma_cap.rmon) {
2948 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2949 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2950 	} else
2951 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2952 }
2953 
2954 /**
2955  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2956  * @priv: driver private structure
2957  * Description:
 *  new GMAC chip generations have a dedicated register to indicate the
 *  presence of the optional features/functions.
 *  This can also be used to override the value passed through the
 *  platform, which is necessary for old MAC10/100 and GMAC chips.
2962  */
2963 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2964 {
2965 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2966 }
2967 
2968 /**
2969  * stmmac_check_ether_addr - check if the MAC addr is valid
2970  * @priv: driver private structure
2971  * Description:
 * it verifies that the MAC address is valid; if it is not, it
 * generates a random MAC address
2974  */
2975 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2976 {
2977 	u8 addr[ETH_ALEN];
2978 
2979 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2980 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2981 		if (is_valid_ether_addr(addr))
2982 			eth_hw_addr_set(priv->dev, addr);
2983 		else
2984 			eth_hw_addr_random(priv->dev);
2985 		dev_info(priv->device, "device MAC address %pM\n",
2986 			 priv->dev->dev_addr);
2987 	}
2988 }
2989 
2990 /**
2991  * stmmac_init_dma_engine - DMA init.
2992  * @priv: driver private structure
2993  * Description:
 * It inits the DMA by invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is kept for the MAC or GMAC.
2997  */
2998 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2999 {
3000 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3001 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3002 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3003 	struct stmmac_rx_queue *rx_q;
3004 	struct stmmac_tx_queue *tx_q;
3005 	u32 chan = 0;
3006 	int atds = 0;
3007 	int ret = 0;
3008 
3009 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3010 		dev_err(priv->device, "Invalid DMA configuration\n");
3011 		return -EINVAL;
3012 	}
3013 
3014 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3015 		atds = 1;
3016 
3017 	ret = stmmac_reset(priv, priv->ioaddr);
3018 	if (ret) {
3019 		dev_err(priv->device, "Failed to reset the dma\n");
3020 		return ret;
3021 	}
3022 
3023 	/* DMA Configuration */
3024 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
3025 
3026 	if (priv->plat->axi)
3027 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3028 
3029 	/* DMA CSR Channel configuration */
3030 	for (chan = 0; chan < dma_csr_ch; chan++) {
3031 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3032 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3033 	}
3034 
3035 	/* DMA RX Channel Configuration */
3036 	for (chan = 0; chan < rx_channels_count; chan++) {
3037 		rx_q = &priv->dma_conf.rx_queue[chan];
3038 
3039 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3040 				    rx_q->dma_rx_phy, chan);
3041 
3042 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3043 				     (rx_q->buf_alloc_num *
3044 				      sizeof(struct dma_desc));
3045 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3046 				       rx_q->rx_tail_addr, chan);
3047 	}
3048 
3049 	/* DMA TX Channel Configuration */
3050 	for (chan = 0; chan < tx_channels_count; chan++) {
3051 		tx_q = &priv->dma_conf.tx_queue[chan];
3052 
3053 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3054 				    tx_q->dma_tx_phy, chan);
3055 
3056 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3057 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3058 				       tx_q->tx_tail_addr, chan);
3059 	}
3060 
3061 	return ret;
3062 }
3063 
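/* (Re)arm the TX coalescing hrtimer of @queue, unless its NAPI instance is
 * already scheduled, in which case any pending timer is cancelled instead.
 */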
3064 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3065 {
3066 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3067 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3068 	struct stmmac_channel *ch;
3069 	struct napi_struct *napi;
3070 
3071 	if (!tx_coal_timer)
3072 		return;
3073 
3074 	ch = &priv->channel[tx_q->queue_index];
3075 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3076 
3077 	/* Arm timer only if napi is not already scheduled.
3078 	 * Try to cancel any timer if napi is scheduled, timer will be armed
	 * Try to cancel any timer if napi is scheduled; the timer will be armed
	 * again in the next scheduled napi.
3081 	if (unlikely(!napi_is_scheduled(napi)))
3082 		hrtimer_start(&tx_q->txtimer,
3083 			      STMMAC_COAL_TIMER(tx_coal_timer),
3084 			      HRTIMER_MODE_REL);
3085 	else
3086 		hrtimer_try_to_cancel(&tx_q->txtimer);
3087 }
3088 
3089 /**
3090  * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: pointer to the hrtimer embedded in the TX queue
 * Description:
 * This is the timer handler that schedules the TX NAPI, which in turn runs
 * the TX clean (stmmac_tx_clean).
3094  */
3095 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3096 {
3097 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3098 	struct stmmac_priv *priv = tx_q->priv_data;
3099 	struct stmmac_channel *ch;
3100 	struct napi_struct *napi;
3101 
3102 	ch = &priv->channel[tx_q->queue_index];
3103 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3104 
3105 	if (likely(napi_schedule_prep(napi))) {
3106 		unsigned long flags;
3107 
3108 		spin_lock_irqsave(&ch->lock, flags);
3109 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3110 		spin_unlock_irqrestore(&ch->lock, flags);
3111 		__napi_schedule(napi);
3112 	}
3113 
3114 	return HRTIMER_NORESTART;
3115 }
3116 
3117 /**
3118  * stmmac_init_coalesce - init mitigation options.
3119  * @priv: driver private structure
3120  * Description:
 * This inits the coalesce parameters, i.e. the timer rate,
 * the timer handler and the default threshold used for enabling the
 * interrupt-on-completion bit.
3124  */
3125 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3126 {
3127 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3128 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3129 	u32 chan;
3130 
3131 	for (chan = 0; chan < tx_channel_count; chan++) {
3132 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3133 
3134 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3135 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3136 
3137 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3138 		tx_q->txtimer.function = stmmac_tx_timer;
3139 	}
3140 
3141 	for (chan = 0; chan < rx_channel_count; chan++)
3142 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3143 }
3144 
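/* Program the TX and RX descriptor ring lengths into the DMA channels */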
3145 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3146 {
3147 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3148 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3149 	u32 chan;
3150 
3151 	/* set TX ring length */
3152 	for (chan = 0; chan < tx_channels_count; chan++)
3153 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3154 				       (priv->dma_conf.dma_tx_size - 1), chan);
3155 
3156 	/* set RX ring length */
3157 	for (chan = 0; chan < rx_channels_count; chan++)
3158 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3159 				       (priv->dma_conf.dma_rx_size - 1), chan);
3160 }
3161 
3162 /**
3163  *  stmmac_set_tx_queue_weight - Set TX queue weight
3164  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
3166  */
3167 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3168 {
3169 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3170 	u32 weight;
3171 	u32 queue;
3172 
3173 	for (queue = 0; queue < tx_queues_count; queue++) {
3174 		weight = priv->plat->tx_queues_cfg[queue].weight;
3175 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3176 	}
3177 }
3178 
3179 /**
3180  *  stmmac_configure_cbs - Configure CBS in TX queue
3181  *  @priv: driver private structure
3182  *  Description: It is used for configuring CBS in AVB TX queues
3183  */
3184 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3185 {
3186 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3187 	u32 mode_to_use;
3188 	u32 queue;
3189 
3190 	/* queue 0 is reserved for legacy traffic */
3191 	for (queue = 1; queue < tx_queues_count; queue++) {
3192 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3193 		if (mode_to_use == MTL_QUEUE_DCB)
3194 			continue;
3195 
3196 		stmmac_config_cbs(priv, priv->hw,
3197 				priv->plat->tx_queues_cfg[queue].send_slope,
3198 				priv->plat->tx_queues_cfg[queue].idle_slope,
3199 				priv->plat->tx_queues_cfg[queue].high_credit,
3200 				priv->plat->tx_queues_cfg[queue].low_credit,
3201 				queue);
3202 	}
3203 }
3204 
3205 /**
3206  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3207  *  @priv: driver private structure
3208  *  Description: It is used for mapping RX queues to RX dma channels
3209  */
3210 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3211 {
3212 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3213 	u32 queue;
3214 	u32 chan;
3215 
3216 	for (queue = 0; queue < rx_queues_count; queue++) {
3217 		chan = priv->plat->rx_queues_cfg[queue].chan;
3218 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3219 	}
3220 }
3221 
3222 /**
3223  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3224  *  @priv: driver private structure
3225  *  Description: It is used for configuring the RX Queue Priority
3226  */
3227 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3228 {
3229 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3230 	u32 queue;
3231 	u32 prio;
3232 
3233 	for (queue = 0; queue < rx_queues_count; queue++) {
3234 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3235 			continue;
3236 
3237 		prio = priv->plat->rx_queues_cfg[queue].prio;
3238 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3239 	}
3240 }
3241 
3242 /**
3243  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3244  *  @priv: driver private structure
3245  *  Description: It is used for configuring the TX Queue Priority
3246  */
3247 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3248 {
3249 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3250 	u32 queue;
3251 	u32 prio;
3252 
3253 	for (queue = 0; queue < tx_queues_count; queue++) {
3254 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3255 			continue;
3256 
3257 		prio = priv->plat->tx_queues_cfg[queue].prio;
3258 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3259 	}
3260 }
3261 
3262 /**
3263  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3264  *  @priv: driver private structure
3265  *  Description: It is used for configuring the RX queue routing
3266  */
3267 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3268 {
3269 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3270 	u32 queue;
3271 	u8 packet;
3272 
3273 	for (queue = 0; queue < rx_queues_count; queue++) {
3274 		/* no specific packet type routing specified for the queue */
3275 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3276 			continue;
3277 
3278 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3279 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3280 	}
3281 }
3282 
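/* Enable RSS when the hardware and the platform support it and the netdev
 * has NETIF_F_RXHASH set, then program the RSS configuration accordingly.
 */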
3283 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3284 {
3285 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3286 		priv->rss.enable = false;
3287 		return;
3288 	}
3289 
3290 	if (priv->dev->features & NETIF_F_RXHASH)
3291 		priv->rss.enable = true;
3292 	else
3293 		priv->rss.enable = false;
3294 
3295 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3296 			     priv->plat->rx_queues_to_use);
3297 }
3298 
3299 /**
3300  *  stmmac_mtl_configuration - Configure MTL
3301  *  @priv: driver private structure
 *  Description: It is used for configuring the MTL
3303  */
3304 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3305 {
3306 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3307 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3308 
3309 	if (tx_queues_count > 1)
3310 		stmmac_set_tx_queue_weight(priv);
3311 
3312 	/* Configure MTL RX algorithms */
3313 	if (rx_queues_count > 1)
3314 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3315 				priv->plat->rx_sched_algorithm);
3316 
3317 	/* Configure MTL TX algorithms */
3318 	if (tx_queues_count > 1)
3319 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3320 				priv->plat->tx_sched_algorithm);
3321 
3322 	/* Configure CBS in AVB TX queues */
3323 	if (tx_queues_count > 1)
3324 		stmmac_configure_cbs(priv);
3325 
3326 	/* Map RX MTL to DMA channels */
3327 	stmmac_rx_queue_dma_chan_map(priv);
3328 
3329 	/* Enable MAC RX Queues */
3330 	stmmac_mac_enable_rx_queues(priv);
3331 
3332 	/* Set RX priorities */
3333 	if (rx_queues_count > 1)
3334 		stmmac_mac_config_rx_queues_prio(priv);
3335 
3336 	/* Set TX priorities */
3337 	if (tx_queues_count > 1)
3338 		stmmac_mac_config_tx_queues_prio(priv);
3339 
3340 	/* Set RX routing */
3341 	if (rx_queues_count > 1)
3342 		stmmac_mac_config_rx_queues_routing(priv);
3343 
3344 	/* Receive Side Scaling */
3345 	if (rx_queues_count > 1)
3346 		stmmac_mac_config_rss(priv);
3347 }
3348 
3349 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3350 {
3351 	if (priv->dma_cap.asp) {
3352 		netdev_info(priv->dev, "Enabling Safety Features\n");
3353 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3354 					  priv->plat->safety_feat_cfg);
3355 	} else {
3356 		netdev_info(priv->dev, "No Safety Features support found\n");
3357 	}
3358 }
3359 
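/* Create the single-threaded workqueue used by the FPE (Frame Preemption)
 * handshake task.
 */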
3360 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3361 {
3362 	char *name;
3363 
3364 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3365 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3366 
3367 	name = priv->wq_name;
3368 	sprintf(name, "%s-fpe", priv->dev->name);
3369 
3370 	priv->fpe_wq = create_singlethread_workqueue(name);
3371 	if (!priv->fpe_wq) {
3372 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3373 
3374 		return -ENOMEM;
3375 	}
	netdev_info(priv->dev, "FPE workqueue started\n");
3377 
3378 	return 0;
3379 }
3380 
3381 /**
3382  * stmmac_hw_setup - setup mac in a usable state.
3383  *  @dev : pointer to the device structure.
3384  *  @ptp_register: register PTP if set
3385  *  Description:
 *  this is the main function to set up the HW in a usable state: the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  checksum features, timers) and the DMA is made ready to start receiving
 *  and transmitting.
3390  *  Return value:
3391  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3392  *  file on failure.
3393  */
3394 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3395 {
3396 	struct stmmac_priv *priv = netdev_priv(dev);
3397 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3398 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3399 	bool sph_en;
3400 	u32 chan;
3401 	int ret;
3402 
3403 	/* Make sure RX clock is enabled */
3404 	if (priv->hw->phylink_pcs)
3405 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3406 
3407 	/* DMA initialization and SW reset */
3408 	ret = stmmac_init_dma_engine(priv);
3409 	if (ret < 0) {
3410 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3411 			   __func__);
3412 		return ret;
3413 	}
3414 
3415 	/* Copy the MAC addr into the HW  */
3416 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3417 
3418 	/* PS and related bits will be programmed according to the speed */
3419 	if (priv->hw->pcs) {
3420 		int speed = priv->plat->mac_port_sel_speed;
3421 
3422 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3423 		    (speed == SPEED_1000)) {
3424 			priv->hw->ps = speed;
3425 		} else {
3426 			dev_warn(priv->device, "invalid port speed\n");
3427 			priv->hw->ps = 0;
3428 		}
3429 	}
3430 
3431 	/* Initialize the MAC Core */
3432 	stmmac_core_init(priv, priv->hw, dev);
3433 
3434 	/* Initialize MTL*/
3435 	stmmac_mtl_configuration(priv);
3436 
3437 	/* Initialize Safety Features */
3438 	stmmac_safety_feat_configuration(priv);
3439 
3440 	ret = stmmac_rx_ipc(priv, priv->hw);
3441 	if (!ret) {
3442 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3443 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3444 		priv->hw->rx_csum = 0;
3445 	}
3446 
3447 	/* Enable the MAC Rx/Tx */
3448 	stmmac_mac_set(priv, priv->ioaddr, true);
3449 
3450 	/* Set the HW DMA mode and the COE */
3451 	stmmac_dma_operation_mode(priv);
3452 
3453 	stmmac_mmc_setup(priv);
3454 
3455 	if (ptp_register) {
3456 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3457 		if (ret < 0)
3458 			netdev_warn(priv->dev,
3459 				    "failed to enable PTP reference clock: %pe\n",
3460 				    ERR_PTR(ret));
3461 	}
3462 
3463 	ret = stmmac_init_ptp(priv);
3464 	if (ret == -EOPNOTSUPP)
3465 		netdev_info(priv->dev, "PTP not supported by HW\n");
3466 	else if (ret)
3467 		netdev_warn(priv->dev, "PTP init failed\n");
3468 	else if (ptp_register)
3469 		stmmac_ptp_register(priv);
3470 
3471 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3472 
3473 	/* Convert the timer from msec to usec */
3474 	if (!priv->tx_lpi_timer)
3475 		priv->tx_lpi_timer = eee_timer * 1000;
3476 
3477 	if (priv->use_riwt) {
3478 		u32 queue;
3479 
3480 		for (queue = 0; queue < rx_cnt; queue++) {
3481 			if (!priv->rx_riwt[queue])
3482 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3483 
3484 			stmmac_rx_watchdog(priv, priv->ioaddr,
3485 					   priv->rx_riwt[queue], queue);
3486 		}
3487 	}
3488 
3489 	if (priv->hw->pcs)
3490 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3491 
3492 	/* set TX and RX rings length */
3493 	stmmac_set_rings_length(priv);
3494 
3495 	/* Enable TSO */
3496 	if (priv->tso) {
3497 		for (chan = 0; chan < tx_cnt; chan++) {
3498 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3499 
3500 			/* TSO and TBS cannot co-exist */
3501 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3502 				continue;
3503 
3504 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3505 		}
3506 	}
3507 
3508 	/* Enable Split Header */
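	/* Note: SPH is only enabled when RX checksum offload is currently
	 * active and the platform supports split header; the same setting is
	 * applied to every RX channel.
	 */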
3509 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3510 	for (chan = 0; chan < rx_cnt; chan++)
3511 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3512 
3513 
3514 	/* VLAN Tag Insertion */
3515 	if (priv->dma_cap.vlins)
3516 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3517 
3518 	/* Enable Time Based Scheduling (TBS) on channels where it is available */
3519 	for (chan = 0; chan < tx_cnt; chan++) {
3520 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3521 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3522 
3523 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3524 	}
3525 
3526 	/* Configure real RX and TX queues */
3527 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3528 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3529 
3530 	/* Start the ball rolling... */
3531 	stmmac_start_all_dma(priv);
3532 
3533 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3534 
3535 	if (priv->dma_cap.fpesel) {
3536 		stmmac_fpe_start_wq(priv);
3537 
3538 		if (priv->plat->fpe_cfg->enable)
3539 			stmmac_fpe_handshake(priv, true);
3540 	}
3541 
3542 	return 0;
3543 }
3544 
3545 static void stmmac_hw_teardown(struct net_device *dev)
3546 {
3547 	struct stmmac_priv *priv = netdev_priv(dev);
3548 
3549 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3550 }
3551 
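/* stmmac_free_irq - release the IRQ lines requested so far.
 * The switch intentionally falls through from the deepest request stage down
 * to the shallowest, so a partial request failure frees exactly the lines
 * that were successfully requested.
 */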
3552 static void stmmac_free_irq(struct net_device *dev,
3553 			    enum request_irq_err irq_err, int irq_idx)
3554 {
3555 	struct stmmac_priv *priv = netdev_priv(dev);
3556 	int j;
3557 
3558 	switch (irq_err) {
3559 	case REQ_IRQ_ERR_ALL:
3560 		irq_idx = priv->plat->tx_queues_to_use;
3561 		fallthrough;
3562 	case REQ_IRQ_ERR_TX:
3563 		for (j = irq_idx - 1; j >= 0; j--) {
3564 			if (priv->tx_irq[j] > 0) {
3565 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3566 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3567 			}
3568 		}
3569 		irq_idx = priv->plat->rx_queues_to_use;
3570 		fallthrough;
3571 	case REQ_IRQ_ERR_RX:
3572 		for (j = irq_idx - 1; j >= 0; j--) {
3573 			if (priv->rx_irq[j] > 0) {
3574 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3575 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3576 			}
3577 		}
3578 
3579 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3580 			free_irq(priv->sfty_ue_irq, dev);
3581 		fallthrough;
3582 	case REQ_IRQ_ERR_SFTY_UE:
3583 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3584 			free_irq(priv->sfty_ce_irq, dev);
3585 		fallthrough;
3586 	case REQ_IRQ_ERR_SFTY_CE:
3587 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3588 			free_irq(priv->lpi_irq, dev);
3589 		fallthrough;
3590 	case REQ_IRQ_ERR_LPI:
3591 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3592 			free_irq(priv->wol_irq, dev);
3593 		fallthrough;
3594 	case REQ_IRQ_ERR_SFTY:
3595 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3596 			free_irq(priv->sfty_irq, dev);
3597 		fallthrough;
3598 	case REQ_IRQ_ERR_WOL:
3599 		free_irq(dev->irq, dev);
3600 		fallthrough;
3601 	case REQ_IRQ_ERR_MAC:
3602 	case REQ_IRQ_ERR_NO:
3603 		/* If the MAC IRQ request failed, there is no more IRQ to free */
3604 		break;
3605 	}
3606 }
3607 
3608 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3609 {
3610 	struct stmmac_priv *priv = netdev_priv(dev);
3611 	enum request_irq_err irq_err;
3612 	cpumask_t cpu_mask;
3613 	int irq_idx = 0;
3614 	char *int_name;
3615 	int ret;
3616 	int i;
3617 
3618 	/* For common interrupt */
3619 	int_name = priv->int_name_mac;
3620 	sprintf(int_name, "%s:%s", dev->name, "mac");
3621 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3622 			  0, int_name, dev);
3623 	if (unlikely(ret < 0)) {
3624 		netdev_err(priv->dev,
3625 			   "%s: alloc mac MSI %d (error: %d)\n",
3626 			   __func__, dev->irq, ret);
3627 		irq_err = REQ_IRQ_ERR_MAC;
3628 		goto irq_error;
3629 	}
3630 
3631 	/* Request the Wake IRQ in case another line
3632 	 * is used for WoL
3633 	 */
3634 	priv->wol_irq_disabled = true;
3635 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3636 		int_name = priv->int_name_wol;
3637 		sprintf(int_name, "%s:%s", dev->name, "wol");
3638 		ret = request_irq(priv->wol_irq,
3639 				  stmmac_mac_interrupt,
3640 				  0, int_name, dev);
3641 		if (unlikely(ret < 0)) {
3642 			netdev_err(priv->dev,
3643 				   "%s: alloc wol MSI %d (error: %d)\n",
3644 				   __func__, priv->wol_irq, ret);
3645 			irq_err = REQ_IRQ_ERR_WOL;
3646 			goto irq_error;
3647 		}
3648 	}
3649 
3650 	/* Request the LPI IRQ in case another line
3651 	 * is used for LPI
3652 	 */
3653 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3654 		int_name = priv->int_name_lpi;
3655 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3656 		ret = request_irq(priv->lpi_irq,
3657 				  stmmac_mac_interrupt,
3658 				  0, int_name, dev);
3659 		if (unlikely(ret < 0)) {
3660 			netdev_err(priv->dev,
3661 				   "%s: alloc lpi MSI %d (error: %d)\n",
3662 				   __func__, priv->lpi_irq, ret);
3663 			irq_err = REQ_IRQ_ERR_LPI;
3664 			goto irq_error;
3665 		}
3666 	}
3667 
3668 	/* Request the common Safety Feature Correctable/Uncorrectable
3669 	 * Error line in case another line is used
3670 	 */
3671 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3672 		int_name = priv->int_name_sfty;
3673 		sprintf(int_name, "%s:%s", dev->name, "safety");
3674 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3675 				  0, int_name, dev);
3676 		if (unlikely(ret < 0)) {
3677 			netdev_err(priv->dev,
3678 				   "%s: alloc sfty MSI %d (error: %d)\n",
3679 				   __func__, priv->sfty_irq, ret);
3680 			irq_err = REQ_IRQ_ERR_SFTY;
3681 			goto irq_error;
3682 		}
3683 	}
3684 
3685 	/* Request the Safety Feature Correctable Error line in
3686 	 * case another line is used
3687 	 */
3688 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3689 		int_name = priv->int_name_sfty_ce;
3690 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3691 		ret = request_irq(priv->sfty_ce_irq,
3692 				  stmmac_safety_interrupt,
3693 				  0, int_name, dev);
3694 		if (unlikely(ret < 0)) {
3695 			netdev_err(priv->dev,
3696 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3697 				   __func__, priv->sfty_ce_irq, ret);
3698 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3699 			goto irq_error;
3700 		}
3701 	}
3702 
3703 	/* Request the Safety Feature Uncorrectable Error line in
3704 	 * case another line is used
3705 	 */
3706 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3707 		int_name = priv->int_name_sfty_ue;
3708 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3709 		ret = request_irq(priv->sfty_ue_irq,
3710 				  stmmac_safety_interrupt,
3711 				  0, int_name, dev);
3712 		if (unlikely(ret < 0)) {
3713 			netdev_err(priv->dev,
3714 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3715 				   __func__, priv->sfty_ue_irq, ret);
3716 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3717 			goto irq_error;
3718 		}
3719 	}
3720 
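	/* The per-queue RX/TX vectors are spread across the online CPUs via
	 * affinity hints: queue i is hinted to CPU (i % num_online_cpus()).
	 */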
3721 	/* Request Rx MSI irq */
3722 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3723 		if (i >= MTL_MAX_RX_QUEUES)
3724 			break;
3725 		if (priv->rx_irq[i] == 0)
3726 			continue;
3727 
3728 		int_name = priv->int_name_rx_irq[i];
3729 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3730 		ret = request_irq(priv->rx_irq[i],
3731 				  stmmac_msi_intr_rx,
3732 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3733 		if (unlikely(ret < 0)) {
3734 			netdev_err(priv->dev,
3735 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3736 				   __func__, i, priv->rx_irq[i], ret);
3737 			irq_err = REQ_IRQ_ERR_RX;
3738 			irq_idx = i;
3739 			goto irq_error;
3740 		}
3741 		cpumask_clear(&cpu_mask);
3742 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3743 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3744 	}
3745 
3746 	/* Request Tx MSI irq */
3747 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3748 		if (i >= MTL_MAX_TX_QUEUES)
3749 			break;
3750 		if (priv->tx_irq[i] == 0)
3751 			continue;
3752 
3753 		int_name = priv->int_name_tx_irq[i];
3754 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3755 		ret = request_irq(priv->tx_irq[i],
3756 				  stmmac_msi_intr_tx,
3757 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3758 		if (unlikely(ret < 0)) {
3759 			netdev_err(priv->dev,
3760 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3761 				   __func__, i, priv->tx_irq[i], ret);
3762 			irq_err = REQ_IRQ_ERR_TX;
3763 			irq_idx = i;
3764 			goto irq_error;
3765 		}
3766 		cpumask_clear(&cpu_mask);
3767 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3768 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3769 	}
3770 
3771 	return 0;
3772 
3773 irq_error:
3774 	stmmac_free_irq(dev, irq_err, irq_idx);
3775 	return ret;
3776 }
3777 
3778 static int stmmac_request_irq_single(struct net_device *dev)
3779 {
3780 	struct stmmac_priv *priv = netdev_priv(dev);
3781 	enum request_irq_err irq_err;
3782 	int ret;
3783 
3784 	ret = request_irq(dev->irq, stmmac_interrupt,
3785 			  IRQF_SHARED, dev->name, dev);
3786 	if (unlikely(ret < 0)) {
3787 		netdev_err(priv->dev,
3788 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3789 			   __func__, dev->irq, ret);
3790 		irq_err = REQ_IRQ_ERR_MAC;
3791 		goto irq_error;
3792 	}
3793 
3794 	/* Request the Wake IRQ in case another line
3795 	 * is used for WoL
3796 	 */
3797 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3798 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3799 				  IRQF_SHARED, dev->name, dev);
3800 		if (unlikely(ret < 0)) {
3801 			netdev_err(priv->dev,
3802 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3803 				   __func__, priv->wol_irq, ret);
3804 			irq_err = REQ_IRQ_ERR_WOL;
3805 			goto irq_error;
3806 		}
3807 	}
3808 
3809 	/* Request the LPI IRQ in case another line is used for LPI */
3810 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3811 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3812 				  IRQF_SHARED, dev->name, dev);
3813 		if (unlikely(ret < 0)) {
3814 			netdev_err(priv->dev,
3815 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3816 				   __func__, priv->lpi_irq, ret);
3817 			irq_err = REQ_IRQ_ERR_LPI;
3818 			goto irq_error;
3819 		}
3820 	}
3821 
3822 	/* Request the common Safety Feature Correctable/Uncorrectable
3823 	 * Error line in case another line is used
3824 	 */
3825 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3826 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3827 				  IRQF_SHARED, dev->name, dev);
3828 		if (unlikely(ret < 0)) {
3829 			netdev_err(priv->dev,
3830 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3831 				   __func__, priv->sfty_irq, ret);
3832 			irq_err = REQ_IRQ_ERR_SFTY;
3833 			goto irq_error;
3834 		}
3835 	}
3836 
3837 	return 0;
3838 
3839 irq_error:
3840 	stmmac_free_irq(dev, irq_err, 0);
3841 	return ret;
3842 }
3843 
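/* stmmac_request_irq - request either the per-queue MSI vectors or the single
 * shared interrupt line, depending on the STMMAC_FLAG_MULTI_MSI_EN platform
 * flag.
 */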
3844 static int stmmac_request_irq(struct net_device *dev)
3845 {
3846 	struct stmmac_priv *priv = netdev_priv(dev);
3847 	int ret;
3848 
3849 	/* Request the IRQ lines */
3850 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3851 		ret = stmmac_request_irq_multi_msi(dev);
3852 	else
3853 		ret = stmmac_request_irq_single(dev);
3854 
3855 	return ret;
3856 }
3857 
3858 /**
3859  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3860  *  @priv: driver private structure
3861  *  @mtu: MTU to setup the dma queue and buf with
3862  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3863  *  Allocate the Tx/Rx DMA queues and initialize them.
3864  *  Return value:
3865  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3866  */
3867 static struct stmmac_dma_conf *
3868 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3869 {
3870 	struct stmmac_dma_conf *dma_conf;
3871 	int chan, bfsize, ret;
3872 
3873 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3874 	if (!dma_conf) {
3875 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3876 			   __func__);
3877 		return ERR_PTR(-ENOMEM);
3878 	}
3879 
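	/* Pick the RX buffer size: try the 16KiB (jumbo) size first and fall
	 * back to a size derived from the MTU when 16KiB buffers are not
	 * needed or not supported.
	 */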
3880 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3881 	if (bfsize < 0)
3882 		bfsize = 0;
3883 
3884 	if (bfsize < BUF_SIZE_16KiB)
3885 		bfsize = stmmac_set_bfsize(mtu, 0);
3886 
3887 	dma_conf->dma_buf_sz = bfsize;
3888 	/* Choose the TX/RX ring sizes from the ones already defined in the
3889 	 * priv struct, if any.
3890 	 */
3891 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3892 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3893 
3894 	if (!dma_conf->dma_tx_size)
3895 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3896 	if (!dma_conf->dma_rx_size)
3897 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3898 
3899 	/* Check for TBS before the TX descriptors are allocated */
3900 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3901 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3902 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3903 
3904 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3905 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3906 	}
3907 
3908 	ret = alloc_dma_desc_resources(priv, dma_conf);
3909 	if (ret < 0) {
3910 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3911 			   __func__);
3912 		goto alloc_error;
3913 	}
3914 
3915 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3916 	if (ret < 0) {
3917 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3918 			   __func__);
3919 		goto init_error;
3920 	}
3921 
3922 	return dma_conf;
3923 
3924 init_error:
3925 	free_dma_desc_resources(priv, dma_conf);
3926 alloc_error:
3927 	kfree(dma_conf);
3928 	return ERR_PTR(ret);
3929 }
3930 
3931 /**
3932  *  __stmmac_open - open entry point of the driver
3933  *  @dev : pointer to the device structure.
3934  *  @dma_conf :  structure to take the dma data
3935  *  Description:
3936  *  This function is the open entry point of the driver.
3937  *  Return value:
3938  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3939  *  file on failure.
3940  */
3941 static int __stmmac_open(struct net_device *dev,
3942 			 struct stmmac_dma_conf *dma_conf)
3943 {
3944 	struct stmmac_priv *priv = netdev_priv(dev);
3945 	int mode = priv->plat->phy_interface;
3946 	u32 chan;
3947 	int ret;
3948 
3949 	ret = pm_runtime_resume_and_get(priv->device);
3950 	if (ret < 0)
3951 		return ret;
3952 
3953 	if ((!priv->hw->xpcs ||
3954 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3955 		ret = stmmac_init_phy(dev);
3956 		if (ret) {
3957 			netdev_err(priv->dev,
3958 				   "%s: Cannot attach to PHY (error: %d)\n",
3959 				   __func__, ret);
3960 			goto init_phy_error;
3961 		}
3962 	}
3963 
3964 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3965 
3966 	buf_sz = dma_conf->dma_buf_sz;
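	/* Carry the per-queue TBS enable state over to the new dma_conf before
	 * it replaces priv->dma_conf below, so a previously enabled TBS queue
	 * stays enabled across the reopen.
	 */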
3967 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3968 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3969 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3970 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3971 
3972 	stmmac_reset_queues_param(priv);
3973 
3974 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3975 	    priv->plat->serdes_powerup) {
3976 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3977 		if (ret < 0) {
3978 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3979 				   __func__);
3980 			goto init_error;
3981 		}
3982 	}
3983 
3984 	ret = stmmac_hw_setup(dev, true);
3985 	if (ret < 0) {
3986 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3987 		goto init_error;
3988 	}
3989 
3990 	stmmac_init_coalesce(priv);
3991 
3992 	phylink_start(priv->phylink);
3993 	/* We may have called phylink_speed_down before */
3994 	phylink_speed_up(priv->phylink);
3995 
3996 	ret = stmmac_request_irq(dev);
3997 	if (ret)
3998 		goto irq_error;
3999 
4000 	stmmac_enable_all_queues(priv);
4001 	netif_tx_start_all_queues(priv->dev);
4002 	stmmac_enable_all_dma_irq(priv);
4003 
4004 	return 0;
4005 
4006 irq_error:
4007 	phylink_stop(priv->phylink);
4008 
4009 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4010 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4011 
4012 	stmmac_hw_teardown(dev);
4013 init_error:
4014 	phylink_disconnect_phy(priv->phylink);
4015 init_phy_error:
4016 	pm_runtime_put(priv->device);
4017 	return ret;
4018 }
4019 
4020 static int stmmac_open(struct net_device *dev)
4021 {
4022 	struct stmmac_priv *priv = netdev_priv(dev);
4023 	struct stmmac_dma_conf *dma_conf;
4024 	int ret;
4025 
4026 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4027 	if (IS_ERR(dma_conf))
4028 		return PTR_ERR(dma_conf);
4029 
4030 	ret = __stmmac_open(dev, dma_conf);
4031 	if (ret)
4032 		free_dma_desc_resources(priv, dma_conf);
4033 
4034 	kfree(dma_conf);
4035 	return ret;
4036 }
4037 
4038 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
4039 {
4040 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
4041 
4042 	if (priv->fpe_wq) {
4043 		destroy_workqueue(priv->fpe_wq);
4044 		priv->fpe_wq = NULL;
4045 	}
4046 
4047 	netdev_info(priv->dev, "FPE workqueue stop");
4048 }
4049 
4050 /**
4051  *  stmmac_release - close entry point of the driver
4052  *  @dev : device pointer.
4053  *  Description:
4054  *  This is the stop entry point of the driver.
4055  */
4056 static int stmmac_release(struct net_device *dev)
4057 {
4058 	struct stmmac_priv *priv = netdev_priv(dev);
4059 	u32 chan;
4060 
4061 	if (device_may_wakeup(priv->device))
4062 		phylink_speed_down(priv->phylink, false);
4063 	/* Stop and disconnect the PHY */
4064 	phylink_stop(priv->phylink);
4065 	phylink_disconnect_phy(priv->phylink);
4066 
4067 	stmmac_disable_all_queues(priv);
4068 
4069 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4070 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4071 
4072 	netif_tx_disable(dev);
4073 
4074 	/* Free the IRQ lines */
4075 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4076 
4077 	if (priv->eee_enabled) {
4078 		priv->tx_path_in_lpi_mode = false;
4079 		del_timer_sync(&priv->eee_ctrl_timer);
4080 	}
4081 
4082 	/* Stop TX/RX DMA and clear the descriptors */
4083 	stmmac_stop_all_dma(priv);
4084 
4085 	/* Release and free the Rx/Tx resources */
4086 	free_dma_desc_resources(priv, &priv->dma_conf);
4087 
4088 	/* Disable the MAC Rx/Tx */
4089 	stmmac_mac_set(priv, priv->ioaddr, false);
4090 
4091 	/* Power down the SerDes if present */
4092 	if (priv->plat->serdes_powerdown)
4093 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4094 
4095 	stmmac_release_ptp(priv);
4096 
4097 	pm_runtime_put(priv->device);
4098 
4099 	if (priv->dma_cap.fpesel)
4100 		stmmac_fpe_stop_wq(priv);
4101 
4102 	return 0;
4103 }
4104 
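/* stmmac_vlan_insert - queue a VLAN tag for HW insertion on transmit.
 * Returns true when a descriptor carrying the tag was prepared (and cur_tx
 * advanced), false when HW VLAN insertion cannot be used for this skb.
 */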
4105 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4106 			       struct stmmac_tx_queue *tx_q)
4107 {
4108 	u16 tag = 0x0, inner_tag = 0x0;
4109 	u32 inner_type = 0x0;
4110 	struct dma_desc *p;
4111 
4112 	if (!priv->dma_cap.vlins)
4113 		return false;
4114 	if (!skb_vlan_tag_present(skb))
4115 		return false;
4116 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4117 		inner_tag = skb_vlan_tag_get(skb);
4118 		inner_type = STMMAC_VLAN_INSERT;
4119 	}
4120 
4121 	tag = skb_vlan_tag_get(skb);
4122 
4123 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4124 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4125 	else
4126 		p = &tx_q->dma_tx[tx_q->cur_tx];
4127 
4128 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4129 		return false;
4130 
4131 	stmmac_set_tx_owner(priv, p);
4132 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4133 	return true;
4134 }
4135 
4136 /**
4137  *  stmmac_tso_allocator - allocate and fill TSO TX descriptors for a buffer
4138  *  @priv: driver private structure
4139  *  @des: buffer start address
4140  *  @total_len: total length to fill in descriptors
4141  *  @last_segment: condition for the last descriptor
4142  *  @queue: TX queue index
4143  *  Description:
4144  *  This function fills the descriptors and requests new descriptors
4145  *  according to the buffer length to fill.
4146  */
4147 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4148 				 int total_len, bool last_segment, u32 queue)
4149 {
4150 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4151 	struct dma_desc *desc;
4152 	u32 buff_size;
4153 	int tmp_len;
4154 
4155 	tmp_len = total_len;
4156 
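	/* Walk the payload in chunks of at most TSO_MAX_BUFF_SIZE bytes, taking
	 * a fresh descriptor for each chunk; only the final chunk of the last
	 * segment is marked as the last descriptor.
	 */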
4157 	while (tmp_len > 0) {
4158 		dma_addr_t curr_addr;
4159 
4160 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4161 						priv->dma_conf.dma_tx_size);
4162 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4163 
4164 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4165 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4166 		else
4167 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4168 
4169 		curr_addr = des + (total_len - tmp_len);
4170 		if (priv->dma_cap.addr64 <= 32)
4171 			desc->des0 = cpu_to_le32(curr_addr);
4172 		else
4173 			stmmac_set_desc_addr(priv, desc, curr_addr);
4174 
4175 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4176 			    TSO_MAX_BUFF_SIZE : tmp_len;
4177 
4178 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4179 				0, 1,
4180 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4181 				0, 0);
4182 
4183 		tmp_len -= TSO_MAX_BUFF_SIZE;
4184 	}
4185 }
4186 
4187 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4188 {
4189 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4190 	int desc_size;
4191 
4192 	if (likely(priv->extend_desc))
4193 		desc_size = sizeof(struct dma_extended_desc);
4194 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4195 		desc_size = sizeof(struct dma_edesc);
4196 	else
4197 		desc_size = sizeof(struct dma_desc);
4198 
4199 	/* The own bit must be the last setting done when preparing the
4200 	 * descriptor, and then a barrier is needed to make sure that
4201 	 * all is coherent before granting ownership to the DMA engine.
4202 	 */
4203 	wmb();
4204 
4205 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4206 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4207 }
4208 
4209 /**
4210  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4211  *  @skb : the socket buffer
4212  *  @dev : device pointer
4213  *  Description: this is the transmit function that is called on TSO frames
4214  *  (support available on GMAC4 and newer chips).
4215  *  The diagram below shows the ring programming for TSO frames:
4216  *
4217  *  First Descriptor
4218  *   --------
4219  *   | DES0 |---> buffer1 = L2/L3/L4 header
4220  *   | DES1 |---> TCP Payload (can continue on next descr...)
4221  *   | DES2 |---> buffer 1 and 2 len
4222  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4223  *   --------
4224  *	|
4225  *     ...
4226  *	|
4227  *   --------
4228  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4229  *   | DES1 | --|
4230  *   | DES2 | --> buffer 1 and 2 len
4231  *   | DES3 |
4232  *   --------
4233  *
4234  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when the MSS changes.
4235  */
4236 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4237 {
4238 	struct dma_desc *desc, *first, *mss_desc = NULL;
4239 	struct stmmac_priv *priv = netdev_priv(dev);
4240 	int tmp_pay_len = 0, first_tx, nfrags;
4241 	unsigned int first_entry, tx_packets;
4242 	struct stmmac_txq_stats *txq_stats;
4243 	struct stmmac_tx_queue *tx_q;
4244 	u32 pay_len, mss, queue;
4245 	u8 proto_hdr_len, hdr;
4246 	dma_addr_t des;
4247 	bool set_ic;
4248 	int i;
4249 
4250 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4251 	 *
4252 	 * Never let the HW insert the VLAN tag, since segments split by the
4253 	 * TSO engine would be sent un-tagged by mistake.
4254 	 */
4255 	if (skb_vlan_tag_present(skb)) {
4256 		skb = __vlan_hwaccel_push_inside(skb);
4257 		if (unlikely(!skb)) {
4258 			priv->xstats.tx_dropped++;
4259 			return NETDEV_TX_OK;
4260 		}
4261 	}
4262 
4263 	nfrags = skb_shinfo(skb)->nr_frags;
4264 	queue = skb_get_queue_mapping(skb);
4265 
4266 	tx_q = &priv->dma_conf.tx_queue[queue];
4267 	txq_stats = &priv->xstats.txq_stats[queue];
4268 	first_tx = tx_q->cur_tx;
4269 
4270 	/* Compute header lengths */
4271 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4272 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4273 		hdr = sizeof(struct udphdr);
4274 	} else {
4275 		proto_hdr_len = skb_tcp_all_headers(skb);
4276 		hdr = tcp_hdrlen(skb);
4277 	}
4278 
4279 	/* Descriptor availability based on the threshold should be safe enough */
4280 	if (unlikely(stmmac_tx_avail(priv, queue) <
4281 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4282 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4283 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4284 								queue));
4285 			/* This is a hard error, log it. */
4286 			netdev_err(priv->dev,
4287 				   "%s: Tx Ring full when queue awake\n",
4288 				   __func__);
4289 		}
4290 		return NETDEV_TX_BUSY;
4291 	}
4292 
4293 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4294 
4295 	mss = skb_shinfo(skb)->gso_size;
4296 
4297 	/* set new MSS value if needed */
4298 	if (mss != tx_q->mss) {
4299 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4300 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4301 		else
4302 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4303 
4304 		stmmac_set_mss(priv, mss_desc, mss);
4305 		tx_q->mss = mss;
4306 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4307 						priv->dma_conf.dma_tx_size);
4308 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4309 	}
4310 
4311 	if (netif_msg_tx_queued(priv)) {
4312 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4313 			__func__, hdr, proto_hdr_len, pay_len, mss);
4314 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4315 			skb->data_len);
4316 	}
4317 
4318 	first_entry = tx_q->cur_tx;
4319 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4320 
4321 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4322 		desc = &tx_q->dma_entx[first_entry].basic;
4323 	else
4324 		desc = &tx_q->dma_tx[first_entry];
4325 	first = desc;
4326 
4327 	/* first descriptor: fill Headers on Buf1 */
4328 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4329 			     DMA_TO_DEVICE);
4330 	if (dma_mapping_error(priv->device, des))
4331 		goto dma_map_err;
4332 
4333 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4334 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4335 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4336 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4337 
4338 	if (priv->dma_cap.addr64 <= 32) {
4339 		first->des0 = cpu_to_le32(des);
4340 
4341 		/* Fill start of payload in buff2 of first descriptor */
4342 		if (pay_len)
4343 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4344 
4345 		/* If needed take extra descriptors to fill the remaining payload */
4346 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4347 	} else {
4348 		stmmac_set_desc_addr(priv, first, des);
4349 		tmp_pay_len = pay_len;
4350 		des += proto_hdr_len;
4351 		pay_len = 0;
4352 	}
4353 
4354 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4355 
4356 	/* Prepare fragments */
4357 	for (i = 0; i < nfrags; i++) {
4358 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4359 
4360 		des = skb_frag_dma_map(priv->device, frag, 0,
4361 				       skb_frag_size(frag),
4362 				       DMA_TO_DEVICE);
4363 		if (dma_mapping_error(priv->device, des))
4364 			goto dma_map_err;
4365 
4366 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4367 				     (i == nfrags - 1), queue);
4368 
4369 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4370 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4371 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4372 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4373 	}
4374 
4375 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4376 
4377 	/* Only the last descriptor gets to point to the skb. */
4378 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4379 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4380 
4381 	/* Manage tx mitigation */
4382 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4383 	tx_q->tx_count_frames += tx_packets;
4384 
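	/* Decide whether this frame needs a completion interrupt: always for
	 * HW-timestamped frames, otherwise only once every tx_coal_frames
	 * packets so that TX cleanup can be coalesced.
	 */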
4385 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4386 		set_ic = true;
4387 	else if (!priv->tx_coal_frames[queue])
4388 		set_ic = false;
4389 	else if (tx_packets > priv->tx_coal_frames[queue])
4390 		set_ic = true;
4391 	else if ((tx_q->tx_count_frames %
4392 		  priv->tx_coal_frames[queue]) < tx_packets)
4393 		set_ic = true;
4394 	else
4395 		set_ic = false;
4396 
4397 	if (set_ic) {
4398 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4399 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4400 		else
4401 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4402 
4403 		tx_q->tx_count_frames = 0;
4404 		stmmac_set_tx_ic(priv, desc);
4405 	}
4406 
4407 	/* We've used all descriptors we need for this skb, however,
4408 	 * advance cur_tx so that it references a fresh descriptor.
4409 	 * ndo_start_xmit will fill this descriptor the next time it's
4410 	 * called and stmmac_tx_clean may clean up to this descriptor.
4411 	 */
4412 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4413 
4414 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4415 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4416 			  __func__);
4417 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4418 	}
4419 
4420 	u64_stats_update_begin(&txq_stats->q_syncp);
4421 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4422 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4423 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4424 	if (set_ic)
4425 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4426 	u64_stats_update_end(&txq_stats->q_syncp);
4427 
4428 	if (priv->sarc_type)
4429 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4430 
4431 	skb_tx_timestamp(skb);
4432 
4433 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4434 		     priv->hwts_tx_en)) {
4435 		/* declare that device is doing timestamping */
4436 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4437 		stmmac_enable_tx_timestamp(priv, first);
4438 	}
4439 
4440 	/* Complete the first descriptor before granting the DMA */
4441 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4442 			proto_hdr_len,
4443 			pay_len,
4444 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4445 			hdr / 4, (skb->len - proto_hdr_len));
4446 
4447 	/* If context desc is used to change MSS */
4448 	if (mss_desc) {
4449 		/* Make sure that the first descriptor has been completely
4450 		 * written, including its own bit. This is because the MSS desc
4451 		 * actually comes before the first descriptor, so we need to
4452 		 * make sure that the MSS desc's own bit is the last thing written.
4453 		 */
4454 		dma_wmb();
4455 		stmmac_set_tx_owner(priv, mss_desc);
4456 	}
4457 
4458 	if (netif_msg_pktdata(priv)) {
4459 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4460 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4461 			tx_q->cur_tx, first, nfrags);
4462 		pr_info(">>> frame to be transmitted: ");
4463 		print_pkt(skb->data, skb_headlen(skb));
4464 	}
4465 
4466 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4467 
4468 	stmmac_flush_tx_descriptors(priv, queue);
4469 	stmmac_tx_timer_arm(priv, queue);
4470 
4471 	return NETDEV_TX_OK;
4472 
4473 dma_map_err:
4474 	dev_err(priv->device, "Tx dma map failed\n");
4475 	dev_kfree_skb(skb);
4476 	priv->xstats.tx_dropped++;
4477 	return NETDEV_TX_OK;
4478 }
4479 
4480 /**
4481  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4482  * @skb: socket buffer to check
4483  *
4484  * Check if a packet has an ethertype that will trigger the IP header checks
4485  * and IP/TCP checksum engine of the stmmac core.
4486  *
4487  * Return: true if the ethertype can trigger the checksum engine, false
4488  * otherwise
4489  */
4490 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4491 {
4492 	int depth = 0;
4493 	__be16 proto;
4494 
4495 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4496 				    &depth);
4497 
4498 	return (depth <= ETH_HLEN) &&
4499 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4500 }
4501 
4502 /**
4503  *  stmmac_xmit - Tx entry point of the driver
4504  *  @skb : the socket buffer
4505  *  @dev : device pointer
4506  *  Description : this is the tx entry point of the driver.
4507  *  It programs the chain or the ring and supports oversized frames
4508  *  and SG feature.
4509  */
4510 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4511 {
4512 	unsigned int first_entry, tx_packets, enh_desc;
4513 	struct stmmac_priv *priv = netdev_priv(dev);
4514 	unsigned int nopaged_len = skb_headlen(skb);
4515 	int i, csum_insertion = 0, is_jumbo = 0;
4516 	u32 queue = skb_get_queue_mapping(skb);
4517 	int nfrags = skb_shinfo(skb)->nr_frags;
4518 	int gso = skb_shinfo(skb)->gso_type;
4519 	struct stmmac_txq_stats *txq_stats;
4520 	struct dma_edesc *tbs_desc = NULL;
4521 	struct dma_desc *desc, *first;
4522 	struct stmmac_tx_queue *tx_q;
4523 	bool has_vlan, set_ic;
4524 	int entry, first_tx;
4525 	dma_addr_t des;
4526 
4527 	tx_q = &priv->dma_conf.tx_queue[queue];
4528 	txq_stats = &priv->xstats.txq_stats[queue];
4529 	first_tx = tx_q->cur_tx;
4530 
4531 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4532 		stmmac_disable_eee_mode(priv);
4533 
4534 	/* Manage oversized TCP frames for GMAC4 device */
4535 	if (skb_is_gso(skb) && priv->tso) {
4536 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4537 			return stmmac_tso_xmit(skb, dev);
4538 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4539 			return stmmac_tso_xmit(skb, dev);
4540 	}
4541 
4542 	if (priv->est && priv->est->enable &&
4543 	    priv->est->max_sdu[queue] &&
4544 	    skb->len > priv->est->max_sdu[queue]) {
4545 		priv->xstats.max_sdu_txq_drop[queue]++;
4546 		goto max_sdu_err;
4547 	}
4548 
4549 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4550 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4551 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4552 								queue));
4553 			/* This is a hard error, log it. */
4554 			netdev_err(priv->dev,
4555 				   "%s: Tx Ring full when queue awake\n",
4556 				   __func__);
4557 		}
4558 		return NETDEV_TX_BUSY;
4559 	}
4560 
4561 	/* Check if VLAN can be inserted by HW */
4562 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4563 
4564 	entry = tx_q->cur_tx;
4565 	first_entry = entry;
4566 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4567 
4568 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4569 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4570 	 * queues. In that case, checksum offloading for those queues that don't
4571 	 * support tx coe needs to fall back to software checksum calculation.
4572 	 *
4573 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4574 	 * also have to be checksummed in software.
4575 	 */
4576 	if (csum_insertion &&
4577 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4578 	     !stmmac_has_ip_ethertype(skb))) {
4579 		if (unlikely(skb_checksum_help(skb)))
4580 			goto dma_map_err;
4581 		csum_insertion = !csum_insertion;
4582 	}
4583 
4584 	if (likely(priv->extend_desc))
4585 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4586 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4587 		desc = &tx_q->dma_entx[entry].basic;
4588 	else
4589 		desc = tx_q->dma_tx + entry;
4590 
4591 	first = desc;
4592 
4593 	if (has_vlan)
4594 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4595 
4596 	enh_desc = priv->plat->enh_desc;
4597 	/* Program the descriptors according to the size of the frame */
4598 	if (enh_desc)
4599 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4600 
4601 	if (unlikely(is_jumbo)) {
4602 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4603 		if (unlikely(entry < 0) && (entry != -EINVAL))
4604 			goto dma_map_err;
4605 	}
4606 
4607 	for (i = 0; i < nfrags; i++) {
4608 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4609 		int len = skb_frag_size(frag);
4610 		bool last_segment = (i == (nfrags - 1));
4611 
4612 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4613 		WARN_ON(tx_q->tx_skbuff[entry]);
4614 
4615 		if (likely(priv->extend_desc))
4616 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4617 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4618 			desc = &tx_q->dma_entx[entry].basic;
4619 		else
4620 			desc = tx_q->dma_tx + entry;
4621 
4622 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4623 				       DMA_TO_DEVICE);
4624 		if (dma_mapping_error(priv->device, des))
4625 			goto dma_map_err; /* should reuse desc w/o issues */
4626 
4627 		tx_q->tx_skbuff_dma[entry].buf = des;
4628 
4629 		stmmac_set_desc_addr(priv, desc, des);
4630 
4631 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4632 		tx_q->tx_skbuff_dma[entry].len = len;
4633 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4634 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4635 
4636 		/* Prepare the descriptor and set the own bit too */
4637 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4638 				priv->mode, 1, last_segment, skb->len);
4639 	}
4640 
4641 	/* Only the last descriptor gets to point to the skb. */
4642 	tx_q->tx_skbuff[entry] = skb;
4643 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4644 
4645 	/* According to the coalesce parameter, the IC bit for the latest
4646 	 * segment is reset and the timer is re-started to clean the tx status.
4647 	 * This approach takes care of the fragments: desc is the first
4648 	 * element in case of no SG.
4649 	 */
4650 	tx_packets = (entry + 1) - first_tx;
4651 	tx_q->tx_count_frames += tx_packets;
4652 
4653 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4654 		set_ic = true;
4655 	else if (!priv->tx_coal_frames[queue])
4656 		set_ic = false;
4657 	else if (tx_packets > priv->tx_coal_frames[queue])
4658 		set_ic = true;
4659 	else if ((tx_q->tx_count_frames %
4660 		  priv->tx_coal_frames[queue]) < tx_packets)
4661 		set_ic = true;
4662 	else
4663 		set_ic = false;
4664 
4665 	if (set_ic) {
4666 		if (likely(priv->extend_desc))
4667 			desc = &tx_q->dma_etx[entry].basic;
4668 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4669 			desc = &tx_q->dma_entx[entry].basic;
4670 		else
4671 			desc = &tx_q->dma_tx[entry];
4672 
4673 		tx_q->tx_count_frames = 0;
4674 		stmmac_set_tx_ic(priv, desc);
4675 	}
4676 
4677 	/* We've used all descriptors we need for this skb, however,
4678 	 * advance cur_tx so that it references a fresh descriptor.
4679 	 * ndo_start_xmit will fill this descriptor the next time it's
4680 	 * called and stmmac_tx_clean may clean up to this descriptor.
4681 	 */
4682 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4683 	tx_q->cur_tx = entry;
4684 
4685 	if (netif_msg_pktdata(priv)) {
4686 		netdev_dbg(priv->dev,
4687 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4688 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4689 			   entry, first, nfrags);
4690 
4691 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4692 		print_pkt(skb->data, skb->len);
4693 	}
4694 
4695 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4696 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4697 			  __func__);
4698 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4699 	}
4700 
4701 	u64_stats_update_begin(&txq_stats->q_syncp);
4702 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4703 	if (set_ic)
4704 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4705 	u64_stats_update_end(&txq_stats->q_syncp);
4706 
4707 	if (priv->sarc_type)
4708 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4709 
4710 	skb_tx_timestamp(skb);
4711 
4712 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4713 	 * problems because all the descriptors are actually ready to be
4714 	 * passed to the DMA engine.
4715 	 */
4716 	if (likely(!is_jumbo)) {
4717 		bool last_segment = (nfrags == 0);
4718 
4719 		des = dma_map_single(priv->device, skb->data,
4720 				     nopaged_len, DMA_TO_DEVICE);
4721 		if (dma_mapping_error(priv->device, des))
4722 			goto dma_map_err;
4723 
4724 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4725 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4726 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4727 
4728 		stmmac_set_desc_addr(priv, first, des);
4729 
4730 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4731 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4732 
4733 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4734 			     priv->hwts_tx_en)) {
4735 			/* declare that device is doing timestamping */
4736 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4737 			stmmac_enable_tx_timestamp(priv, first);
4738 		}
4739 
4740 		/* Prepare the first descriptor setting the OWN bit too */
4741 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4742 				csum_insertion, priv->mode, 0, last_segment,
4743 				skb->len);
4744 	}
4745 
4746 	if (tx_q->tbs & STMMAC_TBS_EN) {
4747 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4748 
4749 		tbs_desc = &tx_q->dma_entx[first_entry];
4750 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4751 	}
4752 
4753 	stmmac_set_tx_owner(priv, first);
4754 
4755 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4756 
4757 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4758 
4759 	stmmac_flush_tx_descriptors(priv, queue);
4760 	stmmac_tx_timer_arm(priv, queue);
4761 
4762 	return NETDEV_TX_OK;
4763 
4764 dma_map_err:
4765 	netdev_err(priv->dev, "Tx DMA map failed\n");
4766 max_sdu_err:
4767 	dev_kfree_skb(skb);
4768 	priv->xstats.tx_dropped++;
4769 	return NETDEV_TX_OK;
4770 }
4771 
4772 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4773 {
4774 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4775 	__be16 vlan_proto = veth->h_vlan_proto;
4776 	u16 vlanid;
4777 
4778 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4779 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4780 	    (vlan_proto == htons(ETH_P_8021AD) &&
4781 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4782 		/* pop the vlan tag */
4783 		vlanid = ntohs(veth->h_vlan_TCI);
4784 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4785 		skb_pull(skb, VLAN_HLEN);
4786 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4787 	}
4788 }
4789 
4790 /**
4791  * stmmac_rx_refill - refill used skb preallocated buffers
4792  * @priv: driver private structure
4793  * @queue: RX queue index
4794  * Description : this reallocates the RX buffers for the reception process,
4795  * which is based on zero-copy.
4796  */
4797 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4798 {
4799 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4800 	int dirty = stmmac_rx_dirty(priv, queue);
4801 	unsigned int entry = rx_q->dirty_rx;
4802 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4803 
4804 	if (priv->dma_cap.host_dma_width <= 32)
4805 		gfp |= GFP_DMA32;
4806 
4807 	while (dirty-- > 0) {
4808 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4809 		struct dma_desc *p;
4810 		bool use_rx_wd;
4811 
4812 		if (priv->extend_desc)
4813 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4814 		else
4815 			p = rx_q->dma_rx + entry;
4816 
4817 		if (!buf->page) {
4818 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4819 			if (!buf->page)
4820 				break;
4821 		}
4822 
4823 		if (priv->sph && !buf->sec_page) {
4824 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4825 			if (!buf->sec_page)
4826 				break;
4827 
4828 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4829 		}
4830 
4831 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4832 
4833 		stmmac_set_desc_addr(priv, p, buf->addr);
4834 		if (priv->sph)
4835 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4836 		else
4837 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4838 		stmmac_refill_desc3(priv, rx_q, p);
4839 
4840 		rx_q->rx_count_frames++;
4841 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4842 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4843 			rx_q->rx_count_frames = 0;
4844 
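		/* use_rx_wd selects watchdog-based (coalesced) completion
		 * signalling for this descriptor; it is forced off when RIWT
		 * is not in use.
		 */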
4845 		use_rx_wd = !priv->rx_coal_frames[queue];
4846 		use_rx_wd |= rx_q->rx_count_frames > 0;
4847 		if (!priv->use_riwt)
4848 			use_rx_wd = false;
4849 
4850 		dma_wmb();
4851 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4852 
4853 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4854 	}
4855 	rx_q->dirty_rx = entry;
4856 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4857 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4858 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4859 }
4860 
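/* stmmac_rx_buf1_len - number of bytes carried in buffer 1 of an RX descriptor.
 * With Split Header enabled, buffer 1 holds only the header (or nothing on
 * non-first descriptors); otherwise it holds up to dma_buf_sz bytes of the
 * frame, with the exact length taken from the last descriptor.
 */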
4861 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4862 				       struct dma_desc *p,
4863 				       int status, unsigned int len)
4864 {
4865 	unsigned int plen = 0, hlen = 0;
4866 	int coe = priv->hw->rx_csum;
4867 
4868 	/* Not the first descriptor: with SPH, buffer 1 length is always zero */
4869 	if (priv->sph && len)
4870 		return 0;
4871 
4872 	/* First descriptor, get split header length */
4873 	stmmac_get_rx_header_len(priv, p, &hlen);
4874 	if (priv->sph && hlen) {
4875 		priv->xstats.rx_split_hdr_pkt_n++;
4876 		return hlen;
4877 	}
4878 
4879 	/* First descriptor, not last descriptor and not split header */
4880 	if (status & rx_not_ls)
4881 		return priv->dma_conf.dma_buf_sz;
4882 
4883 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4884 
4885 	/* First descriptor and last descriptor and not split header */
4886 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4887 }
4888 
4889 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4890 				       struct dma_desc *p,
4891 				       int status, unsigned int len)
4892 {
4893 	int coe = priv->hw->rx_csum;
4894 	unsigned int plen = 0;
4895 
4896 	/* Not split header, buffer is not available */
4897 	if (!priv->sph)
4898 		return 0;
4899 
4900 	/* Not last descriptor */
4901 	if (status & rx_not_ls)
4902 		return priv->dma_conf.dma_buf_sz;
4903 
4904 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4905 
4906 	/* Last descriptor */
4907 	return plen - len;
4908 }
4909 
4910 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4911 				struct xdp_frame *xdpf, bool dma_map)
4912 {
4913 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4914 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4915 	unsigned int entry = tx_q->cur_tx;
4916 	struct dma_desc *tx_desc;
4917 	dma_addr_t dma_addr;
4918 	bool set_ic;
4919 
4920 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4921 		return STMMAC_XDP_CONSUMED;
4922 
4923 	if (priv->est && priv->est->enable &&
4924 	    priv->est->max_sdu[queue] &&
4925 	    xdpf->len > priv->est->max_sdu[queue]) {
4926 		priv->xstats.max_sdu_txq_drop[queue]++;
4927 		return STMMAC_XDP_CONSUMED;
4928 	}
4929 
4930 	if (likely(priv->extend_desc))
4931 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4932 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4933 		tx_desc = &tx_q->dma_entx[entry].basic;
4934 	else
4935 		tx_desc = tx_q->dma_tx + entry;
4936 
4937 	if (dma_map) {
4938 		dma_addr = dma_map_single(priv->device, xdpf->data,
4939 					  xdpf->len, DMA_TO_DEVICE);
4940 		if (dma_mapping_error(priv->device, dma_addr))
4941 			return STMMAC_XDP_CONSUMED;
4942 
4943 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4944 	} else {
4945 		struct page *page = virt_to_page(xdpf->data);
4946 
4947 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4948 			   xdpf->headroom;
4949 		dma_sync_single_for_device(priv->device, dma_addr,
4950 					   xdpf->len, DMA_BIDIRECTIONAL);
4951 
4952 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4953 	}
4954 
4955 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4956 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4957 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4958 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4959 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4960 
4961 	tx_q->xdpf[entry] = xdpf;
4962 
4963 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4964 
4965 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4966 			       true, priv->mode, true, true,
4967 			       xdpf->len);
4968 
4969 	tx_q->tx_count_frames++;
4970 
4971 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4972 		set_ic = true;
4973 	else
4974 		set_ic = false;
4975 
4976 	if (set_ic) {
4977 		tx_q->tx_count_frames = 0;
4978 		stmmac_set_tx_ic(priv, tx_desc);
4979 		u64_stats_update_begin(&txq_stats->q_syncp);
4980 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4981 		u64_stats_update_end(&txq_stats->q_syncp);
4982 	}
4983 
4984 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4985 
4986 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4987 	tx_q->cur_tx = entry;
4988 
4989 	return STMMAC_XDP_TX;
4990 }
4991 
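/* stmmac_xdp_get_tx_queue - map the current CPU to a TX queue for XDP
 * transmission (effectively cpu % tx_queues_to_use), spreading XDP_TX traffic
 * from concurrent CPUs across the available queues.
 */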
4992 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4993 				   int cpu)
4994 {
4995 	int index = cpu;
4996 
4997 	if (unlikely(index < 0))
4998 		index = 0;
4999 
5000 	while (index >= priv->plat->tx_queues_to_use)
5001 		index -= priv->plat->tx_queues_to_use;
5002 
5003 	return index;
5004 }
5005 
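/* stmmac_xdp_xmit_back - transmit an XDP_TX buffer on the queue mapped to the
 * current CPU, holding the netdev TX queue lock so it can coexist with the
 * slow transmit path.
 */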
5006 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5007 				struct xdp_buff *xdp)
5008 {
5009 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5010 	int cpu = smp_processor_id();
5011 	struct netdev_queue *nq;
5012 	int queue;
5013 	int res;
5014 
5015 	if (unlikely(!xdpf))
5016 		return STMMAC_XDP_CONSUMED;
5017 
5018 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5019 	nq = netdev_get_tx_queue(priv->dev, queue);
5020 
5021 	__netif_tx_lock(nq, cpu);
5022 	/* Avoids TX time-out as we are sharing with slow path */
5023 	txq_trans_cond_update(nq);
5024 
5025 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5026 	if (res == STMMAC_XDP_TX)
5027 		stmmac_flush_tx_descriptors(priv, queue);
5028 
5029 	__netif_tx_unlock(nq);
5030 
5031 	return res;
5032 }
5033 
5034 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5035 				 struct bpf_prog *prog,
5036 				 struct xdp_buff *xdp)
5037 {
5038 	u32 act;
5039 	int res;
5040 
5041 	act = bpf_prog_run_xdp(prog, xdp);
5042 	switch (act) {
5043 	case XDP_PASS:
5044 		res = STMMAC_XDP_PASS;
5045 		break;
5046 	case XDP_TX:
5047 		res = stmmac_xdp_xmit_back(priv, xdp);
5048 		break;
5049 	case XDP_REDIRECT:
5050 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5051 			res = STMMAC_XDP_CONSUMED;
5052 		else
5053 			res = STMMAC_XDP_REDIRECT;
5054 		break;
5055 	default:
5056 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5057 		fallthrough;
5058 	case XDP_ABORTED:
5059 		trace_xdp_exception(priv->dev, prog, act);
5060 		fallthrough;
5061 	case XDP_DROP:
5062 		res = STMMAC_XDP_CONSUMED;
5063 		break;
5064 	}
5065 
5066 	return res;
5067 }
5068 
5069 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5070 					   struct xdp_buff *xdp)
5071 {
5072 	struct bpf_prog *prog;
5073 	int res;
5074 
5075 	prog = READ_ONCE(priv->xdp_prog);
5076 	if (!prog) {
5077 		res = STMMAC_XDP_PASS;
5078 		goto out;
5079 	}
5080 
5081 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5082 out:
5083 	return ERR_PTR(-res);
5084 }
5085 
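/* stmmac_finalize_xdp_rx - finish an XDP RX pass: arm the TX timer if XDP_TX
 * frames were queued and flush any pending XDP redirects.
 */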
5086 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5087 				   int xdp_status)
5088 {
5089 	int cpu = smp_processor_id();
5090 	int queue;
5091 
5092 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5093 
5094 	if (xdp_status & STMMAC_XDP_TX)
5095 		stmmac_tx_timer_arm(priv, queue);
5096 
5097 	if (xdp_status & STMMAC_XDP_REDIRECT)
5098 		xdp_do_flush();
5099 }
5100 
5101 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5102 					       struct xdp_buff *xdp)
5103 {
5104 	unsigned int metasize = xdp->data - xdp->data_meta;
5105 	unsigned int datasize = xdp->data_end - xdp->data;
5106 	struct sk_buff *skb;
5107 
5108 	skb = napi_alloc_skb(&ch->rxtx_napi,
5109 			     xdp->data_end - xdp->data_hard_start);
5110 	if (unlikely(!skb))
5111 		return NULL;
5112 
5113 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5114 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5115 	if (metasize)
5116 		skb_metadata_set(skb, metasize);
5117 
5118 	return skb;
5119 }
5120 
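/* stmmac_dispatch_skb_zc - copy an XSK zero-copy buffer into an skb and pass
 * it up the stack via GRO, applying RX timestamp, VLAN stripping, checksum
 * status and RSS hash taken from the descriptors.
 */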
5121 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5122 				   struct dma_desc *p, struct dma_desc *np,
5123 				   struct xdp_buff *xdp)
5124 {
5125 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5126 	struct stmmac_channel *ch = &priv->channel[queue];
5127 	unsigned int len = xdp->data_end - xdp->data;
5128 	enum pkt_hash_types hash_type;
5129 	int coe = priv->hw->rx_csum;
5130 	struct sk_buff *skb;
5131 	u32 hash;
5132 
5133 	skb = stmmac_construct_skb_zc(ch, xdp);
5134 	if (!skb) {
5135 		priv->xstats.rx_dropped++;
5136 		return;
5137 	}
5138 
5139 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5140 	if (priv->hw->hw_vlan_en)
5141 		/* MAC level stripping. */
5142 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5143 	else
5144 		/* Driver level stripping. */
5145 		stmmac_rx_vlan(priv->dev, skb);
5146 	skb->protocol = eth_type_trans(skb, priv->dev);
5147 
5148 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5149 		skb_checksum_none_assert(skb);
5150 	else
5151 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5152 
5153 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5154 		skb_set_hash(skb, hash, hash_type);
5155 
5156 	skb_record_rx_queue(skb, queue);
5157 	napi_gro_receive(&ch->rxtx_napi, skb);
5158 
5159 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5160 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5161 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5162 	u64_stats_update_end(&rxq_stats->napi_syncp);
5163 }
5164 
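/* stmmac_rx_refill_zc - refill up to @budget dirty RX descriptors from the
 * XSK buffer pool; returns false if the pool ran out of buffers so the caller
 * can flag a refill failure.
 */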
5165 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5166 {
5167 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5168 	unsigned int entry = rx_q->dirty_rx;
5169 	struct dma_desc *rx_desc = NULL;
5170 	bool ret = true;
5171 
5172 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5173 
5174 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5175 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5176 		dma_addr_t dma_addr;
5177 		bool use_rx_wd;
5178 
5179 		if (!buf->xdp) {
5180 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5181 			if (!buf->xdp) {
5182 				ret = false;
5183 				break;
5184 			}
5185 		}
5186 
5187 		if (priv->extend_desc)
5188 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5189 		else
5190 			rx_desc = rx_q->dma_rx + entry;
5191 
5192 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5193 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5194 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5195 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5196 
5197 		rx_q->rx_count_frames++;
5198 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5199 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5200 			rx_q->rx_count_frames = 0;
5201 
5202 		use_rx_wd = !priv->rx_coal_frames[queue];
5203 		use_rx_wd |= rx_q->rx_count_frames > 0;
5204 		if (!priv->use_riwt)
5205 			use_rx_wd = false;
5206 
5207 		dma_wmb();
5208 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5209 
5210 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5211 	}
5212 
5213 	if (rx_desc) {
5214 		rx_q->dirty_rx = entry;
5215 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5216 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5217 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5218 	}
5219 
5220 	return ret;
5221 }
5222 
5223 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5224 {
5225 	/* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
5226 	 * represents the incoming packet, whereas the cb field in the same
5227 	 * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
5228 	 * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
5229 	 */
5230 	return (struct stmmac_xdp_buff *)xdp;
5231 }
5232 
5233 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5234 {
5235 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5236 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5237 	unsigned int count = 0, error = 0, len = 0;
5238 	int dirty = stmmac_rx_dirty(priv, queue);
5239 	unsigned int next_entry = rx_q->cur_rx;
5240 	u32 rx_errors = 0, rx_dropped = 0;
5241 	unsigned int desc_size;
5242 	struct bpf_prog *prog;
5243 	bool failure = false;
5244 	int xdp_status = 0;
5245 	int status = 0;
5246 
5247 	if (netif_msg_rx_status(priv)) {
5248 		void *rx_head;
5249 
5250 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5251 		if (priv->extend_desc) {
5252 			rx_head = (void *)rx_q->dma_erx;
5253 			desc_size = sizeof(struct dma_extended_desc);
5254 		} else {
5255 			rx_head = (void *)rx_q->dma_rx;
5256 			desc_size = sizeof(struct dma_desc);
5257 		}
5258 
5259 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5260 				    rx_q->dma_rx_phy, desc_size);
5261 	}
5262 	while (count < limit) {
5263 		struct stmmac_rx_buffer *buf;
5264 		struct stmmac_xdp_buff *ctx;
5265 		unsigned int buf1_len = 0;
5266 		struct dma_desc *np, *p;
5267 		int entry;
5268 		int res;
5269 
5270 		if (!count && rx_q->state_saved) {
5271 			error = rx_q->state.error;
5272 			len = rx_q->state.len;
5273 		} else {
5274 			rx_q->state_saved = false;
5275 			error = 0;
5276 			len = 0;
5277 		}
5278 
5279 		if (count >= limit)
5280 			break;
5281 
5282 read_again:
5283 		buf1_len = 0;
5284 		entry = next_entry;
5285 		buf = &rx_q->buf_pool[entry];
5286 
5287 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5288 			failure = failure ||
5289 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5290 			dirty = 0;
5291 		}
5292 
5293 		if (priv->extend_desc)
5294 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5295 		else
5296 			p = rx_q->dma_rx + entry;
5297 
5298 		/* read the status of the incoming frame */
5299 		status = stmmac_rx_status(priv, &priv->xstats, p);
5300 		/* stop if the descriptor is still owned by the DMA, otherwise go ahead */
5301 		if (unlikely(status & dma_own))
5302 			break;
5303 
5304 		/* Prefetch the next RX descriptor */
5305 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5306 						priv->dma_conf.dma_rx_size);
5307 		next_entry = rx_q->cur_rx;
5308 
5309 		if (priv->extend_desc)
5310 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5311 		else
5312 			np = rx_q->dma_rx + next_entry;
5313 
5314 		prefetch(np);
5315 
5316 		/* Ensure a valid XSK buffer before proceeding */
5317 		if (!buf->xdp)
5318 			break;
5319 
5320 		if (priv->extend_desc)
5321 			stmmac_rx_extended_status(priv, &priv->xstats,
5322 						  rx_q->dma_erx + entry);
5323 		if (unlikely(status == discard_frame)) {
5324 			xsk_buff_free(buf->xdp);
5325 			buf->xdp = NULL;
5326 			dirty++;
5327 			error = 1;
5328 			if (!priv->hwts_rx_en)
5329 				rx_errors++;
5330 		}
5331 
5332 		if (unlikely(error && (status & rx_not_ls)))
5333 			goto read_again;
5334 		if (unlikely(error)) {
5335 			count++;
5336 			continue;
5337 		}
5338 
5339 		/* The XSK pool expects each RX frame to map 1:1 onto an XSK buffer */
5340 		if (likely(status & rx_not_ls)) {
5341 			xsk_buff_free(buf->xdp);
5342 			buf->xdp = NULL;
5343 			dirty++;
5344 			count++;
5345 			goto read_again;
5346 		}
5347 
5348 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5349 		ctx->priv = priv;
5350 		ctx->desc = p;
5351 		ctx->ndesc = np;
5352 
5353 		/* XDP ZC frames only support primary buffers for now */
5354 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5355 		len += buf1_len;
5356 
5357 		/* ACS is disabled; strip manually. */
5358 		if (likely(!(status & rx_not_ls))) {
5359 			buf1_len -= ETH_FCS_LEN;
5360 			len -= ETH_FCS_LEN;
5361 		}
5362 
5363 		/* RX buffer is good and fits into an XSK pool buffer */
5364 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5365 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5366 
5367 		prog = READ_ONCE(priv->xdp_prog);
5368 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5369 
5370 		switch (res) {
5371 		case STMMAC_XDP_PASS:
5372 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5373 			xsk_buff_free(buf->xdp);
5374 			break;
5375 		case STMMAC_XDP_CONSUMED:
5376 			xsk_buff_free(buf->xdp);
5377 			rx_dropped++;
5378 			break;
5379 		case STMMAC_XDP_TX:
5380 		case STMMAC_XDP_REDIRECT:
5381 			xdp_status |= res;
5382 			break;
5383 		}
5384 
5385 		buf->xdp = NULL;
5386 		dirty++;
5387 		count++;
5388 	}
5389 
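	/* A frame spanning several descriptors may be cut short by the NAPI
	 * budget; save the partial state so the next poll resumes cleanly.
	 */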
5390 	if (status & rx_not_ls) {
5391 		rx_q->state_saved = true;
5392 		rx_q->state.error = error;
5393 		rx_q->state.len = len;
5394 	}
5395 
5396 	stmmac_finalize_xdp_rx(priv, xdp_status);
5397 
5398 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5399 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5400 	u64_stats_update_end(&rxq_stats->napi_syncp);
5401 
5402 	priv->xstats.rx_dropped += rx_dropped;
5403 	priv->xstats.rx_errors += rx_errors;
5404 
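	/* XSK need_wakeup protocol: if buffer allocation failed or the ring
	 * still has dirty entries, ask user space to kick the driver so the
	 * fill queue gets replenished before the next poll.
	 */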
5405 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5406 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5407 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5408 		else
5409 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5410 
5411 		return (int)count;
5412 	}
5413 
5414 	return failure ? limit : (int)count;
5415 }
5416 
5417 /**
5418  * stmmac_rx - manage the receive process
5419  * @priv: driver private structure
5420  * @limit: NAPI budget
5421  * @queue: RX queue index.
5422  * Description: this is the function called by the NAPI poll method.
5423  * It gets all the frames inside the ring.
5424  */
5425 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5426 {
5427 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5428 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5429 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5430 	struct stmmac_channel *ch = &priv->channel[queue];
5431 	unsigned int count = 0, error = 0, len = 0;
5432 	int status = 0, coe = priv->hw->rx_csum;
5433 	unsigned int next_entry = rx_q->cur_rx;
5434 	enum dma_data_direction dma_dir;
5435 	unsigned int desc_size;
5436 	struct sk_buff *skb = NULL;
5437 	struct stmmac_xdp_buff ctx;
5438 	int xdp_status = 0;
5439 	int buf_sz;
5440 
5441 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
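	/* buf_sz is the DMA buffer size rounded up to whole pages; it is used
	 * as the XDP frame size for xdp_init_buff() below.
	 */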
5442 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5443 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5444 
5445 	if (netif_msg_rx_status(priv)) {
5446 		void *rx_head;
5447 
5448 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5449 		if (priv->extend_desc) {
5450 			rx_head = (void *)rx_q->dma_erx;
5451 			desc_size = sizeof(struct dma_extended_desc);
5452 		} else {
5453 			rx_head = (void *)rx_q->dma_rx;
5454 			desc_size = sizeof(struct dma_desc);
5455 		}
5456 
5457 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5458 				    rx_q->dma_rx_phy, desc_size);
5459 	}
5460 	while (count < limit) {
5461 		unsigned int buf1_len = 0, buf2_len = 0;
5462 		enum pkt_hash_types hash_type;
5463 		struct stmmac_rx_buffer *buf;
5464 		struct dma_desc *np, *p;
5465 		int entry;
5466 		u32 hash;
5467 
5468 		if (!count && rx_q->state_saved) {
5469 			skb = rx_q->state.skb;
5470 			error = rx_q->state.error;
5471 			len = rx_q->state.len;
5472 		} else {
5473 			rx_q->state_saved = false;
5474 			skb = NULL;
5475 			error = 0;
5476 			len = 0;
5477 		}
5478 
5479 read_again:
5480 		if (count >= limit)
5481 			break;
5482 
5483 		buf1_len = 0;
5484 		buf2_len = 0;
5485 		entry = next_entry;
5486 		buf = &rx_q->buf_pool[entry];
5487 
5488 		if (priv->extend_desc)
5489 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5490 		else
5491 			p = rx_q->dma_rx + entry;
5492 
5493 		/* read the status of the incoming frame */
5494 		status = stmmac_rx_status(priv, &priv->xstats, p);
5495 		/* stop if the descriptor is still owned by the DMA, otherwise go ahead */
5496 		if (unlikely(status & dma_own))
5497 			break;
5498 
5499 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5500 						priv->dma_conf.dma_rx_size);
5501 		next_entry = rx_q->cur_rx;
5502 
5503 		if (priv->extend_desc)
5504 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5505 		else
5506 			np = rx_q->dma_rx + next_entry;
5507 
5508 		prefetch(np);
5509 
5510 		if (priv->extend_desc)
5511 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5512 		if (unlikely(status == discard_frame)) {
5513 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5514 			buf->page = NULL;
5515 			error = 1;
5516 			if (!priv->hwts_rx_en)
5517 				rx_errors++;
5518 		}
5519 
5520 		if (unlikely(error && (status & rx_not_ls)))
5521 			goto read_again;
5522 		if (unlikely(error)) {
5523 			dev_kfree_skb(skb);
5524 			skb = NULL;
5525 			count++;
5526 			continue;
5527 		}
5528 
5529 		/* Buffer is good. Go on. */
5530 
5531 		prefetch(page_address(buf->page) + buf->page_offset);
5532 		if (buf->sec_page)
5533 			prefetch(page_address(buf->sec_page));
5534 
5535 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5536 		len += buf1_len;
5537 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5538 		len += buf2_len;
5539 
5540 		/* ACS is disabled; strip manually. */
5541 		if (likely(!(status & rx_not_ls))) {
5542 			if (buf2_len) {
5543 				buf2_len -= ETH_FCS_LEN;
5544 				len -= ETH_FCS_LEN;
5545 			} else if (buf1_len) {
5546 				buf1_len -= ETH_FCS_LEN;
5547 				len -= ETH_FCS_LEN;
5548 			}
5549 		}
5550 
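		/* The XDP program only runs on the first buffer of a frame
		 * (skb is still NULL here); later buffers are appended to the
		 * skb as page fragments further below.
		 */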
5551 		if (!skb) {
5552 			unsigned int pre_len, sync_len;
5553 
5554 			dma_sync_single_for_cpu(priv->device, buf->addr,
5555 						buf1_len, dma_dir);
5556 
5557 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5558 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5559 					 buf->page_offset, buf1_len, true);
5560 
5561 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5562 				  buf->page_offset;
5563 
5564 			ctx.priv = priv;
5565 			ctx.desc = p;
5566 			ctx.ndesc = np;
5567 
5568 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5569 			/* Due to xdp_adjust_tail: the DMA sync for_device
5570 			 * must cover the maximum length the CPU touched
5571 			 */
5572 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5573 				   buf->page_offset;
5574 			sync_len = max(sync_len, pre_len);
5575 
5576 			/* For verdicts other than XDP_PASS */
5577 			if (IS_ERR(skb)) {
5578 				unsigned int xdp_res = -PTR_ERR(skb);
5579 
5580 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5581 					page_pool_put_page(rx_q->page_pool,
5582 							   virt_to_head_page(ctx.xdp.data),
5583 							   sync_len, true);
5584 					buf->page = NULL;
5585 					rx_dropped++;
5586 
5587 					/* Clear skb as it was used to carry
5588 					 * the XDP program's verdict status.
5589 					 */
5590 					skb = NULL;
5591 
5592 					if (unlikely((status & rx_not_ls)))
5593 						goto read_again;
5594 
5595 					count++;
5596 					continue;
5597 				} else if (xdp_res & (STMMAC_XDP_TX |
5598 						      STMMAC_XDP_REDIRECT)) {
5599 					xdp_status |= xdp_res;
5600 					buf->page = NULL;
5601 					skb = NULL;
5602 					count++;
5603 					continue;
5604 				}
5605 			}
5606 		}
5607 
5608 		if (!skb) {
5609 			/* XDP program may expand or reduce tail */
5610 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5611 
5612 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5613 			if (!skb) {
5614 				rx_dropped++;
5615 				count++;
5616 				goto drain_data;
5617 			}
5618 
5619 			/* XDP program may adjust header */
5620 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5621 			skb_put(skb, buf1_len);
5622 
5623 			/* Data payload copied into SKB, page ready for recycle */
5624 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5625 			buf->page = NULL;
5626 		} else if (buf1_len) {
5627 			dma_sync_single_for_cpu(priv->device, buf->addr,
5628 						buf1_len, dma_dir);
5629 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5630 					buf->page, buf->page_offset, buf1_len,
5631 					priv->dma_conf.dma_buf_sz);
5632 
5633 			/* Data payload appended into SKB */
5634 			skb_mark_for_recycle(skb);
5635 			buf->page = NULL;
5636 		}
5637 
5638 		if (buf2_len) {
5639 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5640 						buf2_len, dma_dir);
5641 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5642 					buf->sec_page, 0, buf2_len,
5643 					priv->dma_conf.dma_buf_sz);
5644 
5645 			/* Data payload appended into SKB */
5646 			skb_mark_for_recycle(skb);
5647 			buf->sec_page = NULL;
5648 		}
5649 
5650 drain_data:
5651 		if (likely(status & rx_not_ls))
5652 			goto read_again;
5653 		if (!skb)
5654 			continue;
5655 
5656 		/* Got entire packet into SKB. Finish it. */
5657 
5658 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5659 
5660 		if (priv->hw->hw_vlan_en)
5661 			/* MAC level stripping. */
5662 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5663 		else
5664 			/* Driver level stripping. */
5665 			stmmac_rx_vlan(priv->dev, skb);
5666 
5667 		skb->protocol = eth_type_trans(skb, priv->dev);
5668 
5669 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5670 			skb_checksum_none_assert(skb);
5671 		else
5672 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5673 
5674 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5675 			skb_set_hash(skb, hash, hash_type);
5676 
5677 		skb_record_rx_queue(skb, queue);
5678 		napi_gro_receive(&ch->rx_napi, skb);
5679 		skb = NULL;
5680 
5681 		rx_packets++;
5682 		rx_bytes += len;
5683 		count++;
5684 	}
5685 
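	/* The budget may expire in the middle of a multi-descriptor frame;
	 * save the partially built skb and counters so the next NAPI poll
	 * picks up where this one stopped.
	 */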
5686 	if (status & rx_not_ls || skb) {
5687 		rx_q->state_saved = true;
5688 		rx_q->state.skb = skb;
5689 		rx_q->state.error = error;
5690 		rx_q->state.len = len;
5691 	}
5692 
5693 	stmmac_finalize_xdp_rx(priv, xdp_status);
5694 
5695 	stmmac_rx_refill(priv, queue);
5696 
5697 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5698 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5699 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5700 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5701 	u64_stats_update_end(&rxq_stats->napi_syncp);
5702 
5703 	priv->xstats.rx_dropped += rx_dropped;
5704 	priv->xstats.rx_errors += rx_errors;
5705 
5706 	return count;
5707 }
5708 
5709 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5710 {
5711 	struct stmmac_channel *ch =
5712 		container_of(napi, struct stmmac_channel, rx_napi);
5713 	struct stmmac_priv *priv = ch->priv_data;
5714 	struct stmmac_rxq_stats *rxq_stats;
5715 	u32 chan = ch->index;
5716 	int work_done;
5717 
5718 	rxq_stats = &priv->xstats.rxq_stats[chan];
5719 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5720 	u64_stats_inc(&rxq_stats->napi.poll);
5721 	u64_stats_update_end(&rxq_stats->napi_syncp);
5722 
5723 	work_done = stmmac_rx(priv, budget, chan);
5724 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5725 		unsigned long flags;
5726 
5727 		spin_lock_irqsave(&ch->lock, flags);
5728 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5729 		spin_unlock_irqrestore(&ch->lock, flags);
5730 	}
5731 
5732 	return work_done;
5733 }
5734 
5735 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5736 {
5737 	struct stmmac_channel *ch =
5738 		container_of(napi, struct stmmac_channel, tx_napi);
5739 	struct stmmac_priv *priv = ch->priv_data;
5740 	struct stmmac_txq_stats *txq_stats;
5741 	bool pending_packets = false;
5742 	u32 chan = ch->index;
5743 	int work_done;
5744 
5745 	txq_stats = &priv->xstats.txq_stats[chan];
5746 	u64_stats_update_begin(&txq_stats->napi_syncp);
5747 	u64_stats_inc(&txq_stats->napi.poll);
5748 	u64_stats_update_end(&txq_stats->napi_syncp);
5749 
5750 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5751 	work_done = min(work_done, budget);
5752 
5753 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5754 		unsigned long flags;
5755 
5756 		spin_lock_irqsave(&ch->lock, flags);
5757 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5758 		spin_unlock_irqrestore(&ch->lock, flags);
5759 	}
5760 
5761 	/* TX still has packets to handle; check if we need to arm the TX timer */
5762 	if (pending_packets)
5763 		stmmac_tx_timer_arm(priv, chan);
5764 
5765 	return work_done;
5766 }
5767 
5768 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5769 {
5770 	struct stmmac_channel *ch =
5771 		container_of(napi, struct stmmac_channel, rxtx_napi);
5772 	struct stmmac_priv *priv = ch->priv_data;
5773 	bool tx_pending_packets = false;
5774 	int rx_done, tx_done, rxtx_done;
5775 	struct stmmac_rxq_stats *rxq_stats;
5776 	struct stmmac_txq_stats *txq_stats;
5777 	u32 chan = ch->index;
5778 
5779 	rxq_stats = &priv->xstats.rxq_stats[chan];
5780 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5781 	u64_stats_inc(&rxq_stats->napi.poll);
5782 	u64_stats_update_end(&rxq_stats->napi_syncp);
5783 
5784 	txq_stats = &priv->xstats.txq_stats[chan];
5785 	u64_stats_update_begin(&txq_stats->napi_syncp);
5786 	u64_stats_inc(&txq_stats->napi.poll);
5787 	u64_stats_update_end(&txq_stats->napi_syncp);
5788 
5789 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5790 	tx_done = min(tx_done, budget);
5791 
5792 	rx_done = stmmac_rx_zc(priv, budget, chan);
5793 
5794 	rxtx_done = max(tx_done, rx_done);
5795 
5796 	/* If either TX or RX work is not complete, return budget
5797 	 * and keep polling
5798 	 */
5799 	if (rxtx_done >= budget)
5800 		return budget;
5801 
5802 	/* all work done, exit the polling mode */
5803 	if (napi_complete_done(napi, rxtx_done)) {
5804 		unsigned long flags;
5805 
5806 		spin_lock_irqsave(&ch->lock, flags);
5807 		/* Both RX and TX work are complete,
5808 		 * so enable both RX & TX IRQs.
5809 		 */
5810 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5811 		spin_unlock_irqrestore(&ch->lock, flags);
5812 	}
5813 
5814 	/* TX still has packets to handle; check if we need to arm the TX timer */
5815 	if (tx_pending_packets)
5816 		stmmac_tx_timer_arm(priv, chan);
5817 
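	/* napi_complete_done() may already have been called above, so never
	 * report a full budget back to the NAPI core from this point.
	 */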
5818 	return min(rxtx_done, budget - 1);
5819 }
5820 
5821 /**
5822  *  stmmac_tx_timeout
5823  *  @dev : Pointer to net device structure
5824  *  @txqueue: the index of the hanging transmit queue
5825  *  Description: this function is called when a packet transmission fails to
5826  *   complete within a reasonable time. The driver will mark the error in the
5827  *   netdev structure and arrange for the device to be reset to a sane state
5828  *   in order to transmit a new packet.
5829  */
5830 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5831 {
5832 	struct stmmac_priv *priv = netdev_priv(dev);
5833 
5834 	stmmac_global_err(priv);
5835 }
5836 
5837 /**
5838  *  stmmac_set_rx_mode - entry point for multicast addressing
5839  *  @dev : pointer to the device structure
5840  *  Description:
5841  *  This function is a driver entry point which gets called by the kernel
5842  *  whenever multicast addresses must be enabled/disabled.
5843  *  Return value:
5844  *  void.
5845  */
5846 static void stmmac_set_rx_mode(struct net_device *dev)
5847 {
5848 	struct stmmac_priv *priv = netdev_priv(dev);
5849 
5850 	stmmac_set_filter(priv, priv->hw, dev);
5851 }
5852 
5853 /**
5854  *  stmmac_change_mtu - entry point to change MTU size for the device.
5855  *  @dev : device pointer.
5856  *  @new_mtu : the new MTU size for the device.
5857  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5858  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5859  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5860  *  Return value:
5861  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5862  *  file on failure.
5863  */
5864 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5865 {
5866 	struct stmmac_priv *priv = netdev_priv(dev);
5867 	int txfifosz = priv->plat->tx_fifo_size;
5868 	struct stmmac_dma_conf *dma_conf;
5869 	const int mtu = new_mtu;
5870 	int ret;
5871 
5872 	if (txfifosz == 0)
5873 		txfifosz = priv->dma_cap.tx_fifo_size;
5874 
5875 	txfifosz /= priv->plat->tx_queues_to_use;
5876 
5877 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5878 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5879 		return -EINVAL;
5880 	}
5881 
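	/* Align the requested MTU like a DMA buffer size, so the FIFO capacity
	 * check below is done against the aligned value.
	 */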
5882 	new_mtu = STMMAC_ALIGN(new_mtu);
5883 
5884 	/* Fail if the per-queue TX FIFO is too small or the MTU exceeds 16 KiB */
5885 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5886 		return -EINVAL;
5887 
5888 	if (netif_running(dev)) {
5889 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5890 		/* Try to allocate the new DMA conf with the new mtu */
5891 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5892 		if (IS_ERR(dma_conf)) {
5893 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5894 				   mtu);
5895 			return PTR_ERR(dma_conf);
5896 		}
5897 
5898 		stmmac_release(dev);
5899 
5900 		ret = __stmmac_open(dev, dma_conf);
5901 		if (ret) {
5902 			free_dma_desc_resources(priv, dma_conf);
5903 			kfree(dma_conf);
5904 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5905 			return ret;
5906 		}
5907 
5908 		kfree(dma_conf);
5909 
5910 		stmmac_set_rx_mode(dev);
5911 	}
5912 
5913 	WRITE_ONCE(dev->mtu, mtu);
5914 	netdev_update_features(dev);
5915 
5916 	return 0;
5917 }
5918 
5919 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5920 					     netdev_features_t features)
5921 {
5922 	struct stmmac_priv *priv = netdev_priv(dev);
5923 
5924 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5925 		features &= ~NETIF_F_RXCSUM;
5926 
5927 	if (!priv->plat->tx_coe)
5928 		features &= ~NETIF_F_CSUM_MASK;
5929 
5930 	/* Some GMAC devices have buggy Jumbo frame support that
5931 	 * requires the Tx COE to be disabled for oversized frames
5932 	 * (due to limited buffer sizes). In this case we disable
5933 	 * the TX csum insertion in the TDES and do not use SF.
5934 	 */
5935 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5936 		features &= ~NETIF_F_CSUM_MASK;
5937 
5938 	/* Disable TSO if asked by ethtool */
5939 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5940 		if (features & NETIF_F_TSO)
5941 			priv->tso = true;
5942 		else
5943 			priv->tso = false;
5944 	}
5945 
5946 	return features;
5947 }
5948 
5949 static int stmmac_set_features(struct net_device *netdev,
5950 			       netdev_features_t features)
5951 {
5952 	struct stmmac_priv *priv = netdev_priv(netdev);
5953 
5954 	/* Keep the COE type only if RX checksum offload is supported */
5955 	if (features & NETIF_F_RXCSUM)
5956 		priv->hw->rx_csum = priv->plat->rx_coe;
5957 	else
5958 		priv->hw->rx_csum = 0;
5959 	/* No check needed because rx_coe has been set before and it will be
5960 	 * fixed in case of issue.
5961 	 */
5962 	stmmac_rx_ipc(priv, priv->hw);
5963 
5964 	if (priv->sph_cap) {
5965 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5966 		u32 chan;
5967 
5968 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5969 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5970 	}
5971 
5972 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5973 		priv->hw->hw_vlan_en = true;
5974 	else
5975 		priv->hw->hw_vlan_en = false;
5976 
5977 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5978 
5979 	return 0;
5980 }
5981 
5982 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5983 {
5984 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5985 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5986 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5987 	bool *hs_enable = &fpe_cfg->hs_enable;
5988 
5989 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5990 		return;
5991 
5992 	/* If LP has sent verify mPacket, LP is FPE capable */
5993 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5994 		if (*lp_state < FPE_STATE_CAPABLE)
5995 			*lp_state = FPE_STATE_CAPABLE;
5996 
5997 		/* If the user has requested FPE enable, respond quickly */
5998 		if (*hs_enable)
5999 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6000 						fpe_cfg,
6001 						MPACKET_RESPONSE);
6002 	}
6003 
6004 	/* If Local has sent verify mPacket, Local is FPE capable */
6005 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
6006 		if (*lo_state < FPE_STATE_CAPABLE)
6007 			*lo_state = FPE_STATE_CAPABLE;
6008 	}
6009 
6010 	/* If LP has sent response mPacket, LP is entering FPE ON */
6011 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
6012 		*lp_state = FPE_STATE_ENTERING_ON;
6013 
6014 	/* If Local has sent response mPacket, Local is entering FPE ON */
6015 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
6016 		*lo_state = FPE_STATE_ENTERING_ON;
6017 
6018 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
6019 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
6020 	    priv->fpe_wq) {
6021 		queue_work(priv->fpe_wq, &priv->fpe_task);
6022 	}
6023 }
6024 
6025 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6026 {
6027 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6028 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6029 	u32 queues_count;
6030 	u32 queue;
6031 	bool xmac;
6032 
6033 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6034 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6035 
6036 	if (priv->irq_wake)
6037 		pm_wakeup_event(priv->device, 0);
6038 
6039 	if (priv->dma_cap.estsel)
6040 		stmmac_est_irq_status(priv, priv, priv->dev,
6041 				      &priv->xstats, tx_cnt);
6042 
6043 	if (priv->dma_cap.fpesel) {
6044 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
6045 						   priv->dev);
6046 
6047 		stmmac_fpe_event_status(priv, status);
6048 	}
6049 
6050 	/* To handle the GMAC's own interrupts */
6051 	if ((priv->plat->has_gmac) || xmac) {
6052 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6053 
6054 		if (unlikely(status)) {
6055 			/* For LPI we need to save the tx status */
6056 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6057 				priv->tx_path_in_lpi_mode = true;
6058 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6059 				priv->tx_path_in_lpi_mode = false;
6060 		}
6061 
6062 		for (queue = 0; queue < queues_count; queue++)
6063 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6064 
6065 		/* PCS link status */
6066 		if (priv->hw->pcs &&
6067 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6068 			if (priv->xstats.pcs_link)
6069 				netif_carrier_on(priv->dev);
6070 			else
6071 				netif_carrier_off(priv->dev);
6072 		}
6073 
6074 		stmmac_timestamp_interrupt(priv, priv);
6075 	}
6076 }
6077 
6078 /**
6079  *  stmmac_interrupt - main ISR
6080  *  @irq: interrupt number.
6081  *  @dev_id: to pass the net device pointer.
6082  *  Description: this is the main driver interrupt service routine.
6083  *  It can call:
6084  *  o DMA service routine (to manage incoming frame reception and transmission
6085  *    status)
6086  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6087  *    interrupts.
6088  */
6089 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6090 {
6091 	struct net_device *dev = (struct net_device *)dev_id;
6092 	struct stmmac_priv *priv = netdev_priv(dev);
6093 
6094 	/* Check if adapter is up */
6095 	if (test_bit(STMMAC_DOWN, &priv->state))
6096 		return IRQ_HANDLED;
6097 
6098 	/* Check ASP error if it isn't delivered via an individual IRQ */
6099 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6100 		return IRQ_HANDLED;
6101 
6102 	/* To handle Common interrupts */
6103 	stmmac_common_interrupt(priv);
6104 
6105 	/* To handle DMA interrupts */
6106 	stmmac_dma_interrupt(priv);
6107 
6108 	return IRQ_HANDLED;
6109 }
6110 
6111 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6112 {
6113 	struct net_device *dev = (struct net_device *)dev_id;
6114 	struct stmmac_priv *priv = netdev_priv(dev);
6115 
6116 	/* Check if adapter is up */
6117 	if (test_bit(STMMAC_DOWN, &priv->state))
6118 		return IRQ_HANDLED;
6119 
6120 	/* To handle Common interrupts */
6121 	stmmac_common_interrupt(priv);
6122 
6123 	return IRQ_HANDLED;
6124 }
6125 
6126 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6127 {
6128 	struct net_device *dev = (struct net_device *)dev_id;
6129 	struct stmmac_priv *priv = netdev_priv(dev);
6130 
6131 	/* Check if adapter is up */
6132 	if (test_bit(STMMAC_DOWN, &priv->state))
6133 		return IRQ_HANDLED;
6134 
6135 	/* Check if a fatal error happened */
6136 	stmmac_safety_feat_interrupt(priv);
6137 
6138 	return IRQ_HANDLED;
6139 }
6140 
6141 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6142 {
6143 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6144 	struct stmmac_dma_conf *dma_conf;
6145 	int chan = tx_q->queue_index;
6146 	struct stmmac_priv *priv;
6147 	int status;
6148 
6149 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6150 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6151 
6152 	/* Check if adapter is up */
6153 	if (test_bit(STMMAC_DOWN, &priv->state))
6154 		return IRQ_HANDLED;
6155 
6156 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6157 
6158 	if (unlikely(status & tx_hard_error_bump_tc)) {
6159 		/* Try to bump up the dma threshold on this failure */
6160 		stmmac_bump_dma_threshold(priv, chan);
6161 	} else if (unlikely(status == tx_hard_error)) {
6162 		stmmac_tx_err(priv, chan);
6163 	}
6164 
6165 	return IRQ_HANDLED;
6166 }
6167 
6168 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6169 {
6170 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6171 	struct stmmac_dma_conf *dma_conf;
6172 	int chan = rx_q->queue_index;
6173 	struct stmmac_priv *priv;
6174 
6175 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6176 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6177 
6178 	/* Check if adapter is up */
6179 	if (test_bit(STMMAC_DOWN, &priv->state))
6180 		return IRQ_HANDLED;
6181 
6182 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6183 
6184 	return IRQ_HANDLED;
6185 }
6186 
6187 /**
6188  *  stmmac_ioctl - Entry point for the Ioctl
6189  *  @dev: Device pointer.
6190  *  @rq: An IOCTL-specific structure that can contain a pointer to
6191  *  a proprietary structure used to pass information to the driver.
6192  *  @cmd: IOCTL command
6193  *  Description:
6194  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6195  */
6196 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6197 {
6198 	struct stmmac_priv *priv = netdev_priv(dev);
6199 	int ret = -EOPNOTSUPP;
6200 
6201 	if (!netif_running(dev))
6202 		return -EINVAL;
6203 
6204 	switch (cmd) {
6205 	case SIOCGMIIPHY:
6206 	case SIOCGMIIREG:
6207 	case SIOCSMIIREG:
6208 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6209 		break;
6210 	case SIOCSHWTSTAMP:
6211 		ret = stmmac_hwtstamp_set(dev, rq);
6212 		break;
6213 	case SIOCGHWTSTAMP:
6214 		ret = stmmac_hwtstamp_get(dev, rq);
6215 		break;
6216 	default:
6217 		break;
6218 	}
6219 
6220 	return ret;
6221 }
6222 
6223 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6224 				    void *cb_priv)
6225 {
6226 	struct stmmac_priv *priv = cb_priv;
6227 	int ret = -EOPNOTSUPP;
6228 
6229 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6230 		return ret;
6231 
6232 	__stmmac_disable_all_queues(priv);
6233 
6234 	switch (type) {
6235 	case TC_SETUP_CLSU32:
6236 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6237 		break;
6238 	case TC_SETUP_CLSFLOWER:
6239 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6240 		break;
6241 	default:
6242 		break;
6243 	}
6244 
6245 	stmmac_enable_all_queues(priv);
6246 	return ret;
6247 }
6248 
6249 static LIST_HEAD(stmmac_block_cb_list);
6250 
6251 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6252 			   void *type_data)
6253 {
6254 	struct stmmac_priv *priv = netdev_priv(ndev);
6255 
6256 	switch (type) {
6257 	case TC_QUERY_CAPS:
6258 		return stmmac_tc_query_caps(priv, priv, type_data);
6259 	case TC_SETUP_BLOCK:
6260 		return flow_block_cb_setup_simple(type_data,
6261 						  &stmmac_block_cb_list,
6262 						  stmmac_setup_tc_block_cb,
6263 						  priv, priv, true);
6264 	case TC_SETUP_QDISC_CBS:
6265 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6266 	case TC_SETUP_QDISC_TAPRIO:
6267 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6268 	case TC_SETUP_QDISC_ETF:
6269 		return stmmac_tc_setup_etf(priv, priv, type_data);
6270 	default:
6271 		return -EOPNOTSUPP;
6272 	}
6273 }
6274 
6275 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6276 			       struct net_device *sb_dev)
6277 {
6278 	int gso = skb_shinfo(skb)->gso_type;
6279 
6280 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6281 		/*
6282 		 * There is no way to determine the number of TSO/USO
6283 		 * capable Queues. Let's always use Queue 0,
6284 		 * because if TSO/USO is supported then at least this
6285 		 * one will be capable.
6286 		 */
6287 		return 0;
6288 	}
6289 
6290 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6291 }
6292 
6293 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6294 {
6295 	struct stmmac_priv *priv = netdev_priv(ndev);
6296 	int ret = 0;
6297 
6298 	ret = pm_runtime_resume_and_get(priv->device);
6299 	if (ret < 0)
6300 		return ret;
6301 
6302 	ret = eth_mac_addr(ndev, addr);
6303 	if (ret)
6304 		goto set_mac_error;
6305 
6306 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6307 
6308 set_mac_error:
6309 	pm_runtime_put(priv->device);
6310 
6311 	return ret;
6312 }
6313 
6314 #ifdef CONFIG_DEBUG_FS
6315 static struct dentry *stmmac_fs_dir;
6316 
6317 static void sysfs_display_ring(void *head, int size, int extend_desc,
6318 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6319 {
6320 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6321 	struct dma_desc *p = (struct dma_desc *)head;
6322 	unsigned int desc_size;
6323 	dma_addr_t dma_addr;
6324 	int i;
6325 
6326 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6327 	for (i = 0; i < size; i++) {
6328 		dma_addr = dma_phy_addr + i * desc_size;
6329 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6330 				i, &dma_addr,
6331 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6332 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6333 		if (extend_desc)
6334 			p = &(++ep)->basic;
6335 		else
6336 			p++;
6337 	}
6338 }
6339 
6340 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6341 {
6342 	struct net_device *dev = seq->private;
6343 	struct stmmac_priv *priv = netdev_priv(dev);
6344 	u32 rx_count = priv->plat->rx_queues_to_use;
6345 	u32 tx_count = priv->plat->tx_queues_to_use;
6346 	u32 queue;
6347 
6348 	if ((dev->flags & IFF_UP) == 0)
6349 		return 0;
6350 
6351 	for (queue = 0; queue < rx_count; queue++) {
6352 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6353 
6354 		seq_printf(seq, "RX Queue %d:\n", queue);
6355 
6356 		if (priv->extend_desc) {
6357 			seq_printf(seq, "Extended descriptor ring:\n");
6358 			sysfs_display_ring((void *)rx_q->dma_erx,
6359 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6360 		} else {
6361 			seq_printf(seq, "Descriptor ring:\n");
6362 			sysfs_display_ring((void *)rx_q->dma_rx,
6363 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6364 		}
6365 	}
6366 
6367 	for (queue = 0; queue < tx_count; queue++) {
6368 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6369 
6370 		seq_printf(seq, "TX Queue %d:\n", queue);
6371 
6372 		if (priv->extend_desc) {
6373 			seq_printf(seq, "Extended descriptor ring:\n");
6374 			sysfs_display_ring((void *)tx_q->dma_etx,
6375 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6376 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6377 			seq_printf(seq, "Descriptor ring:\n");
6378 			sysfs_display_ring((void *)tx_q->dma_tx,
6379 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6380 		}
6381 	}
6382 
6383 	return 0;
6384 }
6385 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6386 
6387 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6388 {
6389 	static const char * const dwxgmac_timestamp_source[] = {
6390 		"None",
6391 		"Internal",
6392 		"External",
6393 		"Both",
6394 	};
6395 	static const char * const dwxgmac_safety_feature_desc[] = {
6396 		"No",
6397 		"All Safety Features with ECC and Parity",
6398 		"All Safety Features without ECC or Parity",
6399 		"All Safety Features with Parity Only",
6400 		"ECC Only",
6401 		"UNDEFINED",
6402 		"UNDEFINED",
6403 		"UNDEFINED",
6404 	};
6405 	struct net_device *dev = seq->private;
6406 	struct stmmac_priv *priv = netdev_priv(dev);
6407 
6408 	if (!priv->hw_cap_support) {
6409 		seq_printf(seq, "DMA HW features not supported\n");
6410 		return 0;
6411 	}
6412 
6413 	seq_printf(seq, "==============================\n");
6414 	seq_printf(seq, "\tDMA HW features\n");
6415 	seq_printf(seq, "==============================\n");
6416 
6417 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6418 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6419 	seq_printf(seq, "\t1000 Mbps: %s\n",
6420 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6421 	seq_printf(seq, "\tHalf duplex: %s\n",
6422 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6423 	if (priv->plat->has_xgmac) {
6424 		seq_printf(seq,
6425 			   "\tNumber of Additional MAC address registers: %d\n",
6426 			   priv->dma_cap.multi_addr);
6427 	} else {
6428 		seq_printf(seq, "\tHash Filter: %s\n",
6429 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6430 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6431 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6432 	}
6433 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6434 		   (priv->dma_cap.pcs) ? "Y" : "N");
6435 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6436 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6437 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6438 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6439 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6440 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6441 	seq_printf(seq, "\tRMON module: %s\n",
6442 		   (priv->dma_cap.rmon) ? "Y" : "N");
6443 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6444 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6445 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6446 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6447 	if (priv->plat->has_xgmac)
6448 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6449 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6450 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6451 		   (priv->dma_cap.eee) ? "Y" : "N");
6452 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6453 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6454 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6455 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6456 	    priv->plat->has_xgmac) {
6457 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6458 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6459 	} else {
6460 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6461 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6462 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6463 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6464 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6465 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6466 	}
6467 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6468 		   priv->dma_cap.number_rx_channel);
6469 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6470 		   priv->dma_cap.number_tx_channel);
6471 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6472 		   priv->dma_cap.number_rx_queues);
6473 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6474 		   priv->dma_cap.number_tx_queues);
6475 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6476 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6477 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6478 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6479 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6480 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6481 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6482 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6483 		   priv->dma_cap.pps_out_num);
6484 	seq_printf(seq, "\tSafety Features: %s\n",
6485 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6486 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6487 		   priv->dma_cap.frpsel ? "Y" : "N");
6488 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6489 		   priv->dma_cap.host_dma_width);
6490 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6491 		   priv->dma_cap.rssen ? "Y" : "N");
6492 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6493 		   priv->dma_cap.vlhash ? "Y" : "N");
6494 	seq_printf(seq, "\tSplit Header: %s\n",
6495 		   priv->dma_cap.sphen ? "Y" : "N");
6496 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6497 		   priv->dma_cap.vlins ? "Y" : "N");
6498 	seq_printf(seq, "\tDouble VLAN: %s\n",
6499 		   priv->dma_cap.dvlan ? "Y" : "N");
6500 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6501 		   priv->dma_cap.l3l4fnum);
6502 	seq_printf(seq, "\tARP Offloading: %s\n",
6503 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6504 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6505 		   priv->dma_cap.estsel ? "Y" : "N");
6506 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6507 		   priv->dma_cap.fpesel ? "Y" : "N");
6508 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6509 		   priv->dma_cap.tbssel ? "Y" : "N");
6510 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6511 		   priv->dma_cap.tbs_ch_num);
6512 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6513 		   priv->dma_cap.sgfsel ? "Y" : "N");
6514 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6515 		   BIT(priv->dma_cap.ttsfd) >> 1);
6516 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6517 		   priv->dma_cap.numtc);
6518 	seq_printf(seq, "\tDCB Feature: %s\n",
6519 		   priv->dma_cap.dcben ? "Y" : "N");
6520 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6521 		   priv->dma_cap.advthword ? "Y" : "N");
6522 	seq_printf(seq, "\tPTP Offload: %s\n",
6523 		   priv->dma_cap.ptoen ? "Y" : "N");
6524 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6525 		   priv->dma_cap.osten ? "Y" : "N");
6526 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6527 		   priv->dma_cap.pfcen ? "Y" : "N");
6528 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6529 		   BIT(priv->dma_cap.frpes) << 6);
6530 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6531 		   BIT(priv->dma_cap.frpbs) << 6);
6532 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6533 		   priv->dma_cap.frppipe_num);
6534 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6535 		   priv->dma_cap.nrvf_num ?
6536 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6537 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6538 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6539 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6540 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6541 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6542 		   priv->dma_cap.cbtisel ? "Y" : "N");
6543 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6544 		   priv->dma_cap.aux_snapshot_n);
6545 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6546 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6547 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6548 		   priv->dma_cap.edma ? "Y" : "N");
6549 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6550 		   priv->dma_cap.ediffc ? "Y" : "N");
6551 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6552 		   priv->dma_cap.vxn ? "Y" : "N");
6553 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6554 		   priv->dma_cap.dbgmem ? "Y" : "N");
6555 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6556 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6557 	return 0;
6558 }
6559 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6560 
6561 /* Use network device events to rename debugfs file entries.
6562  */
6563 static int stmmac_device_event(struct notifier_block *unused,
6564 			       unsigned long event, void *ptr)
6565 {
6566 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6567 	struct stmmac_priv *priv = netdev_priv(dev);
6568 
6569 	if (dev->netdev_ops != &stmmac_netdev_ops)
6570 		goto done;
6571 
6572 	switch (event) {
6573 	case NETDEV_CHANGENAME:
6574 		if (priv->dbgfs_dir)
6575 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6576 							 priv->dbgfs_dir,
6577 							 stmmac_fs_dir,
6578 							 dev->name);
6579 		break;
6580 	}
6581 done:
6582 	return NOTIFY_DONE;
6583 }
6584 
6585 static struct notifier_block stmmac_notifier = {
6586 	.notifier_call = stmmac_device_event,
6587 };
6588 
6589 static void stmmac_init_fs(struct net_device *dev)
6590 {
6591 	struct stmmac_priv *priv = netdev_priv(dev);
6592 
6593 	rtnl_lock();
6594 
6595 	/* Create per netdev entries */
6596 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6597 
6598 	/* Entry to report DMA RX/TX rings */
6599 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6600 			    &stmmac_rings_status_fops);
6601 
6602 	/* Entry to report the DMA HW features */
6603 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6604 			    &stmmac_dma_cap_fops);
6605 
6606 	rtnl_unlock();
6607 }
6608 
6609 static void stmmac_exit_fs(struct net_device *dev)
6610 {
6611 	struct stmmac_priv *priv = netdev_priv(dev);
6612 
6613 	debugfs_remove_recursive(priv->dbgfs_dir);
6614 }
6615 #endif /* CONFIG_DEBUG_FS */
6616 
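/* Bit-wise CRC-32 (reflected Ethernet polynomial 0xEDB88320) over the
 * little-endian VLAN ID, as consumed by the MAC VLAN hash filter below.
 */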
6617 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6618 {
6619 	unsigned char *data = (unsigned char *)&vid_le;
6620 	unsigned char data_byte = 0;
6621 	u32 crc = ~0x0;
6622 	u32 temp = 0;
6623 	int i, bits;
6624 
6625 	bits = get_bitmask_order(VLAN_VID_MASK);
6626 	for (i = 0; i < bits; i++) {
6627 		if ((i % 8) == 0)
6628 			data_byte = data[i / 8];
6629 
6630 		temp = ((crc & 1) ^ data_byte) & 1;
6631 		crc >>= 1;
6632 		data_byte >>= 1;
6633 
6634 		if (temp)
6635 			crc ^= 0xedb88320;
6636 	}
6637 
6638 	return crc;
6639 }
6640 
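/* Rebuild the VLAN filter from the set of active VLANs: the top four bits of
 * the bit-reversed CRC of each VID select one bit in the 16-bit hash. If the
 * hardware has no VLAN hash support, fall back to a single perfect match.
 */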
6641 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6642 {
6643 	u32 crc, hash = 0;
6644 	u16 pmatch = 0;
6645 	int count = 0;
6646 	u16 vid = 0;
6647 
6648 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6649 		__le16 vid_le = cpu_to_le16(vid);
6650 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6651 		hash |= (1 << crc);
6652 		count++;
6653 	}
6654 
6655 	if (!priv->dma_cap.vlhash) {
6656 		if (count > 2) /* VID = 0 always passes filter */
6657 			return -EOPNOTSUPP;
6658 
6659 		pmatch = vid;
6660 		hash = 0;
6661 	}
6662 
6663 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6664 }
6665 
6666 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6667 {
6668 	struct stmmac_priv *priv = netdev_priv(ndev);
6669 	bool is_double = false;
6670 	int ret;
6671 
6672 	ret = pm_runtime_resume_and_get(priv->device);
6673 	if (ret < 0)
6674 		return ret;
6675 
6676 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6677 		is_double = true;
6678 
6679 	set_bit(vid, priv->active_vlans);
6680 	ret = stmmac_vlan_update(priv, is_double);
6681 	if (ret) {
6682 		clear_bit(vid, priv->active_vlans);
6683 		goto err_pm_put;
6684 	}
6685 
6686 	if (priv->hw->num_vlan) {
6687 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6688 		if (ret)
6689 			goto err_pm_put;
6690 	}
6691 err_pm_put:
6692 	pm_runtime_put(priv->device);
6693 
6694 	return ret;
6695 }
6696 
6697 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6698 {
6699 	struct stmmac_priv *priv = netdev_priv(ndev);
6700 	bool is_double = false;
6701 	int ret;
6702 
6703 	ret = pm_runtime_resume_and_get(priv->device);
6704 	if (ret < 0)
6705 		return ret;
6706 
6707 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6708 		is_double = true;
6709 
6710 	clear_bit(vid, priv->active_vlans);
6711 
6712 	if (priv->hw->num_vlan) {
6713 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6714 		if (ret)
6715 			goto del_vlan_error;
6716 	}
6717 
6718 	ret = stmmac_vlan_update(priv, is_double);
6719 
6720 del_vlan_error:
6721 	pm_runtime_put(priv->device);
6722 
6723 	return ret;
6724 }
6725 
6726 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6727 {
6728 	struct stmmac_priv *priv = netdev_priv(dev);
6729 
6730 	switch (bpf->command) {
6731 	case XDP_SETUP_PROG:
6732 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6733 	case XDP_SETUP_XSK_POOL:
6734 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6735 					     bpf->xsk.queue_id);
6736 	default:
6737 		return -EOPNOTSUPP;
6738 	}
6739 }
6740 
6741 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6742 			   struct xdp_frame **frames, u32 flags)
6743 {
6744 	struct stmmac_priv *priv = netdev_priv(dev);
6745 	int cpu = smp_processor_id();
6746 	struct netdev_queue *nq;
6747 	int i, nxmit = 0;
6748 	int queue;
6749 
6750 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6751 		return -ENETDOWN;
6752 
6753 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6754 		return -EINVAL;
6755 
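	/* XDP TX shares the regular TX queues: pick one based on the current
	 * CPU and serialize with the stack via the netdev TX queue lock.
	 */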
6756 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6757 	nq = netdev_get_tx_queue(priv->dev, queue);
6758 
6759 	__netif_tx_lock(nq, cpu);
6760 	/* Avoid TX time-out as we are sharing with the slow path */
6761 	txq_trans_cond_update(nq);
6762 
6763 	for (i = 0; i < num_frames; i++) {
6764 		int res;
6765 
6766 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6767 		if (res == STMMAC_XDP_CONSUMED)
6768 			break;
6769 
6770 		nxmit++;
6771 	}
6772 
6773 	if (flags & XDP_XMIT_FLUSH) {
6774 		stmmac_flush_tx_descriptors(priv, queue);
6775 		stmmac_tx_timer_arm(priv, queue);
6776 	}
6777 
6778 	__netif_tx_unlock(nq);
6779 
6780 	return nxmit;
6781 }
6782 
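/* The per-queue disable/enable helpers below tear down and rebuild a single
 * RX or TX queue; they are intended for the XSK buffer pool setup path, which
 * needs to reconfigure one queue pair at runtime without touching the others.
 */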
6783 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6784 {
6785 	struct stmmac_channel *ch = &priv->channel[queue];
6786 	unsigned long flags;
6787 
6788 	spin_lock_irqsave(&ch->lock, flags);
6789 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6790 	spin_unlock_irqrestore(&ch->lock, flags);
6791 
6792 	stmmac_stop_rx_dma(priv, queue);
6793 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6794 }
6795 
6796 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6797 {
6798 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6799 	struct stmmac_channel *ch = &priv->channel[queue];
6800 	unsigned long flags;
6801 	u32 buf_size;
6802 	int ret;
6803 
6804 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6805 	if (ret) {
6806 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6807 		return;
6808 	}
6809 
6810 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6811 	if (ret) {
6812 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6813 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6814 		return;
6815 	}
6816 
6817 	stmmac_reset_rx_queue(priv, queue);
6818 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6819 
6820 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6821 			    rx_q->dma_rx_phy, rx_q->queue_index);
6822 
6823 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6824 			     sizeof(struct dma_desc));
6825 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6826 			       rx_q->rx_tail_addr, rx_q->queue_index);
6827 
6828 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6829 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6830 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6831 				      buf_size,
6832 				      rx_q->queue_index);
6833 	} else {
6834 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6835 				      priv->dma_conf.dma_buf_sz,
6836 				      rx_q->queue_index);
6837 	}
6838 
6839 	stmmac_start_rx_dma(priv, queue);
6840 
6841 	spin_lock_irqsave(&ch->lock, flags);
6842 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6843 	spin_unlock_irqrestore(&ch->lock, flags);
6844 }
6845 
6846 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6847 {
6848 	struct stmmac_channel *ch = &priv->channel[queue];
6849 	unsigned long flags;
6850 
6851 	spin_lock_irqsave(&ch->lock, flags);
6852 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6853 	spin_unlock_irqrestore(&ch->lock, flags);
6854 
6855 	stmmac_stop_tx_dma(priv, queue);
6856 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6857 }
6858 
6859 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6860 {
6861 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6862 	struct stmmac_channel *ch = &priv->channel[queue];
6863 	unsigned long flags;
6864 	int ret;
6865 
6866 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6867 	if (ret) {
6868 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6869 		return;
6870 	}
6871 
6872 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6873 	if (ret) {
6874 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6875 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6876 		return;
6877 	}
6878 
6879 	stmmac_reset_tx_queue(priv, queue);
6880 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6881 
6882 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6883 			    tx_q->dma_tx_phy, tx_q->queue_index);
6884 
6885 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6886 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6887 
6888 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6889 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6890 			       tx_q->tx_tail_addr, tx_q->queue_index);
6891 
6892 	stmmac_start_tx_dma(priv, queue);
6893 
6894 	spin_lock_irqsave(&ch->lock, flags);
6895 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6896 	spin_unlock_irqrestore(&ch->lock, flags);
6897 }
6898 
6899 void stmmac_xdp_release(struct net_device *dev)
6900 {
6901 	struct stmmac_priv *priv = netdev_priv(dev);
6902 	u32 chan;
6903 
6904 	/* Ensure tx function is not running */
6905 	netif_tx_disable(dev);
6906 
6907 	/* Disable NAPI process */
6908 	stmmac_disable_all_queues(priv);
6909 
6910 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6911 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6912 
6913 	/* Free the IRQ lines */
6914 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6915 
6916 	/* Stop TX/RX DMA channels */
6917 	stmmac_stop_all_dma(priv);
6918 
6919 	/* Release and free the Rx/Tx resources */
6920 	free_dma_desc_resources(priv, &priv->dma_conf);
6921 
6922 	/* Disable the MAC Rx/Tx */
6923 	stmmac_mac_set(priv, priv->ioaddr, false);
6924 
6925 	/* set trans_start so we don't get spurious
6926 	 * watchdogs during reset
6927 	 */
6928 	netif_trans_update(dev);
6929 	netif_carrier_off(dev);
6930 }
6931 
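/* Rebuild and restart the datapath (descriptor rings, DMA channels, IRQs,
 * NAPI) without a full ndo_open/ndo_stop cycle; paired with
 * stmmac_xdp_release() when attaching or detaching an XDP program or XSK
 * pool requires the rings to be reinitialized.
 */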
6932 int stmmac_xdp_open(struct net_device *dev)
6933 {
6934 	struct stmmac_priv *priv = netdev_priv(dev);
6935 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6936 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6937 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6938 	struct stmmac_rx_queue *rx_q;
6939 	struct stmmac_tx_queue *tx_q;
6940 	u32 buf_size;
6941 	bool sph_en;
6942 	u32 chan;
6943 	int ret;
6944 
6945 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6946 	if (ret < 0) {
6947 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6948 			   __func__);
6949 		goto dma_desc_error;
6950 	}
6951 
6952 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6953 	if (ret < 0) {
6954 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6955 			   __func__);
6956 		goto init_error;
6957 	}
6958 
6959 	stmmac_reset_queues_param(priv);
6960 
6961 	/* DMA CSR Channel configuration */
6962 	for (chan = 0; chan < dma_csr_ch; chan++) {
6963 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6964 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6965 	}
6966 
6967 	/* Adjust Split header */
6968 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6969 
6970 	/* DMA RX Channel Configuration */
6971 	for (chan = 0; chan < rx_cnt; chan++) {
6972 		rx_q = &priv->dma_conf.rx_queue[chan];
6973 
6974 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6975 				    rx_q->dma_rx_phy, chan);
6976 
6977 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6978 				     (rx_q->buf_alloc_num *
6979 				      sizeof(struct dma_desc));
6980 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6981 				       rx_q->rx_tail_addr, chan);
6982 
6983 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6984 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6985 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6986 					      buf_size,
6987 					      rx_q->queue_index);
6988 		} else {
6989 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6990 					      priv->dma_conf.dma_buf_sz,
6991 					      rx_q->queue_index);
6992 		}
6993 
6994 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6995 	}
6996 
6997 	/* DMA TX Channel Configuration */
6998 	for (chan = 0; chan < tx_cnt; chan++) {
6999 		tx_q = &priv->dma_conf.tx_queue[chan];
7000 
7001 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7002 				    tx_q->dma_tx_phy, chan);
7003 
7004 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7005 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7006 				       tx_q->tx_tail_addr, chan);
7007 
7008 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7009 		tx_q->txtimer.function = stmmac_tx_timer;
7010 	}
7011 
7012 	/* Enable the MAC Rx/Tx */
7013 	stmmac_mac_set(priv, priv->ioaddr, true);
7014 
7015 	/* Start Rx & Tx DMA Channels */
7016 	stmmac_start_all_dma(priv);
7017 
7018 	ret = stmmac_request_irq(dev);
7019 	if (ret)
7020 		goto irq_error;
7021 
7022 	/* Enable NAPI process */
7023 	stmmac_enable_all_queues(priv);
7024 	netif_carrier_on(dev);
7025 	netif_tx_start_all_queues(dev);
7026 	stmmac_enable_all_dma_irq(priv);
7027 
7028 	return 0;
7029 
7030 irq_error:
7031 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7032 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7033 
7034 	stmmac_hw_teardown(dev);
7035 init_error:
7036 	free_dma_desc_resources(priv, &priv->dma_conf);
7037 dma_desc_error:
7038 	return ret;
7039 }
7040 
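/* .ndo_xsk_wakeup handler: kick the combined RX/TX NAPI of an AF_XDP
 * zero-copy queue so that pending descriptors are processed.
 */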
7041 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7042 {
7043 	struct stmmac_priv *priv = netdev_priv(dev);
7044 	struct stmmac_rx_queue *rx_q;
7045 	struct stmmac_tx_queue *tx_q;
7046 	struct stmmac_channel *ch;
7047 
7048 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7049 	    !netif_carrier_ok(priv->dev))
7050 		return -ENETDOWN;
7051 
7052 	if (!stmmac_xdp_is_enabled(priv))
7053 		return -EINVAL;
7054 
7055 	if (queue >= priv->plat->rx_queues_to_use ||
7056 	    queue >= priv->plat->tx_queues_to_use)
7057 		return -EINVAL;
7058 
7059 	rx_q = &priv->dma_conf.rx_queue[queue];
7060 	tx_q = &priv->dma_conf.tx_queue[queue];
7061 	ch = &priv->channel[queue];
7062 
7063 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7064 		return -EINVAL;
7065 
7066 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7067 		/* EQoS does not have a per-DMA channel SW interrupt,
7068 		 * so we schedule the NAPI straight away.
7069 		 */
7070 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7071 			__napi_schedule(&ch->rxtx_napi);
7072 	}
7073 
7074 	return 0;
7075 }
7076 
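/* .ndo_get_stats64 handler: aggregate the per-queue software counters.
 * Each counter group is read inside a u64_stats fetch/retry loop so that
 * 64-bit values remain consistent even on 32-bit hosts.
 */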
7077 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7078 {
7079 	struct stmmac_priv *priv = netdev_priv(dev);
7080 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7081 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7082 	unsigned int start;
7083 	int q;
7084 
7085 	for (q = 0; q < tx_cnt; q++) {
7086 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7087 		u64 tx_packets;
7088 		u64 tx_bytes;
7089 
7090 		do {
7091 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7092 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7093 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7094 		do {
7095 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7096 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7097 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7098 
7099 		stats->tx_packets += tx_packets;
7100 		stats->tx_bytes += tx_bytes;
7101 	}
7102 
7103 	for (q = 0; q < rx_cnt; q++) {
7104 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7105 		u64 rx_packets;
7106 		u64 rx_bytes;
7107 
7108 		do {
7109 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7110 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7111 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7112 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7113 
7114 		stats->rx_packets += rx_packets;
7115 		stats->rx_bytes += rx_bytes;
7116 	}
7117 
7118 	stats->rx_dropped = priv->xstats.rx_dropped;
7119 	stats->rx_errors = priv->xstats.rx_errors;
7120 	stats->tx_dropped = priv->xstats.tx_dropped;
7121 	stats->tx_errors = priv->xstats.tx_errors;
7122 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7123 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7124 	stats->rx_length_errors = priv->xstats.rx_length;
7125 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7126 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7127 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7128 }
7129 
7130 static const struct net_device_ops stmmac_netdev_ops = {
7131 	.ndo_open = stmmac_open,
7132 	.ndo_start_xmit = stmmac_xmit,
7133 	.ndo_stop = stmmac_release,
7134 	.ndo_change_mtu = stmmac_change_mtu,
7135 	.ndo_fix_features = stmmac_fix_features,
7136 	.ndo_set_features = stmmac_set_features,
7137 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7138 	.ndo_tx_timeout = stmmac_tx_timeout,
7139 	.ndo_eth_ioctl = stmmac_ioctl,
7140 	.ndo_get_stats64 = stmmac_get_stats64,
7141 	.ndo_setup_tc = stmmac_setup_tc,
7142 	.ndo_select_queue = stmmac_select_queue,
7143 	.ndo_set_mac_address = stmmac_set_mac_address,
7144 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7145 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7146 	.ndo_bpf = stmmac_bpf,
7147 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7148 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7149 };
7150 
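/* Recover from a requested adapter reset (STMMAC_RESET_REQUESTED, set e.g.
 * on a TX timeout): close and reopen the device under rtnl, serialized by
 * the STMMAC_RESETING state bit.
 */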
7151 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7152 {
7153 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7154 		return;
7155 	if (test_bit(STMMAC_DOWN, &priv->state))
7156 		return;
7157 
7158 	netdev_err(priv->dev, "Reset adapter.\n");
7159 
7160 	rtnl_lock();
7161 	netif_trans_update(priv->dev);
7162 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7163 		usleep_range(1000, 2000);
7164 
7165 	set_bit(STMMAC_DOWN, &priv->state);
7166 	dev_close(priv->dev);
7167 	dev_open(priv->dev, NULL);
7168 	clear_bit(STMMAC_DOWN, &priv->state);
7169 	clear_bit(STMMAC_RESETING, &priv->state);
7170 	rtnl_unlock();
7171 }
7172 
7173 static void stmmac_service_task(struct work_struct *work)
7174 {
7175 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7176 			service_task);
7177 
7178 	stmmac_reset_subtask(priv);
7179 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7180 }
7181 
7182 /**
7183  *  stmmac_hw_init - Init the MAC device
7184  *  @priv: driver private structure
7185  *  Description: this function is to configure the MAC device according to
7186  *  some platform parameters or the HW capability register. It prepares the
7187  *  driver to use either ring or chain modes and to setup either enhanced or
7188  *  normal descriptors.
7189  */
7190 static int stmmac_hw_init(struct stmmac_priv *priv)
7191 {
7192 	int ret;
7193 
7194 	/* dwmac-sun8i only works in chain mode */
7195 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7196 		chain_mode = 1;
7197 	priv->chain_mode = chain_mode;
7198 
7199 	/* Initialize HW Interface */
7200 	ret = stmmac_hwif_init(priv);
7201 	if (ret)
7202 		return ret;
7203 
7204 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7205 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7206 	if (priv->hw_cap_support) {
7207 		dev_info(priv->device, "DMA HW capability register supported\n");
7208 
7209 		/* We can override some gmac/dma configuration fields that
7210 		 * are passed through the platform (e.g. enh_desc, tx_coe)
7211 		 * with the values from the HW capability register
7212 		 * (if supported).
7213 		 */
7214 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7215 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7216 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7217 		priv->hw->pmt = priv->plat->pmt;
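		/* The HW capability field encodes the hash table size;
		 * BIT(hash_tb_sz) << 5 yields the number of multicast
		 * filter bins (64/128/256 for the defined encodings).
		 */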
7218 		if (priv->dma_cap.hash_tb_sz) {
7219 			priv->hw->multicast_filter_bins =
7220 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7221 			priv->hw->mcast_bits_log2 =
7222 					ilog2(priv->hw->multicast_filter_bins);
7223 		}
7224 
7225 		/* TXCOE doesn't work in thresh DMA mode */
7226 		if (priv->plat->force_thresh_dma_mode)
7227 			priv->plat->tx_coe = 0;
7228 		else
7229 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7230 
7231 		/* In case of GMAC4 rx_coe is from HW cap register. */
7232 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7233 
7234 		if (priv->dma_cap.rx_coe_type2)
7235 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7236 		else if (priv->dma_cap.rx_coe_type1)
7237 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7238 
7239 	} else {
7240 		dev_info(priv->device, "No HW DMA feature register supported\n");
7241 	}
7242 
7243 	if (priv->plat->rx_coe) {
7244 		priv->hw->rx_csum = priv->plat->rx_coe;
7245 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7246 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7247 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7248 	}
7249 	if (priv->plat->tx_coe)
7250 		dev_info(priv->device, "TX Checksum insertion supported\n");
7251 
7252 	if (priv->plat->pmt) {
7253 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7254 		device_set_wakeup_capable(priv->device, 1);
7255 	}
7256 
7257 	if (priv->dma_cap.tsoen)
7258 		dev_info(priv->device, "TSO supported\n");
7259 
7260 	priv->hw->vlan_fail_q_en =
7261 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7262 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7263 
7264 	/* Run HW quirks, if any */
7265 	if (priv->hwif_quirks) {
7266 		ret = priv->hwif_quirks(priv);
7267 		if (ret)
7268 			return ret;
7269 	}
7270 
7271 	/* Rx Watchdog is available in cores newer than 3.40.
7272 	 * In some cases, for example on buggy HW, this feature
7273 	 * has to be disabled; this can be done by passing the
7274 	 * riwt_off field from the platform.
7275 	 */
7276 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7277 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7278 		priv->use_riwt = 1;
7279 		dev_info(priv->device,
7280 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7281 	}
7282 
7283 	return 0;
7284 }
7285 
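/* Register up to three NAPI contexts per channel: one for RX, one for TX,
 * and a combined RX/TX instance that is used by the AF_XDP zero-copy path.
 */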
7286 static void stmmac_napi_add(struct net_device *dev)
7287 {
7288 	struct stmmac_priv *priv = netdev_priv(dev);
7289 	u32 queue, maxq;
7290 
7291 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7292 
7293 	for (queue = 0; queue < maxq; queue++) {
7294 		struct stmmac_channel *ch = &priv->channel[queue];
7295 
7296 		ch->priv_data = priv;
7297 		ch->index = queue;
7298 		spin_lock_init(&ch->lock);
7299 
7300 		if (queue < priv->plat->rx_queues_to_use) {
7301 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7302 		}
7303 		if (queue < priv->plat->tx_queues_to_use) {
7304 			netif_napi_add_tx(dev, &ch->tx_napi,
7305 					  stmmac_napi_poll_tx);
7306 		}
7307 		if (queue < priv->plat->rx_queues_to_use &&
7308 		    queue < priv->plat->tx_queues_to_use) {
7309 			netif_napi_add(dev, &ch->rxtx_napi,
7310 				       stmmac_napi_poll_rxtx);
7311 		}
7312 	}
7313 }
7314 
7315 static void stmmac_napi_del(struct net_device *dev)
7316 {
7317 	struct stmmac_priv *priv = netdev_priv(dev);
7318 	u32 queue, maxq;
7319 
7320 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7321 
7322 	for (queue = 0; queue < maxq; queue++) {
7323 		struct stmmac_channel *ch = &priv->channel[queue];
7324 
7325 		if (queue < priv->plat->rx_queues_to_use)
7326 			netif_napi_del(&ch->rx_napi);
7327 		if (queue < priv->plat->tx_queues_to_use)
7328 			netif_napi_del(&ch->tx_napi);
7329 		if (queue < priv->plat->rx_queues_to_use &&
7330 		    queue < priv->plat->tx_queues_to_use) {
7331 			netif_napi_del(&ch->rxtx_napi);
7332 		}
7333 	}
7334 }
7335 
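/* stmmac_reinit_queues() and stmmac_reinit_ringparam() below apply new queue
 * counts and ring sizes by closing and reopening a running interface; they
 * back the ethtool set_channels / set_ringparam operations.
 */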
7336 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7337 {
7338 	struct stmmac_priv *priv = netdev_priv(dev);
7339 	int ret = 0, i;
7340 
7341 	if (netif_running(dev))
7342 		stmmac_release(dev);
7343 
7344 	stmmac_napi_del(dev);
7345 
7346 	priv->plat->rx_queues_to_use = rx_cnt;
7347 	priv->plat->tx_queues_to_use = tx_cnt;
7348 	if (!netif_is_rxfh_configured(dev))
7349 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7350 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7351 									rx_cnt);
7352 
7353 	stmmac_napi_add(dev);
7354 
7355 	if (netif_running(dev))
7356 		ret = stmmac_open(dev);
7357 
7358 	return ret;
7359 }
7360 
7361 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7362 {
7363 	struct stmmac_priv *priv = netdev_priv(dev);
7364 	int ret = 0;
7365 
7366 	if (netif_running(dev))
7367 		stmmac_release(dev);
7368 
7369 	priv->dma_conf.dma_rx_size = rx_size;
7370 	priv->dma_conf.dma_tx_size = tx_size;
7371 
7372 	if (netif_running(dev))
7373 		ret = stmmac_open(dev);
7374 
7375 	return ret;
7376 }
7377 
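/* Frame Preemption (FPE) handshake task: keep sending verify mPackets to the
 * link partner until both the local and link-partner state machines reach
 * ENTERING_ON, then program FPE in hardware. Gives up after 20 attempts
 * (roughly 10 seconds at 500 ms per retry).
 */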
7378 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7379 static void stmmac_fpe_lp_task(struct work_struct *work)
7380 {
7381 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7382 						fpe_task);
7383 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7384 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7385 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7386 	bool *hs_enable = &fpe_cfg->hs_enable;
7387 	bool *enable = &fpe_cfg->enable;
7388 	int retries = 20;
7389 
7390 	while (retries-- > 0) {
7391 		/* Bail out immediately if FPE handshake is OFF */
7392 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7393 			break;
7394 
7395 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7396 		    *lp_state == FPE_STATE_ENTERING_ON) {
7397 			stmmac_fpe_configure(priv, priv->ioaddr,
7398 					     fpe_cfg,
7399 					     priv->plat->tx_queues_to_use,
7400 					     priv->plat->rx_queues_to_use,
7401 					     *enable);
7402 
7403 			netdev_info(priv->dev, "configured FPE\n");
7404 
7405 			*lo_state = FPE_STATE_ON;
7406 			*lp_state = FPE_STATE_ON;
7407 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7408 			break;
7409 		}
7410 
7411 		if ((*lo_state == FPE_STATE_CAPABLE ||
7412 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7413 		     *lp_state != FPE_STATE_ON) {
7414 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7415 				    *lo_state, *lp_state);
7416 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7417 						fpe_cfg,
7418 						MPACKET_VERIFY);
7419 		}
7420 		/* Sleep then retry */
7421 		msleep(500);
7422 	}
7423 
7424 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7425 }
7426 
7427 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7428 {
7429 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7430 		if (enable) {
7431 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7432 						priv->plat->fpe_cfg,
7433 						MPACKET_VERIFY);
7434 		} else {
7435 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7436 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7437 		}
7438 
7439 		priv->plat->fpe_cfg->hs_enable = enable;
7440 	}
7441 }
7442 
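/* XDP RX metadata hook: let XDP/AF_XDP consumers query the hardware RX
 * timestamp of the current frame through the XDP metadata kfuncs.
 */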
7443 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7444 {
7445 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7446 	struct dma_desc *desc_contains_ts = ctx->desc;
7447 	struct stmmac_priv *priv = ctx->priv;
7448 	struct dma_desc *ndesc = ctx->ndesc;
7449 	struct dma_desc *desc = ctx->desc;
7450 	u64 ns = 0;
7451 
7452 	if (!priv->hwts_rx_en)
7453 		return -ENODATA;
7454 
7455 	/* For GMAC4, the valid timestamp is held in the CTX (next) descriptor. */
7456 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7457 		desc_contains_ts = ndesc;
7458 
7459 	/* Check if timestamp is available */
7460 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7461 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7462 		ns -= priv->plat->cdc_error_adj;
7463 		*timestamp = ns_to_ktime(ns);
7464 		return 0;
7465 	}
7466 
7467 	return -ENODATA;
7468 }
7469 
7470 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7471 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7472 };
7473 
7474 /**
7475  * stmmac_dvr_probe
7476  * @device: device pointer
7477  * @plat_dat: platform data pointer
7478  * @res: stmmac resource pointer
7479  * Description: this is the main probe function, used to allocate the
7480  * net_device (via alloc_etherdev) and to set up the private structure.
7481  * Return:
7482  * returns 0 on success, otherwise errno.
7483  */
7484 int stmmac_dvr_probe(struct device *device,
7485 		     struct plat_stmmacenet_data *plat_dat,
7486 		     struct stmmac_resources *res)
7487 {
7488 	struct net_device *ndev = NULL;
7489 	struct stmmac_priv *priv;
7490 	u32 rxq;
7491 	int i, ret = 0;
7492 
7493 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7494 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7495 	if (!ndev)
7496 		return -ENOMEM;
7497 
7498 	SET_NETDEV_DEV(ndev, device);
7499 
7500 	priv = netdev_priv(ndev);
7501 	priv->device = device;
7502 	priv->dev = ndev;
7503 
7504 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7505 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7506 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7507 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7508 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7509 	}
7510 
7511 	priv->xstats.pcpu_stats =
7512 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7513 	if (!priv->xstats.pcpu_stats)
7514 		return -ENOMEM;
7515 
7516 	stmmac_set_ethtool_ops(ndev);
7517 	priv->pause = pause;
7518 	priv->plat = plat_dat;
7519 	priv->ioaddr = res->addr;
7520 	priv->dev->base_addr = (unsigned long)res->addr;
7521 	priv->plat->dma_cfg->multi_msi_en =
7522 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7523 
7524 	priv->dev->irq = res->irq;
7525 	priv->wol_irq = res->wol_irq;
7526 	priv->lpi_irq = res->lpi_irq;
7527 	priv->sfty_irq = res->sfty_irq;
7528 	priv->sfty_ce_irq = res->sfty_ce_irq;
7529 	priv->sfty_ue_irq = res->sfty_ue_irq;
7530 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7531 		priv->rx_irq[i] = res->rx_irq[i];
7532 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7533 		priv->tx_irq[i] = res->tx_irq[i];
7534 
7535 	if (!is_zero_ether_addr(res->mac))
7536 		eth_hw_addr_set(priv->dev, res->mac);
7537 
7538 	dev_set_drvdata(device, priv->dev);
7539 
7540 	/* Verify driver arguments */
7541 	stmmac_verify_args();
7542 
7543 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7544 	if (!priv->af_xdp_zc_qps)
7545 		return -ENOMEM;
7546 
7547 	/* Allocate workqueue */
7548 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7549 	if (!priv->wq) {
7550 		dev_err(priv->device, "failed to create workqueue\n");
7551 		ret = -ENOMEM;
7552 		goto error_wq_init;
7553 	}
7554 
7555 	INIT_WORK(&priv->service_task, stmmac_service_task);
7556 
7557 	/* Initialize Link Partner FPE workqueue */
7558 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7559 
7560 	/* Override with kernel parameters if supplied XXX CRS XXX
7561 	 * this needs to have multiple instances
7562 	 */
7563 	if ((phyaddr >= 0) && (phyaddr <= 31))
7564 		priv->plat->phy_addr = phyaddr;
7565 
7566 	if (priv->plat->stmmac_rst) {
7567 		ret = reset_control_assert(priv->plat->stmmac_rst);
7568 		reset_control_deassert(priv->plat->stmmac_rst);
7569 		/* Some reset controllers provide only a reset callback instead
7570 		 * of the assert + deassert callback pair.
7571 		 */
7572 		if (ret == -ENOTSUPP)
7573 			reset_control_reset(priv->plat->stmmac_rst);
7574 	}
7575 
7576 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7577 	if (ret == -ENOTSUPP)
7578 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7579 			ERR_PTR(ret));
7580 
7581 	/* Wait a bit for the reset to take effect */
7582 	udelay(10);
7583 
7584 	/* Init MAC and get the capabilities */
7585 	ret = stmmac_hw_init(priv);
7586 	if (ret)
7587 		goto error_hw_init;
7588 
7589 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7590 	 */
7591 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7592 		priv->plat->dma_cfg->dche = false;
7593 
7594 	stmmac_check_ether_addr(priv);
7595 
7596 	ndev->netdev_ops = &stmmac_netdev_ops;
7597 
7598 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7599 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7600 
7601 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7602 			    NETIF_F_RXCSUM;
7603 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7604 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7605 
7606 	ret = stmmac_tc_init(priv, priv);
7607 	if (!ret) {
7608 		ndev->hw_features |= NETIF_F_HW_TC;
7609 	}
7610 
7611 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7612 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7613 		if (priv->plat->has_gmac4)
7614 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7615 		priv->tso = true;
7616 		dev_info(priv->device, "TSO feature enabled\n");
7617 	}
7618 
7619 	if (priv->dma_cap.sphen &&
7620 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7621 		ndev->hw_features |= NETIF_F_GRO;
7622 		priv->sph_cap = true;
7623 		priv->sph = priv->sph_cap;
7624 		dev_info(priv->device, "SPH feature enabled\n");
7625 	}
7626 
7627 	/* Ideally our host DMA address width is the same as for the
7628 	 * device. However, it may differ and then we have to use our
7629 	 * host DMA width for allocation and the device DMA width for
7630 	 * register handling.
7631 	 */
7632 	if (priv->plat->host_dma_width)
7633 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7634 	else
7635 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7636 
7637 	if (priv->dma_cap.host_dma_width) {
7638 		ret = dma_set_mask_and_coherent(device,
7639 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7640 		if (!ret) {
7641 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7642 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7643 
7644 			/*
7645 			 * If more than 32 bits can be addressed, make sure to
7646 			 * enable enhanced addressing mode.
7647 			 */
7648 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7649 				priv->plat->dma_cfg->eame = true;
7650 		} else {
7651 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7652 			if (ret) {
7653 				dev_err(priv->device, "Failed to set DMA Mask\n");
7654 				goto error_hw_init;
7655 			}
7656 
7657 			priv->dma_cap.host_dma_width = 32;
7658 		}
7659 	}
7660 
7661 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7662 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7663 #ifdef STMMAC_VLAN_TAG_USED
7664 	/* Both mac100 and gmac support receive VLAN tag detection */
7665 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7666 	if (priv->plat->has_gmac4) {
7667 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7668 		priv->hw->hw_vlan_en = true;
7669 	}
7670 	if (priv->dma_cap.vlhash) {
7671 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7672 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7673 	}
7674 	if (priv->dma_cap.vlins) {
7675 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7676 		if (priv->dma_cap.dvlan)
7677 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7678 	}
7679 #endif
7680 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7681 
7682 	priv->xstats.threshold = tc;
7683 
7684 	/* Initialize RSS */
7685 	rxq = priv->plat->rx_queues_to_use;
7686 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7687 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7688 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7689 
7690 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7691 		ndev->features |= NETIF_F_RXHASH;
7692 
7693 	ndev->vlan_features |= ndev->features;
7694 
7695 	/* MTU range: 46 - hw-specific max */
7696 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7697 	if (priv->plat->has_xgmac)
7698 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7699 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7700 		ndev->max_mtu = JUMBO_LEN;
7701 	else
7702 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7703 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7704 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7705 	 */
7706 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7707 	    (priv->plat->maxmtu >= ndev->min_mtu))
7708 		ndev->max_mtu = priv->plat->maxmtu;
7709 	else if (priv->plat->maxmtu < ndev->min_mtu)
7710 		dev_warn(priv->device,
7711 			 "%s: warning: maxmtu has an invalid value (%d)\n",
7712 			 __func__, priv->plat->maxmtu);
7713 
7714 	if (flow_ctrl)
7715 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7716 
7717 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7718 
7719 	/* Setup channels NAPI */
7720 	stmmac_napi_add(ndev);
7721 
7722 	mutex_init(&priv->lock);
7723 
7724 	/* If a specific clk_csr value is passed from the platform,
7725 	 * the CSR Clock Range selection cannot be changed at
7726 	 * run-time and is fixed. Otherwise, the driver will try to
7727 	 * set the MDC clock dynamically according to the actual
7728 	 * CSR input clock.
7729 	 */
7730 	if (priv->plat->clk_csr >= 0)
7731 		priv->clk_csr = priv->plat->clk_csr;
7732 	else
7733 		stmmac_clk_csr_set(priv);
7734 
7735 	stmmac_check_pcs_mode(priv);
7736 
7737 	pm_runtime_get_noresume(device);
7738 	pm_runtime_set_active(device);
7739 	if (!pm_runtime_enabled(device))
7740 		pm_runtime_enable(device);
7741 
7742 	ret = stmmac_mdio_register(ndev);
7743 	if (ret < 0) {
7744 		dev_err_probe(priv->device, ret,
7745 			      "MDIO bus (id: %d) registration failed\n",
7746 			      priv->plat->bus_id);
7747 		goto error_mdio_register;
7748 	}
7749 
7750 	if (priv->plat->speed_mode_2500)
7751 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7752 
7753 	ret = stmmac_pcs_setup(ndev);
7754 	if (ret)
7755 		goto error_pcs_setup;
7756 
7757 	ret = stmmac_phy_setup(priv);
7758 	if (ret) {
7759 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7760 		goto error_phy_setup;
7761 	}
7762 
7763 	ret = register_netdev(ndev);
7764 	if (ret) {
7765 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7766 			__func__, ret);
7767 		goto error_netdev_register;
7768 	}
7769 
7770 #ifdef CONFIG_DEBUG_FS
7771 	stmmac_init_fs(ndev);
7772 #endif
7773 
7774 	if (priv->plat->dump_debug_regs)
7775 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7776 
7777 	/* Let pm_runtime_put() disable the clocks.
7778 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7779 	 */
7780 	pm_runtime_put(device);
7781 
7782 	return ret;
7783 
7784 error_netdev_register:
7785 	phylink_destroy(priv->phylink);
7786 error_phy_setup:
7787 	stmmac_pcs_clean(ndev);
7788 error_pcs_setup:
7789 	stmmac_mdio_unregister(ndev);
7790 error_mdio_register:
7791 	stmmac_napi_del(ndev);
7792 error_hw_init:
7793 	destroy_workqueue(priv->wq);
7794 error_wq_init:
7795 	bitmap_free(priv->af_xdp_zc_qps);
7796 
7797 	return ret;
7798 }
7799 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7800 
7801 /**
7802  * stmmac_dvr_remove
7803  * @dev: device pointer
7804  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7805  * changes the link status and releases the DMA descriptor rings.
7806  */
7807 void stmmac_dvr_remove(struct device *dev)
7808 {
7809 	struct net_device *ndev = dev_get_drvdata(dev);
7810 	struct stmmac_priv *priv = netdev_priv(ndev);
7811 
7812 	netdev_info(priv->dev, "%s: removing driver", __func__);
7813 
7814 	pm_runtime_get_sync(dev);
7815 
7816 	stmmac_stop_all_dma(priv);
7817 	stmmac_mac_set(priv, priv->ioaddr, false);
7818 	unregister_netdev(ndev);
7819 
7820 #ifdef CONFIG_DEBUG_FS
7821 	stmmac_exit_fs(ndev);
7822 #endif
7823 	phylink_destroy(priv->phylink);
7824 	if (priv->plat->stmmac_rst)
7825 		reset_control_assert(priv->plat->stmmac_rst);
7826 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7827 
7828 	stmmac_pcs_clean(ndev);
7829 	stmmac_mdio_unregister(ndev);
7830 
7831 	destroy_workqueue(priv->wq);
7832 	mutex_destroy(&priv->lock);
7833 	bitmap_free(priv->af_xdp_zc_qps);
7834 
7835 	pm_runtime_disable(dev);
7836 	pm_runtime_put_noidle(dev);
7837 }
7838 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7839 
7840 /**
7841  * stmmac_suspend - suspend callback
7842  * @dev: device pointer
7843  * Description: this is the function to suspend the device; it is called
7844  * by the platform driver to stop the network queue, release the resources,
7845  * program the PMT register (for WoL) and clean/release the driver resources.
7846  */
7847 int stmmac_suspend(struct device *dev)
7848 {
7849 	struct net_device *ndev = dev_get_drvdata(dev);
7850 	struct stmmac_priv *priv = netdev_priv(ndev);
7851 	u32 chan;
7852 
7853 	if (!ndev || !netif_running(ndev))
7854 		return 0;
7855 
7856 	mutex_lock(&priv->lock);
7857 
7858 	netif_device_detach(ndev);
7859 
7860 	stmmac_disable_all_queues(priv);
7861 
7862 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7863 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7864 
7865 	if (priv->eee_enabled) {
7866 		priv->tx_path_in_lpi_mode = false;
7867 		del_timer_sync(&priv->eee_ctrl_timer);
7868 	}
7869 
7870 	/* Stop TX/RX DMA */
7871 	stmmac_stop_all_dma(priv);
7872 
7873 	if (priv->plat->serdes_powerdown)
7874 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7875 
7876 	/* Enable Power down mode by programming the PMT regs */
7877 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7878 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7879 		priv->irq_wake = 1;
7880 	} else {
7881 		stmmac_mac_set(priv, priv->ioaddr, false);
7882 		pinctrl_pm_select_sleep_state(priv->device);
7883 	}
7884 
7885 	mutex_unlock(&priv->lock);
7886 
7887 	rtnl_lock();
7888 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7889 		phylink_suspend(priv->phylink, true);
7890 	} else {
7891 		if (device_may_wakeup(priv->device))
7892 			phylink_speed_down(priv->phylink, false);
7893 		phylink_suspend(priv->phylink, false);
7894 	}
7895 	rtnl_unlock();
7896 
7897 	if (priv->dma_cap.fpesel) {
7898 		/* Disable FPE */
7899 		stmmac_fpe_configure(priv, priv->ioaddr,
7900 				     priv->plat->fpe_cfg,
7901 				     priv->plat->tx_queues_to_use,
7902 				     priv->plat->rx_queues_to_use, false);
7903 
7904 		stmmac_fpe_handshake(priv, false);
7905 		stmmac_fpe_stop_wq(priv);
7906 	}
7907 
7908 	priv->speed = SPEED_UNKNOWN;
7909 	return 0;
7910 }
7911 EXPORT_SYMBOL_GPL(stmmac_suspend);
7912 
7913 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7914 {
7915 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7916 
7917 	rx_q->cur_rx = 0;
7918 	rx_q->dirty_rx = 0;
7919 }
7920 
7921 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7922 {
7923 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7924 
7925 	tx_q->cur_tx = 0;
7926 	tx_q->dirty_tx = 0;
7927 	tx_q->mss = 0;
7928 
7929 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7930 }
7931 
7932 /**
7933  * stmmac_reset_queues_param - reset queue parameters
7934  * @priv: device pointer
7935  */
7936 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7937 {
7938 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7939 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7940 	u32 queue;
7941 
7942 	for (queue = 0; queue < rx_cnt; queue++)
7943 		stmmac_reset_rx_queue(priv, queue);
7944 
7945 	for (queue = 0; queue < tx_cnt; queue++)
7946 		stmmac_reset_tx_queue(priv, queue);
7947 }
7948 
7949 /**
7950  * stmmac_resume - resume callback
7951  * @dev: device pointer
7952  * Description: on resume, this function is invoked to set up the DMA and CORE
7953  * in a usable state.
7954  */
7955 int stmmac_resume(struct device *dev)
7956 {
7957 	struct net_device *ndev = dev_get_drvdata(dev);
7958 	struct stmmac_priv *priv = netdev_priv(ndev);
7959 	int ret;
7960 
7961 	if (!netif_running(ndev))
7962 		return 0;
7963 
7964 	/* The Power Down bit in the PM register is cleared
7965 	 * automatically as soon as a magic packet or a Wake-up frame
7966 	 * is received. Even so, it's better to manually clear
7967 	 * this bit because it can cause problems while resuming
7968 	 * from other devices (e.g. a serial console).
7969 	 */
7970 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7971 		mutex_lock(&priv->lock);
7972 		stmmac_pmt(priv, priv->hw, 0);
7973 		mutex_unlock(&priv->lock);
7974 		priv->irq_wake = 0;
7975 	} else {
7976 		pinctrl_pm_select_default_state(priv->device);
7977 		/* reset the phy so that it's ready */
7978 		if (priv->mii)
7979 			stmmac_mdio_reset(priv->mii);
7980 	}
7981 
7982 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7983 	    priv->plat->serdes_powerup) {
7984 		ret = priv->plat->serdes_powerup(ndev,
7985 						 priv->plat->bsp_priv);
7986 
7987 		if (ret < 0)
7988 			return ret;
7989 	}
7990 
7991 	rtnl_lock();
7992 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7993 		phylink_resume(priv->phylink);
7994 	} else {
7995 		phylink_resume(priv->phylink);
7996 		if (device_may_wakeup(priv->device))
7997 			phylink_speed_up(priv->phylink);
7998 	}
7999 	rtnl_unlock();
8000 
8001 	rtnl_lock();
8002 	mutex_lock(&priv->lock);
8003 
8004 	stmmac_reset_queues_param(priv);
8005 
8006 	stmmac_free_tx_skbufs(priv);
8007 	stmmac_clear_descriptors(priv, &priv->dma_conf);
8008 
8009 	stmmac_hw_setup(ndev, false);
8010 	stmmac_init_coalesce(priv);
8011 	stmmac_set_rx_mode(ndev);
8012 
8013 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8014 
8015 	stmmac_enable_all_queues(priv);
8016 	stmmac_enable_all_dma_irq(priv);
8017 
8018 	mutex_unlock(&priv->lock);
8019 	rtnl_unlock();
8020 
8021 	netif_device_attach(ndev);
8022 
8023 	return 0;
8024 }
8025 EXPORT_SYMBOL_GPL(stmmac_resume);
8026 
8027 #ifndef MODULE
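/* Built-in command line parsing: options are passed as comma-separated
 * "name:value" pairs on the kernel command line, for example
 * "stmmaceth=debug:16,phyaddr:1,watchdog:5000" (illustrative values).
 */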
8028 static int __init stmmac_cmdline_opt(char *str)
8029 {
8030 	char *opt;
8031 
8032 	if (!str || !*str)
8033 		return 1;
8034 	while ((opt = strsep(&str, ",")) != NULL) {
8035 		if (!strncmp(opt, "debug:", 6)) {
8036 			if (kstrtoint(opt + 6, 0, &debug))
8037 				goto err;
8038 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8039 			if (kstrtoint(opt + 8, 0, &phyaddr))
8040 				goto err;
8041 		} else if (!strncmp(opt, "buf_sz:", 7)) {
8042 			if (kstrtoint(opt + 7, 0, &buf_sz))
8043 				goto err;
8044 		} else if (!strncmp(opt, "tc:", 3)) {
8045 			if (kstrtoint(opt + 3, 0, &tc))
8046 				goto err;
8047 		} else if (!strncmp(opt, "watchdog:", 9)) {
8048 			if (kstrtoint(opt + 9, 0, &watchdog))
8049 				goto err;
8050 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8051 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8052 				goto err;
8053 		} else if (!strncmp(opt, "pause:", 6)) {
8054 			if (kstrtoint(opt + 6, 0, &pause))
8055 				goto err;
8056 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8057 			if (kstrtoint(opt + 10, 0, &eee_timer))
8058 				goto err;
8059 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8060 			if (kstrtoint(opt + 11, 0, &chain_mode))
8061 				goto err;
8062 		}
8063 	}
8064 	return 1;
8065 
8066 err:
8067 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
8068 	return 1;
8069 }
8070 
8071 __setup("stmmaceth=", stmmac_cmdline_opt);
8072 #endif /* MODULE */
8073 
8074 static int __init stmmac_init(void)
8075 {
8076 #ifdef CONFIG_DEBUG_FS
8077 	/* Create debugfs main directory if it doesn't exist yet */
8078 	if (!stmmac_fs_dir)
8079 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8080 	register_netdevice_notifier(&stmmac_notifier);
8081 #endif
8082 
8083 	return 0;
8084 }
8085 
8086 static void __exit stmmac_exit(void)
8087 {
8088 #ifdef CONFIG_DEBUG_FS
8089 	unregister_netdevice_notifier(&stmmac_notifier);
8090 	debugfs_remove_recursive(stmmac_fs_dir);
8091 #endif
8092 }
8093 
8094 module_init(stmmac_init)
8095 module_exit(stmmac_exit)
8096 
8097 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8098 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8099 MODULE_LICENSE("GPL");
8100