xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 566ab427f827b0256d3e8ce0235d088e6a9c28bd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoid non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allow user to force to use the chain instead of the ring
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and set a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Viceversa the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* Platform provided default clk_csr would be assumed valid
308 	 * for all other cases except for the below mentioned ones.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we can not estimate the proper divider as it is not known
311 	 * the frequency of clk_csr_i. So we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function is to verify and enter in LPI mode in case of
407  * EEE.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function is to exit and disable EEE in case of
433  * LPI state is true. This is called by the xmit.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then MAC Transmitter can be moved to LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enable the LPI state and start related
468  *  timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Check if MAC core supports the EEE feature. */
475 	if (!priv->dma_cap.eee)
476 		return false;
477 
478 	mutex_lock(&priv->lock);
479 
480 	/* Check if it needs to be deactivated */
481 	if (!priv->eee_active) {
482 		if (priv->eee_enabled) {
483 			netdev_dbg(priv->dev, "disable EEE\n");
484 			stmmac_lpi_entry_timer_config(priv, 0);
485 			del_timer_sync(&priv->eee_ctrl_timer);
486 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
487 			if (priv->hw->xpcs)
488 				xpcs_config_eee(priv->hw->xpcs,
489 						priv->plat->mult_fact_100ns,
490 						false);
491 		}
492 		mutex_unlock(&priv->lock);
493 		return false;
494 	}
495 
496 	if (priv->eee_active && !priv->eee_enabled) {
497 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
498 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
499 				     eee_tw_timer);
500 		if (priv->hw->xpcs)
501 			xpcs_config_eee(priv->hw->xpcs,
502 					priv->plat->mult_fact_100ns,
503 					true);
504 	}
505 
506 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
507 		del_timer_sync(&priv->eee_ctrl_timer);
508 		priv->tx_path_in_lpi_mode = false;
509 		stmmac_lpi_entry_timer_config(priv, 1);
510 	} else {
511 		stmmac_lpi_entry_timer_config(priv, 0);
512 		mod_timer(&priv->eee_ctrl_timer,
513 			  STMMAC_LPI_T(priv->tx_lpi_timer));
514 	}
515 
516 	mutex_unlock(&priv->lock);
517 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
518 	return true;
519 }
520 
521 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
522  * @priv: driver private structure
523  * @p : descriptor pointer
524  * @skb : the socket buffer
525  * Description :
526  * This function will read timestamp from the descriptor & pass it to stack.
527  * and also perform some sanity checks.
528  */
529 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
530 				   struct dma_desc *p, struct sk_buff *skb)
531 {
532 	struct skb_shared_hwtstamps shhwtstamp;
533 	bool found = false;
534 	u64 ns = 0;
535 
536 	if (!priv->hwts_tx_en)
537 		return;
538 
539 	/* exit if skb doesn't support hw tstamp */
540 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
541 		return;
542 
543 	/* check tx tstamp status */
544 	if (stmmac_get_tx_timestamp_status(priv, p)) {
545 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
546 		found = true;
547 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
548 		found = true;
549 	}
550 
551 	if (found) {
552 		ns -= priv->plat->cdc_error_adj;
553 
554 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
555 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
556 
557 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
558 		/* pass tstamp to stack */
559 		skb_tstamp_tx(skb, &shhwtstamp);
560 	}
561 }
562 
563 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
564  * @priv: driver private structure
565  * @p : descriptor pointer
566  * @np : next descriptor pointer
567  * @skb : the socket buffer
568  * Description :
569  * This function will read received packet's timestamp from the descriptor
570  * and pass it to stack. It also perform some sanity checks.
571  */
572 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
573 				   struct dma_desc *np, struct sk_buff *skb)
574 {
575 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
576 	struct dma_desc *desc = p;
577 	u64 ns = 0;
578 
579 	if (!priv->hwts_rx_en)
580 		return;
581 	/* For GMAC4, the valid timestamp is from CTX next desc. */
582 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
583 		desc = np;
584 
585 	/* Check if timestamp is available */
586 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
587 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
588 
589 		ns -= priv->plat->cdc_error_adj;
590 
591 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
592 		shhwtstamp = skb_hwtstamps(skb);
593 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
594 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
595 	} else  {
596 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
597 	}
598 }
599 
600 /**
601  *  stmmac_hwtstamp_set - control hardware timestamping.
602  *  @dev: device pointer.
603  *  @ifr: An IOCTL specific structure, that can contain a pointer to
604  *  a proprietary structure used to pass information to the driver.
605  *  Description:
606  *  This function configures the MAC to enable/disable both outgoing(TX)
607  *  and incoming(RX) packets time stamping based on user input.
608  *  Return Value:
609  *  0 on success and an appropriate -ve integer on failure.
610  */
611 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
612 {
613 	struct stmmac_priv *priv = netdev_priv(dev);
614 	struct hwtstamp_config config;
615 	u32 ptp_v2 = 0;
616 	u32 tstamp_all = 0;
617 	u32 ptp_over_ipv4_udp = 0;
618 	u32 ptp_over_ipv6_udp = 0;
619 	u32 ptp_over_ethernet = 0;
620 	u32 snap_type_sel = 0;
621 	u32 ts_master_en = 0;
622 	u32 ts_event_en = 0;
623 
624 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
625 		netdev_alert(priv->dev, "No support for HW time stamping\n");
626 		priv->hwts_tx_en = 0;
627 		priv->hwts_rx_en = 0;
628 
629 		return -EOPNOTSUPP;
630 	}
631 
632 	if (copy_from_user(&config, ifr->ifr_data,
633 			   sizeof(config)))
634 		return -EFAULT;
635 
636 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
637 		   __func__, config.flags, config.tx_type, config.rx_filter);
638 
639 	if (config.tx_type != HWTSTAMP_TX_OFF &&
640 	    config.tx_type != HWTSTAMP_TX_ON)
641 		return -ERANGE;
642 
643 	if (priv->adv_ts) {
644 		switch (config.rx_filter) {
645 		case HWTSTAMP_FILTER_NONE:
646 			/* time stamp no incoming packet at all */
647 			config.rx_filter = HWTSTAMP_FILTER_NONE;
648 			break;
649 
650 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
651 			/* PTP v1, UDP, any kind of event packet */
652 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
653 			/* 'xmac' hardware can support Sync, Pdelay_Req and
654 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
655 			 * This leaves Delay_Req timestamps out.
656 			 * Enable all events *and* general purpose message
657 			 * timestamping
658 			 */
659 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
660 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 			break;
663 
664 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
665 			/* PTP v1, UDP, Sync packet */
666 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
667 			/* take time stamp for SYNC messages only */
668 			ts_event_en = PTP_TCR_TSEVNTENA;
669 
670 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
671 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
672 			break;
673 
674 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
675 			/* PTP v1, UDP, Delay_req packet */
676 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
677 			/* take time stamp for Delay_Req messages only */
678 			ts_master_en = PTP_TCR_TSMSTRENA;
679 			ts_event_en = PTP_TCR_TSEVNTENA;
680 
681 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
682 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
683 			break;
684 
685 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
686 			/* PTP v2, UDP, any kind of event packet */
687 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
688 			ptp_v2 = PTP_TCR_TSVER2ENA;
689 			/* take time stamp for all event messages */
690 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
691 
692 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
693 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
694 			break;
695 
696 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
697 			/* PTP v2, UDP, Sync packet */
698 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
699 			ptp_v2 = PTP_TCR_TSVER2ENA;
700 			/* take time stamp for SYNC messages only */
701 			ts_event_en = PTP_TCR_TSEVNTENA;
702 
703 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
704 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
705 			break;
706 
707 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
708 			/* PTP v2, UDP, Delay_req packet */
709 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
710 			ptp_v2 = PTP_TCR_TSVER2ENA;
711 			/* take time stamp for Delay_Req messages only */
712 			ts_master_en = PTP_TCR_TSMSTRENA;
713 			ts_event_en = PTP_TCR_TSEVNTENA;
714 
715 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
716 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
717 			break;
718 
719 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
720 			/* PTP v2/802.AS1 any layer, any kind of event packet */
721 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
722 			ptp_v2 = PTP_TCR_TSVER2ENA;
723 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
724 			if (priv->synopsys_id < DWMAC_CORE_4_10)
725 				ts_event_en = PTP_TCR_TSEVNTENA;
726 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
727 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
728 			ptp_over_ethernet = PTP_TCR_TSIPENA;
729 			break;
730 
731 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
732 			/* PTP v2/802.AS1, any layer, Sync packet */
733 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
734 			ptp_v2 = PTP_TCR_TSVER2ENA;
735 			/* take time stamp for SYNC messages only */
736 			ts_event_en = PTP_TCR_TSEVNTENA;
737 
738 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
739 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
740 			ptp_over_ethernet = PTP_TCR_TSIPENA;
741 			break;
742 
743 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
744 			/* PTP v2/802.AS1, any layer, Delay_req packet */
745 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
746 			ptp_v2 = PTP_TCR_TSVER2ENA;
747 			/* take time stamp for Delay_Req messages only */
748 			ts_master_en = PTP_TCR_TSMSTRENA;
749 			ts_event_en = PTP_TCR_TSEVNTENA;
750 
751 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
752 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
753 			ptp_over_ethernet = PTP_TCR_TSIPENA;
754 			break;
755 
756 		case HWTSTAMP_FILTER_NTP_ALL:
757 		case HWTSTAMP_FILTER_ALL:
758 			/* time stamp any incoming packet */
759 			config.rx_filter = HWTSTAMP_FILTER_ALL;
760 			tstamp_all = PTP_TCR_TSENALL;
761 			break;
762 
763 		default:
764 			return -ERANGE;
765 		}
766 	} else {
767 		switch (config.rx_filter) {
768 		case HWTSTAMP_FILTER_NONE:
769 			config.rx_filter = HWTSTAMP_FILTER_NONE;
770 			break;
771 		default:
772 			/* PTP v1, UDP, any kind of event packet */
773 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
774 			break;
775 		}
776 	}
777 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
778 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
779 
780 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
781 
782 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
783 		priv->systime_flags |= tstamp_all | ptp_v2 |
784 				       ptp_over_ethernet | ptp_over_ipv6_udp |
785 				       ptp_over_ipv4_udp | ts_event_en |
786 				       ts_master_en | snap_type_sel;
787 	}
788 
789 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
790 
791 	memcpy(&priv->tstamp_config, &config, sizeof(config));
792 
793 	return copy_to_user(ifr->ifr_data, &config,
794 			    sizeof(config)) ? -EFAULT : 0;
795 }
796 
797 /**
798  *  stmmac_hwtstamp_get - read hardware timestamping.
799  *  @dev: device pointer.
800  *  @ifr: An IOCTL specific structure, that can contain a pointer to
801  *  a proprietary structure used to pass information to the driver.
802  *  Description:
803  *  This function obtain the current hardware timestamping settings
804  *  as requested.
805  */
806 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
807 {
808 	struct stmmac_priv *priv = netdev_priv(dev);
809 	struct hwtstamp_config *config = &priv->tstamp_config;
810 
811 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
812 		return -EOPNOTSUPP;
813 
814 	return copy_to_user(ifr->ifr_data, config,
815 			    sizeof(*config)) ? -EFAULT : 0;
816 }
817 
818 /**
819  * stmmac_init_tstamp_counter - init hardware timestamping counter
820  * @priv: driver private structure
821  * @systime_flags: timestamping flags
822  * Description:
823  * Initialize hardware counter for packet timestamping.
824  * This is valid as long as the interface is open and not suspended.
825  * Will be rerun after resuming from suspend, case in which the timestamping
826  * flags updated by stmmac_hwtstamp_set() also need to be restored.
827  */
828 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
829 {
830 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
831 	struct timespec64 now;
832 	u32 sec_inc = 0;
833 	u64 temp = 0;
834 
835 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
836 		return -EOPNOTSUPP;
837 
838 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
839 	priv->systime_flags = systime_flags;
840 
841 	/* program Sub Second Increment reg */
842 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
843 					   priv->plat->clk_ptp_rate,
844 					   xmac, &sec_inc);
845 	temp = div_u64(1000000000ULL, sec_inc);
846 
847 	/* Store sub second increment for later use */
848 	priv->sub_second_inc = sec_inc;
849 
850 	/* calculate default added value:
851 	 * formula is :
852 	 * addend = (2^32)/freq_div_ratio;
853 	 * where, freq_div_ratio = 1e9ns/sec_inc
854 	 */
855 	temp = (u64)(temp << 32);
856 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
857 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
858 
859 	/* initialize system time */
860 	ktime_get_real_ts64(&now);
861 
862 	/* lower 32 bits of tv_sec are safe until y2106 */
863 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
864 
865 	return 0;
866 }
867 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
868 
869 /**
870  * stmmac_init_ptp - init PTP
871  * @priv: driver private structure
872  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
873  * This is done by looking at the HW cap. register.
874  * This function also registers the ptp driver.
875  */
876 static int stmmac_init_ptp(struct stmmac_priv *priv)
877 {
878 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
879 	int ret;
880 
881 	if (priv->plat->ptp_clk_freq_config)
882 		priv->plat->ptp_clk_freq_config(priv);
883 
884 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
885 	if (ret)
886 		return ret;
887 
888 	priv->adv_ts = 0;
889 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
890 	if (xmac && priv->dma_cap.atime_stamp)
891 		priv->adv_ts = 1;
892 	/* Dwmac 3.x core with extend_desc can support adv_ts */
893 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
894 		priv->adv_ts = 1;
895 
896 	if (priv->dma_cap.time_stamp)
897 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
898 
899 	if (priv->adv_ts)
900 		netdev_info(priv->dev,
901 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
902 
903 	priv->hwts_tx_en = 0;
904 	priv->hwts_rx_en = 0;
905 
906 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
907 		stmmac_hwtstamp_correct_latency(priv, priv);
908 
909 	return 0;
910 }
911 
912 static void stmmac_release_ptp(struct stmmac_priv *priv)
913 {
914 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
915 	stmmac_ptp_unregister(priv);
916 }
917 
918 /**
919  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
920  *  @priv: driver private structure
921  *  @duplex: duplex passed to the next function
922  *  Description: It is used for configuring the flow control in all queues
923  */
924 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
925 {
926 	u32 tx_cnt = priv->plat->tx_queues_to_use;
927 
928 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
929 			priv->pause, tx_cnt);
930 }
931 
932 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
933 					 phy_interface_t interface)
934 {
935 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
936 
937 	/* Refresh the MAC-specific capabilities */
938 	stmmac_mac_update_caps(priv);
939 
940 	config->mac_capabilities = priv->hw->link.caps;
941 
942 	if (priv->plat->max_speed)
943 		phylink_limit_mac_speed(config, priv->plat->max_speed);
944 
945 	return config->mac_capabilities;
946 }
947 
948 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
949 						 phy_interface_t interface)
950 {
951 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
952 	struct phylink_pcs *pcs;
953 
954 	if (priv->plat->select_pcs) {
955 		pcs = priv->plat->select_pcs(priv, interface);
956 		if (!IS_ERR(pcs))
957 			return pcs;
958 	}
959 
960 	return NULL;
961 }
962 
963 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
964 			      const struct phylink_link_state *state)
965 {
966 	/* Nothing to do, xpcs_config() handles everything */
967 }
968 
969 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
970 {
971 	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
972 	unsigned long flags;
973 
974 	timer_shutdown_sync(&fpe_cfg->verify_timer);
975 
976 	spin_lock_irqsave(&fpe_cfg->lock, flags);
977 
978 	if (is_up && fpe_cfg->pmac_enabled) {
979 		/* VERIFY process requires pmac enabled when NIC comes up */
980 		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
981 				     priv->plat->tx_queues_to_use,
982 				     priv->plat->rx_queues_to_use,
983 				     false, true);
984 
985 		/* New link => maybe new partner => new verification process */
986 		stmmac_fpe_apply(priv);
987 	} else {
988 		/* No link => turn off EFPE */
989 		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
990 				     priv->plat->tx_queues_to_use,
991 				     priv->plat->rx_queues_to_use,
992 				     false, false);
993 	}
994 
995 	spin_unlock_irqrestore(&fpe_cfg->lock, flags);
996 }
997 
998 static void stmmac_mac_link_down(struct phylink_config *config,
999 				 unsigned int mode, phy_interface_t interface)
1000 {
1001 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1002 
1003 	stmmac_mac_set(priv, priv->ioaddr, false);
1004 	priv->eee_active = false;
1005 	priv->tx_lpi_enabled = false;
1006 	priv->eee_enabled = stmmac_eee_init(priv);
1007 	stmmac_set_eee_pls(priv, priv->hw, false);
1008 
1009 	if (priv->dma_cap.fpesel)
1010 		stmmac_fpe_link_state_handle(priv, false);
1011 }
1012 
1013 static void stmmac_mac_link_up(struct phylink_config *config,
1014 			       struct phy_device *phy,
1015 			       unsigned int mode, phy_interface_t interface,
1016 			       int speed, int duplex,
1017 			       bool tx_pause, bool rx_pause)
1018 {
1019 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1020 	u32 old_ctrl, ctrl;
1021 
1022 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1023 	    priv->plat->serdes_powerup)
1024 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1025 
1026 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1027 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1028 
1029 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1030 		switch (speed) {
1031 		case SPEED_10000:
1032 			ctrl |= priv->hw->link.xgmii.speed10000;
1033 			break;
1034 		case SPEED_5000:
1035 			ctrl |= priv->hw->link.xgmii.speed5000;
1036 			break;
1037 		case SPEED_2500:
1038 			ctrl |= priv->hw->link.xgmii.speed2500;
1039 			break;
1040 		default:
1041 			return;
1042 		}
1043 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1044 		switch (speed) {
1045 		case SPEED_100000:
1046 			ctrl |= priv->hw->link.xlgmii.speed100000;
1047 			break;
1048 		case SPEED_50000:
1049 			ctrl |= priv->hw->link.xlgmii.speed50000;
1050 			break;
1051 		case SPEED_40000:
1052 			ctrl |= priv->hw->link.xlgmii.speed40000;
1053 			break;
1054 		case SPEED_25000:
1055 			ctrl |= priv->hw->link.xlgmii.speed25000;
1056 			break;
1057 		case SPEED_10000:
1058 			ctrl |= priv->hw->link.xgmii.speed10000;
1059 			break;
1060 		case SPEED_2500:
1061 			ctrl |= priv->hw->link.speed2500;
1062 			break;
1063 		case SPEED_1000:
1064 			ctrl |= priv->hw->link.speed1000;
1065 			break;
1066 		default:
1067 			return;
1068 		}
1069 	} else {
1070 		switch (speed) {
1071 		case SPEED_2500:
1072 			ctrl |= priv->hw->link.speed2500;
1073 			break;
1074 		case SPEED_1000:
1075 			ctrl |= priv->hw->link.speed1000;
1076 			break;
1077 		case SPEED_100:
1078 			ctrl |= priv->hw->link.speed100;
1079 			break;
1080 		case SPEED_10:
1081 			ctrl |= priv->hw->link.speed10;
1082 			break;
1083 		default:
1084 			return;
1085 		}
1086 	}
1087 
1088 	priv->speed = speed;
1089 
1090 	if (priv->plat->fix_mac_speed)
1091 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1092 
1093 	if (!duplex)
1094 		ctrl &= ~priv->hw->link.duplex;
1095 	else
1096 		ctrl |= priv->hw->link.duplex;
1097 
1098 	/* Flow Control operation */
1099 	if (rx_pause && tx_pause)
1100 		priv->flow_ctrl = FLOW_AUTO;
1101 	else if (rx_pause && !tx_pause)
1102 		priv->flow_ctrl = FLOW_RX;
1103 	else if (!rx_pause && tx_pause)
1104 		priv->flow_ctrl = FLOW_TX;
1105 	else
1106 		priv->flow_ctrl = FLOW_OFF;
1107 
1108 	stmmac_mac_flow_ctrl(priv, duplex);
1109 
1110 	if (ctrl != old_ctrl)
1111 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1112 
1113 	stmmac_mac_set(priv, priv->ioaddr, true);
1114 	if (phy && priv->dma_cap.eee) {
1115 		priv->eee_active =
1116 			phy_init_eee(phy, !(priv->plat->flags &
1117 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1118 		priv->eee_enabled = stmmac_eee_init(priv);
1119 		priv->tx_lpi_enabled = priv->eee_enabled;
1120 		stmmac_set_eee_pls(priv, priv->hw, true);
1121 	}
1122 
1123 	if (priv->dma_cap.fpesel)
1124 		stmmac_fpe_link_state_handle(priv, true);
1125 
1126 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1127 		stmmac_hwtstamp_correct_latency(priv, priv);
1128 }
1129 
1130 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1131 	.mac_get_caps = stmmac_mac_get_caps,
1132 	.mac_select_pcs = stmmac_mac_select_pcs,
1133 	.mac_config = stmmac_mac_config,
1134 	.mac_link_down = stmmac_mac_link_down,
1135 	.mac_link_up = stmmac_mac_link_up,
1136 };
1137 
1138 /**
1139  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1140  * @priv: driver private structure
1141  * Description: this is to verify if the HW supports the PCS.
1142  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1143  * configured for the TBI, RTBI, or SGMII PHY interface.
1144  */
1145 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1146 {
1147 	int interface = priv->plat->mac_interface;
1148 
1149 	if (priv->dma_cap.pcs) {
1150 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1151 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1152 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1153 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1154 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1155 			priv->hw->pcs = STMMAC_PCS_RGMII;
1156 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1157 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1158 			priv->hw->pcs = STMMAC_PCS_SGMII;
1159 		}
1160 	}
1161 }
1162 
1163 /**
1164  * stmmac_init_phy - PHY initialization
1165  * @dev: net device structure
1166  * Description: it initializes the driver's PHY state, and attaches the PHY
1167  * to the mac driver.
1168  *  Return value:
1169  *  0 on success
1170  */
1171 static int stmmac_init_phy(struct net_device *dev)
1172 {
1173 	struct stmmac_priv *priv = netdev_priv(dev);
1174 	struct fwnode_handle *phy_fwnode;
1175 	struct fwnode_handle *fwnode;
1176 	int ret;
1177 
1178 	if (!phylink_expects_phy(priv->phylink))
1179 		return 0;
1180 
1181 	fwnode = priv->plat->port_node;
1182 	if (!fwnode)
1183 		fwnode = dev_fwnode(priv->device);
1184 
1185 	if (fwnode)
1186 		phy_fwnode = fwnode_get_phy_node(fwnode);
1187 	else
1188 		phy_fwnode = NULL;
1189 
1190 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1191 	 * manually parse it
1192 	 */
1193 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1194 		int addr = priv->plat->phy_addr;
1195 		struct phy_device *phydev;
1196 
1197 		if (addr < 0) {
1198 			netdev_err(priv->dev, "no phy found\n");
1199 			return -ENODEV;
1200 		}
1201 
1202 		phydev = mdiobus_get_phy(priv->mii, addr);
1203 		if (!phydev) {
1204 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1205 			return -ENODEV;
1206 		}
1207 
1208 		ret = phylink_connect_phy(priv->phylink, phydev);
1209 	} else {
1210 		fwnode_handle_put(phy_fwnode);
1211 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1212 	}
1213 
1214 	if (!priv->plat->pmt) {
1215 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1216 
1217 		phylink_ethtool_get_wol(priv->phylink, &wol);
1218 		device_set_wakeup_capable(priv->device, !!wol.supported);
1219 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1220 	}
1221 
1222 	return ret;
1223 }
1224 
1225 static int stmmac_phy_setup(struct stmmac_priv *priv)
1226 {
1227 	struct stmmac_mdio_bus_data *mdio_bus_data;
1228 	int mode = priv->plat->phy_interface;
1229 	struct fwnode_handle *fwnode;
1230 	struct phylink *phylink;
1231 
1232 	priv->phylink_config.dev = &priv->dev->dev;
1233 	priv->phylink_config.type = PHYLINK_NETDEV;
1234 	priv->phylink_config.mac_managed_pm = true;
1235 
1236 	/* Stmmac always requires an RX clock for hardware initialization */
1237 	priv->phylink_config.mac_requires_rxc = true;
1238 
1239 	mdio_bus_data = priv->plat->mdio_bus_data;
1240 	if (mdio_bus_data)
1241 		priv->phylink_config.default_an_inband =
1242 			mdio_bus_data->default_an_inband;
1243 
1244 	/* Set the platform/firmware specified interface mode. Note, phylink
1245 	 * deals with the PHY interface mode, not the MAC interface mode.
1246 	 */
1247 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1248 
1249 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1250 	if (priv->hw->xpcs)
1251 		xpcs_get_interfaces(priv->hw->xpcs,
1252 				    priv->phylink_config.supported_interfaces);
1253 
1254 	fwnode = priv->plat->port_node;
1255 	if (!fwnode)
1256 		fwnode = dev_fwnode(priv->device);
1257 
1258 	phylink = phylink_create(&priv->phylink_config, fwnode,
1259 				 mode, &stmmac_phylink_mac_ops);
1260 	if (IS_ERR(phylink))
1261 		return PTR_ERR(phylink);
1262 
1263 	priv->phylink = phylink;
1264 	return 0;
1265 }
1266 
1267 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1268 				    struct stmmac_dma_conf *dma_conf)
1269 {
1270 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1271 	unsigned int desc_size;
1272 	void *head_rx;
1273 	u32 queue;
1274 
1275 	/* Display RX rings */
1276 	for (queue = 0; queue < rx_cnt; queue++) {
1277 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1278 
1279 		pr_info("\tRX Queue %u rings\n", queue);
1280 
1281 		if (priv->extend_desc) {
1282 			head_rx = (void *)rx_q->dma_erx;
1283 			desc_size = sizeof(struct dma_extended_desc);
1284 		} else {
1285 			head_rx = (void *)rx_q->dma_rx;
1286 			desc_size = sizeof(struct dma_desc);
1287 		}
1288 
1289 		/* Display RX ring */
1290 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1291 				    rx_q->dma_rx_phy, desc_size);
1292 	}
1293 }
1294 
1295 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1296 				    struct stmmac_dma_conf *dma_conf)
1297 {
1298 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1299 	unsigned int desc_size;
1300 	void *head_tx;
1301 	u32 queue;
1302 
1303 	/* Display TX rings */
1304 	for (queue = 0; queue < tx_cnt; queue++) {
1305 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1306 
1307 		pr_info("\tTX Queue %d rings\n", queue);
1308 
1309 		if (priv->extend_desc) {
1310 			head_tx = (void *)tx_q->dma_etx;
1311 			desc_size = sizeof(struct dma_extended_desc);
1312 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1313 			head_tx = (void *)tx_q->dma_entx;
1314 			desc_size = sizeof(struct dma_edesc);
1315 		} else {
1316 			head_tx = (void *)tx_q->dma_tx;
1317 			desc_size = sizeof(struct dma_desc);
1318 		}
1319 
1320 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1321 				    tx_q->dma_tx_phy, desc_size);
1322 	}
1323 }
1324 
1325 static void stmmac_display_rings(struct stmmac_priv *priv,
1326 				 struct stmmac_dma_conf *dma_conf)
1327 {
1328 	/* Display RX ring */
1329 	stmmac_display_rx_rings(priv, dma_conf);
1330 
1331 	/* Display TX ring */
1332 	stmmac_display_tx_rings(priv, dma_conf);
1333 }
1334 
1335 static int stmmac_set_bfsize(int mtu, int bufsize)
1336 {
1337 	int ret = bufsize;
1338 
1339 	if (mtu >= BUF_SIZE_8KiB)
1340 		ret = BUF_SIZE_16KiB;
1341 	else if (mtu >= BUF_SIZE_4KiB)
1342 		ret = BUF_SIZE_8KiB;
1343 	else if (mtu >= BUF_SIZE_2KiB)
1344 		ret = BUF_SIZE_4KiB;
1345 	else if (mtu > DEFAULT_BUFSIZE)
1346 		ret = BUF_SIZE_2KiB;
1347 	else
1348 		ret = DEFAULT_BUFSIZE;
1349 
1350 	return ret;
1351 }
1352 
1353 /**
1354  * stmmac_clear_rx_descriptors - clear RX descriptors
1355  * @priv: driver private structure
1356  * @dma_conf: structure to take the dma data
1357  * @queue: RX queue index
1358  * Description: this function is called to clear the RX descriptors
1359  * in case of both basic and extended descriptors are used.
1360  */
1361 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1362 					struct stmmac_dma_conf *dma_conf,
1363 					u32 queue)
1364 {
1365 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1366 	int i;
1367 
1368 	/* Clear the RX descriptors */
1369 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1370 		if (priv->extend_desc)
1371 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1372 					priv->use_riwt, priv->mode,
1373 					(i == dma_conf->dma_rx_size - 1),
1374 					dma_conf->dma_buf_sz);
1375 		else
1376 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1377 					priv->use_riwt, priv->mode,
1378 					(i == dma_conf->dma_rx_size - 1),
1379 					dma_conf->dma_buf_sz);
1380 }
1381 
1382 /**
1383  * stmmac_clear_tx_descriptors - clear tx descriptors
1384  * @priv: driver private structure
1385  * @dma_conf: structure to take the dma data
1386  * @queue: TX queue index.
1387  * Description: this function is called to clear the TX descriptors
1388  * in case of both basic and extended descriptors are used.
1389  */
1390 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1391 					struct stmmac_dma_conf *dma_conf,
1392 					u32 queue)
1393 {
1394 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1395 	int i;
1396 
1397 	/* Clear the TX descriptors */
1398 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1399 		int last = (i == (dma_conf->dma_tx_size - 1));
1400 		struct dma_desc *p;
1401 
1402 		if (priv->extend_desc)
1403 			p = &tx_q->dma_etx[i].basic;
1404 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1405 			p = &tx_q->dma_entx[i].basic;
1406 		else
1407 			p = &tx_q->dma_tx[i];
1408 
1409 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1410 	}
1411 }
1412 
1413 /**
1414  * stmmac_clear_descriptors - clear descriptors
1415  * @priv: driver private structure
1416  * @dma_conf: structure to take the dma data
1417  * Description: this function is called to clear the TX and RX descriptors
1418  * in case of both basic and extended descriptors are used.
1419  */
1420 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1421 				     struct stmmac_dma_conf *dma_conf)
1422 {
1423 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1424 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1425 	u32 queue;
1426 
1427 	/* Clear the RX descriptors */
1428 	for (queue = 0; queue < rx_queue_cnt; queue++)
1429 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1430 
1431 	/* Clear the TX descriptors */
1432 	for (queue = 0; queue < tx_queue_cnt; queue++)
1433 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1434 }
1435 
1436 /**
1437  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1438  * @priv: driver private structure
1439  * @dma_conf: structure to take the dma data
1440  * @p: descriptor pointer
1441  * @i: descriptor index
1442  * @flags: gfp flag
1443  * @queue: RX queue index
1444  * Description: this function is called to allocate a receive buffer, perform
1445  * the DMA mapping and init the descriptor.
1446  */
1447 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1448 				  struct stmmac_dma_conf *dma_conf,
1449 				  struct dma_desc *p,
1450 				  int i, gfp_t flags, u32 queue)
1451 {
1452 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1453 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1454 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1455 
1456 	if (priv->dma_cap.host_dma_width <= 32)
1457 		gfp |= GFP_DMA32;
1458 
1459 	if (!buf->page) {
1460 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1461 		if (!buf->page)
1462 			return -ENOMEM;
1463 		buf->page_offset = stmmac_rx_offset(priv);
1464 	}
1465 
1466 	if (priv->sph && !buf->sec_page) {
1467 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1468 		if (!buf->sec_page)
1469 			return -ENOMEM;
1470 
1471 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1472 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1473 	} else {
1474 		buf->sec_page = NULL;
1475 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1476 	}
1477 
1478 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1479 
1480 	stmmac_set_desc_addr(priv, p, buf->addr);
1481 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1482 		stmmac_init_desc3(priv, p);
1483 
1484 	return 0;
1485 }
1486 
1487 /**
1488  * stmmac_free_rx_buffer - free RX dma buffers
1489  * @priv: private structure
1490  * @rx_q: RX queue
1491  * @i: buffer index.
1492  */
1493 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1494 				  struct stmmac_rx_queue *rx_q,
1495 				  int i)
1496 {
1497 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1498 
1499 	if (buf->page)
1500 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1501 	buf->page = NULL;
1502 
1503 	if (buf->sec_page)
1504 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1505 	buf->sec_page = NULL;
1506 }
1507 
1508 /**
1509  * stmmac_free_tx_buffer - free RX dma buffers
1510  * @priv: private structure
1511  * @dma_conf: structure to take the dma data
1512  * @queue: RX queue index
1513  * @i: buffer index.
1514  */
1515 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1516 				  struct stmmac_dma_conf *dma_conf,
1517 				  u32 queue, int i)
1518 {
1519 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1520 
1521 	if (tx_q->tx_skbuff_dma[i].buf &&
1522 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1523 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1524 			dma_unmap_page(priv->device,
1525 				       tx_q->tx_skbuff_dma[i].buf,
1526 				       tx_q->tx_skbuff_dma[i].len,
1527 				       DMA_TO_DEVICE);
1528 		else
1529 			dma_unmap_single(priv->device,
1530 					 tx_q->tx_skbuff_dma[i].buf,
1531 					 tx_q->tx_skbuff_dma[i].len,
1532 					 DMA_TO_DEVICE);
1533 	}
1534 
1535 	if (tx_q->xdpf[i] &&
1536 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1537 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1538 		xdp_return_frame(tx_q->xdpf[i]);
1539 		tx_q->xdpf[i] = NULL;
1540 	}
1541 
1542 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1543 		tx_q->xsk_frames_done++;
1544 
1545 	if (tx_q->tx_skbuff[i] &&
1546 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1547 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1548 		tx_q->tx_skbuff[i] = NULL;
1549 	}
1550 
1551 	tx_q->tx_skbuff_dma[i].buf = 0;
1552 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1553 }
1554 
1555 /**
1556  * dma_free_rx_skbufs - free RX dma buffers
1557  * @priv: private structure
1558  * @dma_conf: structure to take the dma data
1559  * @queue: RX queue index
1560  */
1561 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1562 			       struct stmmac_dma_conf *dma_conf,
1563 			       u32 queue)
1564 {
1565 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1566 	int i;
1567 
1568 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1569 		stmmac_free_rx_buffer(priv, rx_q, i);
1570 }
1571 
1572 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1573 				   struct stmmac_dma_conf *dma_conf,
1574 				   u32 queue, gfp_t flags)
1575 {
1576 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1577 	int i;
1578 
1579 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1580 		struct dma_desc *p;
1581 		int ret;
1582 
1583 		if (priv->extend_desc)
1584 			p = &((rx_q->dma_erx + i)->basic);
1585 		else
1586 			p = rx_q->dma_rx + i;
1587 
1588 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1589 					     queue);
1590 		if (ret)
1591 			return ret;
1592 
1593 		rx_q->buf_alloc_num++;
1594 	}
1595 
1596 	return 0;
1597 }
1598 
1599 /**
1600  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1601  * @priv: private structure
1602  * @dma_conf: structure to take the dma data
1603  * @queue: RX queue index
1604  */
1605 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1606 				struct stmmac_dma_conf *dma_conf,
1607 				u32 queue)
1608 {
1609 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1610 	int i;
1611 
1612 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1613 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1614 
1615 		if (!buf->xdp)
1616 			continue;
1617 
1618 		xsk_buff_free(buf->xdp);
1619 		buf->xdp = NULL;
1620 	}
1621 }
1622 
1623 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1624 				      struct stmmac_dma_conf *dma_conf,
1625 				      u32 queue)
1626 {
1627 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1628 	int i;
1629 
1630 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1631 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1632 	 * use this macro to make sure no size violations.
1633 	 */
1634 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1635 
1636 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1637 		struct stmmac_rx_buffer *buf;
1638 		dma_addr_t dma_addr;
1639 		struct dma_desc *p;
1640 
1641 		if (priv->extend_desc)
1642 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1643 		else
1644 			p = rx_q->dma_rx + i;
1645 
1646 		buf = &rx_q->buf_pool[i];
1647 
1648 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1649 		if (!buf->xdp)
1650 			return -ENOMEM;
1651 
1652 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1653 		stmmac_set_desc_addr(priv, p, dma_addr);
1654 		rx_q->buf_alloc_num++;
1655 	}
1656 
1657 	return 0;
1658 }
1659 
1660 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1661 {
1662 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1663 		return NULL;
1664 
1665 	return xsk_get_pool_from_qid(priv->dev, queue);
1666 }
1667 
1668 /**
1669  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1670  * @priv: driver private structure
1671  * @dma_conf: structure to take the dma data
1672  * @queue: RX queue index
1673  * @flags: gfp flag.
1674  * Description: this function initializes the DMA RX descriptors
1675  * and allocates the socket buffers. It supports the chained and ring
1676  * modes.
1677  */
1678 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1679 				    struct stmmac_dma_conf *dma_conf,
1680 				    u32 queue, gfp_t flags)
1681 {
1682 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1683 	int ret;
1684 
1685 	netif_dbg(priv, probe, priv->dev,
1686 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1687 		  (u32)rx_q->dma_rx_phy);
1688 
1689 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1690 
1691 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1692 
1693 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1694 
1695 	if (rx_q->xsk_pool) {
1696 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1697 						   MEM_TYPE_XSK_BUFF_POOL,
1698 						   NULL));
1699 		netdev_info(priv->dev,
1700 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1701 			    rx_q->queue_index);
1702 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1703 	} else {
1704 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1705 						   MEM_TYPE_PAGE_POOL,
1706 						   rx_q->page_pool));
1707 		netdev_info(priv->dev,
1708 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1709 			    rx_q->queue_index);
1710 	}
1711 
1712 	if (rx_q->xsk_pool) {
1713 		/* RX XDP ZC buffer pool may not be populated, e.g.
1714 		 * xdpsock TX-only.
1715 		 */
1716 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1717 	} else {
1718 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1719 		if (ret < 0)
1720 			return -ENOMEM;
1721 	}
1722 
1723 	/* Setup the chained descriptor addresses */
1724 	if (priv->mode == STMMAC_CHAIN_MODE) {
1725 		if (priv->extend_desc)
1726 			stmmac_mode_init(priv, rx_q->dma_erx,
1727 					 rx_q->dma_rx_phy,
1728 					 dma_conf->dma_rx_size, 1);
1729 		else
1730 			stmmac_mode_init(priv, rx_q->dma_rx,
1731 					 rx_q->dma_rx_phy,
1732 					 dma_conf->dma_rx_size, 0);
1733 	}
1734 
1735 	return 0;
1736 }
1737 
1738 static int init_dma_rx_desc_rings(struct net_device *dev,
1739 				  struct stmmac_dma_conf *dma_conf,
1740 				  gfp_t flags)
1741 {
1742 	struct stmmac_priv *priv = netdev_priv(dev);
1743 	u32 rx_count = priv->plat->rx_queues_to_use;
1744 	int queue;
1745 	int ret;
1746 
1747 	/* RX INITIALIZATION */
1748 	netif_dbg(priv, probe, priv->dev,
1749 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1750 
1751 	for (queue = 0; queue < rx_count; queue++) {
1752 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1753 		if (ret)
1754 			goto err_init_rx_buffers;
1755 	}
1756 
1757 	return 0;
1758 
1759 err_init_rx_buffers:
1760 	while (queue >= 0) {
1761 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1762 
1763 		if (rx_q->xsk_pool)
1764 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1765 		else
1766 			dma_free_rx_skbufs(priv, dma_conf, queue);
1767 
1768 		rx_q->buf_alloc_num = 0;
1769 		rx_q->xsk_pool = NULL;
1770 
1771 		queue--;
1772 	}
1773 
1774 	return ret;
1775 }
1776 
1777 /**
1778  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1779  * @priv: driver private structure
1780  * @dma_conf: structure to take the dma data
1781  * @queue: TX queue index
1782  * Description: this function initializes the DMA TX descriptors
1783  * and allocates the socket buffers. It supports the chained and ring
1784  * modes.
1785  */
1786 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1787 				    struct stmmac_dma_conf *dma_conf,
1788 				    u32 queue)
1789 {
1790 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1791 	int i;
1792 
1793 	netif_dbg(priv, probe, priv->dev,
1794 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1795 		  (u32)tx_q->dma_tx_phy);
1796 
1797 	/* Setup the chained descriptor addresses */
1798 	if (priv->mode == STMMAC_CHAIN_MODE) {
1799 		if (priv->extend_desc)
1800 			stmmac_mode_init(priv, tx_q->dma_etx,
1801 					 tx_q->dma_tx_phy,
1802 					 dma_conf->dma_tx_size, 1);
1803 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1804 			stmmac_mode_init(priv, tx_q->dma_tx,
1805 					 tx_q->dma_tx_phy,
1806 					 dma_conf->dma_tx_size, 0);
1807 	}
1808 
1809 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1810 
1811 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1812 		struct dma_desc *p;
1813 
1814 		if (priv->extend_desc)
1815 			p = &((tx_q->dma_etx + i)->basic);
1816 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1817 			p = &((tx_q->dma_entx + i)->basic);
1818 		else
1819 			p = tx_q->dma_tx + i;
1820 
1821 		stmmac_clear_desc(priv, p);
1822 
1823 		tx_q->tx_skbuff_dma[i].buf = 0;
1824 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1825 		tx_q->tx_skbuff_dma[i].len = 0;
1826 		tx_q->tx_skbuff_dma[i].last_segment = false;
1827 		tx_q->tx_skbuff[i] = NULL;
1828 	}
1829 
1830 	return 0;
1831 }
1832 
1833 static int init_dma_tx_desc_rings(struct net_device *dev,
1834 				  struct stmmac_dma_conf *dma_conf)
1835 {
1836 	struct stmmac_priv *priv = netdev_priv(dev);
1837 	u32 tx_queue_cnt;
1838 	u32 queue;
1839 
1840 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1841 
1842 	for (queue = 0; queue < tx_queue_cnt; queue++)
1843 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1844 
1845 	return 0;
1846 }
1847 
1848 /**
1849  * init_dma_desc_rings - init the RX/TX descriptor rings
1850  * @dev: net device structure
1851  * @dma_conf: structure to take the dma data
1852  * @flags: gfp flag.
1853  * Description: this function initializes the DMA RX/TX descriptors
1854  * and allocates the socket buffers. It supports the chained and ring
1855  * modes.
1856  */
1857 static int init_dma_desc_rings(struct net_device *dev,
1858 			       struct stmmac_dma_conf *dma_conf,
1859 			       gfp_t flags)
1860 {
1861 	struct stmmac_priv *priv = netdev_priv(dev);
1862 	int ret;
1863 
1864 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1865 	if (ret)
1866 		return ret;
1867 
1868 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1869 
1870 	stmmac_clear_descriptors(priv, dma_conf);
1871 
1872 	if (netif_msg_hw(priv))
1873 		stmmac_display_rings(priv, dma_conf);
1874 
1875 	return ret;
1876 }
1877 
1878 /**
1879  * dma_free_tx_skbufs - free TX dma buffers
1880  * @priv: private structure
1881  * @dma_conf: structure to take the dma data
1882  * @queue: TX queue index
1883  */
1884 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1885 			       struct stmmac_dma_conf *dma_conf,
1886 			       u32 queue)
1887 {
1888 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1889 	int i;
1890 
1891 	tx_q->xsk_frames_done = 0;
1892 
1893 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1894 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1895 
1896 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1897 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1898 		tx_q->xsk_frames_done = 0;
1899 		tx_q->xsk_pool = NULL;
1900 	}
1901 }
1902 
1903 /**
1904  * stmmac_free_tx_skbufs - free TX skb buffers
1905  * @priv: private structure
1906  */
1907 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1908 {
1909 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1910 	u32 queue;
1911 
1912 	for (queue = 0; queue < tx_queue_cnt; queue++)
1913 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1914 }
1915 
1916 /**
1917  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1918  * @priv: private structure
1919  * @dma_conf: structure to take the dma data
1920  * @queue: RX queue index
1921  */
1922 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1923 					 struct stmmac_dma_conf *dma_conf,
1924 					 u32 queue)
1925 {
1926 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1927 
1928 	/* Release the DMA RX socket buffers */
1929 	if (rx_q->xsk_pool)
1930 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1931 	else
1932 		dma_free_rx_skbufs(priv, dma_conf, queue);
1933 
1934 	rx_q->buf_alloc_num = 0;
1935 	rx_q->xsk_pool = NULL;
1936 
1937 	/* Free DMA regions of consistent memory previously allocated */
1938 	if (!priv->extend_desc)
1939 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1940 				  sizeof(struct dma_desc),
1941 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1942 	else
1943 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1944 				  sizeof(struct dma_extended_desc),
1945 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1946 
1947 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1948 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1949 
1950 	kfree(rx_q->buf_pool);
1951 	if (rx_q->page_pool)
1952 		page_pool_destroy(rx_q->page_pool);
1953 }
1954 
1955 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1956 				       struct stmmac_dma_conf *dma_conf)
1957 {
1958 	u32 rx_count = priv->plat->rx_queues_to_use;
1959 	u32 queue;
1960 
1961 	/* Free RX queue resources */
1962 	for (queue = 0; queue < rx_count; queue++)
1963 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1964 }
1965 
1966 /**
1967  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1968  * @priv: private structure
1969  * @dma_conf: structure to take the dma data
1970  * @queue: TX queue index
1971  */
1972 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1973 					 struct stmmac_dma_conf *dma_conf,
1974 					 u32 queue)
1975 {
1976 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1977 	size_t size;
1978 	void *addr;
1979 
1980 	/* Release the DMA TX socket buffers */
1981 	dma_free_tx_skbufs(priv, dma_conf, queue);
1982 
1983 	if (priv->extend_desc) {
1984 		size = sizeof(struct dma_extended_desc);
1985 		addr = tx_q->dma_etx;
1986 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1987 		size = sizeof(struct dma_edesc);
1988 		addr = tx_q->dma_entx;
1989 	} else {
1990 		size = sizeof(struct dma_desc);
1991 		addr = tx_q->dma_tx;
1992 	}
1993 
1994 	size *= dma_conf->dma_tx_size;
1995 
1996 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1997 
1998 	kfree(tx_q->tx_skbuff_dma);
1999 	kfree(tx_q->tx_skbuff);
2000 }
2001 
2002 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2003 				       struct stmmac_dma_conf *dma_conf)
2004 {
2005 	u32 tx_count = priv->plat->tx_queues_to_use;
2006 	u32 queue;
2007 
2008 	/* Free TX queue resources */
2009 	for (queue = 0; queue < tx_count; queue++)
2010 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2011 }
2012 
2013 /**
2014  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2015  * @priv: private structure
2016  * @dma_conf: structure to take the dma data
2017  * @queue: RX queue index
2018  * Description: according to which descriptor can be used (extend or basic)
2019  * this function allocates the resources for TX and RX paths. In case of
2020  * reception, for example, it pre-allocated the RX socket buffer in order to
2021  * allow zero-copy mechanism.
2022  */
2023 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2024 					 struct stmmac_dma_conf *dma_conf,
2025 					 u32 queue)
2026 {
2027 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2028 	struct stmmac_channel *ch = &priv->channel[queue];
2029 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2030 	struct page_pool_params pp_params = { 0 };
2031 	unsigned int num_pages;
2032 	unsigned int napi_id;
2033 	int ret;
2034 
2035 	rx_q->queue_index = queue;
2036 	rx_q->priv_data = priv;
2037 
2038 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2039 	pp_params.pool_size = dma_conf->dma_rx_size;
2040 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2041 	pp_params.order = ilog2(num_pages);
2042 	pp_params.nid = dev_to_node(priv->device);
2043 	pp_params.dev = priv->device;
2044 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2045 	pp_params.offset = stmmac_rx_offset(priv);
2046 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2047 
2048 	rx_q->page_pool = page_pool_create(&pp_params);
2049 	if (IS_ERR(rx_q->page_pool)) {
2050 		ret = PTR_ERR(rx_q->page_pool);
2051 		rx_q->page_pool = NULL;
2052 		return ret;
2053 	}
2054 
2055 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2056 				 sizeof(*rx_q->buf_pool),
2057 				 GFP_KERNEL);
2058 	if (!rx_q->buf_pool)
2059 		return -ENOMEM;
2060 
2061 	if (priv->extend_desc) {
2062 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2063 						   dma_conf->dma_rx_size *
2064 						   sizeof(struct dma_extended_desc),
2065 						   &rx_q->dma_rx_phy,
2066 						   GFP_KERNEL);
2067 		if (!rx_q->dma_erx)
2068 			return -ENOMEM;
2069 
2070 	} else {
2071 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2072 						  dma_conf->dma_rx_size *
2073 						  sizeof(struct dma_desc),
2074 						  &rx_q->dma_rx_phy,
2075 						  GFP_KERNEL);
2076 		if (!rx_q->dma_rx)
2077 			return -ENOMEM;
2078 	}
2079 
2080 	if (stmmac_xdp_is_enabled(priv) &&
2081 	    test_bit(queue, priv->af_xdp_zc_qps))
2082 		napi_id = ch->rxtx_napi.napi_id;
2083 	else
2084 		napi_id = ch->rx_napi.napi_id;
2085 
2086 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2087 			       rx_q->queue_index,
2088 			       napi_id);
2089 	if (ret) {
2090 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2091 		return -EINVAL;
2092 	}
2093 
2094 	return 0;
2095 }
2096 
2097 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2098 				       struct stmmac_dma_conf *dma_conf)
2099 {
2100 	u32 rx_count = priv->plat->rx_queues_to_use;
2101 	u32 queue;
2102 	int ret;
2103 
2104 	/* RX queues buffers and DMA */
2105 	for (queue = 0; queue < rx_count; queue++) {
2106 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2107 		if (ret)
2108 			goto err_dma;
2109 	}
2110 
2111 	return 0;
2112 
2113 err_dma:
2114 	free_dma_rx_desc_resources(priv, dma_conf);
2115 
2116 	return ret;
2117 }
2118 
2119 /**
2120  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2121  * @priv: private structure
2122  * @dma_conf: structure to take the dma data
2123  * @queue: TX queue index
2124  * Description: according to which descriptor can be used (extend or basic)
2125  * this function allocates the resources for TX and RX paths. In case of
2126  * reception, for example, it pre-allocated the RX socket buffer in order to
2127  * allow zero-copy mechanism.
2128  */
2129 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2130 					 struct stmmac_dma_conf *dma_conf,
2131 					 u32 queue)
2132 {
2133 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2134 	size_t size;
2135 	void *addr;
2136 
2137 	tx_q->queue_index = queue;
2138 	tx_q->priv_data = priv;
2139 
2140 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2141 				      sizeof(*tx_q->tx_skbuff_dma),
2142 				      GFP_KERNEL);
2143 	if (!tx_q->tx_skbuff_dma)
2144 		return -ENOMEM;
2145 
2146 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2147 				  sizeof(struct sk_buff *),
2148 				  GFP_KERNEL);
2149 	if (!tx_q->tx_skbuff)
2150 		return -ENOMEM;
2151 
2152 	if (priv->extend_desc)
2153 		size = sizeof(struct dma_extended_desc);
2154 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2155 		size = sizeof(struct dma_edesc);
2156 	else
2157 		size = sizeof(struct dma_desc);
2158 
2159 	size *= dma_conf->dma_tx_size;
2160 
2161 	addr = dma_alloc_coherent(priv->device, size,
2162 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2163 	if (!addr)
2164 		return -ENOMEM;
2165 
2166 	if (priv->extend_desc)
2167 		tx_q->dma_etx = addr;
2168 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2169 		tx_q->dma_entx = addr;
2170 	else
2171 		tx_q->dma_tx = addr;
2172 
2173 	return 0;
2174 }
2175 
2176 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2177 				       struct stmmac_dma_conf *dma_conf)
2178 {
2179 	u32 tx_count = priv->plat->tx_queues_to_use;
2180 	u32 queue;
2181 	int ret;
2182 
2183 	/* TX queues buffers and DMA */
2184 	for (queue = 0; queue < tx_count; queue++) {
2185 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2186 		if (ret)
2187 			goto err_dma;
2188 	}
2189 
2190 	return 0;
2191 
2192 err_dma:
2193 	free_dma_tx_desc_resources(priv, dma_conf);
2194 	return ret;
2195 }
2196 
2197 /**
2198  * alloc_dma_desc_resources - alloc TX/RX resources.
2199  * @priv: private structure
2200  * @dma_conf: structure to take the dma data
2201  * Description: according to which descriptor can be used (extend or basic)
2202  * this function allocates the resources for TX and RX paths. In case of
2203  * reception, for example, it pre-allocated the RX socket buffer in order to
2204  * allow zero-copy mechanism.
2205  */
2206 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2207 				    struct stmmac_dma_conf *dma_conf)
2208 {
2209 	/* RX Allocation */
2210 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2211 
2212 	if (ret)
2213 		return ret;
2214 
2215 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2216 
2217 	return ret;
2218 }
2219 
2220 /**
2221  * free_dma_desc_resources - free dma desc resources
2222  * @priv: private structure
2223  * @dma_conf: structure to take the dma data
2224  */
2225 static void free_dma_desc_resources(struct stmmac_priv *priv,
2226 				    struct stmmac_dma_conf *dma_conf)
2227 {
2228 	/* Release the DMA TX socket buffers */
2229 	free_dma_tx_desc_resources(priv, dma_conf);
2230 
2231 	/* Release the DMA RX socket buffers later
2232 	 * to ensure all pending XDP_TX buffers are returned.
2233 	 */
2234 	free_dma_rx_desc_resources(priv, dma_conf);
2235 }
2236 
2237 /**
2238  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2239  *  @priv: driver private structure
2240  *  Description: It is used for enabling the rx queues in the MAC
2241  */
2242 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2243 {
2244 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2245 	int queue;
2246 	u8 mode;
2247 
2248 	for (queue = 0; queue < rx_queues_count; queue++) {
2249 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2250 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2251 	}
2252 }
2253 
2254 /**
2255  * stmmac_start_rx_dma - start RX DMA channel
2256  * @priv: driver private structure
2257  * @chan: RX channel index
2258  * Description:
2259  * This starts a RX DMA channel
2260  */
2261 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2262 {
2263 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2264 	stmmac_start_rx(priv, priv->ioaddr, chan);
2265 }
2266 
2267 /**
2268  * stmmac_start_tx_dma - start TX DMA channel
2269  * @priv: driver private structure
2270  * @chan: TX channel index
2271  * Description:
2272  * This starts a TX DMA channel
2273  */
2274 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2275 {
2276 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2277 	stmmac_start_tx(priv, priv->ioaddr, chan);
2278 }
2279 
2280 /**
2281  * stmmac_stop_rx_dma - stop RX DMA channel
2282  * @priv: driver private structure
2283  * @chan: RX channel index
2284  * Description:
2285  * This stops a RX DMA channel
2286  */
2287 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2288 {
2289 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2290 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2291 }
2292 
2293 /**
2294  * stmmac_stop_tx_dma - stop TX DMA channel
2295  * @priv: driver private structure
2296  * @chan: TX channel index
2297  * Description:
2298  * This stops a TX DMA channel
2299  */
2300 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2301 {
2302 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2303 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2304 }
2305 
2306 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2307 {
2308 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2309 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2310 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2311 	u32 chan;
2312 
2313 	for (chan = 0; chan < dma_csr_ch; chan++) {
2314 		struct stmmac_channel *ch = &priv->channel[chan];
2315 		unsigned long flags;
2316 
2317 		spin_lock_irqsave(&ch->lock, flags);
2318 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2319 		spin_unlock_irqrestore(&ch->lock, flags);
2320 	}
2321 }
2322 
2323 /**
2324  * stmmac_start_all_dma - start all RX and TX DMA channels
2325  * @priv: driver private structure
2326  * Description:
2327  * This starts all the RX and TX DMA channels
2328  */
2329 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2330 {
2331 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2332 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2333 	u32 chan = 0;
2334 
2335 	for (chan = 0; chan < rx_channels_count; chan++)
2336 		stmmac_start_rx_dma(priv, chan);
2337 
2338 	for (chan = 0; chan < tx_channels_count; chan++)
2339 		stmmac_start_tx_dma(priv, chan);
2340 }
2341 
2342 /**
2343  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2344  * @priv: driver private structure
2345  * Description:
2346  * This stops the RX and TX DMA channels
2347  */
2348 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2349 {
2350 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2351 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2352 	u32 chan = 0;
2353 
2354 	for (chan = 0; chan < rx_channels_count; chan++)
2355 		stmmac_stop_rx_dma(priv, chan);
2356 
2357 	for (chan = 0; chan < tx_channels_count; chan++)
2358 		stmmac_stop_tx_dma(priv, chan);
2359 }
2360 
2361 /**
2362  *  stmmac_dma_operation_mode - HW DMA operation mode
2363  *  @priv: driver private structure
2364  *  Description: it is used for configuring the DMA operation mode register in
2365  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2366  */
2367 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2368 {
2369 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2370 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2371 	int rxfifosz = priv->plat->rx_fifo_size;
2372 	int txfifosz = priv->plat->tx_fifo_size;
2373 	u32 txmode = 0;
2374 	u32 rxmode = 0;
2375 	u32 chan = 0;
2376 	u8 qmode = 0;
2377 
2378 	if (rxfifosz == 0)
2379 		rxfifosz = priv->dma_cap.rx_fifo_size;
2380 	if (txfifosz == 0)
2381 		txfifosz = priv->dma_cap.tx_fifo_size;
2382 
2383 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2384 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2385 		rxfifosz /= rx_channels_count;
2386 		txfifosz /= tx_channels_count;
2387 	}
2388 
2389 	if (priv->plat->force_thresh_dma_mode) {
2390 		txmode = tc;
2391 		rxmode = tc;
2392 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2393 		/*
2394 		 * In case of GMAC, SF mode can be enabled
2395 		 * to perform the TX COE in HW. This depends on:
2396 		 * 1) TX COE if actually supported
2397 		 * 2) There is no bugged Jumbo frame support
2398 		 *    that needs to not insert csum in the TDES.
2399 		 */
2400 		txmode = SF_DMA_MODE;
2401 		rxmode = SF_DMA_MODE;
2402 		priv->xstats.threshold = SF_DMA_MODE;
2403 	} else {
2404 		txmode = tc;
2405 		rxmode = SF_DMA_MODE;
2406 	}
2407 
2408 	/* configure all channels */
2409 	for (chan = 0; chan < rx_channels_count; chan++) {
2410 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2411 		u32 buf_size;
2412 
2413 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2414 
2415 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2416 				rxfifosz, qmode);
2417 
2418 		if (rx_q->xsk_pool) {
2419 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2420 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421 					      buf_size,
2422 					      chan);
2423 		} else {
2424 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2425 					      priv->dma_conf.dma_buf_sz,
2426 					      chan);
2427 		}
2428 	}
2429 
2430 	for (chan = 0; chan < tx_channels_count; chan++) {
2431 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2432 
2433 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2434 				txfifosz, qmode);
2435 	}
2436 }
2437 
2438 static void stmmac_xsk_request_timestamp(void *_priv)
2439 {
2440 	struct stmmac_metadata_request *meta_req = _priv;
2441 
2442 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2443 	*meta_req->set_ic = true;
2444 }
2445 
2446 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2447 {
2448 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2449 	struct stmmac_priv *priv = tx_compl->priv;
2450 	struct dma_desc *desc = tx_compl->desc;
2451 	bool found = false;
2452 	u64 ns = 0;
2453 
2454 	if (!priv->hwts_tx_en)
2455 		return 0;
2456 
2457 	/* check tx tstamp status */
2458 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2459 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2460 		found = true;
2461 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2462 		found = true;
2463 	}
2464 
2465 	if (found) {
2466 		ns -= priv->plat->cdc_error_adj;
2467 		return ns_to_ktime(ns);
2468 	}
2469 
2470 	return 0;
2471 }
2472 
2473 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2474 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2475 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2476 };
2477 
2478 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2479 {
2480 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2481 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2482 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2483 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2484 	unsigned int entry = tx_q->cur_tx;
2485 	struct dma_desc *tx_desc = NULL;
2486 	struct xdp_desc xdp_desc;
2487 	bool work_done = true;
2488 	u32 tx_set_ic_bit = 0;
2489 
2490 	/* Avoids TX time-out as we are sharing with slow path */
2491 	txq_trans_cond_update(nq);
2492 
2493 	budget = min(budget, stmmac_tx_avail(priv, queue));
2494 
2495 	while (budget-- > 0) {
2496 		struct stmmac_metadata_request meta_req;
2497 		struct xsk_tx_metadata *meta = NULL;
2498 		dma_addr_t dma_addr;
2499 		bool set_ic;
2500 
2501 		/* We are sharing with slow path and stop XSK TX desc submission when
2502 		 * available TX ring is less than threshold.
2503 		 */
2504 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2505 		    !netif_carrier_ok(priv->dev)) {
2506 			work_done = false;
2507 			break;
2508 		}
2509 
2510 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2511 			break;
2512 
2513 		if (priv->est && priv->est->enable &&
2514 		    priv->est->max_sdu[queue] &&
2515 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2516 			priv->xstats.max_sdu_txq_drop[queue]++;
2517 			continue;
2518 		}
2519 
2520 		if (likely(priv->extend_desc))
2521 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2522 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2523 			tx_desc = &tx_q->dma_entx[entry].basic;
2524 		else
2525 			tx_desc = tx_q->dma_tx + entry;
2526 
2527 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2528 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2529 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2530 
2531 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2532 
2533 		/* To return XDP buffer to XSK pool, we simple call
2534 		 * xsk_tx_completed(), so we don't need to fill up
2535 		 * 'buf' and 'xdpf'.
2536 		 */
2537 		tx_q->tx_skbuff_dma[entry].buf = 0;
2538 		tx_q->xdpf[entry] = NULL;
2539 
2540 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2541 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2542 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2543 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2544 
2545 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2546 
2547 		tx_q->tx_count_frames++;
2548 
2549 		if (!priv->tx_coal_frames[queue])
2550 			set_ic = false;
2551 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2552 			set_ic = true;
2553 		else
2554 			set_ic = false;
2555 
2556 		meta_req.priv = priv;
2557 		meta_req.tx_desc = tx_desc;
2558 		meta_req.set_ic = &set_ic;
2559 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2560 					&meta_req);
2561 		if (set_ic) {
2562 			tx_q->tx_count_frames = 0;
2563 			stmmac_set_tx_ic(priv, tx_desc);
2564 			tx_set_ic_bit++;
2565 		}
2566 
2567 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2568 				       true, priv->mode, true, true,
2569 				       xdp_desc.len);
2570 
2571 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2572 
2573 		xsk_tx_metadata_to_compl(meta,
2574 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2575 
2576 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2577 		entry = tx_q->cur_tx;
2578 	}
2579 	u64_stats_update_begin(&txq_stats->napi_syncp);
2580 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2581 	u64_stats_update_end(&txq_stats->napi_syncp);
2582 
2583 	if (tx_desc) {
2584 		stmmac_flush_tx_descriptors(priv, queue);
2585 		xsk_tx_release(pool);
2586 	}
2587 
2588 	/* Return true if all of the 3 conditions are met
2589 	 *  a) TX Budget is still available
2590 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2591 	 *     pending XSK TX for transmission)
2592 	 */
2593 	return !!budget && work_done;
2594 }
2595 
2596 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2597 {
2598 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2599 		tc += 64;
2600 
2601 		if (priv->plat->force_thresh_dma_mode)
2602 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2603 		else
2604 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2605 						      chan);
2606 
2607 		priv->xstats.threshold = tc;
2608 	}
2609 }
2610 
2611 /**
2612  * stmmac_tx_clean - to manage the transmission completion
2613  * @priv: driver private structure
2614  * @budget: napi budget limiting this functions packet handling
2615  * @queue: TX queue index
2616  * @pending_packets: signal to arm the TX coal timer
2617  * Description: it reclaims the transmit resources after transmission completes.
2618  * If some packets still needs to be handled, due to TX coalesce, set
2619  * pending_packets to true to make NAPI arm the TX coal timer.
2620  */
2621 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2622 			   bool *pending_packets)
2623 {
2624 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2625 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2626 	unsigned int bytes_compl = 0, pkts_compl = 0;
2627 	unsigned int entry, xmits = 0, count = 0;
2628 	u32 tx_packets = 0, tx_errors = 0;
2629 
2630 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2631 
2632 	tx_q->xsk_frames_done = 0;
2633 
2634 	entry = tx_q->dirty_tx;
2635 
2636 	/* Try to clean all TX complete frame in 1 shot */
2637 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2638 		struct xdp_frame *xdpf;
2639 		struct sk_buff *skb;
2640 		struct dma_desc *p;
2641 		int status;
2642 
2643 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2644 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2645 			xdpf = tx_q->xdpf[entry];
2646 			skb = NULL;
2647 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2648 			xdpf = NULL;
2649 			skb = tx_q->tx_skbuff[entry];
2650 		} else {
2651 			xdpf = NULL;
2652 			skb = NULL;
2653 		}
2654 
2655 		if (priv->extend_desc)
2656 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2657 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2658 			p = &tx_q->dma_entx[entry].basic;
2659 		else
2660 			p = tx_q->dma_tx + entry;
2661 
2662 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2663 		/* Check if the descriptor is owned by the DMA */
2664 		if (unlikely(status & tx_dma_own))
2665 			break;
2666 
2667 		count++;
2668 
2669 		/* Make sure descriptor fields are read after reading
2670 		 * the own bit.
2671 		 */
2672 		dma_rmb();
2673 
2674 		/* Just consider the last segment and ...*/
2675 		if (likely(!(status & tx_not_ls))) {
2676 			/* ... verify the status error condition */
2677 			if (unlikely(status & tx_err)) {
2678 				tx_errors++;
2679 				if (unlikely(status & tx_err_bump_tc))
2680 					stmmac_bump_dma_threshold(priv, queue);
2681 			} else {
2682 				tx_packets++;
2683 			}
2684 			if (skb) {
2685 				stmmac_get_tx_hwtstamp(priv, p, skb);
2686 			} else if (tx_q->xsk_pool &&
2687 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2688 				struct stmmac_xsk_tx_complete tx_compl = {
2689 					.priv = priv,
2690 					.desc = p,
2691 				};
2692 
2693 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2694 							 &stmmac_xsk_tx_metadata_ops,
2695 							 &tx_compl);
2696 			}
2697 		}
2698 
2699 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2700 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2701 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2702 				dma_unmap_page(priv->device,
2703 					       tx_q->tx_skbuff_dma[entry].buf,
2704 					       tx_q->tx_skbuff_dma[entry].len,
2705 					       DMA_TO_DEVICE);
2706 			else
2707 				dma_unmap_single(priv->device,
2708 						 tx_q->tx_skbuff_dma[entry].buf,
2709 						 tx_q->tx_skbuff_dma[entry].len,
2710 						 DMA_TO_DEVICE);
2711 			tx_q->tx_skbuff_dma[entry].buf = 0;
2712 			tx_q->tx_skbuff_dma[entry].len = 0;
2713 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2714 		}
2715 
2716 		stmmac_clean_desc3(priv, tx_q, p);
2717 
2718 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2719 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2720 
2721 		if (xdpf &&
2722 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2723 			xdp_return_frame_rx_napi(xdpf);
2724 			tx_q->xdpf[entry] = NULL;
2725 		}
2726 
2727 		if (xdpf &&
2728 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2729 			xdp_return_frame(xdpf);
2730 			tx_q->xdpf[entry] = NULL;
2731 		}
2732 
2733 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2734 			tx_q->xsk_frames_done++;
2735 
2736 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2737 			if (likely(skb)) {
2738 				pkts_compl++;
2739 				bytes_compl += skb->len;
2740 				dev_consume_skb_any(skb);
2741 				tx_q->tx_skbuff[entry] = NULL;
2742 			}
2743 		}
2744 
2745 		stmmac_release_tx_desc(priv, p, priv->mode);
2746 
2747 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2748 	}
2749 	tx_q->dirty_tx = entry;
2750 
2751 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2752 				  pkts_compl, bytes_compl);
2753 
2754 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2755 								queue))) &&
2756 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2757 
2758 		netif_dbg(priv, tx_done, priv->dev,
2759 			  "%s: restart transmit\n", __func__);
2760 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2761 	}
2762 
2763 	if (tx_q->xsk_pool) {
2764 		bool work_done;
2765 
2766 		if (tx_q->xsk_frames_done)
2767 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2768 
2769 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2770 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2771 
2772 		/* For XSK TX, we try to send as many as possible.
2773 		 * If XSK work done (XSK TX desc empty and budget still
2774 		 * available), return "budget - 1" to reenable TX IRQ.
2775 		 * Else, return "budget" to make NAPI continue polling.
2776 		 */
2777 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2778 					       STMMAC_XSK_TX_BUDGET_MAX);
2779 		if (work_done)
2780 			xmits = budget - 1;
2781 		else
2782 			xmits = budget;
2783 	}
2784 
2785 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2786 	    priv->eee_sw_timer_en) {
2787 		if (stmmac_enable_eee_mode(priv))
2788 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2789 	}
2790 
2791 	/* We still have pending packets, let's call for a new scheduling */
2792 	if (tx_q->dirty_tx != tx_q->cur_tx)
2793 		*pending_packets = true;
2794 
2795 	u64_stats_update_begin(&txq_stats->napi_syncp);
2796 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2797 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2798 	u64_stats_inc(&txq_stats->napi.tx_clean);
2799 	u64_stats_update_end(&txq_stats->napi_syncp);
2800 
2801 	priv->xstats.tx_errors += tx_errors;
2802 
2803 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2804 
2805 	/* Combine decisions from TX clean and XSK TX */
2806 	return max(count, xmits);
2807 }
2808 
2809 /**
2810  * stmmac_tx_err - to manage the tx error
2811  * @priv: driver private structure
2812  * @chan: channel index
2813  * Description: it cleans the descriptors and restarts the transmission
2814  * in case of transmission errors.
2815  */
2816 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2817 {
2818 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2819 
2820 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2821 
2822 	stmmac_stop_tx_dma(priv, chan);
2823 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2824 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2825 	stmmac_reset_tx_queue(priv, chan);
2826 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2827 			    tx_q->dma_tx_phy, chan);
2828 	stmmac_start_tx_dma(priv, chan);
2829 
2830 	priv->xstats.tx_errors++;
2831 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2832 }
2833 
2834 /**
2835  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2836  *  @priv: driver private structure
2837  *  @txmode: TX operating mode
2838  *  @rxmode: RX operating mode
2839  *  @chan: channel index
2840  *  Description: it is used for configuring of the DMA operation mode in
2841  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2842  *  mode.
2843  */
2844 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2845 					  u32 rxmode, u32 chan)
2846 {
2847 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2848 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2849 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2850 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2851 	int rxfifosz = priv->plat->rx_fifo_size;
2852 	int txfifosz = priv->plat->tx_fifo_size;
2853 
2854 	if (rxfifosz == 0)
2855 		rxfifosz = priv->dma_cap.rx_fifo_size;
2856 	if (txfifosz == 0)
2857 		txfifosz = priv->dma_cap.tx_fifo_size;
2858 
2859 	/* Adjust for real per queue fifo size */
2860 	rxfifosz /= rx_channels_count;
2861 	txfifosz /= tx_channels_count;
2862 
2863 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2864 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2865 }
2866 
2867 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2868 {
2869 	int ret;
2870 
2871 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2872 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2873 	if (ret && (ret != -EINVAL)) {
2874 		stmmac_global_err(priv);
2875 		return true;
2876 	}
2877 
2878 	return false;
2879 }
2880 
2881 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2882 {
2883 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2884 						 &priv->xstats, chan, dir);
2885 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2886 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2887 	struct stmmac_channel *ch = &priv->channel[chan];
2888 	struct napi_struct *rx_napi;
2889 	struct napi_struct *tx_napi;
2890 	unsigned long flags;
2891 
2892 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2893 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2894 
2895 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2896 		if (napi_schedule_prep(rx_napi)) {
2897 			spin_lock_irqsave(&ch->lock, flags);
2898 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2899 			spin_unlock_irqrestore(&ch->lock, flags);
2900 			__napi_schedule(rx_napi);
2901 		}
2902 	}
2903 
2904 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2905 		if (napi_schedule_prep(tx_napi)) {
2906 			spin_lock_irqsave(&ch->lock, flags);
2907 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2908 			spin_unlock_irqrestore(&ch->lock, flags);
2909 			__napi_schedule(tx_napi);
2910 		}
2911 	}
2912 
2913 	return status;
2914 }
2915 
2916 /**
2917  * stmmac_dma_interrupt - DMA ISR
2918  * @priv: driver private structure
2919  * Description: this is the DMA ISR. It is called by the main ISR.
2920  * It calls the dwmac dma routine and schedule poll method in case of some
2921  * work can be done.
2922  */
2923 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2924 {
2925 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2926 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2927 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2928 				tx_channel_count : rx_channel_count;
2929 	u32 chan;
2930 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2931 
2932 	/* Make sure we never check beyond our status buffer. */
2933 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2934 		channels_to_check = ARRAY_SIZE(status);
2935 
2936 	for (chan = 0; chan < channels_to_check; chan++)
2937 		status[chan] = stmmac_napi_check(priv, chan,
2938 						 DMA_DIR_RXTX);
2939 
2940 	for (chan = 0; chan < tx_channel_count; chan++) {
2941 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2942 			/* Try to bump up the dma threshold on this failure */
2943 			stmmac_bump_dma_threshold(priv, chan);
2944 		} else if (unlikely(status[chan] == tx_hard_error)) {
2945 			stmmac_tx_err(priv, chan);
2946 		}
2947 	}
2948 }
2949 
2950 /**
2951  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2952  * @priv: driver private structure
2953  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2954  */
2955 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2956 {
2957 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2958 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2959 
2960 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2961 
2962 	if (priv->dma_cap.rmon) {
2963 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2964 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2965 	} else
2966 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2967 }
2968 
2969 /**
2970  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2971  * @priv: driver private structure
2972  * Description:
2973  *  new GMAC chip generations have a new register to indicate the
2974  *  presence of the optional feature/functions.
2975  *  This can be also used to override the value passed through the
2976  *  platform and necessary for old MAC10/100 and GMAC chips.
2977  */
2978 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2979 {
2980 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2981 }
2982 
2983 /**
2984  * stmmac_check_ether_addr - check if the MAC addr is valid
2985  * @priv: driver private structure
2986  * Description:
2987  * it is to verify if the MAC address is valid, in case of failures it
2988  * generates a random MAC address
2989  */
2990 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2991 {
2992 	u8 addr[ETH_ALEN];
2993 
2994 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2995 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2996 		if (is_valid_ether_addr(addr))
2997 			eth_hw_addr_set(priv->dev, addr);
2998 		else
2999 			eth_hw_addr_random(priv->dev);
3000 		dev_info(priv->device, "device MAC address %pM\n",
3001 			 priv->dev->dev_addr);
3002 	}
3003 }
3004 
3005 /**
3006  * stmmac_init_dma_engine - DMA init.
3007  * @priv: driver private structure
3008  * Description:
3009  * It inits the DMA invoking the specific MAC/GMAC callback.
3010  * Some DMA parameters can be passed from the platform;
3011  * in case of these are not passed a default is kept for the MAC or GMAC.
3012  */
3013 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3014 {
3015 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3016 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3017 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3018 	struct stmmac_rx_queue *rx_q;
3019 	struct stmmac_tx_queue *tx_q;
3020 	u32 chan = 0;
3021 	int ret = 0;
3022 
3023 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3024 		dev_err(priv->device, "Invalid DMA configuration\n");
3025 		return -EINVAL;
3026 	}
3027 
3028 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3029 		priv->plat->dma_cfg->atds = 1;
3030 
3031 	ret = stmmac_reset(priv, priv->ioaddr);
3032 	if (ret) {
3033 		dev_err(priv->device, "Failed to reset the dma\n");
3034 		return ret;
3035 	}
3036 
3037 	/* DMA Configuration */
3038 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3039 
3040 	if (priv->plat->axi)
3041 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3042 
3043 	/* DMA CSR Channel configuration */
3044 	for (chan = 0; chan < dma_csr_ch; chan++) {
3045 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3046 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3047 	}
3048 
3049 	/* DMA RX Channel Configuration */
3050 	for (chan = 0; chan < rx_channels_count; chan++) {
3051 		rx_q = &priv->dma_conf.rx_queue[chan];
3052 
3053 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3054 				    rx_q->dma_rx_phy, chan);
3055 
3056 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3057 				     (rx_q->buf_alloc_num *
3058 				      sizeof(struct dma_desc));
3059 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3060 				       rx_q->rx_tail_addr, chan);
3061 	}
3062 
3063 	/* DMA TX Channel Configuration */
3064 	for (chan = 0; chan < tx_channels_count; chan++) {
3065 		tx_q = &priv->dma_conf.tx_queue[chan];
3066 
3067 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3068 				    tx_q->dma_tx_phy, chan);
3069 
3070 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3071 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3072 				       tx_q->tx_tail_addr, chan);
3073 	}
3074 
3075 	return ret;
3076 }
3077 
3078 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3079 {
3080 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3081 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3082 	struct stmmac_channel *ch;
3083 	struct napi_struct *napi;
3084 
3085 	if (!tx_coal_timer)
3086 		return;
3087 
3088 	ch = &priv->channel[tx_q->queue_index];
3089 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3090 
3091 	/* Arm timer only if napi is not already scheduled.
3092 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3093 	 * again in the next scheduled napi.
3094 	 */
3095 	if (unlikely(!napi_is_scheduled(napi)))
3096 		hrtimer_start(&tx_q->txtimer,
3097 			      STMMAC_COAL_TIMER(tx_coal_timer),
3098 			      HRTIMER_MODE_REL);
3099 	else
3100 		hrtimer_try_to_cancel(&tx_q->txtimer);
3101 }
3102 
3103 /**
3104  * stmmac_tx_timer - mitigation sw timer for tx.
3105  * @t: data pointer
3106  * Description:
3107  * This is the timer handler to directly invoke the stmmac_tx_clean.
3108  */
3109 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3110 {
3111 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3112 	struct stmmac_priv *priv = tx_q->priv_data;
3113 	struct stmmac_channel *ch;
3114 	struct napi_struct *napi;
3115 
3116 	ch = &priv->channel[tx_q->queue_index];
3117 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3118 
3119 	if (likely(napi_schedule_prep(napi))) {
3120 		unsigned long flags;
3121 
3122 		spin_lock_irqsave(&ch->lock, flags);
3123 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3124 		spin_unlock_irqrestore(&ch->lock, flags);
3125 		__napi_schedule(napi);
3126 	}
3127 
3128 	return HRTIMER_NORESTART;
3129 }
3130 
3131 /**
3132  * stmmac_init_coalesce - init mitigation options.
3133  * @priv: driver private structure
3134  * Description:
3135  * This inits the coalesce parameters: i.e. timer rate,
3136  * timer handler and default threshold used for enabling the
3137  * interrupt on completion bit.
3138  */
3139 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3140 {
3141 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3142 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3143 	u32 chan;
3144 
3145 	for (chan = 0; chan < tx_channel_count; chan++) {
3146 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3147 
3148 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3149 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3150 
3151 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3152 		tx_q->txtimer.function = stmmac_tx_timer;
3153 	}
3154 
3155 	for (chan = 0; chan < rx_channel_count; chan++)
3156 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3157 }
3158 
3159 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3160 {
3161 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3162 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3163 	u32 chan;
3164 
3165 	/* set TX ring length */
3166 	for (chan = 0; chan < tx_channels_count; chan++)
3167 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3168 				       (priv->dma_conf.dma_tx_size - 1), chan);
3169 
3170 	/* set RX ring length */
3171 	for (chan = 0; chan < rx_channels_count; chan++)
3172 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3173 				       (priv->dma_conf.dma_rx_size - 1), chan);
3174 }
3175 
3176 /**
3177  *  stmmac_set_tx_queue_weight - Set TX queue weight
3178  *  @priv: driver private structure
3179  *  Description: It is used for setting TX queues weight
3180  */
3181 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3182 {
3183 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3184 	u32 weight;
3185 	u32 queue;
3186 
3187 	for (queue = 0; queue < tx_queues_count; queue++) {
3188 		weight = priv->plat->tx_queues_cfg[queue].weight;
3189 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3190 	}
3191 }
3192 
3193 /**
3194  *  stmmac_configure_cbs - Configure CBS in TX queue
3195  *  @priv: driver private structure
3196  *  Description: It is used for configuring CBS in AVB TX queues
3197  */
3198 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3199 {
3200 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3201 	u32 mode_to_use;
3202 	u32 queue;
3203 
3204 	/* queue 0 is reserved for legacy traffic */
3205 	for (queue = 1; queue < tx_queues_count; queue++) {
3206 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3207 		if (mode_to_use == MTL_QUEUE_DCB)
3208 			continue;
3209 
3210 		stmmac_config_cbs(priv, priv->hw,
3211 				priv->plat->tx_queues_cfg[queue].send_slope,
3212 				priv->plat->tx_queues_cfg[queue].idle_slope,
3213 				priv->plat->tx_queues_cfg[queue].high_credit,
3214 				priv->plat->tx_queues_cfg[queue].low_credit,
3215 				queue);
3216 	}
3217 }
3218 
3219 /**
3220  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3221  *  @priv: driver private structure
3222  *  Description: It is used for mapping RX queues to RX dma channels
3223  */
3224 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3225 {
3226 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3227 	u32 queue;
3228 	u32 chan;
3229 
3230 	for (queue = 0; queue < rx_queues_count; queue++) {
3231 		chan = priv->plat->rx_queues_cfg[queue].chan;
3232 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3233 	}
3234 }
3235 
3236 /**
3237  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3238  *  @priv: driver private structure
3239  *  Description: It is used for configuring the RX Queue Priority
3240  */
3241 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3242 {
3243 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3244 	u32 queue;
3245 	u32 prio;
3246 
3247 	for (queue = 0; queue < rx_queues_count; queue++) {
3248 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3249 			continue;
3250 
3251 		prio = priv->plat->rx_queues_cfg[queue].prio;
3252 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3253 	}
3254 }
3255 
3256 /**
3257  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3258  *  @priv: driver private structure
3259  *  Description: It is used for configuring the TX Queue Priority
3260  */
3261 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3262 {
3263 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3264 	u32 queue;
3265 	u32 prio;
3266 
3267 	for (queue = 0; queue < tx_queues_count; queue++) {
3268 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3269 			continue;
3270 
3271 		prio = priv->plat->tx_queues_cfg[queue].prio;
3272 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3273 	}
3274 }
3275 
3276 /**
3277  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3278  *  @priv: driver private structure
3279  *  Description: It is used for configuring the RX queue routing
3280  */
3281 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3282 {
3283 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3284 	u32 queue;
3285 	u8 packet;
3286 
3287 	for (queue = 0; queue < rx_queues_count; queue++) {
3288 		/* no specific packet type routing specified for the queue */
3289 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3290 			continue;
3291 
3292 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3293 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3294 	}
3295 }
3296 
3297 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3298 {
3299 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3300 		priv->rss.enable = false;
3301 		return;
3302 	}
3303 
3304 	if (priv->dev->features & NETIF_F_RXHASH)
3305 		priv->rss.enable = true;
3306 	else
3307 		priv->rss.enable = false;
3308 
3309 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3310 			     priv->plat->rx_queues_to_use);
3311 }
3312 
3313 /**
3314  *  stmmac_mtl_configuration - Configure MTL
3315  *  @priv: driver private structure
3316  *  Description: It is used for configurring MTL
3317  */
3318 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3319 {
3320 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3321 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3322 
3323 	if (tx_queues_count > 1)
3324 		stmmac_set_tx_queue_weight(priv);
3325 
3326 	/* Configure MTL RX algorithms */
3327 	if (rx_queues_count > 1)
3328 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3329 				priv->plat->rx_sched_algorithm);
3330 
3331 	/* Configure MTL TX algorithms */
3332 	if (tx_queues_count > 1)
3333 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3334 				priv->plat->tx_sched_algorithm);
3335 
3336 	/* Configure CBS in AVB TX queues */
3337 	if (tx_queues_count > 1)
3338 		stmmac_configure_cbs(priv);
3339 
3340 	/* Map RX MTL to DMA channels */
3341 	stmmac_rx_queue_dma_chan_map(priv);
3342 
3343 	/* Enable MAC RX Queues */
3344 	stmmac_mac_enable_rx_queues(priv);
3345 
3346 	/* Set RX priorities */
3347 	if (rx_queues_count > 1)
3348 		stmmac_mac_config_rx_queues_prio(priv);
3349 
3350 	/* Set TX priorities */
3351 	if (tx_queues_count > 1)
3352 		stmmac_mac_config_tx_queues_prio(priv);
3353 
3354 	/* Set RX routing */
3355 	if (rx_queues_count > 1)
3356 		stmmac_mac_config_rx_queues_routing(priv);
3357 
3358 	/* Receive Side Scaling */
3359 	if (rx_queues_count > 1)
3360 		stmmac_mac_config_rss(priv);
3361 }
3362 
3363 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3364 {
3365 	if (priv->dma_cap.asp) {
3366 		netdev_info(priv->dev, "Enabling Safety Features\n");
3367 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3368 					  priv->plat->safety_feat_cfg);
3369 	} else {
3370 		netdev_info(priv->dev, "No Safety Features support found\n");
3371 	}
3372 }
3373 
3374 /**
3375  * stmmac_hw_setup - setup mac in a usable state.
3376  *  @dev : pointer to the device structure.
3377  *  @ptp_register: register PTP if set
3378  *  Description:
3379  *  this is the main function to setup the HW in a usable state because the
3380  *  dma engine is reset, the core registers are configured (e.g. AXI,
3381  *  Checksum features, timers). The DMA is ready to start receiving and
3382  *  transmitting.
3383  *  Return value:
3384  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3385  *  file on failure.
3386  */
3387 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3388 {
3389 	struct stmmac_priv *priv = netdev_priv(dev);
3390 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3391 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3392 	bool sph_en;
3393 	u32 chan;
3394 	int ret;
3395 
3396 	/* Make sure RX clock is enabled */
3397 	if (priv->hw->phylink_pcs)
3398 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3399 
3400 	/* DMA initialization and SW reset */
3401 	ret = stmmac_init_dma_engine(priv);
3402 	if (ret < 0) {
3403 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3404 			   __func__);
3405 		return ret;
3406 	}
3407 
3408 	/* Copy the MAC addr into the HW  */
3409 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3410 
3411 	/* PS and related bits will be programmed according to the speed */
3412 	if (priv->hw->pcs) {
3413 		int speed = priv->plat->mac_port_sel_speed;
3414 
3415 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3416 		    (speed == SPEED_1000)) {
3417 			priv->hw->ps = speed;
3418 		} else {
3419 			dev_warn(priv->device, "invalid port speed\n");
3420 			priv->hw->ps = 0;
3421 		}
3422 	}
3423 
3424 	/* Initialize the MAC Core */
3425 	stmmac_core_init(priv, priv->hw, dev);
3426 
3427 	/* Initialize MTL*/
3428 	stmmac_mtl_configuration(priv);
3429 
3430 	/* Initialize Safety Features */
3431 	stmmac_safety_feat_configuration(priv);
3432 
3433 	ret = stmmac_rx_ipc(priv, priv->hw);
3434 	if (!ret) {
3435 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3436 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3437 		priv->hw->rx_csum = 0;
3438 	}
3439 
3440 	/* Enable the MAC Rx/Tx */
3441 	stmmac_mac_set(priv, priv->ioaddr, true);
3442 
3443 	/* Set the HW DMA mode and the COE */
3444 	stmmac_dma_operation_mode(priv);
3445 
3446 	stmmac_mmc_setup(priv);
3447 
3448 	if (ptp_register) {
3449 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3450 		if (ret < 0)
3451 			netdev_warn(priv->dev,
3452 				    "failed to enable PTP reference clock: %pe\n",
3453 				    ERR_PTR(ret));
3454 	}
3455 
3456 	ret = stmmac_init_ptp(priv);
3457 	if (ret == -EOPNOTSUPP)
3458 		netdev_info(priv->dev, "PTP not supported by HW\n");
3459 	else if (ret)
3460 		netdev_warn(priv->dev, "PTP init failed\n");
3461 	else if (ptp_register)
3462 		stmmac_ptp_register(priv);
3463 
3464 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3465 
3466 	/* Convert the timer from msec to usec */
3467 	if (!priv->tx_lpi_timer)
3468 		priv->tx_lpi_timer = eee_timer * 1000;
3469 
3470 	if (priv->use_riwt) {
3471 		u32 queue;
3472 
3473 		for (queue = 0; queue < rx_cnt; queue++) {
3474 			if (!priv->rx_riwt[queue])
3475 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3476 
3477 			stmmac_rx_watchdog(priv, priv->ioaddr,
3478 					   priv->rx_riwt[queue], queue);
3479 		}
3480 	}
3481 
3482 	if (priv->hw->pcs)
3483 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3484 
3485 	/* set TX and RX rings length */
3486 	stmmac_set_rings_length(priv);
3487 
3488 	/* Enable TSO */
3489 	if (priv->tso) {
3490 		for (chan = 0; chan < tx_cnt; chan++) {
3491 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3492 
3493 			/* TSO and TBS cannot co-exist */
3494 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3495 				continue;
3496 
3497 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3498 		}
3499 	}
3500 
3501 	/* Enable Split Header */
3502 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3503 	for (chan = 0; chan < rx_cnt; chan++)
3504 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3505 
3506 
3507 	/* VLAN Tag Insertion */
3508 	if (priv->dma_cap.vlins)
3509 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3510 
3511 	/* TBS */
3512 	for (chan = 0; chan < tx_cnt; chan++) {
3513 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3514 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3515 
3516 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3517 	}
3518 
3519 	/* Configure real RX and TX queues */
3520 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3521 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3522 
3523 	/* Start the ball rolling... */
3524 	stmmac_start_all_dma(priv);
3525 
3526 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3527 
3528 	return 0;
3529 }
3530 
3531 static void stmmac_hw_teardown(struct net_device *dev)
3532 {
3533 	struct stmmac_priv *priv = netdev_priv(dev);
3534 
3535 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3536 }
3537 
3538 static void stmmac_free_irq(struct net_device *dev,
3539 			    enum request_irq_err irq_err, int irq_idx)
3540 {
3541 	struct stmmac_priv *priv = netdev_priv(dev);
3542 	int j;
3543 
3544 	switch (irq_err) {
3545 	case REQ_IRQ_ERR_ALL:
3546 		irq_idx = priv->plat->tx_queues_to_use;
3547 		fallthrough;
3548 	case REQ_IRQ_ERR_TX:
3549 		for (j = irq_idx - 1; j >= 0; j--) {
3550 			if (priv->tx_irq[j] > 0) {
3551 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3552 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3553 			}
3554 		}
3555 		irq_idx = priv->plat->rx_queues_to_use;
3556 		fallthrough;
3557 	case REQ_IRQ_ERR_RX:
3558 		for (j = irq_idx - 1; j >= 0; j--) {
3559 			if (priv->rx_irq[j] > 0) {
3560 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3561 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3562 			}
3563 		}
3564 
3565 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3566 			free_irq(priv->sfty_ue_irq, dev);
3567 		fallthrough;
3568 	case REQ_IRQ_ERR_SFTY_UE:
3569 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3570 			free_irq(priv->sfty_ce_irq, dev);
3571 		fallthrough;
3572 	case REQ_IRQ_ERR_SFTY_CE:
3573 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3574 			free_irq(priv->lpi_irq, dev);
3575 		fallthrough;
3576 	case REQ_IRQ_ERR_LPI:
3577 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3578 			free_irq(priv->wol_irq, dev);
3579 		fallthrough;
3580 	case REQ_IRQ_ERR_SFTY:
3581 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3582 			free_irq(priv->sfty_irq, dev);
3583 		fallthrough;
3584 	case REQ_IRQ_ERR_WOL:
3585 		free_irq(dev->irq, dev);
3586 		fallthrough;
3587 	case REQ_IRQ_ERR_MAC:
3588 	case REQ_IRQ_ERR_NO:
3589 		/* If MAC IRQ request error, no more IRQ to free */
3590 		break;
3591 	}
3592 }
3593 
3594 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3595 {
3596 	struct stmmac_priv *priv = netdev_priv(dev);
3597 	enum request_irq_err irq_err;
3598 	cpumask_t cpu_mask;
3599 	int irq_idx = 0;
3600 	char *int_name;
3601 	int ret;
3602 	int i;
3603 
3604 	/* For common interrupt */
3605 	int_name = priv->int_name_mac;
3606 	sprintf(int_name, "%s:%s", dev->name, "mac");
3607 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3608 			  0, int_name, dev);
3609 	if (unlikely(ret < 0)) {
3610 		netdev_err(priv->dev,
3611 			   "%s: alloc mac MSI %d (error: %d)\n",
3612 			   __func__, dev->irq, ret);
3613 		irq_err = REQ_IRQ_ERR_MAC;
3614 		goto irq_error;
3615 	}
3616 
3617 	/* Request the Wake IRQ in case of another line
3618 	 * is used for WoL
3619 	 */
3620 	priv->wol_irq_disabled = true;
3621 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3622 		int_name = priv->int_name_wol;
3623 		sprintf(int_name, "%s:%s", dev->name, "wol");
3624 		ret = request_irq(priv->wol_irq,
3625 				  stmmac_mac_interrupt,
3626 				  0, int_name, dev);
3627 		if (unlikely(ret < 0)) {
3628 			netdev_err(priv->dev,
3629 				   "%s: alloc wol MSI %d (error: %d)\n",
3630 				   __func__, priv->wol_irq, ret);
3631 			irq_err = REQ_IRQ_ERR_WOL;
3632 			goto irq_error;
3633 		}
3634 	}
3635 
3636 	/* Request the LPI IRQ in case of another line
3637 	 * is used for LPI
3638 	 */
3639 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3640 		int_name = priv->int_name_lpi;
3641 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3642 		ret = request_irq(priv->lpi_irq,
3643 				  stmmac_mac_interrupt,
3644 				  0, int_name, dev);
3645 		if (unlikely(ret < 0)) {
3646 			netdev_err(priv->dev,
3647 				   "%s: alloc lpi MSI %d (error: %d)\n",
3648 				   __func__, priv->lpi_irq, ret);
3649 			irq_err = REQ_IRQ_ERR_LPI;
3650 			goto irq_error;
3651 		}
3652 	}
3653 
3654 	/* Request the common Safety Feature Correctible/Uncorrectible
3655 	 * Error line in case of another line is used
3656 	 */
3657 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3658 		int_name = priv->int_name_sfty;
3659 		sprintf(int_name, "%s:%s", dev->name, "safety");
3660 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3661 				  0, int_name, dev);
3662 		if (unlikely(ret < 0)) {
3663 			netdev_err(priv->dev,
3664 				   "%s: alloc sfty MSI %d (error: %d)\n",
3665 				   __func__, priv->sfty_irq, ret);
3666 			irq_err = REQ_IRQ_ERR_SFTY;
3667 			goto irq_error;
3668 		}
3669 	}
3670 
3671 	/* Request the Safety Feature Correctible Error line in
3672 	 * case of another line is used
3673 	 */
3674 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3675 		int_name = priv->int_name_sfty_ce;
3676 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3677 		ret = request_irq(priv->sfty_ce_irq,
3678 				  stmmac_safety_interrupt,
3679 				  0, int_name, dev);
3680 		if (unlikely(ret < 0)) {
3681 			netdev_err(priv->dev,
3682 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3683 				   __func__, priv->sfty_ce_irq, ret);
3684 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3685 			goto irq_error;
3686 		}
3687 	}
3688 
3689 	/* Request the Safety Feature Uncorrectible Error line in
3690 	 * case of another line is used
3691 	 */
3692 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3693 		int_name = priv->int_name_sfty_ue;
3694 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3695 		ret = request_irq(priv->sfty_ue_irq,
3696 				  stmmac_safety_interrupt,
3697 				  0, int_name, dev);
3698 		if (unlikely(ret < 0)) {
3699 			netdev_err(priv->dev,
3700 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3701 				   __func__, priv->sfty_ue_irq, ret);
3702 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3703 			goto irq_error;
3704 		}
3705 	}
3706 
3707 	/* Request Rx MSI irq */
3708 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3709 		if (i >= MTL_MAX_RX_QUEUES)
3710 			break;
3711 		if (priv->rx_irq[i] == 0)
3712 			continue;
3713 
3714 		int_name = priv->int_name_rx_irq[i];
3715 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3716 		ret = request_irq(priv->rx_irq[i],
3717 				  stmmac_msi_intr_rx,
3718 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3719 		if (unlikely(ret < 0)) {
3720 			netdev_err(priv->dev,
3721 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3722 				   __func__, i, priv->rx_irq[i], ret);
3723 			irq_err = REQ_IRQ_ERR_RX;
3724 			irq_idx = i;
3725 			goto irq_error;
3726 		}
3727 		cpumask_clear(&cpu_mask);
3728 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3729 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3730 	}
3731 
3732 	/* Request Tx MSI irq */
3733 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3734 		if (i >= MTL_MAX_TX_QUEUES)
3735 			break;
3736 		if (priv->tx_irq[i] == 0)
3737 			continue;
3738 
3739 		int_name = priv->int_name_tx_irq[i];
3740 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3741 		ret = request_irq(priv->tx_irq[i],
3742 				  stmmac_msi_intr_tx,
3743 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3744 		if (unlikely(ret < 0)) {
3745 			netdev_err(priv->dev,
3746 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3747 				   __func__, i, priv->tx_irq[i], ret);
3748 			irq_err = REQ_IRQ_ERR_TX;
3749 			irq_idx = i;
3750 			goto irq_error;
3751 		}
3752 		cpumask_clear(&cpu_mask);
3753 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3754 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3755 	}
3756 
3757 	return 0;
3758 
3759 irq_error:
3760 	stmmac_free_irq(dev, irq_err, irq_idx);
3761 	return ret;
3762 }
3763 
3764 static int stmmac_request_irq_single(struct net_device *dev)
3765 {
3766 	struct stmmac_priv *priv = netdev_priv(dev);
3767 	enum request_irq_err irq_err;
3768 	int ret;
3769 
3770 	ret = request_irq(dev->irq, stmmac_interrupt,
3771 			  IRQF_SHARED, dev->name, dev);
3772 	if (unlikely(ret < 0)) {
3773 		netdev_err(priv->dev,
3774 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3775 			   __func__, dev->irq, ret);
3776 		irq_err = REQ_IRQ_ERR_MAC;
3777 		goto irq_error;
3778 	}
3779 
3780 	/* Request the Wake IRQ in case of another line
3781 	 * is used for WoL
3782 	 */
3783 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3784 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3785 				  IRQF_SHARED, dev->name, dev);
3786 		if (unlikely(ret < 0)) {
3787 			netdev_err(priv->dev,
3788 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3789 				   __func__, priv->wol_irq, ret);
3790 			irq_err = REQ_IRQ_ERR_WOL;
3791 			goto irq_error;
3792 		}
3793 	}
3794 
3795 	/* Request the IRQ lines */
3796 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3797 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3798 				  IRQF_SHARED, dev->name, dev);
3799 		if (unlikely(ret < 0)) {
3800 			netdev_err(priv->dev,
3801 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3802 				   __func__, priv->lpi_irq, ret);
3803 			irq_err = REQ_IRQ_ERR_LPI;
3804 			goto irq_error;
3805 		}
3806 	}
3807 
3808 	/* Request the common Safety Feature Correctible/Uncorrectible
3809 	 * Error line in case of another line is used
3810 	 */
3811 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3812 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3813 				  IRQF_SHARED, dev->name, dev);
3814 		if (unlikely(ret < 0)) {
3815 			netdev_err(priv->dev,
3816 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3817 				   __func__, priv->sfty_irq, ret);
3818 			irq_err = REQ_IRQ_ERR_SFTY;
3819 			goto irq_error;
3820 		}
3821 	}
3822 
3823 	return 0;
3824 
3825 irq_error:
3826 	stmmac_free_irq(dev, irq_err, 0);
3827 	return ret;
3828 }
3829 
3830 static int stmmac_request_irq(struct net_device *dev)
3831 {
3832 	struct stmmac_priv *priv = netdev_priv(dev);
3833 	int ret;
3834 
3835 	/* Request the IRQ lines */
3836 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3837 		ret = stmmac_request_irq_multi_msi(dev);
3838 	else
3839 		ret = stmmac_request_irq_single(dev);
3840 
3841 	return ret;
3842 }
3843 
3844 /**
3845  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3846  *  @priv: driver private structure
3847  *  @mtu: MTU to setup the dma queue and buf with
3848  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3849  *  Allocate the Tx/Rx DMA queue and init them.
3850  *  Return value:
3851  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3852  */
3853 static struct stmmac_dma_conf *
3854 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3855 {
3856 	struct stmmac_dma_conf *dma_conf;
3857 	int chan, bfsize, ret;
3858 
3859 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3860 	if (!dma_conf) {
3861 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3862 			   __func__);
3863 		return ERR_PTR(-ENOMEM);
3864 	}
3865 
3866 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3867 	if (bfsize < 0)
3868 		bfsize = 0;
3869 
3870 	if (bfsize < BUF_SIZE_16KiB)
3871 		bfsize = stmmac_set_bfsize(mtu, 0);
3872 
3873 	dma_conf->dma_buf_sz = bfsize;
3874 	/* Chose the tx/rx size from the already defined one in the
3875 	 * priv struct. (if defined)
3876 	 */
3877 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3878 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3879 
3880 	if (!dma_conf->dma_tx_size)
3881 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3882 	if (!dma_conf->dma_rx_size)
3883 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3884 
3885 	/* Earlier check for TBS */
3886 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3887 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3888 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3889 
3890 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3891 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3892 	}
3893 
3894 	ret = alloc_dma_desc_resources(priv, dma_conf);
3895 	if (ret < 0) {
3896 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3897 			   __func__);
3898 		goto alloc_error;
3899 	}
3900 
3901 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3902 	if (ret < 0) {
3903 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3904 			   __func__);
3905 		goto init_error;
3906 	}
3907 
3908 	return dma_conf;
3909 
3910 init_error:
3911 	free_dma_desc_resources(priv, dma_conf);
3912 alloc_error:
3913 	kfree(dma_conf);
3914 	return ERR_PTR(ret);
3915 }
3916 
3917 /**
3918  *  __stmmac_open - open entry point of the driver
3919  *  @dev : pointer to the device structure.
3920  *  @dma_conf :  structure to take the dma data
3921  *  Description:
3922  *  This function is the open entry point of the driver.
3923  *  Return value:
3924  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3925  *  file on failure.
3926  */
3927 static int __stmmac_open(struct net_device *dev,
3928 			 struct stmmac_dma_conf *dma_conf)
3929 {
3930 	struct stmmac_priv *priv = netdev_priv(dev);
3931 	int mode = priv->plat->phy_interface;
3932 	u32 chan;
3933 	int ret;
3934 
3935 	ret = pm_runtime_resume_and_get(priv->device);
3936 	if (ret < 0)
3937 		return ret;
3938 
3939 	if ((!priv->hw->xpcs ||
3940 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3941 		ret = stmmac_init_phy(dev);
3942 		if (ret) {
3943 			netdev_err(priv->dev,
3944 				   "%s: Cannot attach to PHY (error: %d)\n",
3945 				   __func__, ret);
3946 			goto init_phy_error;
3947 		}
3948 	}
3949 
3950 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3951 
3952 	buf_sz = dma_conf->dma_buf_sz;
3953 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3954 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3955 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3956 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3957 
3958 	stmmac_reset_queues_param(priv);
3959 
3960 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3961 	    priv->plat->serdes_powerup) {
3962 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3963 		if (ret < 0) {
3964 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3965 				   __func__);
3966 			goto init_error;
3967 		}
3968 	}
3969 
3970 	ret = stmmac_hw_setup(dev, true);
3971 	if (ret < 0) {
3972 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3973 		goto init_error;
3974 	}
3975 
3976 	stmmac_init_coalesce(priv);
3977 
3978 	phylink_start(priv->phylink);
3979 	/* We may have called phylink_speed_down before */
3980 	phylink_speed_up(priv->phylink);
3981 
3982 	ret = stmmac_request_irq(dev);
3983 	if (ret)
3984 		goto irq_error;
3985 
3986 	stmmac_enable_all_queues(priv);
3987 	netif_tx_start_all_queues(priv->dev);
3988 	stmmac_enable_all_dma_irq(priv);
3989 
3990 	return 0;
3991 
3992 irq_error:
3993 	phylink_stop(priv->phylink);
3994 
3995 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3996 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3997 
3998 	stmmac_hw_teardown(dev);
3999 init_error:
4000 	phylink_disconnect_phy(priv->phylink);
4001 init_phy_error:
4002 	pm_runtime_put(priv->device);
4003 	return ret;
4004 }
4005 
4006 static int stmmac_open(struct net_device *dev)
4007 {
4008 	struct stmmac_priv *priv = netdev_priv(dev);
4009 	struct stmmac_dma_conf *dma_conf;
4010 	int ret;
4011 
4012 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4013 	if (IS_ERR(dma_conf))
4014 		return PTR_ERR(dma_conf);
4015 
4016 	ret = __stmmac_open(dev, dma_conf);
4017 	if (ret)
4018 		free_dma_desc_resources(priv, dma_conf);
4019 
4020 	kfree(dma_conf);
4021 	return ret;
4022 }
4023 
4024 /**
4025  *  stmmac_release - close entry point of the driver
4026  *  @dev : device pointer.
4027  *  Description:
4028  *  This is the stop entry point of the driver.
4029  */
4030 static int stmmac_release(struct net_device *dev)
4031 {
4032 	struct stmmac_priv *priv = netdev_priv(dev);
4033 	u32 chan;
4034 
4035 	if (device_may_wakeup(priv->device))
4036 		phylink_speed_down(priv->phylink, false);
4037 	/* Stop and disconnect the PHY */
4038 	phylink_stop(priv->phylink);
4039 	phylink_disconnect_phy(priv->phylink);
4040 
4041 	stmmac_disable_all_queues(priv);
4042 
4043 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4044 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4045 
4046 	netif_tx_disable(dev);
4047 
4048 	/* Free the IRQ lines */
4049 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4050 
4051 	if (priv->eee_enabled) {
4052 		priv->tx_path_in_lpi_mode = false;
4053 		del_timer_sync(&priv->eee_ctrl_timer);
4054 	}
4055 
4056 	/* Stop TX/RX DMA and clear the descriptors */
4057 	stmmac_stop_all_dma(priv);
4058 
4059 	/* Release and free the Rx/Tx resources */
4060 	free_dma_desc_resources(priv, &priv->dma_conf);
4061 
4062 	/* Disable the MAC Rx/Tx */
4063 	stmmac_mac_set(priv, priv->ioaddr, false);
4064 
4065 	/* Powerdown Serdes if there is */
4066 	if (priv->plat->serdes_powerdown)
4067 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4068 
4069 	stmmac_release_ptp(priv);
4070 
4071 	if (priv->dma_cap.fpesel)
4072 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4073 
4074 	pm_runtime_put(priv->device);
4075 
4076 	return 0;
4077 }
4078 
4079 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4080 			       struct stmmac_tx_queue *tx_q)
4081 {
4082 	u16 tag = 0x0, inner_tag = 0x0;
4083 	u32 inner_type = 0x0;
4084 	struct dma_desc *p;
4085 
4086 	if (!priv->dma_cap.vlins)
4087 		return false;
4088 	if (!skb_vlan_tag_present(skb))
4089 		return false;
4090 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4091 		inner_tag = skb_vlan_tag_get(skb);
4092 		inner_type = STMMAC_VLAN_INSERT;
4093 	}
4094 
4095 	tag = skb_vlan_tag_get(skb);
4096 
4097 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4098 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4099 	else
4100 		p = &tx_q->dma_tx[tx_q->cur_tx];
4101 
4102 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4103 		return false;
4104 
4105 	stmmac_set_tx_owner(priv, p);
4106 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4107 	return true;
4108 }
4109 
4110 /**
4111  *  stmmac_tso_allocator - close entry point of the driver
4112  *  @priv: driver private structure
4113  *  @des: buffer start address
4114  *  @total_len: total length to fill in descriptors
4115  *  @last_segment: condition for the last descriptor
4116  *  @queue: TX queue index
4117  *  Description:
4118  *  This function fills descriptor and request new descriptors according to
4119  *  buffer length to fill
4120  */
4121 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4122 				 int total_len, bool last_segment, u32 queue)
4123 {
4124 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4125 	struct dma_desc *desc;
4126 	u32 buff_size;
4127 	int tmp_len;
4128 
4129 	tmp_len = total_len;
4130 
4131 	while (tmp_len > 0) {
4132 		dma_addr_t curr_addr;
4133 
4134 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4135 						priv->dma_conf.dma_tx_size);
4136 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4137 
4138 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4139 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4140 		else
4141 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4142 
4143 		curr_addr = des + (total_len - tmp_len);
4144 		if (priv->dma_cap.addr64 <= 32)
4145 			desc->des0 = cpu_to_le32(curr_addr);
4146 		else
4147 			stmmac_set_desc_addr(priv, desc, curr_addr);
4148 
4149 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4150 			    TSO_MAX_BUFF_SIZE : tmp_len;
4151 
4152 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4153 				0, 1,
4154 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4155 				0, 0);
4156 
4157 		tmp_len -= TSO_MAX_BUFF_SIZE;
4158 	}
4159 }
4160 
4161 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4162 {
4163 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4164 	int desc_size;
4165 
4166 	if (likely(priv->extend_desc))
4167 		desc_size = sizeof(struct dma_extended_desc);
4168 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4169 		desc_size = sizeof(struct dma_edesc);
4170 	else
4171 		desc_size = sizeof(struct dma_desc);
4172 
4173 	/* The own bit must be the latest setting done when prepare the
4174 	 * descriptor and then barrier is needed to make sure that
4175 	 * all is coherent before granting the DMA engine.
4176 	 */
4177 	wmb();
4178 
4179 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4180 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4181 }
4182 
4183 /**
4184  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4185  *  @skb : the socket buffer
4186  *  @dev : device pointer
4187  *  Description: this is the transmit function that is called on TSO frames
4188  *  (support available on GMAC4 and newer chips).
4189  *  Diagram below show the ring programming in case of TSO frames:
4190  *
4191  *  First Descriptor
4192  *   --------
4193  *   | DES0 |---> buffer1 = L2/L3/L4 header
4194  *   | DES1 |---> TCP Payload (can continue on next descr...)
4195  *   | DES2 |---> buffer 1 and 2 len
4196  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4197  *   --------
4198  *	|
4199  *     ...
4200  *	|
4201  *   --------
4202  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4203  *   | DES1 | --|
4204  *   | DES2 | --> buffer 1 and 2 len
4205  *   | DES3 |
4206  *   --------
4207  *
4208  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4209  */
4210 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4211 {
4212 	struct dma_desc *desc, *first, *mss_desc = NULL;
4213 	struct stmmac_priv *priv = netdev_priv(dev);
4214 	int tmp_pay_len = 0, first_tx, nfrags;
4215 	unsigned int first_entry, tx_packets;
4216 	struct stmmac_txq_stats *txq_stats;
4217 	struct stmmac_tx_queue *tx_q;
4218 	u32 pay_len, mss, queue;
4219 	u8 proto_hdr_len, hdr;
4220 	dma_addr_t des;
4221 	bool set_ic;
4222 	int i;
4223 
4224 	/* Always insert VLAN tag to SKB payload for TSO frames.
4225 	 *
4226 	 * Never insert VLAN tag by HW, since segments splited by
4227 	 * TSO engine will be un-tagged by mistake.
4228 	 */
4229 	if (skb_vlan_tag_present(skb)) {
4230 		skb = __vlan_hwaccel_push_inside(skb);
4231 		if (unlikely(!skb)) {
4232 			priv->xstats.tx_dropped++;
4233 			return NETDEV_TX_OK;
4234 		}
4235 	}
4236 
4237 	nfrags = skb_shinfo(skb)->nr_frags;
4238 	queue = skb_get_queue_mapping(skb);
4239 
4240 	tx_q = &priv->dma_conf.tx_queue[queue];
4241 	txq_stats = &priv->xstats.txq_stats[queue];
4242 	first_tx = tx_q->cur_tx;
4243 
4244 	/* Compute header lengths */
4245 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4246 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4247 		hdr = sizeof(struct udphdr);
4248 	} else {
4249 		proto_hdr_len = skb_tcp_all_headers(skb);
4250 		hdr = tcp_hdrlen(skb);
4251 	}
4252 
4253 	/* Desc availability based on threshold should be enough safe */
4254 	if (unlikely(stmmac_tx_avail(priv, queue) <
4255 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4256 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4257 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4258 								queue));
4259 			/* This is a hard error, log it. */
4260 			netdev_err(priv->dev,
4261 				   "%s: Tx Ring full when queue awake\n",
4262 				   __func__);
4263 		}
4264 		return NETDEV_TX_BUSY;
4265 	}
4266 
4267 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4268 
4269 	mss = skb_shinfo(skb)->gso_size;
4270 
4271 	/* set new MSS value if needed */
4272 	if (mss != tx_q->mss) {
4273 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4274 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4275 		else
4276 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4277 
4278 		stmmac_set_mss(priv, mss_desc, mss);
4279 		tx_q->mss = mss;
4280 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4281 						priv->dma_conf.dma_tx_size);
4282 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4283 	}
4284 
4285 	if (netif_msg_tx_queued(priv)) {
4286 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4287 			__func__, hdr, proto_hdr_len, pay_len, mss);
4288 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4289 			skb->data_len);
4290 	}
4291 
4292 	first_entry = tx_q->cur_tx;
4293 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4294 
4295 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4296 		desc = &tx_q->dma_entx[first_entry].basic;
4297 	else
4298 		desc = &tx_q->dma_tx[first_entry];
4299 	first = desc;
4300 
4301 	/* first descriptor: fill Headers on Buf1 */
4302 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4303 			     DMA_TO_DEVICE);
4304 	if (dma_mapping_error(priv->device, des))
4305 		goto dma_map_err;
4306 
4307 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4308 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4309 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4310 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4311 
4312 	if (priv->dma_cap.addr64 <= 32) {
4313 		first->des0 = cpu_to_le32(des);
4314 
4315 		/* Fill start of payload in buff2 of first descriptor */
4316 		if (pay_len)
4317 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4318 
4319 		/* If needed take extra descriptors to fill the remaining payload */
4320 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4321 	} else {
4322 		stmmac_set_desc_addr(priv, first, des);
4323 		tmp_pay_len = pay_len;
4324 		des += proto_hdr_len;
4325 		pay_len = 0;
4326 	}
4327 
4328 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4329 
4330 	/* Prepare fragments */
4331 	for (i = 0; i < nfrags; i++) {
4332 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4333 
4334 		des = skb_frag_dma_map(priv->device, frag, 0,
4335 				       skb_frag_size(frag),
4336 				       DMA_TO_DEVICE);
4337 		if (dma_mapping_error(priv->device, des))
4338 			goto dma_map_err;
4339 
4340 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4341 				     (i == nfrags - 1), queue);
4342 
4343 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4344 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4345 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4346 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4347 	}
4348 
4349 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4350 
4351 	/* Only the last descriptor gets to point to the skb. */
4352 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4353 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4354 
4355 	/* Manage tx mitigation */
4356 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4357 	tx_q->tx_count_frames += tx_packets;
4358 
4359 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4360 		set_ic = true;
4361 	else if (!priv->tx_coal_frames[queue])
4362 		set_ic = false;
4363 	else if (tx_packets > priv->tx_coal_frames[queue])
4364 		set_ic = true;
4365 	else if ((tx_q->tx_count_frames %
4366 		  priv->tx_coal_frames[queue]) < tx_packets)
4367 		set_ic = true;
4368 	else
4369 		set_ic = false;
4370 
4371 	if (set_ic) {
4372 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4373 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4374 		else
4375 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4376 
4377 		tx_q->tx_count_frames = 0;
4378 		stmmac_set_tx_ic(priv, desc);
4379 	}
4380 
4381 	/* We've used all descriptors we need for this skb, however,
4382 	 * advance cur_tx so that it references a fresh descriptor.
4383 	 * ndo_start_xmit will fill this descriptor the next time it's
4384 	 * called and stmmac_tx_clean may clean up to this descriptor.
4385 	 */
4386 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4387 
4388 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4389 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4390 			  __func__);
4391 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4392 	}
4393 
4394 	u64_stats_update_begin(&txq_stats->q_syncp);
4395 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4396 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4397 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4398 	if (set_ic)
4399 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4400 	u64_stats_update_end(&txq_stats->q_syncp);
4401 
4402 	if (priv->sarc_type)
4403 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4404 
4405 	skb_tx_timestamp(skb);
4406 
4407 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4408 		     priv->hwts_tx_en)) {
4409 		/* declare that device is doing timestamping */
4410 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4411 		stmmac_enable_tx_timestamp(priv, first);
4412 	}
4413 
4414 	/* Complete the first descriptor before granting the DMA */
4415 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4416 			proto_hdr_len,
4417 			pay_len,
4418 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4419 			hdr / 4, (skb->len - proto_hdr_len));
4420 
4421 	/* If context desc is used to change MSS */
4422 	if (mss_desc) {
4423 		/* Make sure that first descriptor has been completely
4424 		 * written, including its own bit. This is because MSS is
4425 		 * actually before first descriptor, so we need to make
4426 		 * sure that MSS's own bit is the last thing written.
4427 		 */
4428 		dma_wmb();
4429 		stmmac_set_tx_owner(priv, mss_desc);
4430 	}
4431 
4432 	if (netif_msg_pktdata(priv)) {
4433 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4434 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4435 			tx_q->cur_tx, first, nfrags);
4436 		pr_info(">>> frame to be transmitted: ");
4437 		print_pkt(skb->data, skb_headlen(skb));
4438 	}
4439 
4440 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4441 
4442 	stmmac_flush_tx_descriptors(priv, queue);
4443 	stmmac_tx_timer_arm(priv, queue);
4444 
4445 	return NETDEV_TX_OK;
4446 
4447 dma_map_err:
4448 	dev_err(priv->device, "Tx dma map failed\n");
4449 	dev_kfree_skb(skb);
4450 	priv->xstats.tx_dropped++;
4451 	return NETDEV_TX_OK;
4452 }
4453 
4454 /**
4455  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4456  * @skb: socket buffer to check
4457  *
4458  * Check if a packet has an ethertype that will trigger the IP header checks
4459  * and IP/TCP checksum engine of the stmmac core.
4460  *
4461  * Return: true if the ethertype can trigger the checksum engine, false
4462  * otherwise
4463  */
4464 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4465 {
4466 	int depth = 0;
4467 	__be16 proto;
4468 
4469 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4470 				    &depth);
4471 
4472 	return (depth <= ETH_HLEN) &&
4473 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4474 }
4475 
4476 /**
4477  *  stmmac_xmit - Tx entry point of the driver
4478  *  @skb : the socket buffer
4479  *  @dev : device pointer
4480  *  Description : this is the tx entry point of the driver.
4481  *  It programs the chain or the ring and supports oversized frames
4482  *  and SG feature.
4483  */
4484 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4485 {
4486 	unsigned int first_entry, tx_packets, enh_desc;
4487 	struct stmmac_priv *priv = netdev_priv(dev);
4488 	unsigned int nopaged_len = skb_headlen(skb);
4489 	int i, csum_insertion = 0, is_jumbo = 0;
4490 	u32 queue = skb_get_queue_mapping(skb);
4491 	int nfrags = skb_shinfo(skb)->nr_frags;
4492 	int gso = skb_shinfo(skb)->gso_type;
4493 	struct stmmac_txq_stats *txq_stats;
4494 	struct dma_edesc *tbs_desc = NULL;
4495 	struct dma_desc *desc, *first;
4496 	struct stmmac_tx_queue *tx_q;
4497 	bool has_vlan, set_ic;
4498 	int entry, first_tx;
4499 	dma_addr_t des;
4500 
4501 	tx_q = &priv->dma_conf.tx_queue[queue];
4502 	txq_stats = &priv->xstats.txq_stats[queue];
4503 	first_tx = tx_q->cur_tx;
4504 
4505 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4506 		stmmac_disable_eee_mode(priv);
4507 
4508 	/* Manage oversized TCP frames for GMAC4 device */
4509 	if (skb_is_gso(skb) && priv->tso) {
4510 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4511 			return stmmac_tso_xmit(skb, dev);
4512 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4513 			return stmmac_tso_xmit(skb, dev);
4514 	}
4515 
4516 	if (priv->est && priv->est->enable &&
4517 	    priv->est->max_sdu[queue] &&
4518 	    skb->len > priv->est->max_sdu[queue]){
4519 		priv->xstats.max_sdu_txq_drop[queue]++;
4520 		goto max_sdu_err;
4521 	}
4522 
4523 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4524 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4525 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4526 								queue));
4527 			/* This is a hard error, log it. */
4528 			netdev_err(priv->dev,
4529 				   "%s: Tx Ring full when queue awake\n",
4530 				   __func__);
4531 		}
4532 		return NETDEV_TX_BUSY;
4533 	}
4534 
4535 	/* Check if VLAN can be inserted by HW */
4536 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4537 
4538 	entry = tx_q->cur_tx;
4539 	first_entry = entry;
4540 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4541 
4542 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4543 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4544 	 * queues. In that case, checksum offloading for those queues that don't
4545 	 * support tx coe needs to fallback to software checksum calculation.
4546 	 *
4547 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4548 	 * also have to be checksummed in software.
4549 	 */
4550 	if (csum_insertion &&
4551 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4552 	     !stmmac_has_ip_ethertype(skb))) {
4553 		if (unlikely(skb_checksum_help(skb)))
4554 			goto dma_map_err;
4555 		csum_insertion = !csum_insertion;
4556 	}
4557 
4558 	if (likely(priv->extend_desc))
4559 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4560 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4561 		desc = &tx_q->dma_entx[entry].basic;
4562 	else
4563 		desc = tx_q->dma_tx + entry;
4564 
4565 	first = desc;
4566 
4567 	if (has_vlan)
4568 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4569 
4570 	enh_desc = priv->plat->enh_desc;
4571 	/* To program the descriptors according to the size of the frame */
4572 	if (enh_desc)
4573 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4574 
4575 	if (unlikely(is_jumbo)) {
4576 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4577 		if (unlikely(entry < 0) && (entry != -EINVAL))
4578 			goto dma_map_err;
4579 	}
4580 
4581 	for (i = 0; i < nfrags; i++) {
4582 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4583 		int len = skb_frag_size(frag);
4584 		bool last_segment = (i == (nfrags - 1));
4585 
4586 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4587 		WARN_ON(tx_q->tx_skbuff[entry]);
4588 
4589 		if (likely(priv->extend_desc))
4590 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4591 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4592 			desc = &tx_q->dma_entx[entry].basic;
4593 		else
4594 			desc = tx_q->dma_tx + entry;
4595 
4596 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4597 				       DMA_TO_DEVICE);
4598 		if (dma_mapping_error(priv->device, des))
4599 			goto dma_map_err; /* should reuse desc w/o issues */
4600 
4601 		tx_q->tx_skbuff_dma[entry].buf = des;
4602 
4603 		stmmac_set_desc_addr(priv, desc, des);
4604 
4605 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4606 		tx_q->tx_skbuff_dma[entry].len = len;
4607 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4608 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4609 
4610 		/* Prepare the descriptor and set the own bit too */
4611 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4612 				priv->mode, 1, last_segment, skb->len);
4613 	}
4614 
4615 	/* Only the last descriptor gets to point to the skb. */
4616 	tx_q->tx_skbuff[entry] = skb;
4617 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4618 
4619 	/* According to the coalesce parameter the IC bit for the latest
4620 	 * segment is reset and the timer re-started to clean the tx status.
4621 	 * This approach takes care about the fragments: desc is the first
4622 	 * element in case of no SG.
4623 	 */
4624 	tx_packets = (entry + 1) - first_tx;
4625 	tx_q->tx_count_frames += tx_packets;
4626 
4627 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4628 		set_ic = true;
4629 	else if (!priv->tx_coal_frames[queue])
4630 		set_ic = false;
4631 	else if (tx_packets > priv->tx_coal_frames[queue])
4632 		set_ic = true;
4633 	else if ((tx_q->tx_count_frames %
4634 		  priv->tx_coal_frames[queue]) < tx_packets)
4635 		set_ic = true;
4636 	else
4637 		set_ic = false;
4638 
4639 	if (set_ic) {
4640 		if (likely(priv->extend_desc))
4641 			desc = &tx_q->dma_etx[entry].basic;
4642 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4643 			desc = &tx_q->dma_entx[entry].basic;
4644 		else
4645 			desc = &tx_q->dma_tx[entry];
4646 
4647 		tx_q->tx_count_frames = 0;
4648 		stmmac_set_tx_ic(priv, desc);
4649 	}
4650 
4651 	/* We've used all descriptors we need for this skb, however,
4652 	 * advance cur_tx so that it references a fresh descriptor.
4653 	 * ndo_start_xmit will fill this descriptor the next time it's
4654 	 * called and stmmac_tx_clean may clean up to this descriptor.
4655 	 */
4656 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4657 	tx_q->cur_tx = entry;
4658 
4659 	if (netif_msg_pktdata(priv)) {
4660 		netdev_dbg(priv->dev,
4661 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4662 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4663 			   entry, first, nfrags);
4664 
4665 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4666 		print_pkt(skb->data, skb->len);
4667 	}
4668 
4669 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4670 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4671 			  __func__);
4672 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4673 	}
4674 
4675 	u64_stats_update_begin(&txq_stats->q_syncp);
4676 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4677 	if (set_ic)
4678 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4679 	u64_stats_update_end(&txq_stats->q_syncp);
4680 
4681 	if (priv->sarc_type)
4682 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4683 
4684 	skb_tx_timestamp(skb);
4685 
4686 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4687 	 * problems because all the descriptors are actually ready to be
4688 	 * passed to the DMA engine.
4689 	 */
4690 	if (likely(!is_jumbo)) {
4691 		bool last_segment = (nfrags == 0);
4692 
4693 		des = dma_map_single(priv->device, skb->data,
4694 				     nopaged_len, DMA_TO_DEVICE);
4695 		if (dma_mapping_error(priv->device, des))
4696 			goto dma_map_err;
4697 
4698 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4699 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4700 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4701 
4702 		stmmac_set_desc_addr(priv, first, des);
4703 
4704 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4705 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4706 
4707 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4708 			     priv->hwts_tx_en)) {
4709 			/* declare that device is doing timestamping */
4710 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4711 			stmmac_enable_tx_timestamp(priv, first);
4712 		}
4713 
4714 		/* Prepare the first descriptor setting the OWN bit too */
4715 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4716 				csum_insertion, priv->mode, 0, last_segment,
4717 				skb->len);
4718 	}
4719 
4720 	if (tx_q->tbs & STMMAC_TBS_EN) {
4721 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4722 
4723 		tbs_desc = &tx_q->dma_entx[first_entry];
4724 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4725 	}
4726 
4727 	stmmac_set_tx_owner(priv, first);
4728 
4729 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4730 
4731 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4732 
4733 	stmmac_flush_tx_descriptors(priv, queue);
4734 	stmmac_tx_timer_arm(priv, queue);
4735 
4736 	return NETDEV_TX_OK;
4737 
4738 dma_map_err:
4739 	netdev_err(priv->dev, "Tx DMA map failed\n");
4740 max_sdu_err:
4741 	dev_kfree_skb(skb);
4742 	priv->xstats.tx_dropped++;
4743 	return NETDEV_TX_OK;
4744 }
4745 
4746 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4747 {
4748 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4749 	__be16 vlan_proto = veth->h_vlan_proto;
4750 	u16 vlanid;
4751 
4752 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4753 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4754 	    (vlan_proto == htons(ETH_P_8021AD) &&
4755 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4756 		/* pop the vlan tag */
4757 		vlanid = ntohs(veth->h_vlan_TCI);
4758 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4759 		skb_pull(skb, VLAN_HLEN);
4760 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4761 	}
4762 }
4763 
4764 /**
4765  * stmmac_rx_refill - refill used skb preallocated buffers
4766  * @priv: driver private structure
4767  * @queue: RX queue index
4768  * Description : this is to reallocate the skb for the reception process
4769  * that is based on zero-copy.
4770  */
4771 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4772 {
4773 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4774 	int dirty = stmmac_rx_dirty(priv, queue);
4775 	unsigned int entry = rx_q->dirty_rx;
4776 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4777 
4778 	if (priv->dma_cap.host_dma_width <= 32)
4779 		gfp |= GFP_DMA32;
4780 
4781 	while (dirty-- > 0) {
4782 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4783 		struct dma_desc *p;
4784 		bool use_rx_wd;
4785 
4786 		if (priv->extend_desc)
4787 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4788 		else
4789 			p = rx_q->dma_rx + entry;
4790 
4791 		if (!buf->page) {
4792 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4793 			if (!buf->page)
4794 				break;
4795 		}
4796 
4797 		if (priv->sph && !buf->sec_page) {
4798 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4799 			if (!buf->sec_page)
4800 				break;
4801 
4802 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4803 		}
4804 
4805 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4806 
4807 		stmmac_set_desc_addr(priv, p, buf->addr);
4808 		if (priv->sph)
4809 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4810 		else
4811 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4812 		stmmac_refill_desc3(priv, rx_q, p);
4813 
4814 		rx_q->rx_count_frames++;
4815 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4816 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4817 			rx_q->rx_count_frames = 0;
4818 
4819 		use_rx_wd = !priv->rx_coal_frames[queue];
4820 		use_rx_wd |= rx_q->rx_count_frames > 0;
4821 		if (!priv->use_riwt)
4822 			use_rx_wd = false;
4823 
4824 		dma_wmb();
4825 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4826 
4827 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4828 	}
4829 	rx_q->dirty_rx = entry;
4830 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4831 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4832 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4833 }
4834 
4835 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4836 				       struct dma_desc *p,
4837 				       int status, unsigned int len)
4838 {
4839 	unsigned int plen = 0, hlen = 0;
4840 	int coe = priv->hw->rx_csum;
4841 
4842 	/* Not first descriptor, buffer is always zero */
4843 	if (priv->sph && len)
4844 		return 0;
4845 
4846 	/* First descriptor, get split header length */
4847 	stmmac_get_rx_header_len(priv, p, &hlen);
4848 	if (priv->sph && hlen) {
4849 		priv->xstats.rx_split_hdr_pkt_n++;
4850 		return hlen;
4851 	}
4852 
4853 	/* First descriptor, not last descriptor and not split header */
4854 	if (status & rx_not_ls)
4855 		return priv->dma_conf.dma_buf_sz;
4856 
4857 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4858 
4859 	/* First descriptor and last descriptor and not split header */
4860 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4861 }
4862 
4863 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4864 				       struct dma_desc *p,
4865 				       int status, unsigned int len)
4866 {
4867 	int coe = priv->hw->rx_csum;
4868 	unsigned int plen = 0;
4869 
4870 	/* Not split header, buffer is not available */
4871 	if (!priv->sph)
4872 		return 0;
4873 
4874 	/* Not last descriptor */
4875 	if (status & rx_not_ls)
4876 		return priv->dma_conf.dma_buf_sz;
4877 
4878 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4879 
4880 	/* Last descriptor */
4881 	return plen - len;
4882 }
4883 
4884 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4885 				struct xdp_frame *xdpf, bool dma_map)
4886 {
4887 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4888 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4889 	unsigned int entry = tx_q->cur_tx;
4890 	struct dma_desc *tx_desc;
4891 	dma_addr_t dma_addr;
4892 	bool set_ic;
4893 
4894 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4895 		return STMMAC_XDP_CONSUMED;
4896 
4897 	if (priv->est && priv->est->enable &&
4898 	    priv->est->max_sdu[queue] &&
4899 	    xdpf->len > priv->est->max_sdu[queue]) {
4900 		priv->xstats.max_sdu_txq_drop[queue]++;
4901 		return STMMAC_XDP_CONSUMED;
4902 	}
4903 
4904 	if (likely(priv->extend_desc))
4905 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4906 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4907 		tx_desc = &tx_q->dma_entx[entry].basic;
4908 	else
4909 		tx_desc = tx_q->dma_tx + entry;
4910 
4911 	if (dma_map) {
4912 		dma_addr = dma_map_single(priv->device, xdpf->data,
4913 					  xdpf->len, DMA_TO_DEVICE);
4914 		if (dma_mapping_error(priv->device, dma_addr))
4915 			return STMMAC_XDP_CONSUMED;
4916 
4917 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4918 	} else {
4919 		struct page *page = virt_to_page(xdpf->data);
4920 
4921 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4922 			   xdpf->headroom;
4923 		dma_sync_single_for_device(priv->device, dma_addr,
4924 					   xdpf->len, DMA_BIDIRECTIONAL);
4925 
4926 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4927 	}
4928 
4929 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4930 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4931 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4932 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4933 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4934 
4935 	tx_q->xdpf[entry] = xdpf;
4936 
4937 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4938 
4939 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4940 			       true, priv->mode, true, true,
4941 			       xdpf->len);
4942 
4943 	tx_q->tx_count_frames++;
4944 
4945 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4946 		set_ic = true;
4947 	else
4948 		set_ic = false;
4949 
4950 	if (set_ic) {
4951 		tx_q->tx_count_frames = 0;
4952 		stmmac_set_tx_ic(priv, tx_desc);
4953 		u64_stats_update_begin(&txq_stats->q_syncp);
4954 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4955 		u64_stats_update_end(&txq_stats->q_syncp);
4956 	}
4957 
4958 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4959 
4960 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4961 	tx_q->cur_tx = entry;
4962 
4963 	return STMMAC_XDP_TX;
4964 }
4965 
4966 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4967 				   int cpu)
4968 {
4969 	int index = cpu;
4970 
4971 	if (unlikely(index < 0))
4972 		index = 0;
4973 
4974 	while (index >= priv->plat->tx_queues_to_use)
4975 		index -= priv->plat->tx_queues_to_use;
4976 
4977 	return index;
4978 }
4979 
4980 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4981 				struct xdp_buff *xdp)
4982 {
4983 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4984 	int cpu = smp_processor_id();
4985 	struct netdev_queue *nq;
4986 	int queue;
4987 	int res;
4988 
4989 	if (unlikely(!xdpf))
4990 		return STMMAC_XDP_CONSUMED;
4991 
4992 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4993 	nq = netdev_get_tx_queue(priv->dev, queue);
4994 
4995 	__netif_tx_lock(nq, cpu);
4996 	/* Avoids TX time-out as we are sharing with slow path */
4997 	txq_trans_cond_update(nq);
4998 
4999 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5000 	if (res == STMMAC_XDP_TX)
5001 		stmmac_flush_tx_descriptors(priv, queue);
5002 
5003 	__netif_tx_unlock(nq);
5004 
5005 	return res;
5006 }
5007 
5008 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5009 				 struct bpf_prog *prog,
5010 				 struct xdp_buff *xdp)
5011 {
5012 	u32 act;
5013 	int res;
5014 
5015 	act = bpf_prog_run_xdp(prog, xdp);
5016 	switch (act) {
5017 	case XDP_PASS:
5018 		res = STMMAC_XDP_PASS;
5019 		break;
5020 	case XDP_TX:
5021 		res = stmmac_xdp_xmit_back(priv, xdp);
5022 		break;
5023 	case XDP_REDIRECT:
5024 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5025 			res = STMMAC_XDP_CONSUMED;
5026 		else
5027 			res = STMMAC_XDP_REDIRECT;
5028 		break;
5029 	default:
5030 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5031 		fallthrough;
5032 	case XDP_ABORTED:
5033 		trace_xdp_exception(priv->dev, prog, act);
5034 		fallthrough;
5035 	case XDP_DROP:
5036 		res = STMMAC_XDP_CONSUMED;
5037 		break;
5038 	}
5039 
5040 	return res;
5041 }
5042 
5043 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5044 					   struct xdp_buff *xdp)
5045 {
5046 	struct bpf_prog *prog;
5047 	int res;
5048 
5049 	prog = READ_ONCE(priv->xdp_prog);
5050 	if (!prog) {
5051 		res = STMMAC_XDP_PASS;
5052 		goto out;
5053 	}
5054 
5055 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5056 out:
5057 	return ERR_PTR(-res);
5058 }
5059 
5060 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5061 				   int xdp_status)
5062 {
5063 	int cpu = smp_processor_id();
5064 	int queue;
5065 
5066 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5067 
5068 	if (xdp_status & STMMAC_XDP_TX)
5069 		stmmac_tx_timer_arm(priv, queue);
5070 
5071 	if (xdp_status & STMMAC_XDP_REDIRECT)
5072 		xdp_do_flush();
5073 }
5074 
5075 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5076 					       struct xdp_buff *xdp)
5077 {
5078 	unsigned int metasize = xdp->data - xdp->data_meta;
5079 	unsigned int datasize = xdp->data_end - xdp->data;
5080 	struct sk_buff *skb;
5081 
5082 	skb = napi_alloc_skb(&ch->rxtx_napi,
5083 			     xdp->data_end - xdp->data_hard_start);
5084 	if (unlikely(!skb))
5085 		return NULL;
5086 
5087 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5088 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5089 	if (metasize)
5090 		skb_metadata_set(skb, metasize);
5091 
5092 	return skb;
5093 }
5094 
5095 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5096 				   struct dma_desc *p, struct dma_desc *np,
5097 				   struct xdp_buff *xdp)
5098 {
5099 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5100 	struct stmmac_channel *ch = &priv->channel[queue];
5101 	unsigned int len = xdp->data_end - xdp->data;
5102 	enum pkt_hash_types hash_type;
5103 	int coe = priv->hw->rx_csum;
5104 	struct sk_buff *skb;
5105 	u32 hash;
5106 
5107 	skb = stmmac_construct_skb_zc(ch, xdp);
5108 	if (!skb) {
5109 		priv->xstats.rx_dropped++;
5110 		return;
5111 	}
5112 
5113 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5114 	if (priv->hw->hw_vlan_en)
5115 		/* MAC level stripping. */
5116 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5117 	else
5118 		/* Driver level stripping. */
5119 		stmmac_rx_vlan(priv->dev, skb);
5120 	skb->protocol = eth_type_trans(skb, priv->dev);
5121 
5122 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5123 		skb_checksum_none_assert(skb);
5124 	else
5125 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5126 
5127 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5128 		skb_set_hash(skb, hash, hash_type);
5129 
5130 	skb_record_rx_queue(skb, queue);
5131 	napi_gro_receive(&ch->rxtx_napi, skb);
5132 
5133 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5134 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5135 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5136 	u64_stats_update_end(&rxq_stats->napi_syncp);
5137 }
5138 
5139 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5140 {
5141 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5142 	unsigned int entry = rx_q->dirty_rx;
5143 	struct dma_desc *rx_desc = NULL;
5144 	bool ret = true;
5145 
5146 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5147 
5148 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5149 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5150 		dma_addr_t dma_addr;
5151 		bool use_rx_wd;
5152 
5153 		if (!buf->xdp) {
5154 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5155 			if (!buf->xdp) {
5156 				ret = false;
5157 				break;
5158 			}
5159 		}
5160 
5161 		if (priv->extend_desc)
5162 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5163 		else
5164 			rx_desc = rx_q->dma_rx + entry;
5165 
5166 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5167 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5168 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5169 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5170 
5171 		rx_q->rx_count_frames++;
5172 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5173 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5174 			rx_q->rx_count_frames = 0;
5175 
5176 		use_rx_wd = !priv->rx_coal_frames[queue];
5177 		use_rx_wd |= rx_q->rx_count_frames > 0;
5178 		if (!priv->use_riwt)
5179 			use_rx_wd = false;
5180 
5181 		dma_wmb();
5182 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5183 
5184 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5185 	}
5186 
5187 	if (rx_desc) {
5188 		rx_q->dirty_rx = entry;
5189 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5190 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5191 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5192 	}
5193 
5194 	return ret;
5195 }
5196 
5197 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5198 {
5199 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5200 	 * to represent incoming packet, whereas cb field in the same structure
5201 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5202 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5203 	 */
5204 	return (struct stmmac_xdp_buff *)xdp;
5205 }
5206 
5207 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5208 {
5209 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5210 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5211 	unsigned int count = 0, error = 0, len = 0;
5212 	int dirty = stmmac_rx_dirty(priv, queue);
5213 	unsigned int next_entry = rx_q->cur_rx;
5214 	u32 rx_errors = 0, rx_dropped = 0;
5215 	unsigned int desc_size;
5216 	struct bpf_prog *prog;
5217 	bool failure = false;
5218 	int xdp_status = 0;
5219 	int status = 0;
5220 
5221 	if (netif_msg_rx_status(priv)) {
5222 		void *rx_head;
5223 
5224 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5225 		if (priv->extend_desc) {
5226 			rx_head = (void *)rx_q->dma_erx;
5227 			desc_size = sizeof(struct dma_extended_desc);
5228 		} else {
5229 			rx_head = (void *)rx_q->dma_rx;
5230 			desc_size = sizeof(struct dma_desc);
5231 		}
5232 
5233 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5234 				    rx_q->dma_rx_phy, desc_size);
5235 	}
5236 	while (count < limit) {
5237 		struct stmmac_rx_buffer *buf;
5238 		struct stmmac_xdp_buff *ctx;
5239 		unsigned int buf1_len = 0;
5240 		struct dma_desc *np, *p;
5241 		int entry;
5242 		int res;
5243 
5244 		if (!count && rx_q->state_saved) {
5245 			error = rx_q->state.error;
5246 			len = rx_q->state.len;
5247 		} else {
5248 			rx_q->state_saved = false;
5249 			error = 0;
5250 			len = 0;
5251 		}
5252 
5253 		if (count >= limit)
5254 			break;
5255 
5256 read_again:
5257 		buf1_len = 0;
5258 		entry = next_entry;
5259 		buf = &rx_q->buf_pool[entry];
5260 
5261 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5262 			failure = failure ||
5263 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5264 			dirty = 0;
5265 		}
5266 
5267 		if (priv->extend_desc)
5268 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5269 		else
5270 			p = rx_q->dma_rx + entry;
5271 
5272 		/* read the status of the incoming frame */
5273 		status = stmmac_rx_status(priv, &priv->xstats, p);
5274 		/* check if managed by the DMA otherwise go ahead */
5275 		if (unlikely(status & dma_own))
5276 			break;
5277 
5278 		/* Prefetch the next RX descriptor */
5279 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5280 						priv->dma_conf.dma_rx_size);
5281 		next_entry = rx_q->cur_rx;
5282 
5283 		if (priv->extend_desc)
5284 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5285 		else
5286 			np = rx_q->dma_rx + next_entry;
5287 
5288 		prefetch(np);
5289 
5290 		/* Ensure a valid XSK buffer before proceed */
5291 		if (!buf->xdp)
5292 			break;
5293 
5294 		if (priv->extend_desc)
5295 			stmmac_rx_extended_status(priv, &priv->xstats,
5296 						  rx_q->dma_erx + entry);
5297 		if (unlikely(status == discard_frame)) {
5298 			xsk_buff_free(buf->xdp);
5299 			buf->xdp = NULL;
5300 			dirty++;
5301 			error = 1;
5302 			if (!priv->hwts_rx_en)
5303 				rx_errors++;
5304 		}
5305 
5306 		if (unlikely(error && (status & rx_not_ls)))
5307 			goto read_again;
5308 		if (unlikely(error)) {
5309 			count++;
5310 			continue;
5311 		}
5312 
5313 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5314 		if (likely(status & rx_not_ls)) {
5315 			xsk_buff_free(buf->xdp);
5316 			buf->xdp = NULL;
5317 			dirty++;
5318 			count++;
5319 			goto read_again;
5320 		}
5321 
5322 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5323 		ctx->priv = priv;
5324 		ctx->desc = p;
5325 		ctx->ndesc = np;
5326 
5327 		/* XDP ZC Frame only support primary buffers for now */
5328 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5329 		len += buf1_len;
5330 
5331 		/* ACS is disabled; strip manually. */
5332 		if (likely(!(status & rx_not_ls))) {
5333 			buf1_len -= ETH_FCS_LEN;
5334 			len -= ETH_FCS_LEN;
5335 		}
5336 
5337 		/* RX buffer is good and fit into a XSK pool buffer */
5338 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5339 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5340 
5341 		prog = READ_ONCE(priv->xdp_prog);
5342 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5343 
5344 		switch (res) {
5345 		case STMMAC_XDP_PASS:
5346 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5347 			xsk_buff_free(buf->xdp);
5348 			break;
5349 		case STMMAC_XDP_CONSUMED:
5350 			xsk_buff_free(buf->xdp);
5351 			rx_dropped++;
5352 			break;
5353 		case STMMAC_XDP_TX:
5354 		case STMMAC_XDP_REDIRECT:
5355 			xdp_status |= res;
5356 			break;
5357 		}
5358 
5359 		buf->xdp = NULL;
5360 		dirty++;
5361 		count++;
5362 	}
5363 
5364 	if (status & rx_not_ls) {
5365 		rx_q->state_saved = true;
5366 		rx_q->state.error = error;
5367 		rx_q->state.len = len;
5368 	}
5369 
5370 	stmmac_finalize_xdp_rx(priv, xdp_status);
5371 
5372 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5373 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5374 	u64_stats_update_end(&rxq_stats->napi_syncp);
5375 
5376 	priv->xstats.rx_dropped += rx_dropped;
5377 	priv->xstats.rx_errors += rx_errors;
5378 
5379 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5380 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5381 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5382 		else
5383 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5384 
5385 		return (int)count;
5386 	}
5387 
5388 	return failure ? limit : (int)count;
5389 }
5390 
5391 /**
5392  * stmmac_rx - manage the receive process
5393  * @priv: driver private structure
5394  * @limit: napi bugget
5395  * @queue: RX queue index.
5396  * Description :  this the function called by the napi poll method.
5397  * It gets all the frames inside the ring.
5398  */
5399 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5400 {
5401 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5402 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5403 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5404 	struct stmmac_channel *ch = &priv->channel[queue];
5405 	unsigned int count = 0, error = 0, len = 0;
5406 	int status = 0, coe = priv->hw->rx_csum;
5407 	unsigned int next_entry = rx_q->cur_rx;
5408 	enum dma_data_direction dma_dir;
5409 	unsigned int desc_size;
5410 	struct sk_buff *skb = NULL;
5411 	struct stmmac_xdp_buff ctx;
5412 	int xdp_status = 0;
5413 	int buf_sz;
5414 
5415 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5416 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5417 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5418 
5419 	if (netif_msg_rx_status(priv)) {
5420 		void *rx_head;
5421 
5422 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5423 		if (priv->extend_desc) {
5424 			rx_head = (void *)rx_q->dma_erx;
5425 			desc_size = sizeof(struct dma_extended_desc);
5426 		} else {
5427 			rx_head = (void *)rx_q->dma_rx;
5428 			desc_size = sizeof(struct dma_desc);
5429 		}
5430 
5431 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5432 				    rx_q->dma_rx_phy, desc_size);
5433 	}
5434 	while (count < limit) {
5435 		unsigned int buf1_len = 0, buf2_len = 0;
5436 		enum pkt_hash_types hash_type;
5437 		struct stmmac_rx_buffer *buf;
5438 		struct dma_desc *np, *p;
5439 		int entry;
5440 		u32 hash;
5441 
5442 		if (!count && rx_q->state_saved) {
5443 			skb = rx_q->state.skb;
5444 			error = rx_q->state.error;
5445 			len = rx_q->state.len;
5446 		} else {
5447 			rx_q->state_saved = false;
5448 			skb = NULL;
5449 			error = 0;
5450 			len = 0;
5451 		}
5452 
5453 read_again:
5454 		if (count >= limit)
5455 			break;
5456 
5457 		buf1_len = 0;
5458 		buf2_len = 0;
5459 		entry = next_entry;
5460 		buf = &rx_q->buf_pool[entry];
5461 
5462 		if (priv->extend_desc)
5463 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5464 		else
5465 			p = rx_q->dma_rx + entry;
5466 
5467 		/* read the status of the incoming frame */
5468 		status = stmmac_rx_status(priv, &priv->xstats, p);
5469 		/* check if managed by the DMA otherwise go ahead */
5470 		if (unlikely(status & dma_own))
5471 			break;
5472 
5473 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5474 						priv->dma_conf.dma_rx_size);
5475 		next_entry = rx_q->cur_rx;
5476 
5477 		if (priv->extend_desc)
5478 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5479 		else
5480 			np = rx_q->dma_rx + next_entry;
5481 
5482 		prefetch(np);
5483 
5484 		if (priv->extend_desc)
5485 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5486 		if (unlikely(status == discard_frame)) {
5487 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5488 			buf->page = NULL;
5489 			error = 1;
5490 			if (!priv->hwts_rx_en)
5491 				rx_errors++;
5492 		}
5493 
5494 		if (unlikely(error && (status & rx_not_ls)))
5495 			goto read_again;
5496 		if (unlikely(error)) {
5497 			dev_kfree_skb(skb);
5498 			skb = NULL;
5499 			count++;
5500 			continue;
5501 		}
5502 
5503 		/* Buffer is good. Go on. */
5504 
5505 		prefetch(page_address(buf->page) + buf->page_offset);
5506 		if (buf->sec_page)
5507 			prefetch(page_address(buf->sec_page));
5508 
5509 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5510 		len += buf1_len;
5511 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5512 		len += buf2_len;
5513 
5514 		/* ACS is disabled; strip manually. */
5515 		if (likely(!(status & rx_not_ls))) {
5516 			if (buf2_len) {
5517 				buf2_len -= ETH_FCS_LEN;
5518 				len -= ETH_FCS_LEN;
5519 			} else if (buf1_len) {
5520 				buf1_len -= ETH_FCS_LEN;
5521 				len -= ETH_FCS_LEN;
5522 			}
5523 		}
5524 
5525 		if (!skb) {
5526 			unsigned int pre_len, sync_len;
5527 
5528 			dma_sync_single_for_cpu(priv->device, buf->addr,
5529 						buf1_len, dma_dir);
5530 
5531 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5532 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5533 					 buf->page_offset, buf1_len, true);
5534 
5535 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5536 				  buf->page_offset;
5537 
5538 			ctx.priv = priv;
5539 			ctx.desc = p;
5540 			ctx.ndesc = np;
5541 
5542 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5543 			/* Due xdp_adjust_tail: DMA sync for_device
5544 			 * cover max len CPU touch
5545 			 */
5546 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5547 				   buf->page_offset;
5548 			sync_len = max(sync_len, pre_len);
5549 
5550 			/* For Not XDP_PASS verdict */
5551 			if (IS_ERR(skb)) {
5552 				unsigned int xdp_res = -PTR_ERR(skb);
5553 
5554 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5555 					page_pool_put_page(rx_q->page_pool,
5556 							   virt_to_head_page(ctx.xdp.data),
5557 							   sync_len, true);
5558 					buf->page = NULL;
5559 					rx_dropped++;
5560 
5561 					/* Clear skb as it was set as
5562 					 * status by XDP program.
5563 					 */
5564 					skb = NULL;
5565 
5566 					if (unlikely((status & rx_not_ls)))
5567 						goto read_again;
5568 
5569 					count++;
5570 					continue;
5571 				} else if (xdp_res & (STMMAC_XDP_TX |
5572 						      STMMAC_XDP_REDIRECT)) {
5573 					xdp_status |= xdp_res;
5574 					buf->page = NULL;
5575 					skb = NULL;
5576 					count++;
5577 					continue;
5578 				}
5579 			}
5580 		}
5581 
5582 		if (!skb) {
5583 			/* XDP program may expand or reduce tail */
5584 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5585 
5586 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5587 			if (!skb) {
5588 				rx_dropped++;
5589 				count++;
5590 				goto drain_data;
5591 			}
5592 
5593 			/* XDP program may adjust header */
5594 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5595 			skb_put(skb, buf1_len);
5596 
5597 			/* Data payload copied into SKB, page ready for recycle */
5598 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5599 			buf->page = NULL;
5600 		} else if (buf1_len) {
5601 			dma_sync_single_for_cpu(priv->device, buf->addr,
5602 						buf1_len, dma_dir);
5603 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5604 					buf->page, buf->page_offset, buf1_len,
5605 					priv->dma_conf.dma_buf_sz);
5606 
5607 			/* Data payload appended into SKB */
5608 			skb_mark_for_recycle(skb);
5609 			buf->page = NULL;
5610 		}
5611 
5612 		if (buf2_len) {
5613 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5614 						buf2_len, dma_dir);
5615 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5616 					buf->sec_page, 0, buf2_len,
5617 					priv->dma_conf.dma_buf_sz);
5618 
5619 			/* Data payload appended into SKB */
5620 			skb_mark_for_recycle(skb);
5621 			buf->sec_page = NULL;
5622 		}
5623 
5624 drain_data:
5625 		if (likely(status & rx_not_ls))
5626 			goto read_again;
5627 		if (!skb)
5628 			continue;
5629 
5630 		/* Got entire packet into SKB. Finish it. */
5631 
5632 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5633 
5634 		if (priv->hw->hw_vlan_en)
5635 			/* MAC level stripping. */
5636 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5637 		else
5638 			/* Driver level stripping. */
5639 			stmmac_rx_vlan(priv->dev, skb);
5640 
5641 		skb->protocol = eth_type_trans(skb, priv->dev);
5642 
5643 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5644 			skb_checksum_none_assert(skb);
5645 		else
5646 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5647 
5648 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5649 			skb_set_hash(skb, hash, hash_type);
5650 
5651 		skb_record_rx_queue(skb, queue);
5652 		napi_gro_receive(&ch->rx_napi, skb);
5653 		skb = NULL;
5654 
5655 		rx_packets++;
5656 		rx_bytes += len;
5657 		count++;
5658 	}
5659 
5660 	if (status & rx_not_ls || skb) {
5661 		rx_q->state_saved = true;
5662 		rx_q->state.skb = skb;
5663 		rx_q->state.error = error;
5664 		rx_q->state.len = len;
5665 	}
5666 
5667 	stmmac_finalize_xdp_rx(priv, xdp_status);
5668 
5669 	stmmac_rx_refill(priv, queue);
5670 
5671 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5672 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5673 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5674 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5675 	u64_stats_update_end(&rxq_stats->napi_syncp);
5676 
5677 	priv->xstats.rx_dropped += rx_dropped;
5678 	priv->xstats.rx_errors += rx_errors;
5679 
5680 	return count;
5681 }
5682 
5683 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5684 {
5685 	struct stmmac_channel *ch =
5686 		container_of(napi, struct stmmac_channel, rx_napi);
5687 	struct stmmac_priv *priv = ch->priv_data;
5688 	struct stmmac_rxq_stats *rxq_stats;
5689 	u32 chan = ch->index;
5690 	int work_done;
5691 
5692 	rxq_stats = &priv->xstats.rxq_stats[chan];
5693 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5694 	u64_stats_inc(&rxq_stats->napi.poll);
5695 	u64_stats_update_end(&rxq_stats->napi_syncp);
5696 
5697 	work_done = stmmac_rx(priv, budget, chan);
5698 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5699 		unsigned long flags;
5700 
5701 		spin_lock_irqsave(&ch->lock, flags);
5702 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5703 		spin_unlock_irqrestore(&ch->lock, flags);
5704 	}
5705 
5706 	return work_done;
5707 }
5708 
5709 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5710 {
5711 	struct stmmac_channel *ch =
5712 		container_of(napi, struct stmmac_channel, tx_napi);
5713 	struct stmmac_priv *priv = ch->priv_data;
5714 	struct stmmac_txq_stats *txq_stats;
5715 	bool pending_packets = false;
5716 	u32 chan = ch->index;
5717 	int work_done;
5718 
5719 	txq_stats = &priv->xstats.txq_stats[chan];
5720 	u64_stats_update_begin(&txq_stats->napi_syncp);
5721 	u64_stats_inc(&txq_stats->napi.poll);
5722 	u64_stats_update_end(&txq_stats->napi_syncp);
5723 
5724 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5725 	work_done = min(work_done, budget);
5726 
5727 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5728 		unsigned long flags;
5729 
5730 		spin_lock_irqsave(&ch->lock, flags);
5731 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5732 		spin_unlock_irqrestore(&ch->lock, flags);
5733 	}
5734 
5735 	/* TX still have packet to handle, check if we need to arm tx timer */
5736 	if (pending_packets)
5737 		stmmac_tx_timer_arm(priv, chan);
5738 
5739 	return work_done;
5740 }
5741 
5742 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5743 {
5744 	struct stmmac_channel *ch =
5745 		container_of(napi, struct stmmac_channel, rxtx_napi);
5746 	struct stmmac_priv *priv = ch->priv_data;
5747 	bool tx_pending_packets = false;
5748 	int rx_done, tx_done, rxtx_done;
5749 	struct stmmac_rxq_stats *rxq_stats;
5750 	struct stmmac_txq_stats *txq_stats;
5751 	u32 chan = ch->index;
5752 
5753 	rxq_stats = &priv->xstats.rxq_stats[chan];
5754 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5755 	u64_stats_inc(&rxq_stats->napi.poll);
5756 	u64_stats_update_end(&rxq_stats->napi_syncp);
5757 
5758 	txq_stats = &priv->xstats.txq_stats[chan];
5759 	u64_stats_update_begin(&txq_stats->napi_syncp);
5760 	u64_stats_inc(&txq_stats->napi.poll);
5761 	u64_stats_update_end(&txq_stats->napi_syncp);
5762 
5763 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5764 	tx_done = min(tx_done, budget);
5765 
5766 	rx_done = stmmac_rx_zc(priv, budget, chan);
5767 
5768 	rxtx_done = max(tx_done, rx_done);
5769 
5770 	/* If either TX or RX work is not complete, return budget
5771 	 * and keep pooling
5772 	 */
5773 	if (rxtx_done >= budget)
5774 		return budget;
5775 
5776 	/* all work done, exit the polling mode */
5777 	if (napi_complete_done(napi, rxtx_done)) {
5778 		unsigned long flags;
5779 
5780 		spin_lock_irqsave(&ch->lock, flags);
5781 		/* Both RX and TX work done are compelte,
5782 		 * so enable both RX & TX IRQs.
5783 		 */
5784 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5785 		spin_unlock_irqrestore(&ch->lock, flags);
5786 	}
5787 
5788 	/* TX still have packet to handle, check if we need to arm tx timer */
5789 	if (tx_pending_packets)
5790 		stmmac_tx_timer_arm(priv, chan);
5791 
5792 	return min(rxtx_done, budget - 1);
5793 }
5794 
5795 /**
5796  *  stmmac_tx_timeout
5797  *  @dev : Pointer to net device structure
5798  *  @txqueue: the index of the hanging transmit queue
5799  *  Description: this function is called when a packet transmission fails to
5800  *   complete within a reasonable time. The driver will mark the error in the
5801  *   netdev structure and arrange for the device to be reset to a sane state
5802  *   in order to transmit a new packet.
5803  */
5804 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5805 {
5806 	struct stmmac_priv *priv = netdev_priv(dev);
5807 
5808 	stmmac_global_err(priv);
5809 }
5810 
5811 /**
5812  *  stmmac_set_rx_mode - entry point for multicast addressing
5813  *  @dev : pointer to the device structure
5814  *  Description:
5815  *  This function is a driver entry point which gets called by the kernel
5816  *  whenever multicast addresses must be enabled/disabled.
5817  *  Return value:
5818  *  void.
5819  */
5820 static void stmmac_set_rx_mode(struct net_device *dev)
5821 {
5822 	struct stmmac_priv *priv = netdev_priv(dev);
5823 
5824 	stmmac_set_filter(priv, priv->hw, dev);
5825 }
5826 
5827 /**
5828  *  stmmac_change_mtu - entry point to change MTU size for the device.
5829  *  @dev : device pointer.
5830  *  @new_mtu : the new MTU size for the device.
5831  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5832  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5833  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5834  *  Return value:
5835  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5836  *  file on failure.
5837  */
5838 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5839 {
5840 	struct stmmac_priv *priv = netdev_priv(dev);
5841 	int txfifosz = priv->plat->tx_fifo_size;
5842 	struct stmmac_dma_conf *dma_conf;
5843 	const int mtu = new_mtu;
5844 	int ret;
5845 
5846 	if (txfifosz == 0)
5847 		txfifosz = priv->dma_cap.tx_fifo_size;
5848 
5849 	txfifosz /= priv->plat->tx_queues_to_use;
5850 
5851 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5852 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5853 		return -EINVAL;
5854 	}
5855 
5856 	new_mtu = STMMAC_ALIGN(new_mtu);
5857 
5858 	/* If condition true, FIFO is too small or MTU too large */
5859 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5860 		return -EINVAL;
5861 
5862 	if (netif_running(dev)) {
5863 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5864 		/* Try to allocate the new DMA conf with the new mtu */
5865 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5866 		if (IS_ERR(dma_conf)) {
5867 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5868 				   mtu);
5869 			return PTR_ERR(dma_conf);
5870 		}
5871 
5872 		stmmac_release(dev);
5873 
5874 		ret = __stmmac_open(dev, dma_conf);
5875 		if (ret) {
5876 			free_dma_desc_resources(priv, dma_conf);
5877 			kfree(dma_conf);
5878 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5879 			return ret;
5880 		}
5881 
5882 		kfree(dma_conf);
5883 
5884 		stmmac_set_rx_mode(dev);
5885 	}
5886 
5887 	WRITE_ONCE(dev->mtu, mtu);
5888 	netdev_update_features(dev);
5889 
5890 	return 0;
5891 }
5892 
5893 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5894 					     netdev_features_t features)
5895 {
5896 	struct stmmac_priv *priv = netdev_priv(dev);
5897 
5898 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5899 		features &= ~NETIF_F_RXCSUM;
5900 
5901 	if (!priv->plat->tx_coe)
5902 		features &= ~NETIF_F_CSUM_MASK;
5903 
5904 	/* Some GMAC devices have a bugged Jumbo frame support that
5905 	 * needs to have the Tx COE disabled for oversized frames
5906 	 * (due to limited buffer sizes). In this case we disable
5907 	 * the TX csum insertion in the TDES and not use SF.
5908 	 */
5909 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5910 		features &= ~NETIF_F_CSUM_MASK;
5911 
5912 	/* Disable tso if asked by ethtool */
5913 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5914 		if (features & NETIF_F_TSO)
5915 			priv->tso = true;
5916 		else
5917 			priv->tso = false;
5918 	}
5919 
5920 	return features;
5921 }
5922 
5923 static int stmmac_set_features(struct net_device *netdev,
5924 			       netdev_features_t features)
5925 {
5926 	struct stmmac_priv *priv = netdev_priv(netdev);
5927 
5928 	/* Keep the COE Type in case of csum is supporting */
5929 	if (features & NETIF_F_RXCSUM)
5930 		priv->hw->rx_csum = priv->plat->rx_coe;
5931 	else
5932 		priv->hw->rx_csum = 0;
5933 	/* No check needed because rx_coe has been set before and it will be
5934 	 * fixed in case of issue.
5935 	 */
5936 	stmmac_rx_ipc(priv, priv->hw);
5937 
5938 	if (priv->sph_cap) {
5939 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5940 		u32 chan;
5941 
5942 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5943 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5944 	}
5945 
5946 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5947 		priv->hw->hw_vlan_en = true;
5948 	else
5949 		priv->hw->hw_vlan_en = false;
5950 
5951 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5952 
5953 	return 0;
5954 }
5955 
5956 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5957 {
5958 	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
5959 
5960 	/* This is interrupt context, just spin_lock() */
5961 	spin_lock(&fpe_cfg->lock);
5962 
5963 	if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
5964 		goto unlock_out;
5965 
5966 	/* LP has sent verify mPacket */
5967 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
5968 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
5969 					MPACKET_RESPONSE);
5970 
5971 	/* Local has sent verify mPacket */
5972 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
5973 	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
5974 		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
5975 
5976 	/* LP has sent response mPacket */
5977 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
5978 	    fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
5979 		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
5980 
5981 unlock_out:
5982 	spin_unlock(&fpe_cfg->lock);
5983 }
5984 
5985 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5986 {
5987 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5988 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5989 	u32 queues_count;
5990 	u32 queue;
5991 	bool xmac;
5992 
5993 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5994 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5995 
5996 	if (priv->irq_wake)
5997 		pm_wakeup_event(priv->device, 0);
5998 
5999 	if (priv->dma_cap.estsel)
6000 		stmmac_est_irq_status(priv, priv, priv->dev,
6001 				      &priv->xstats, tx_cnt);
6002 
6003 	if (priv->dma_cap.fpesel) {
6004 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
6005 						   priv->dev);
6006 
6007 		stmmac_fpe_event_status(priv, status);
6008 	}
6009 
6010 	/* To handle GMAC own interrupts */
6011 	if ((priv->plat->has_gmac) || xmac) {
6012 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6013 
6014 		if (unlikely(status)) {
6015 			/* For LPI we need to save the tx status */
6016 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6017 				priv->tx_path_in_lpi_mode = true;
6018 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6019 				priv->tx_path_in_lpi_mode = false;
6020 		}
6021 
6022 		for (queue = 0; queue < queues_count; queue++)
6023 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6024 
6025 		/* PCS link status */
6026 		if (priv->hw->pcs &&
6027 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6028 			if (priv->xstats.pcs_link)
6029 				netif_carrier_on(priv->dev);
6030 			else
6031 				netif_carrier_off(priv->dev);
6032 		}
6033 
6034 		stmmac_timestamp_interrupt(priv, priv);
6035 	}
6036 }
6037 
6038 /**
6039  *  stmmac_interrupt - main ISR
6040  *  @irq: interrupt number.
6041  *  @dev_id: to pass the net device pointer.
6042  *  Description: this is the main driver interrupt service routine.
6043  *  It can call:
6044  *  o DMA service routine (to manage incoming frame reception and transmission
6045  *    status)
6046  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6047  *    interrupts.
6048  */
6049 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6050 {
6051 	struct net_device *dev = (struct net_device *)dev_id;
6052 	struct stmmac_priv *priv = netdev_priv(dev);
6053 
6054 	/* Check if adapter is up */
6055 	if (test_bit(STMMAC_DOWN, &priv->state))
6056 		return IRQ_HANDLED;
6057 
6058 	/* Check ASP error if it isn't delivered via an individual IRQ */
6059 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6060 		return IRQ_HANDLED;
6061 
6062 	/* To handle Common interrupts */
6063 	stmmac_common_interrupt(priv);
6064 
6065 	/* To handle DMA interrupts */
6066 	stmmac_dma_interrupt(priv);
6067 
6068 	return IRQ_HANDLED;
6069 }
6070 
6071 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6072 {
6073 	struct net_device *dev = (struct net_device *)dev_id;
6074 	struct stmmac_priv *priv = netdev_priv(dev);
6075 
6076 	/* Check if adapter is up */
6077 	if (test_bit(STMMAC_DOWN, &priv->state))
6078 		return IRQ_HANDLED;
6079 
6080 	/* To handle Common interrupts */
6081 	stmmac_common_interrupt(priv);
6082 
6083 	return IRQ_HANDLED;
6084 }
6085 
6086 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6087 {
6088 	struct net_device *dev = (struct net_device *)dev_id;
6089 	struct stmmac_priv *priv = netdev_priv(dev);
6090 
6091 	/* Check if adapter is up */
6092 	if (test_bit(STMMAC_DOWN, &priv->state))
6093 		return IRQ_HANDLED;
6094 
6095 	/* Check if a fatal error happened */
6096 	stmmac_safety_feat_interrupt(priv);
6097 
6098 	return IRQ_HANDLED;
6099 }
6100 
6101 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6102 {
6103 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6104 	struct stmmac_dma_conf *dma_conf;
6105 	int chan = tx_q->queue_index;
6106 	struct stmmac_priv *priv;
6107 	int status;
6108 
6109 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6110 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6111 
6112 	/* Check if adapter is up */
6113 	if (test_bit(STMMAC_DOWN, &priv->state))
6114 		return IRQ_HANDLED;
6115 
6116 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6117 
6118 	if (unlikely(status & tx_hard_error_bump_tc)) {
6119 		/* Try to bump up the dma threshold on this failure */
6120 		stmmac_bump_dma_threshold(priv, chan);
6121 	} else if (unlikely(status == tx_hard_error)) {
6122 		stmmac_tx_err(priv, chan);
6123 	}
6124 
6125 	return IRQ_HANDLED;
6126 }
6127 
6128 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6129 {
6130 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6131 	struct stmmac_dma_conf *dma_conf;
6132 	int chan = rx_q->queue_index;
6133 	struct stmmac_priv *priv;
6134 
6135 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6136 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6137 
6138 	/* Check if adapter is up */
6139 	if (test_bit(STMMAC_DOWN, &priv->state))
6140 		return IRQ_HANDLED;
6141 
6142 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6143 
6144 	return IRQ_HANDLED;
6145 }
6146 
6147 /**
6148  *  stmmac_ioctl - Entry point for the Ioctl
6149  *  @dev: Device pointer.
6150  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6151  *  a proprietary structure used to pass information to the driver.
6152  *  @cmd: IOCTL command
6153  *  Description:
6154  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6155  */
6156 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6157 {
6158 	struct stmmac_priv *priv = netdev_priv (dev);
6159 	int ret = -EOPNOTSUPP;
6160 
6161 	if (!netif_running(dev))
6162 		return -EINVAL;
6163 
6164 	switch (cmd) {
6165 	case SIOCGMIIPHY:
6166 	case SIOCGMIIREG:
6167 	case SIOCSMIIREG:
6168 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6169 		break;
6170 	case SIOCSHWTSTAMP:
6171 		ret = stmmac_hwtstamp_set(dev, rq);
6172 		break;
6173 	case SIOCGHWTSTAMP:
6174 		ret = stmmac_hwtstamp_get(dev, rq);
6175 		break;
6176 	default:
6177 		break;
6178 	}
6179 
6180 	return ret;
6181 }
6182 
6183 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6184 				    void *cb_priv)
6185 {
6186 	struct stmmac_priv *priv = cb_priv;
6187 	int ret = -EOPNOTSUPP;
6188 
6189 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6190 		return ret;
6191 
6192 	__stmmac_disable_all_queues(priv);
6193 
6194 	switch (type) {
6195 	case TC_SETUP_CLSU32:
6196 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6197 		break;
6198 	case TC_SETUP_CLSFLOWER:
6199 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6200 		break;
6201 	default:
6202 		break;
6203 	}
6204 
6205 	stmmac_enable_all_queues(priv);
6206 	return ret;
6207 }
6208 
6209 static LIST_HEAD(stmmac_block_cb_list);
6210 
6211 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6212 			   void *type_data)
6213 {
6214 	struct stmmac_priv *priv = netdev_priv(ndev);
6215 
6216 	switch (type) {
6217 	case TC_QUERY_CAPS:
6218 		return stmmac_tc_query_caps(priv, priv, type_data);
6219 	case TC_SETUP_QDISC_MQPRIO:
6220 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6221 	case TC_SETUP_BLOCK:
6222 		return flow_block_cb_setup_simple(type_data,
6223 						  &stmmac_block_cb_list,
6224 						  stmmac_setup_tc_block_cb,
6225 						  priv, priv, true);
6226 	case TC_SETUP_QDISC_CBS:
6227 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6228 	case TC_SETUP_QDISC_TAPRIO:
6229 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6230 	case TC_SETUP_QDISC_ETF:
6231 		return stmmac_tc_setup_etf(priv, priv, type_data);
6232 	default:
6233 		return -EOPNOTSUPP;
6234 	}
6235 }
6236 
6237 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6238 			       struct net_device *sb_dev)
6239 {
6240 	int gso = skb_shinfo(skb)->gso_type;
6241 
6242 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6243 		/*
6244 		 * There is no way to determine the number of TSO/USO
6245 		 * capable Queues. Let's use always the Queue 0
6246 		 * because if TSO/USO is supported then at least this
6247 		 * one will be capable.
6248 		 */
6249 		return 0;
6250 	}
6251 
6252 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6253 }
6254 
6255 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6256 {
6257 	struct stmmac_priv *priv = netdev_priv(ndev);
6258 	int ret = 0;
6259 
6260 	ret = pm_runtime_resume_and_get(priv->device);
6261 	if (ret < 0)
6262 		return ret;
6263 
6264 	ret = eth_mac_addr(ndev, addr);
6265 	if (ret)
6266 		goto set_mac_error;
6267 
6268 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6269 
6270 set_mac_error:
6271 	pm_runtime_put(priv->device);
6272 
6273 	return ret;
6274 }
6275 
6276 #ifdef CONFIG_DEBUG_FS
6277 static struct dentry *stmmac_fs_dir;
6278 
6279 static void sysfs_display_ring(void *head, int size, int extend_desc,
6280 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6281 {
6282 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6283 	struct dma_desc *p = (struct dma_desc *)head;
6284 	unsigned int desc_size;
6285 	dma_addr_t dma_addr;
6286 	int i;
6287 
6288 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6289 	for (i = 0; i < size; i++) {
6290 		dma_addr = dma_phy_addr + i * desc_size;
6291 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6292 				i, &dma_addr,
6293 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6294 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6295 		if (extend_desc)
6296 			p = &(++ep)->basic;
6297 		else
6298 			p++;
6299 	}
6300 }
6301 
6302 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6303 {
6304 	struct net_device *dev = seq->private;
6305 	struct stmmac_priv *priv = netdev_priv(dev);
6306 	u32 rx_count = priv->plat->rx_queues_to_use;
6307 	u32 tx_count = priv->plat->tx_queues_to_use;
6308 	u32 queue;
6309 
6310 	if ((dev->flags & IFF_UP) == 0)
6311 		return 0;
6312 
6313 	for (queue = 0; queue < rx_count; queue++) {
6314 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6315 
6316 		seq_printf(seq, "RX Queue %d:\n", queue);
6317 
6318 		if (priv->extend_desc) {
6319 			seq_printf(seq, "Extended descriptor ring:\n");
6320 			sysfs_display_ring((void *)rx_q->dma_erx,
6321 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6322 		} else {
6323 			seq_printf(seq, "Descriptor ring:\n");
6324 			sysfs_display_ring((void *)rx_q->dma_rx,
6325 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6326 		}
6327 	}
6328 
6329 	for (queue = 0; queue < tx_count; queue++) {
6330 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6331 
6332 		seq_printf(seq, "TX Queue %d:\n", queue);
6333 
6334 		if (priv->extend_desc) {
6335 			seq_printf(seq, "Extended descriptor ring:\n");
6336 			sysfs_display_ring((void *)tx_q->dma_etx,
6337 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6338 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6339 			seq_printf(seq, "Descriptor ring:\n");
6340 			sysfs_display_ring((void *)tx_q->dma_tx,
6341 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6342 		}
6343 	}
6344 
6345 	return 0;
6346 }
6347 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6348 
6349 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6350 {
6351 	static const char * const dwxgmac_timestamp_source[] = {
6352 		"None",
6353 		"Internal",
6354 		"External",
6355 		"Both",
6356 	};
6357 	static const char * const dwxgmac_safety_feature_desc[] = {
6358 		"No",
6359 		"All Safety Features with ECC and Parity",
6360 		"All Safety Features without ECC or Parity",
6361 		"All Safety Features with Parity Only",
6362 		"ECC Only",
6363 		"UNDEFINED",
6364 		"UNDEFINED",
6365 		"UNDEFINED",
6366 	};
6367 	struct net_device *dev = seq->private;
6368 	struct stmmac_priv *priv = netdev_priv(dev);
6369 
6370 	if (!priv->hw_cap_support) {
6371 		seq_printf(seq, "DMA HW features not supported\n");
6372 		return 0;
6373 	}
6374 
6375 	seq_printf(seq, "==============================\n");
6376 	seq_printf(seq, "\tDMA HW features\n");
6377 	seq_printf(seq, "==============================\n");
6378 
6379 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6380 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6381 	seq_printf(seq, "\t1000 Mbps: %s\n",
6382 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6383 	seq_printf(seq, "\tHalf duplex: %s\n",
6384 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6385 	if (priv->plat->has_xgmac) {
6386 		seq_printf(seq,
6387 			   "\tNumber of Additional MAC address registers: %d\n",
6388 			   priv->dma_cap.multi_addr);
6389 	} else {
6390 		seq_printf(seq, "\tHash Filter: %s\n",
6391 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6392 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6393 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6394 	}
6395 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6396 		   (priv->dma_cap.pcs) ? "Y" : "N");
6397 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6398 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6399 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6400 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6401 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6402 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6403 	seq_printf(seq, "\tRMON module: %s\n",
6404 		   (priv->dma_cap.rmon) ? "Y" : "N");
6405 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6406 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6407 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6408 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6409 	if (priv->plat->has_xgmac)
6410 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6411 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6412 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6413 		   (priv->dma_cap.eee) ? "Y" : "N");
6414 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6415 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6416 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6417 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6418 	    priv->plat->has_xgmac) {
6419 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6420 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6421 	} else {
6422 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6423 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6424 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6425 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6426 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6427 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6428 	}
6429 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6430 		   priv->dma_cap.number_rx_channel);
6431 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6432 		   priv->dma_cap.number_tx_channel);
6433 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6434 		   priv->dma_cap.number_rx_queues);
6435 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6436 		   priv->dma_cap.number_tx_queues);
6437 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6438 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6439 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6440 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6441 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6442 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6443 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6444 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6445 		   priv->dma_cap.pps_out_num);
6446 	seq_printf(seq, "\tSafety Features: %s\n",
6447 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6448 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6449 		   priv->dma_cap.frpsel ? "Y" : "N");
6450 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6451 		   priv->dma_cap.host_dma_width);
6452 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6453 		   priv->dma_cap.rssen ? "Y" : "N");
6454 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6455 		   priv->dma_cap.vlhash ? "Y" : "N");
6456 	seq_printf(seq, "\tSplit Header: %s\n",
6457 		   priv->dma_cap.sphen ? "Y" : "N");
6458 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6459 		   priv->dma_cap.vlins ? "Y" : "N");
6460 	seq_printf(seq, "\tDouble VLAN: %s\n",
6461 		   priv->dma_cap.dvlan ? "Y" : "N");
6462 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6463 		   priv->dma_cap.l3l4fnum);
6464 	seq_printf(seq, "\tARP Offloading: %s\n",
6465 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6466 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6467 		   priv->dma_cap.estsel ? "Y" : "N");
6468 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6469 		   priv->dma_cap.fpesel ? "Y" : "N");
6470 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6471 		   priv->dma_cap.tbssel ? "Y" : "N");
6472 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6473 		   priv->dma_cap.tbs_ch_num);
6474 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6475 		   priv->dma_cap.sgfsel ? "Y" : "N");
6476 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6477 		   BIT(priv->dma_cap.ttsfd) >> 1);
6478 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6479 		   priv->dma_cap.numtc);
6480 	seq_printf(seq, "\tDCB Feature: %s\n",
6481 		   priv->dma_cap.dcben ? "Y" : "N");
6482 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6483 		   priv->dma_cap.advthword ? "Y" : "N");
6484 	seq_printf(seq, "\tPTP Offload: %s\n",
6485 		   priv->dma_cap.ptoen ? "Y" : "N");
6486 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6487 		   priv->dma_cap.osten ? "Y" : "N");
6488 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6489 		   priv->dma_cap.pfcen ? "Y" : "N");
6490 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6491 		   BIT(priv->dma_cap.frpes) << 6);
6492 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6493 		   BIT(priv->dma_cap.frpbs) << 6);
6494 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6495 		   priv->dma_cap.frppipe_num);
6496 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6497 		   priv->dma_cap.nrvf_num ?
6498 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6499 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6500 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6501 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6502 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6503 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6504 		   priv->dma_cap.cbtisel ? "Y" : "N");
6505 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6506 		   priv->dma_cap.aux_snapshot_n);
6507 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6508 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6509 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6510 		   priv->dma_cap.edma ? "Y" : "N");
6511 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6512 		   priv->dma_cap.ediffc ? "Y" : "N");
6513 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6514 		   priv->dma_cap.vxn ? "Y" : "N");
6515 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6516 		   priv->dma_cap.dbgmem ? "Y" : "N");
6517 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6518 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6519 	return 0;
6520 }
6521 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6522 
6523 /* Use network device events to rename debugfs file entries.
6524  */
6525 static int stmmac_device_event(struct notifier_block *unused,
6526 			       unsigned long event, void *ptr)
6527 {
6528 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6529 	struct stmmac_priv *priv = netdev_priv(dev);
6530 
6531 	if (dev->netdev_ops != &stmmac_netdev_ops)
6532 		goto done;
6533 
6534 	switch (event) {
6535 	case NETDEV_CHANGENAME:
6536 		if (priv->dbgfs_dir)
6537 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6538 							 priv->dbgfs_dir,
6539 							 stmmac_fs_dir,
6540 							 dev->name);
6541 		break;
6542 	}
6543 done:
6544 	return NOTIFY_DONE;
6545 }
6546 
6547 static struct notifier_block stmmac_notifier = {
6548 	.notifier_call = stmmac_device_event,
6549 };
6550 
6551 static void stmmac_init_fs(struct net_device *dev)
6552 {
6553 	struct stmmac_priv *priv = netdev_priv(dev);
6554 
6555 	rtnl_lock();
6556 
6557 	/* Create per netdev entries */
6558 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6559 
6560 	/* Entry to report DMA RX/TX rings */
6561 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6562 			    &stmmac_rings_status_fops);
6563 
6564 	/* Entry to report the DMA HW features */
6565 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6566 			    &stmmac_dma_cap_fops);
6567 
6568 	rtnl_unlock();
6569 }
6570 
6571 static void stmmac_exit_fs(struct net_device *dev)
6572 {
6573 	struct stmmac_priv *priv = netdev_priv(dev);
6574 
6575 	debugfs_remove_recursive(priv->dbgfs_dir);
6576 }
6577 #endif /* CONFIG_DEBUG_FS */
6578 
6579 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6580 {
6581 	unsigned char *data = (unsigned char *)&vid_le;
6582 	unsigned char data_byte = 0;
6583 	u32 crc = ~0x0;
6584 	u32 temp = 0;
6585 	int i, bits;
6586 
6587 	bits = get_bitmask_order(VLAN_VID_MASK);
6588 	for (i = 0; i < bits; i++) {
6589 		if ((i % 8) == 0)
6590 			data_byte = data[i / 8];
6591 
6592 		temp = ((crc & 1) ^ data_byte) & 1;
6593 		crc >>= 1;
6594 		data_byte >>= 1;
6595 
6596 		if (temp)
6597 			crc ^= 0xedb88320;
6598 	}
6599 
6600 	return crc;
6601 }
6602 
6603 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6604 {
6605 	u32 crc, hash = 0;
6606 	u16 pmatch = 0;
6607 	int count = 0;
6608 	u16 vid = 0;
6609 
6610 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6611 		__le16 vid_le = cpu_to_le16(vid);
6612 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6613 		hash |= (1 << crc);
6614 		count++;
6615 	}
6616 
6617 	if (!priv->dma_cap.vlhash) {
6618 		if (count > 2) /* VID = 0 always passes filter */
6619 			return -EOPNOTSUPP;
6620 
6621 		pmatch = vid;
6622 		hash = 0;
6623 	}
6624 
6625 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6626 }
6627 
6628 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6629 {
6630 	struct stmmac_priv *priv = netdev_priv(ndev);
6631 	bool is_double = false;
6632 	int ret;
6633 
6634 	ret = pm_runtime_resume_and_get(priv->device);
6635 	if (ret < 0)
6636 		return ret;
6637 
6638 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6639 		is_double = true;
6640 
6641 	set_bit(vid, priv->active_vlans);
6642 	ret = stmmac_vlan_update(priv, is_double);
6643 	if (ret) {
6644 		clear_bit(vid, priv->active_vlans);
6645 		goto err_pm_put;
6646 	}
6647 
6648 	if (priv->hw->num_vlan) {
6649 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6650 		if (ret)
6651 			goto err_pm_put;
6652 	}
6653 err_pm_put:
6654 	pm_runtime_put(priv->device);
6655 
6656 	return ret;
6657 }
6658 
6659 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6660 {
6661 	struct stmmac_priv *priv = netdev_priv(ndev);
6662 	bool is_double = false;
6663 	int ret;
6664 
6665 	ret = pm_runtime_resume_and_get(priv->device);
6666 	if (ret < 0)
6667 		return ret;
6668 
6669 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6670 		is_double = true;
6671 
6672 	clear_bit(vid, priv->active_vlans);
6673 
6674 	if (priv->hw->num_vlan) {
6675 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6676 		if (ret)
6677 			goto del_vlan_error;
6678 	}
6679 
6680 	ret = stmmac_vlan_update(priv, is_double);
6681 
6682 del_vlan_error:
6683 	pm_runtime_put(priv->device);
6684 
6685 	return ret;
6686 }
6687 
6688 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6689 {
6690 	struct stmmac_priv *priv = netdev_priv(dev);
6691 
6692 	switch (bpf->command) {
6693 	case XDP_SETUP_PROG:
6694 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6695 	case XDP_SETUP_XSK_POOL:
6696 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6697 					     bpf->xsk.queue_id);
6698 	default:
6699 		return -EOPNOTSUPP;
6700 	}
6701 }
6702 
6703 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6704 			   struct xdp_frame **frames, u32 flags)
6705 {
6706 	struct stmmac_priv *priv = netdev_priv(dev);
6707 	int cpu = smp_processor_id();
6708 	struct netdev_queue *nq;
6709 	int i, nxmit = 0;
6710 	int queue;
6711 
6712 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6713 		return -ENETDOWN;
6714 
6715 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6716 		return -EINVAL;
6717 
6718 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6719 	nq = netdev_get_tx_queue(priv->dev, queue);
6720 
6721 	__netif_tx_lock(nq, cpu);
6722 	/* Avoids TX time-out as we are sharing with slow path */
6723 	txq_trans_cond_update(nq);
6724 
6725 	for (i = 0; i < num_frames; i++) {
6726 		int res;
6727 
6728 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6729 		if (res == STMMAC_XDP_CONSUMED)
6730 			break;
6731 
6732 		nxmit++;
6733 	}
6734 
6735 	if (flags & XDP_XMIT_FLUSH) {
6736 		stmmac_flush_tx_descriptors(priv, queue);
6737 		stmmac_tx_timer_arm(priv, queue);
6738 	}
6739 
6740 	__netif_tx_unlock(nq);
6741 
6742 	return nxmit;
6743 }
6744 
6745 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6746 {
6747 	struct stmmac_channel *ch = &priv->channel[queue];
6748 	unsigned long flags;
6749 
6750 	spin_lock_irqsave(&ch->lock, flags);
6751 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6752 	spin_unlock_irqrestore(&ch->lock, flags);
6753 
6754 	stmmac_stop_rx_dma(priv, queue);
6755 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6756 }
6757 
6758 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6759 {
6760 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6761 	struct stmmac_channel *ch = &priv->channel[queue];
6762 	unsigned long flags;
6763 	u32 buf_size;
6764 	int ret;
6765 
6766 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6767 	if (ret) {
6768 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6769 		return;
6770 	}
6771 
6772 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6773 	if (ret) {
6774 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6775 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6776 		return;
6777 	}
6778 
6779 	stmmac_reset_rx_queue(priv, queue);
6780 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6781 
6782 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6783 			    rx_q->dma_rx_phy, rx_q->queue_index);
6784 
6785 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6786 			     sizeof(struct dma_desc));
6787 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6788 			       rx_q->rx_tail_addr, rx_q->queue_index);
6789 
6790 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6791 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6792 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6793 				      buf_size,
6794 				      rx_q->queue_index);
6795 	} else {
6796 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6797 				      priv->dma_conf.dma_buf_sz,
6798 				      rx_q->queue_index);
6799 	}
6800 
6801 	stmmac_start_rx_dma(priv, queue);
6802 
6803 	spin_lock_irqsave(&ch->lock, flags);
6804 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6805 	spin_unlock_irqrestore(&ch->lock, flags);
6806 }
6807 
6808 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6809 {
6810 	struct stmmac_channel *ch = &priv->channel[queue];
6811 	unsigned long flags;
6812 
6813 	spin_lock_irqsave(&ch->lock, flags);
6814 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6815 	spin_unlock_irqrestore(&ch->lock, flags);
6816 
6817 	stmmac_stop_tx_dma(priv, queue);
6818 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6819 }
6820 
6821 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6822 {
6823 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6824 	struct stmmac_channel *ch = &priv->channel[queue];
6825 	unsigned long flags;
6826 	int ret;
6827 
6828 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6829 	if (ret) {
6830 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6831 		return;
6832 	}
6833 
6834 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6835 	if (ret) {
6836 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6837 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6838 		return;
6839 	}
6840 
6841 	stmmac_reset_tx_queue(priv, queue);
6842 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6843 
6844 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6845 			    tx_q->dma_tx_phy, tx_q->queue_index);
6846 
6847 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6848 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6849 
6850 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6851 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6852 			       tx_q->tx_tail_addr, tx_q->queue_index);
6853 
6854 	stmmac_start_tx_dma(priv, queue);
6855 
6856 	spin_lock_irqsave(&ch->lock, flags);
6857 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6858 	spin_unlock_irqrestore(&ch->lock, flags);
6859 }
6860 
6861 void stmmac_xdp_release(struct net_device *dev)
6862 {
6863 	struct stmmac_priv *priv = netdev_priv(dev);
6864 	u32 chan;
6865 
6866 	/* Ensure tx function is not running */
6867 	netif_tx_disable(dev);
6868 
6869 	/* Disable NAPI process */
6870 	stmmac_disable_all_queues(priv);
6871 
6872 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6873 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6874 
6875 	/* Free the IRQ lines */
6876 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6877 
6878 	/* Stop TX/RX DMA channels */
6879 	stmmac_stop_all_dma(priv);
6880 
6881 	/* Release and free the Rx/Tx resources */
6882 	free_dma_desc_resources(priv, &priv->dma_conf);
6883 
6884 	/* Disable the MAC Rx/Tx */
6885 	stmmac_mac_set(priv, priv->ioaddr, false);
6886 
6887 	/* set trans_start so we don't get spurious
6888 	 * watchdogs during reset
6889 	 */
6890 	netif_trans_update(dev);
6891 	netif_carrier_off(dev);
6892 }
6893 
6894 int stmmac_xdp_open(struct net_device *dev)
6895 {
6896 	struct stmmac_priv *priv = netdev_priv(dev);
6897 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6898 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6899 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6900 	struct stmmac_rx_queue *rx_q;
6901 	struct stmmac_tx_queue *tx_q;
6902 	u32 buf_size;
6903 	bool sph_en;
6904 	u32 chan;
6905 	int ret;
6906 
6907 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6908 	if (ret < 0) {
6909 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6910 			   __func__);
6911 		goto dma_desc_error;
6912 	}
6913 
6914 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6915 	if (ret < 0) {
6916 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6917 			   __func__);
6918 		goto init_error;
6919 	}
6920 
6921 	stmmac_reset_queues_param(priv);
6922 
6923 	/* DMA CSR Channel configuration */
6924 	for (chan = 0; chan < dma_csr_ch; chan++) {
6925 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6926 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6927 	}
6928 
6929 	/* Adjust Split header */
6930 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6931 
6932 	/* DMA RX Channel Configuration */
6933 	for (chan = 0; chan < rx_cnt; chan++) {
6934 		rx_q = &priv->dma_conf.rx_queue[chan];
6935 
6936 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6937 				    rx_q->dma_rx_phy, chan);
6938 
6939 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6940 				     (rx_q->buf_alloc_num *
6941 				      sizeof(struct dma_desc));
6942 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6943 				       rx_q->rx_tail_addr, chan);
6944 
6945 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6946 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6947 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6948 					      buf_size,
6949 					      rx_q->queue_index);
6950 		} else {
6951 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6952 					      priv->dma_conf.dma_buf_sz,
6953 					      rx_q->queue_index);
6954 		}
6955 
6956 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6957 	}
6958 
6959 	/* DMA TX Channel Configuration */
6960 	for (chan = 0; chan < tx_cnt; chan++) {
6961 		tx_q = &priv->dma_conf.tx_queue[chan];
6962 
6963 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6964 				    tx_q->dma_tx_phy, chan);
6965 
6966 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6967 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6968 				       tx_q->tx_tail_addr, chan);
6969 
6970 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6971 		tx_q->txtimer.function = stmmac_tx_timer;
6972 	}
6973 
6974 	/* Enable the MAC Rx/Tx */
6975 	stmmac_mac_set(priv, priv->ioaddr, true);
6976 
6977 	/* Start Rx & Tx DMA Channels */
6978 	stmmac_start_all_dma(priv);
6979 
6980 	ret = stmmac_request_irq(dev);
6981 	if (ret)
6982 		goto irq_error;
6983 
6984 	/* Enable NAPI process*/
6985 	stmmac_enable_all_queues(priv);
6986 	netif_carrier_on(dev);
6987 	netif_tx_start_all_queues(dev);
6988 	stmmac_enable_all_dma_irq(priv);
6989 
6990 	return 0;
6991 
6992 irq_error:
6993 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6994 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6995 
6996 	stmmac_hw_teardown(dev);
6997 init_error:
6998 	free_dma_desc_resources(priv, &priv->dma_conf);
6999 dma_desc_error:
7000 	return ret;
7001 }
7002 
7003 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7004 {
7005 	struct stmmac_priv *priv = netdev_priv(dev);
7006 	struct stmmac_rx_queue *rx_q;
7007 	struct stmmac_tx_queue *tx_q;
7008 	struct stmmac_channel *ch;
7009 
7010 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7011 	    !netif_carrier_ok(priv->dev))
7012 		return -ENETDOWN;
7013 
7014 	if (!stmmac_xdp_is_enabled(priv))
7015 		return -EINVAL;
7016 
7017 	if (queue >= priv->plat->rx_queues_to_use ||
7018 	    queue >= priv->plat->tx_queues_to_use)
7019 		return -EINVAL;
7020 
7021 	rx_q = &priv->dma_conf.rx_queue[queue];
7022 	tx_q = &priv->dma_conf.tx_queue[queue];
7023 	ch = &priv->channel[queue];
7024 
7025 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7026 		return -EINVAL;
7027 
7028 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7029 		/* EQoS does not have per-DMA channel SW interrupt,
7030 		 * so we schedule RX Napi straight-away.
7031 		 */
7032 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7033 			__napi_schedule(&ch->rxtx_napi);
7034 	}
7035 
7036 	return 0;
7037 }
7038 
7039 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7040 {
7041 	struct stmmac_priv *priv = netdev_priv(dev);
7042 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7043 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7044 	unsigned int start;
7045 	int q;
7046 
7047 	for (q = 0; q < tx_cnt; q++) {
7048 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7049 		u64 tx_packets;
7050 		u64 tx_bytes;
7051 
7052 		do {
7053 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7054 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7055 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7056 		do {
7057 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7058 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7059 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7060 
7061 		stats->tx_packets += tx_packets;
7062 		stats->tx_bytes += tx_bytes;
7063 	}
7064 
7065 	for (q = 0; q < rx_cnt; q++) {
7066 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7067 		u64 rx_packets;
7068 		u64 rx_bytes;
7069 
7070 		do {
7071 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7072 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7073 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7074 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7075 
7076 		stats->rx_packets += rx_packets;
7077 		stats->rx_bytes += rx_bytes;
7078 	}
7079 
7080 	stats->rx_dropped = priv->xstats.rx_dropped;
7081 	stats->rx_errors = priv->xstats.rx_errors;
7082 	stats->tx_dropped = priv->xstats.tx_dropped;
7083 	stats->tx_errors = priv->xstats.tx_errors;
7084 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7085 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7086 	stats->rx_length_errors = priv->xstats.rx_length;
7087 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7088 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7089 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7090 }
7091 
7092 static const struct net_device_ops stmmac_netdev_ops = {
7093 	.ndo_open = stmmac_open,
7094 	.ndo_start_xmit = stmmac_xmit,
7095 	.ndo_stop = stmmac_release,
7096 	.ndo_change_mtu = stmmac_change_mtu,
7097 	.ndo_fix_features = stmmac_fix_features,
7098 	.ndo_set_features = stmmac_set_features,
7099 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7100 	.ndo_tx_timeout = stmmac_tx_timeout,
7101 	.ndo_eth_ioctl = stmmac_ioctl,
7102 	.ndo_get_stats64 = stmmac_get_stats64,
7103 	.ndo_setup_tc = stmmac_setup_tc,
7104 	.ndo_select_queue = stmmac_select_queue,
7105 	.ndo_set_mac_address = stmmac_set_mac_address,
7106 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7107 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7108 	.ndo_bpf = stmmac_bpf,
7109 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7110 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7111 };
7112 
7113 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7114 {
7115 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7116 		return;
7117 	if (test_bit(STMMAC_DOWN, &priv->state))
7118 		return;
7119 
7120 	netdev_err(priv->dev, "Reset adapter.\n");
7121 
7122 	rtnl_lock();
7123 	netif_trans_update(priv->dev);
7124 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7125 		usleep_range(1000, 2000);
7126 
7127 	set_bit(STMMAC_DOWN, &priv->state);
7128 	dev_close(priv->dev);
7129 	dev_open(priv->dev, NULL);
7130 	clear_bit(STMMAC_DOWN, &priv->state);
7131 	clear_bit(STMMAC_RESETING, &priv->state);
7132 	rtnl_unlock();
7133 }
7134 
7135 static void stmmac_service_task(struct work_struct *work)
7136 {
7137 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7138 			service_task);
7139 
7140 	stmmac_reset_subtask(priv);
7141 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7142 }
7143 
7144 /**
7145  *  stmmac_hw_init - Init the MAC device
7146  *  @priv: driver private structure
7147  *  Description: this function is to configure the MAC device according to
7148  *  some platform parameters or the HW capability register. It prepares the
7149  *  driver to use either ring or chain modes and to setup either enhanced or
7150  *  normal descriptors.
7151  */
7152 static int stmmac_hw_init(struct stmmac_priv *priv)
7153 {
7154 	int ret;
7155 
7156 	/* dwmac-sun8i only work in chain mode */
7157 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7158 		chain_mode = 1;
7159 	priv->chain_mode = chain_mode;
7160 
7161 	/* Initialize HW Interface */
7162 	ret = stmmac_hwif_init(priv);
7163 	if (ret)
7164 		return ret;
7165 
7166 	/* Get the HW capability (new GMAC newer than 3.50a) */
7167 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7168 	if (priv->hw_cap_support) {
7169 		dev_info(priv->device, "DMA HW capability register supported\n");
7170 
7171 		/* We can override some gmac/dma configuration fields: e.g.
7172 		 * enh_desc, tx_coe (e.g. that are passed through the
7173 		 * platform) with the values from the HW capability
7174 		 * register (if supported).
7175 		 */
7176 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7177 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7178 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7179 		priv->hw->pmt = priv->plat->pmt;
7180 		if (priv->dma_cap.hash_tb_sz) {
7181 			priv->hw->multicast_filter_bins =
7182 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7183 			priv->hw->mcast_bits_log2 =
7184 					ilog2(priv->hw->multicast_filter_bins);
7185 		}
7186 
7187 		/* TXCOE doesn't work in thresh DMA mode */
7188 		if (priv->plat->force_thresh_dma_mode)
7189 			priv->plat->tx_coe = 0;
7190 		else
7191 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7192 
7193 		/* In case of GMAC4 rx_coe is from HW cap register. */
7194 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7195 
7196 		if (priv->dma_cap.rx_coe_type2)
7197 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7198 		else if (priv->dma_cap.rx_coe_type1)
7199 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7200 
7201 	} else {
7202 		dev_info(priv->device, "No HW DMA feature register supported\n");
7203 	}
7204 
7205 	if (priv->plat->rx_coe) {
7206 		priv->hw->rx_csum = priv->plat->rx_coe;
7207 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7208 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7209 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7210 	}
7211 	if (priv->plat->tx_coe)
7212 		dev_info(priv->device, "TX Checksum insertion supported\n");
7213 
7214 	if (priv->plat->pmt) {
7215 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7216 		device_set_wakeup_capable(priv->device, 1);
7217 	}
7218 
7219 	if (priv->dma_cap.tsoen)
7220 		dev_info(priv->device, "TSO supported\n");
7221 
7222 	priv->hw->vlan_fail_q_en =
7223 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7224 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7225 
7226 	/* Run HW quirks, if any */
7227 	if (priv->hwif_quirks) {
7228 		ret = priv->hwif_quirks(priv);
7229 		if (ret)
7230 			return ret;
7231 	}
7232 
7233 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7234 	 * In some case, for example on bugged HW this feature
7235 	 * has to be disable and this can be done by passing the
7236 	 * riwt_off field from the platform.
7237 	 */
7238 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7239 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7240 		priv->use_riwt = 1;
7241 		dev_info(priv->device,
7242 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7243 	}
7244 
7245 	return 0;
7246 }
7247 
7248 static void stmmac_napi_add(struct net_device *dev)
7249 {
7250 	struct stmmac_priv *priv = netdev_priv(dev);
7251 	u32 queue, maxq;
7252 
7253 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7254 
7255 	for (queue = 0; queue < maxq; queue++) {
7256 		struct stmmac_channel *ch = &priv->channel[queue];
7257 
7258 		ch->priv_data = priv;
7259 		ch->index = queue;
7260 		spin_lock_init(&ch->lock);
7261 
7262 		if (queue < priv->plat->rx_queues_to_use) {
7263 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7264 		}
7265 		if (queue < priv->plat->tx_queues_to_use) {
7266 			netif_napi_add_tx(dev, &ch->tx_napi,
7267 					  stmmac_napi_poll_tx);
7268 		}
7269 		if (queue < priv->plat->rx_queues_to_use &&
7270 		    queue < priv->plat->tx_queues_to_use) {
7271 			netif_napi_add(dev, &ch->rxtx_napi,
7272 				       stmmac_napi_poll_rxtx);
7273 		}
7274 	}
7275 }
7276 
7277 static void stmmac_napi_del(struct net_device *dev)
7278 {
7279 	struct stmmac_priv *priv = netdev_priv(dev);
7280 	u32 queue, maxq;
7281 
7282 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7283 
7284 	for (queue = 0; queue < maxq; queue++) {
7285 		struct stmmac_channel *ch = &priv->channel[queue];
7286 
7287 		if (queue < priv->plat->rx_queues_to_use)
7288 			netif_napi_del(&ch->rx_napi);
7289 		if (queue < priv->plat->tx_queues_to_use)
7290 			netif_napi_del(&ch->tx_napi);
7291 		if (queue < priv->plat->rx_queues_to_use &&
7292 		    queue < priv->plat->tx_queues_to_use) {
7293 			netif_napi_del(&ch->rxtx_napi);
7294 		}
7295 	}
7296 }
7297 
7298 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7299 {
7300 	struct stmmac_priv *priv = netdev_priv(dev);
7301 	int ret = 0, i;
7302 
7303 	if (netif_running(dev))
7304 		stmmac_release(dev);
7305 
7306 	stmmac_napi_del(dev);
7307 
7308 	priv->plat->rx_queues_to_use = rx_cnt;
7309 	priv->plat->tx_queues_to_use = tx_cnt;
7310 	if (!netif_is_rxfh_configured(dev))
7311 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7312 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7313 									rx_cnt);
7314 
7315 	stmmac_napi_add(dev);
7316 
7317 	if (netif_running(dev))
7318 		ret = stmmac_open(dev);
7319 
7320 	return ret;
7321 }
7322 
7323 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7324 {
7325 	struct stmmac_priv *priv = netdev_priv(dev);
7326 	int ret = 0;
7327 
7328 	if (netif_running(dev))
7329 		stmmac_release(dev);
7330 
7331 	priv->dma_conf.dma_rx_size = rx_size;
7332 	priv->dma_conf.dma_tx_size = tx_size;
7333 
7334 	if (netif_running(dev))
7335 		ret = stmmac_open(dev);
7336 
7337 	return ret;
7338 }
7339 
7340 /**
7341  * stmmac_fpe_verify_timer - Timer for MAC Merge verification
7342  * @t:  timer_list struct containing private info
7343  *
7344  * Verify the MAC Merge capability in the local TX direction, by
7345  * transmitting Verify mPackets up to 3 times. Wait until link
7346  * partner responds with a Response mPacket, otherwise fail.
7347  */
7348 static void stmmac_fpe_verify_timer(struct timer_list *t)
7349 {
7350 	struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
7351 	struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
7352 						fpe_cfg);
7353 	unsigned long flags;
7354 	bool rearm = false;
7355 
7356 	spin_lock_irqsave(&fpe_cfg->lock, flags);
7357 
7358 	switch (fpe_cfg->status) {
7359 	case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
7360 	case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
7361 		if (fpe_cfg->verify_retries != 0) {
7362 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7363 						fpe_cfg, MPACKET_VERIFY);
7364 			rearm = true;
7365 		} else {
7366 			fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
7367 		}
7368 
7369 		fpe_cfg->verify_retries--;
7370 		break;
7371 
7372 	case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
7373 		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
7374 				     priv->plat->tx_queues_to_use,
7375 				     priv->plat->rx_queues_to_use,
7376 				     true, true);
7377 		break;
7378 
7379 	default:
7380 		break;
7381 	}
7382 
7383 	if (rearm) {
7384 		mod_timer(&fpe_cfg->verify_timer,
7385 			  jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
7386 	}
7387 
7388 	spin_unlock_irqrestore(&fpe_cfg->lock, flags);
7389 }
7390 
7391 static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
7392 {
7393 	if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
7394 	    fpe_cfg->verify_enabled &&
7395 	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
7396 	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
7397 		timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
7398 		mod_timer(&fpe_cfg->verify_timer, jiffies);
7399 	}
7400 }
7401 
7402 void stmmac_fpe_apply(struct stmmac_priv *priv)
7403 {
7404 	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
7405 
7406 	/* If verification is disabled, configure FPE right away.
7407 	 * Otherwise let the timer code do it.
7408 	 */
7409 	if (!fpe_cfg->verify_enabled) {
7410 		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
7411 				     priv->plat->tx_queues_to_use,
7412 				     priv->plat->rx_queues_to_use,
7413 				     fpe_cfg->tx_enabled,
7414 				     fpe_cfg->pmac_enabled);
7415 	} else {
7416 		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
7417 		fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
7418 
7419 		if (netif_running(priv->dev))
7420 			stmmac_fpe_verify_timer_arm(fpe_cfg);
7421 	}
7422 }
7423 
7424 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7425 {
7426 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7427 	struct dma_desc *desc_contains_ts = ctx->desc;
7428 	struct stmmac_priv *priv = ctx->priv;
7429 	struct dma_desc *ndesc = ctx->ndesc;
7430 	struct dma_desc *desc = ctx->desc;
7431 	u64 ns = 0;
7432 
7433 	if (!priv->hwts_rx_en)
7434 		return -ENODATA;
7435 
7436 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7437 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7438 		desc_contains_ts = ndesc;
7439 
7440 	/* Check if timestamp is available */
7441 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7442 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7443 		ns -= priv->plat->cdc_error_adj;
7444 		*timestamp = ns_to_ktime(ns);
7445 		return 0;
7446 	}
7447 
7448 	return -ENODATA;
7449 }
7450 
7451 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7452 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7453 };
7454 
7455 /**
7456  * stmmac_dvr_probe
7457  * @device: device pointer
7458  * @plat_dat: platform data pointer
7459  * @res: stmmac resource pointer
7460  * Description: this is the main probe function used to
7461  * call the alloc_etherdev, allocate the priv structure.
7462  * Return:
7463  * returns 0 on success, otherwise errno.
7464  */
7465 int stmmac_dvr_probe(struct device *device,
7466 		     struct plat_stmmacenet_data *plat_dat,
7467 		     struct stmmac_resources *res)
7468 {
7469 	struct net_device *ndev = NULL;
7470 	struct stmmac_priv *priv;
7471 	u32 rxq;
7472 	int i, ret = 0;
7473 
7474 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7475 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7476 	if (!ndev)
7477 		return -ENOMEM;
7478 
7479 	SET_NETDEV_DEV(ndev, device);
7480 
7481 	priv = netdev_priv(ndev);
7482 	priv->device = device;
7483 	priv->dev = ndev;
7484 
7485 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7486 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7487 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7488 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7489 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7490 	}
7491 
7492 	priv->xstats.pcpu_stats =
7493 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7494 	if (!priv->xstats.pcpu_stats)
7495 		return -ENOMEM;
7496 
7497 	stmmac_set_ethtool_ops(ndev);
7498 	priv->pause = pause;
7499 	priv->plat = plat_dat;
7500 	priv->ioaddr = res->addr;
7501 	priv->dev->base_addr = (unsigned long)res->addr;
7502 	priv->plat->dma_cfg->multi_msi_en =
7503 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7504 
7505 	priv->dev->irq = res->irq;
7506 	priv->wol_irq = res->wol_irq;
7507 	priv->lpi_irq = res->lpi_irq;
7508 	priv->sfty_irq = res->sfty_irq;
7509 	priv->sfty_ce_irq = res->sfty_ce_irq;
7510 	priv->sfty_ue_irq = res->sfty_ue_irq;
7511 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7512 		priv->rx_irq[i] = res->rx_irq[i];
7513 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7514 		priv->tx_irq[i] = res->tx_irq[i];
7515 
7516 	if (!is_zero_ether_addr(res->mac))
7517 		eth_hw_addr_set(priv->dev, res->mac);
7518 
7519 	dev_set_drvdata(device, priv->dev);
7520 
7521 	/* Verify driver arguments */
7522 	stmmac_verify_args();
7523 
7524 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7525 	if (!priv->af_xdp_zc_qps)
7526 		return -ENOMEM;
7527 
7528 	/* Allocate workqueue */
7529 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7530 	if (!priv->wq) {
7531 		dev_err(priv->device, "failed to create workqueue\n");
7532 		ret = -ENOMEM;
7533 		goto error_wq_init;
7534 	}
7535 
7536 	INIT_WORK(&priv->service_task, stmmac_service_task);
7537 
7538 	/* Override with kernel parameters if supplied XXX CRS XXX
7539 	 * this needs to have multiple instances
7540 	 */
7541 	if ((phyaddr >= 0) && (phyaddr <= 31))
7542 		priv->plat->phy_addr = phyaddr;
7543 
7544 	if (priv->plat->stmmac_rst) {
7545 		ret = reset_control_assert(priv->plat->stmmac_rst);
7546 		reset_control_deassert(priv->plat->stmmac_rst);
7547 		/* Some reset controllers have only reset callback instead of
7548 		 * assert + deassert callbacks pair.
7549 		 */
7550 		if (ret == -ENOTSUPP)
7551 			reset_control_reset(priv->plat->stmmac_rst);
7552 	}
7553 
7554 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7555 	if (ret == -ENOTSUPP)
7556 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7557 			ERR_PTR(ret));
7558 
7559 	/* Wait a bit for the reset to take effect */
7560 	udelay(10);
7561 
7562 	/* Init MAC and get the capabilities */
7563 	ret = stmmac_hw_init(priv);
7564 	if (ret)
7565 		goto error_hw_init;
7566 
7567 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7568 	 */
7569 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7570 		priv->plat->dma_cfg->dche = false;
7571 
7572 	stmmac_check_ether_addr(priv);
7573 
7574 	ndev->netdev_ops = &stmmac_netdev_ops;
7575 
7576 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7577 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7578 
7579 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7580 			    NETIF_F_RXCSUM;
7581 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7582 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7583 
7584 	ret = stmmac_tc_init(priv, priv);
7585 	if (!ret) {
7586 		ndev->hw_features |= NETIF_F_HW_TC;
7587 	}
7588 
7589 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7590 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7591 		if (priv->plat->has_gmac4)
7592 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7593 		priv->tso = true;
7594 		dev_info(priv->device, "TSO feature enabled\n");
7595 	}
7596 
7597 	if (priv->dma_cap.sphen &&
7598 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7599 		ndev->hw_features |= NETIF_F_GRO;
7600 		priv->sph_cap = true;
7601 		priv->sph = priv->sph_cap;
7602 		dev_info(priv->device, "SPH feature enabled\n");
7603 	}
7604 
7605 	/* Ideally our host DMA address width is the same as for the
7606 	 * device. However, it may differ and then we have to use our
7607 	 * host DMA width for allocation and the device DMA width for
7608 	 * register handling.
7609 	 */
7610 	if (priv->plat->host_dma_width)
7611 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7612 	else
7613 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7614 
7615 	if (priv->dma_cap.host_dma_width) {
7616 		ret = dma_set_mask_and_coherent(device,
7617 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7618 		if (!ret) {
7619 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7620 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7621 
7622 			/*
7623 			 * If more than 32 bits can be addressed, make sure to
7624 			 * enable enhanced addressing mode.
7625 			 */
7626 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7627 				priv->plat->dma_cfg->eame = true;
7628 		} else {
7629 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7630 			if (ret) {
7631 				dev_err(priv->device, "Failed to set DMA Mask\n");
7632 				goto error_hw_init;
7633 			}
7634 
7635 			priv->dma_cap.host_dma_width = 32;
7636 		}
7637 	}
7638 
7639 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7640 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7641 #ifdef STMMAC_VLAN_TAG_USED
7642 	/* Both mac100 and gmac support receive VLAN tag detection */
7643 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7644 	if (priv->plat->has_gmac4) {
7645 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7646 		priv->hw->hw_vlan_en = true;
7647 	}
7648 	if (priv->dma_cap.vlhash) {
7649 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7650 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7651 	}
7652 	if (priv->dma_cap.vlins) {
7653 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7654 		if (priv->dma_cap.dvlan)
7655 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7656 	}
7657 #endif
7658 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7659 
7660 	priv->xstats.threshold = tc;
7661 
7662 	/* Initialize RSS */
7663 	rxq = priv->plat->rx_queues_to_use;
7664 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7665 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7666 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7667 
7668 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7669 		ndev->features |= NETIF_F_RXHASH;
7670 
7671 	ndev->vlan_features |= ndev->features;
7672 
7673 	/* MTU range: 46 - hw-specific max */
7674 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7675 	if (priv->plat->has_xgmac)
7676 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7677 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7678 		ndev->max_mtu = JUMBO_LEN;
7679 	else
7680 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7681 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7682 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7683 	 */
7684 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7685 	    (priv->plat->maxmtu >= ndev->min_mtu))
7686 		ndev->max_mtu = priv->plat->maxmtu;
7687 	else if (priv->plat->maxmtu < ndev->min_mtu)
7688 		dev_warn(priv->device,
7689 			 "%s: warning: maxmtu having invalid value (%d)\n",
7690 			 __func__, priv->plat->maxmtu);
7691 
7692 	if (flow_ctrl)
7693 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7694 
7695 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7696 
7697 	/* Setup channels NAPI */
7698 	stmmac_napi_add(ndev);
7699 
7700 	mutex_init(&priv->lock);
7701 
7702 	priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
7703 	priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
7704 	priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
7705 	timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
7706 	spin_lock_init(&priv->fpe_cfg.lock);
7707 
7708 	/* If a specific clk_csr value is passed from the platform
7709 	 * this means that the CSR Clock Range selection cannot be
7710 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7711 	 * set the MDC clock dynamically according to the csr actual
7712 	 * clock input.
7713 	 */
7714 	if (priv->plat->clk_csr >= 0)
7715 		priv->clk_csr = priv->plat->clk_csr;
7716 	else
7717 		stmmac_clk_csr_set(priv);
7718 
7719 	stmmac_check_pcs_mode(priv);
7720 
7721 	pm_runtime_get_noresume(device);
7722 	pm_runtime_set_active(device);
7723 	if (!pm_runtime_enabled(device))
7724 		pm_runtime_enable(device);
7725 
7726 	ret = stmmac_mdio_register(ndev);
7727 	if (ret < 0) {
7728 		dev_err_probe(priv->device, ret,
7729 			      "MDIO bus (id: %d) registration failed\n",
7730 			      priv->plat->bus_id);
7731 		goto error_mdio_register;
7732 	}
7733 
7734 	if (priv->plat->speed_mode_2500)
7735 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7736 
7737 	ret = stmmac_pcs_setup(ndev);
7738 	if (ret)
7739 		goto error_pcs_setup;
7740 
7741 	ret = stmmac_phy_setup(priv);
7742 	if (ret) {
7743 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7744 		goto error_phy_setup;
7745 	}
7746 
7747 	ret = register_netdev(ndev);
7748 	if (ret) {
7749 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7750 			__func__, ret);
7751 		goto error_netdev_register;
7752 	}
7753 
7754 #ifdef CONFIG_DEBUG_FS
7755 	stmmac_init_fs(ndev);
7756 #endif
7757 
7758 	if (priv->plat->dump_debug_regs)
7759 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7760 
7761 	/* Let pm_runtime_put() disable the clocks.
7762 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7763 	 */
7764 	pm_runtime_put(device);
7765 
7766 	return ret;
7767 
7768 error_netdev_register:
7769 	phylink_destroy(priv->phylink);
7770 error_phy_setup:
7771 	stmmac_pcs_clean(ndev);
7772 error_pcs_setup:
7773 	stmmac_mdio_unregister(ndev);
7774 error_mdio_register:
7775 	stmmac_napi_del(ndev);
7776 error_hw_init:
7777 	destroy_workqueue(priv->wq);
7778 error_wq_init:
7779 	bitmap_free(priv->af_xdp_zc_qps);
7780 
7781 	return ret;
7782 }
7783 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7784 
7785 /**
7786  * stmmac_dvr_remove
7787  * @dev: device pointer
7788  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7789  * changes the link status, releases the DMA descriptor rings.
7790  */
7791 void stmmac_dvr_remove(struct device *dev)
7792 {
7793 	struct net_device *ndev = dev_get_drvdata(dev);
7794 	struct stmmac_priv *priv = netdev_priv(ndev);
7795 
7796 	netdev_info(priv->dev, "%s: removing driver", __func__);
7797 
7798 	pm_runtime_get_sync(dev);
7799 
7800 	stmmac_stop_all_dma(priv);
7801 	stmmac_mac_set(priv, priv->ioaddr, false);
7802 	unregister_netdev(ndev);
7803 
7804 #ifdef CONFIG_DEBUG_FS
7805 	stmmac_exit_fs(ndev);
7806 #endif
7807 	phylink_destroy(priv->phylink);
7808 	if (priv->plat->stmmac_rst)
7809 		reset_control_assert(priv->plat->stmmac_rst);
7810 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7811 
7812 	stmmac_pcs_clean(ndev);
7813 	stmmac_mdio_unregister(ndev);
7814 
7815 	destroy_workqueue(priv->wq);
7816 	mutex_destroy(&priv->lock);
7817 	bitmap_free(priv->af_xdp_zc_qps);
7818 
7819 	pm_runtime_disable(dev);
7820 	pm_runtime_put_noidle(dev);
7821 }
7822 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7823 
7824 /**
7825  * stmmac_suspend - suspend callback
7826  * @dev: device pointer
7827  * Description: this is the function to suspend the device and it is called
7828  * by the platform driver to stop the network queue, release the resources,
7829  * program the PMT register (for WoL), clean and release driver resources.
7830  */
7831 int stmmac_suspend(struct device *dev)
7832 {
7833 	struct net_device *ndev = dev_get_drvdata(dev);
7834 	struct stmmac_priv *priv = netdev_priv(ndev);
7835 	u32 chan;
7836 
7837 	if (!ndev || !netif_running(ndev))
7838 		return 0;
7839 
7840 	mutex_lock(&priv->lock);
7841 
7842 	netif_device_detach(ndev);
7843 
7844 	stmmac_disable_all_queues(priv);
7845 
7846 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7847 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7848 
7849 	if (priv->eee_enabled) {
7850 		priv->tx_path_in_lpi_mode = false;
7851 		del_timer_sync(&priv->eee_ctrl_timer);
7852 	}
7853 
7854 	/* Stop TX/RX DMA */
7855 	stmmac_stop_all_dma(priv);
7856 
7857 	if (priv->plat->serdes_powerdown)
7858 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7859 
7860 	/* Enable Power down mode by programming the PMT regs */
7861 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7862 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7863 		priv->irq_wake = 1;
7864 	} else {
7865 		stmmac_mac_set(priv, priv->ioaddr, false);
7866 		pinctrl_pm_select_sleep_state(priv->device);
7867 	}
7868 
7869 	mutex_unlock(&priv->lock);
7870 
7871 	rtnl_lock();
7872 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7873 		phylink_suspend(priv->phylink, true);
7874 	} else {
7875 		if (device_may_wakeup(priv->device))
7876 			phylink_speed_down(priv->phylink, false);
7877 		phylink_suspend(priv->phylink, false);
7878 	}
7879 	rtnl_unlock();
7880 
7881 	if (priv->dma_cap.fpesel)
7882 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7883 
7884 	priv->speed = SPEED_UNKNOWN;
7885 	return 0;
7886 }
7887 EXPORT_SYMBOL_GPL(stmmac_suspend);
7888 
7889 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7890 {
7891 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7892 
7893 	rx_q->cur_rx = 0;
7894 	rx_q->dirty_rx = 0;
7895 }
7896 
7897 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7898 {
7899 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7900 
7901 	tx_q->cur_tx = 0;
7902 	tx_q->dirty_tx = 0;
7903 	tx_q->mss = 0;
7904 
7905 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7906 }
7907 
7908 /**
7909  * stmmac_reset_queues_param - reset queue parameters
7910  * @priv: device pointer
7911  */
7912 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7913 {
7914 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7915 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7916 	u32 queue;
7917 
7918 	for (queue = 0; queue < rx_cnt; queue++)
7919 		stmmac_reset_rx_queue(priv, queue);
7920 
7921 	for (queue = 0; queue < tx_cnt; queue++)
7922 		stmmac_reset_tx_queue(priv, queue);
7923 }
7924 
7925 /**
7926  * stmmac_resume - resume callback
7927  * @dev: device pointer
7928  * Description: when resume this function is invoked to setup the DMA and CORE
7929  * in a usable state.
7930  */
7931 int stmmac_resume(struct device *dev)
7932 {
7933 	struct net_device *ndev = dev_get_drvdata(dev);
7934 	struct stmmac_priv *priv = netdev_priv(ndev);
7935 	int ret;
7936 
7937 	if (!netif_running(ndev))
7938 		return 0;
7939 
7940 	/* Power Down bit, into the PM register, is cleared
7941 	 * automatically as soon as a magic packet or a Wake-up frame
7942 	 * is received. Anyway, it's better to manually clear
7943 	 * this bit because it can generate problems while resuming
7944 	 * from another devices (e.g. serial console).
7945 	 */
7946 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7947 		mutex_lock(&priv->lock);
7948 		stmmac_pmt(priv, priv->hw, 0);
7949 		mutex_unlock(&priv->lock);
7950 		priv->irq_wake = 0;
7951 	} else {
7952 		pinctrl_pm_select_default_state(priv->device);
7953 		/* reset the phy so that it's ready */
7954 		if (priv->mii)
7955 			stmmac_mdio_reset(priv->mii);
7956 	}
7957 
7958 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7959 	    priv->plat->serdes_powerup) {
7960 		ret = priv->plat->serdes_powerup(ndev,
7961 						 priv->plat->bsp_priv);
7962 
7963 		if (ret < 0)
7964 			return ret;
7965 	}
7966 
7967 	rtnl_lock();
7968 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7969 		phylink_resume(priv->phylink);
7970 	} else {
7971 		phylink_resume(priv->phylink);
7972 		if (device_may_wakeup(priv->device))
7973 			phylink_speed_up(priv->phylink);
7974 	}
7975 	rtnl_unlock();
7976 
7977 	rtnl_lock();
7978 	mutex_lock(&priv->lock);
7979 
7980 	stmmac_reset_queues_param(priv);
7981 
7982 	stmmac_free_tx_skbufs(priv);
7983 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7984 
7985 	stmmac_hw_setup(ndev, false);
7986 	stmmac_init_coalesce(priv);
7987 	stmmac_set_rx_mode(ndev);
7988 
7989 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7990 
7991 	stmmac_enable_all_queues(priv);
7992 	stmmac_enable_all_dma_irq(priv);
7993 
7994 	mutex_unlock(&priv->lock);
7995 	rtnl_unlock();
7996 
7997 	netif_device_attach(ndev);
7998 
7999 	return 0;
8000 }
8001 EXPORT_SYMBOL_GPL(stmmac_resume);
8002 
8003 #ifndef MODULE
8004 static int __init stmmac_cmdline_opt(char *str)
8005 {
8006 	char *opt;
8007 
8008 	if (!str || !*str)
8009 		return 1;
8010 	while ((opt = strsep(&str, ",")) != NULL) {
8011 		if (!strncmp(opt, "debug:", 6)) {
8012 			if (kstrtoint(opt + 6, 0, &debug))
8013 				goto err;
8014 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8015 			if (kstrtoint(opt + 8, 0, &phyaddr))
8016 				goto err;
8017 		} else if (!strncmp(opt, "buf_sz:", 7)) {
8018 			if (kstrtoint(opt + 7, 0, &buf_sz))
8019 				goto err;
8020 		} else if (!strncmp(opt, "tc:", 3)) {
8021 			if (kstrtoint(opt + 3, 0, &tc))
8022 				goto err;
8023 		} else if (!strncmp(opt, "watchdog:", 9)) {
8024 			if (kstrtoint(opt + 9, 0, &watchdog))
8025 				goto err;
8026 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8027 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8028 				goto err;
8029 		} else if (!strncmp(opt, "pause:", 6)) {
8030 			if (kstrtoint(opt + 6, 0, &pause))
8031 				goto err;
8032 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8033 			if (kstrtoint(opt + 10, 0, &eee_timer))
8034 				goto err;
8035 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8036 			if (kstrtoint(opt + 11, 0, &chain_mode))
8037 				goto err;
8038 		}
8039 	}
8040 	return 1;
8041 
8042 err:
8043 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8044 	return 1;
8045 }
8046 
8047 __setup("stmmaceth=", stmmac_cmdline_opt);
8048 #endif /* MODULE */
8049 
8050 static int __init stmmac_init(void)
8051 {
8052 #ifdef CONFIG_DEBUG_FS
8053 	/* Create debugfs main directory if it doesn't exist yet */
8054 	if (!stmmac_fs_dir)
8055 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8056 	register_netdevice_notifier(&stmmac_notifier);
8057 #endif
8058 
8059 	return 0;
8060 }
8061 
8062 static void __exit stmmac_exit(void)
8063 {
8064 #ifdef CONFIG_DEBUG_FS
8065 	unregister_netdevice_notifier(&stmmac_notifier);
8066 	debugfs_remove_recursive(stmmac_fs_dir);
8067 #endif
8068 }
8069 
8070 module_init(stmmac_init)
8071 module_exit(stmmac_exit)
8072 
8073 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8074 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8075 MODULE_LICENSE("GPL");
8076