1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54 
55 /* As long as the interface is active, we keep the timestamping counter enabled
56  * with fine resolution and binary rollover. This avoids non-monotonic behavior
57  * (clock jumps) when changing timestamping settings at runtime.
58  */
59 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 				 PTP_TCR_TSCTRLSSR)
61 
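/* Descriptor alignment: round up to the cache line size and then to a
 * 16-byte boundary. TSO payload is split into chunks of at most
 * TSO_MAX_BUFF_SIZE bytes per descriptor.
 */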
62 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
64 
65 /* Module parameters */
66 #define TX_TIMEO	5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70 
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74 
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78 
79 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
110 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
111 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
112 
113 #define STMMAC_DEFAULT_LPI_TIMER	1000
114 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
115 module_param(eee_timer, uint, 0644);
116 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
117 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
118 
119 /* By default the driver will use the ring mode to manage tx and rx descriptors,
120  * but allow user to force to use the chain instead of the ring
121  */
122 static unsigned int chain_mode;
123 module_param(chain_mode, int, 0444);
124 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
125 
126 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
127 /* For MSI interrupts handling */
128 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
129 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
131 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
132 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
133 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
135 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
137 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
138 					  u32 rxmode, u32 chan);
139 
140 #ifdef CONFIG_DEBUG_FS
141 static const struct net_device_ops stmmac_netdev_ops;
142 static void stmmac_init_fs(struct net_device *dev);
143 static void stmmac_exit_fs(struct net_device *dev);
144 #endif
145 
146 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
147 
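/**
 * stmmac_bus_clks_config - enable or disable the bus clocks
 * @priv: driver private structure
 * @enabled: true to enable stmmac_clk and pclk (plus the optional platform
 *	     clocks via clks_config), false to disable them
 * Description: clocks that were already enabled are released again if a
 * later step fails, so the call either fully succeeds or leaves the clocks
 * untouched.
 * Return: 0 on success, a negative error code otherwise.
 */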
148 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
149 {
150 	int ret = 0;
151 
152 	if (enabled) {
153 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
154 		if (ret)
155 			return ret;
156 		ret = clk_prepare_enable(priv->plat->pclk);
157 		if (ret) {
158 			clk_disable_unprepare(priv->plat->stmmac_clk);
159 			return ret;
160 		}
161 		if (priv->plat->clks_config) {
162 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
163 			if (ret) {
164 				clk_disable_unprepare(priv->plat->stmmac_clk);
165 				clk_disable_unprepare(priv->plat->pclk);
166 				return ret;
167 			}
168 		}
169 	} else {
170 		clk_disable_unprepare(priv->plat->stmmac_clk);
171 		clk_disable_unprepare(priv->plat->pclk);
172 		if (priv->plat->clks_config)
173 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
174 	}
175 
176 	return ret;
177 }
178 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
179 
180 /**
181  * stmmac_verify_args - verify the driver parameters.
182  * Description: it checks the driver parameters and sets a default in case of
183  * errors.
184  */
185 static void stmmac_verify_args(void)
186 {
187 	if (unlikely(watchdog < 0))
188 		watchdog = TX_TIMEO;
189 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
190 		buf_sz = DEFAULT_BUFSIZE;
191 	if (unlikely(flow_ctrl > 1))
192 		flow_ctrl = FLOW_AUTO;
193 	else if (likely(flow_ctrl < 0))
194 		flow_ctrl = FLOW_OFF;
195 	if (unlikely((pause < 0) || (pause > 0xffff)))
196 		pause = PAUSE_TIME;
197 }
198 
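/* Disable NAPI on every channel. Queues running in AF_XDP zero-copy mode use
 * the combined rxtx NAPI instance instead of the separate rx/tx ones.
 */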
199 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
200 {
201 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
202 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
203 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
204 	u32 queue;
205 
206 	for (queue = 0; queue < maxq; queue++) {
207 		struct stmmac_channel *ch = &priv->channel[queue];
208 
209 		if (stmmac_xdp_is_enabled(priv) &&
210 		    test_bit(queue, priv->af_xdp_zc_qps)) {
211 			napi_disable(&ch->rxtx_napi);
212 			continue;
213 		}
214 
215 		if (queue < rx_queues_cnt)
216 			napi_disable(&ch->rx_napi);
217 		if (queue < tx_queues_cnt)
218 			napi_disable(&ch->tx_napi);
219 	}
220 }
221 
222 /**
223  * stmmac_disable_all_queues - Disable all queues
224  * @priv: driver private structure
225  */
226 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
227 {
228 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
229 	struct stmmac_rx_queue *rx_q;
230 	u32 queue;
231 
232 	/* synchronize_rcu() needed for pending XDP buffers to drain */
233 	for (queue = 0; queue < rx_queues_cnt; queue++) {
234 		rx_q = &priv->dma_conf.rx_queue[queue];
235 		if (rx_q->xsk_pool) {
236 			synchronize_rcu();
237 			break;
238 		}
239 	}
240 
241 	__stmmac_disable_all_queues(priv);
242 }
243 
244 /**
245  * stmmac_enable_all_queues - Enable all queues
246  * @priv: driver private structure
247  */
248 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
249 {
250 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
251 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
252 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
253 	u32 queue;
254 
255 	for (queue = 0; queue < maxq; queue++) {
256 		struct stmmac_channel *ch = &priv->channel[queue];
257 
258 		if (stmmac_xdp_is_enabled(priv) &&
259 		    test_bit(queue, priv->af_xdp_zc_qps)) {
260 			napi_enable(&ch->rxtx_napi);
261 			continue;
262 		}
263 
264 		if (queue < rx_queues_cnt)
265 			napi_enable(&ch->rx_napi);
266 		if (queue < tx_queues_cnt)
267 			napi_enable(&ch->tx_napi);
268 	}
269 }
270 
271 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
272 {
273 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
274 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
275 		queue_work(priv->wq, &priv->service_task);
276 }
277 
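/* Drop the carrier and request a full reset from the service task; called
 * when an unrecoverable error is detected.
 */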
278 static void stmmac_global_err(struct stmmac_priv *priv)
279 {
280 	netif_carrier_off(priv->dev);
281 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
282 	stmmac_service_event_schedule(priv);
283 }
284 
285 /**
286  * stmmac_clk_csr_set - dynamically set the MDC clock
287  * @priv: driver private structure
288  * Description: this is to dynamically set the MDC clock according to the csr
289  * clock input.
290  * Note:
291  *	If a specific clk_csr value is passed from the platform
292  *	this means that the CSR Clock Range selection cannot be
293  *	changed at run-time and it is fixed (as reported in the driver
294  * documentation). Otherwise, the driver will try to set the MDC
295  *	clock dynamically according to the actual clock input.
296  */
297 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
298 {
299 	unsigned long clk_rate;
300 
301 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
302 
303 	/* The platform-provided default clk_csr is assumed valid for all
304 	 * cases except the ones handled below.
305 	 * For values higher than the IEEE 802.3 specified frequency we
306 	 * cannot estimate the proper divider because the frequency of
307 	 * clk_csr_i is not known. So we do not change the default
308 	 * divider.
309 	 */
310 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
311 		if (clk_rate < CSR_F_35M)
312 			priv->clk_csr = STMMAC_CSR_20_35M;
313 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
314 			priv->clk_csr = STMMAC_CSR_35_60M;
315 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
316 			priv->clk_csr = STMMAC_CSR_60_100M;
317 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
318 			priv->clk_csr = STMMAC_CSR_100_150M;
319 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
320 			priv->clk_csr = STMMAC_CSR_150_250M;
321 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
322 			priv->clk_csr = STMMAC_CSR_250_300M;
323 		else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
324 			priv->clk_csr = STMMAC_CSR_300_500M;
325 		else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
326 			priv->clk_csr = STMMAC_CSR_500_800M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
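/* Number of free descriptors in TX @queue. One slot is deliberately left
 * unused so that cur_tx == dirty_tx always means "ring empty" and never
 * "ring full".
 */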
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
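/* Hardware-timed LPI entry: programming 0 disables the LPI entry timer,
 * while a non-zero tx_lpi_timer lets the MAC enter LPI automatically once
 * the TX path has been idle for that long.
 */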
393 static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv)
394 {
395 	stmmac_set_eee_lpi_timer(priv, priv->hw, 0);
396 }
397 
398 static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv)
399 {
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer);
401 }
402 
403 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
404 {
405 	u32 tx_cnt = priv->plat->tx_queues_to_use;
406 	u32 queue;
407 
408 	/* check if all TX queues have the work finished */
409 	for (queue = 0; queue < tx_cnt; queue++) {
410 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
411 
412 		if (tx_q->dirty_tx != tx_q->cur_tx)
413 			return true; /* still unfinished work */
414 	}
415 
416 	return false;
417 }
418 
419 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
420 {
421 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
422 }
423 
424 /**
425  * stmmac_try_to_start_sw_lpi - check and enter LPI mode
426  * @priv: driver private structure
427  * Description: this function checks whether the TX path is idle and, if so,
428  * enters LPI mode when EEE is enabled.
429  */
430 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
431 {
432 	if (stmmac_eee_tx_busy(priv)) {
433 		stmmac_restart_sw_lpi_timer(priv);
434 		return;
435 	}
436 
437 	/* Check and enter in LPI mode */
438 	if (!priv->tx_path_in_lpi_mode)
439 		stmmac_set_eee_mode(priv, priv->hw,
440 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
441 }
442 
443 /**
444  * stmmac_stop_sw_lpi - stop transmitting LPI
445  * @priv: driver private structure
446  * Description: When using software-controlled LPI, stop transmitting LPI state.
447  */
448 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
449 {
450 	stmmac_reset_eee_mode(priv, priv->hw);
451 	del_timer_sync(&priv->eee_ctrl_timer);
452 	priv->tx_path_in_lpi_mode = false;
453 }
454 
455 /**
456  * stmmac_eee_ctrl_timer - EEE TX SW timer.
457  * @t:  timer_list struct containing private info
458  * Description:
459  *  if there is no data transfer and we are not already in the LPI state,
460  *  then the MAC transmitter can be moved to the LPI state.
461  */
462 static void stmmac_eee_ctrl_timer(struct timer_list *t)
463 {
464 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
465 
466 	stmmac_try_to_start_sw_lpi(priv);
467 }
468 
469 /**
470  * stmmac_eee_init - init EEE
471  * @priv: driver private structure
472  * @active: indicates whether EEE should be enabled.
473  * Description:
474  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
475  *  can also manage EEE, this function enables the LPI state and starts the
476  *  related timer.
477  */
478 static void stmmac_eee_init(struct stmmac_priv *priv, bool active)
479 {
480 	priv->eee_active = active;
481 
482 	/* Check if MAC core supports the EEE feature. */
483 	if (!priv->dma_cap.eee) {
484 		priv->eee_enabled = false;
485 		return;
486 	}
487 
488 	mutex_lock(&priv->lock);
489 
490 	/* Check if it needs to be deactivated */
491 	if (!priv->eee_active) {
492 		if (priv->eee_enabled) {
493 			netdev_dbg(priv->dev, "disable EEE\n");
494 			priv->eee_sw_timer_en = false;
495 			stmmac_disable_hw_lpi_timer(priv);
496 			del_timer_sync(&priv->eee_ctrl_timer);
497 			stmmac_set_eee_timer(priv, priv->hw, 0,
498 					     STMMAC_DEFAULT_TWT_LS);
499 			if (priv->hw->xpcs)
500 				xpcs_config_eee(priv->hw->xpcs,
501 						priv->plat->mult_fact_100ns,
502 						false);
503 		}
504 		priv->eee_enabled = false;
505 		mutex_unlock(&priv->lock);
506 		return;
507 	}
508 
509 	if (priv->eee_active && !priv->eee_enabled) {
510 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
511 				     STMMAC_DEFAULT_TWT_LS);
512 		if (priv->hw->xpcs)
513 			xpcs_config_eee(priv->hw->xpcs,
514 					priv->plat->mult_fact_100ns,
515 					true);
516 	}
517 
518 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
519 		/* Use hardware LPI mode */
520 		del_timer_sync(&priv->eee_ctrl_timer);
521 		priv->tx_path_in_lpi_mode = false;
522 		priv->eee_sw_timer_en = false;
523 		stmmac_enable_hw_lpi_timer(priv);
524 	} else {
525 		/* Use software LPI mode */
526 		priv->eee_sw_timer_en = true;
527 		stmmac_disable_hw_lpi_timer(priv);
528 		stmmac_restart_sw_lpi_timer(priv);
529 	}
530 
531 	priv->eee_enabled = true;
532 
533 	mutex_unlock(&priv->lock);
534 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
535 }
536 
537 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
538  * @priv: driver private structure
539  * @p : descriptor pointer
540  * @skb : the socket buffer
541  * Description :
542  * This function reads the timestamp from the descriptor, performs some
543  * sanity checks and passes it to the stack.
544  */
545 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
546 				   struct dma_desc *p, struct sk_buff *skb)
547 {
548 	struct skb_shared_hwtstamps shhwtstamp;
549 	bool found = false;
550 	u64 ns = 0;
551 
552 	if (!priv->hwts_tx_en)
553 		return;
554 
555 	/* exit if skb doesn't support hw tstamp */
556 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
557 		return;
558 
559 	/* check tx tstamp status */
560 	if (stmmac_get_tx_timestamp_status(priv, p)) {
561 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
562 		found = true;
563 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
564 		found = true;
565 	}
566 
567 	if (found) {
568 		ns -= priv->plat->cdc_error_adj;
569 
570 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
571 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
572 
573 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
574 		/* pass tstamp to stack */
575 		skb_tstamp_tx(skb, &shhwtstamp);
576 	}
577 }
578 
579 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
580  * @priv: driver private structure
581  * @p : descriptor pointer
582  * @np : next descriptor pointer
583  * @skb : the socket buffer
584  * Description :
585  * This function will read received packet's timestamp from the descriptor
586  * and pass it to the stack. It also performs some sanity checks.
587  */
588 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
589 				   struct dma_desc *np, struct sk_buff *skb)
590 {
591 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
592 	struct dma_desc *desc = p;
593 	u64 ns = 0;
594 
595 	if (!priv->hwts_rx_en)
596 		return;
597 	/* For GMAC4, the valid timestamp is from CTX next desc. */
598 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
599 		desc = np;
600 
601 	/* Check if timestamp is available */
602 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
603 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
604 
605 		ns -= priv->plat->cdc_error_adj;
606 
607 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
608 		shhwtstamp = skb_hwtstamps(skb);
609 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
610 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
611 	} else  {
612 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
613 	}
614 }
615 
616 /**
617  *  stmmac_hwtstamp_set - control hardware timestamping.
618  *  @dev: device pointer.
619  *  @ifr: An IOCTL-specific structure that can contain a pointer to
620  *  a proprietary structure used to pass information to the driver.
621  *  Description:
622  *  This function configures the MAC to enable/disable both outgoing(TX)
623  *  and incoming(RX) packets time stamping based on user input.
624  *  Return Value:
625  *  0 on success and an appropriate negative error code on failure.
626  */
627 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
628 {
629 	struct stmmac_priv *priv = netdev_priv(dev);
630 	struct hwtstamp_config config;
631 	u32 ptp_v2 = 0;
632 	u32 tstamp_all = 0;
633 	u32 ptp_over_ipv4_udp = 0;
634 	u32 ptp_over_ipv6_udp = 0;
635 	u32 ptp_over_ethernet = 0;
636 	u32 snap_type_sel = 0;
637 	u32 ts_master_en = 0;
638 	u32 ts_event_en = 0;
639 
640 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
641 		netdev_alert(priv->dev, "No support for HW time stamping\n");
642 		priv->hwts_tx_en = 0;
643 		priv->hwts_rx_en = 0;
644 
645 		return -EOPNOTSUPP;
646 	}
647 
648 	if (copy_from_user(&config, ifr->ifr_data,
649 			   sizeof(config)))
650 		return -EFAULT;
651 
652 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
653 		   __func__, config.flags, config.tx_type, config.rx_filter);
654 
655 	if (config.tx_type != HWTSTAMP_TX_OFF &&
656 	    config.tx_type != HWTSTAMP_TX_ON)
657 		return -ERANGE;
658 
659 	if (priv->adv_ts) {
660 		switch (config.rx_filter) {
661 		case HWTSTAMP_FILTER_NONE:
662 			/* do not time stamp any incoming packet */
663 			config.rx_filter = HWTSTAMP_FILTER_NONE;
664 			break;
665 
666 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
667 			/* PTP v1, UDP, any kind of event packet */
668 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
669 			/* 'xmac' hardware can support Sync, Pdelay_Req and
670 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
671 			 * This leaves Delay_Req timestamps out.
672 			 * Enable all events *and* general purpose message
673 			 * timestamping
674 			 */
675 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
676 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
677 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
678 			break;
679 
680 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
681 			/* PTP v1, UDP, Sync packet */
682 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
683 			/* take time stamp for SYNC messages only */
684 			ts_event_en = PTP_TCR_TSEVNTENA;
685 
686 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
687 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
688 			break;
689 
690 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
691 			/* PTP v1, UDP, Delay_req packet */
692 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
693 			/* take time stamp for Delay_Req messages only */
694 			ts_master_en = PTP_TCR_TSMSTRENA;
695 			ts_event_en = PTP_TCR_TSEVNTENA;
696 
697 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
698 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
699 			break;
700 
701 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
702 			/* PTP v2, UDP, any kind of event packet */
703 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
704 			ptp_v2 = PTP_TCR_TSVER2ENA;
705 			/* take time stamp for all event messages */
706 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
707 
708 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
709 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
710 			break;
711 
712 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
713 			/* PTP v2, UDP, Sync packet */
714 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
715 			ptp_v2 = PTP_TCR_TSVER2ENA;
716 			/* take time stamp for SYNC messages only */
717 			ts_event_en = PTP_TCR_TSEVNTENA;
718 
719 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
720 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
721 			break;
722 
723 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
724 			/* PTP v2, UDP, Delay_req packet */
725 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
726 			ptp_v2 = PTP_TCR_TSVER2ENA;
727 			/* take time stamp for Delay_Req messages only */
728 			ts_master_en = PTP_TCR_TSMSTRENA;
729 			ts_event_en = PTP_TCR_TSEVNTENA;
730 
731 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
732 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
733 			break;
734 
735 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
736 			/* PTP v2/802.AS1 any layer, any kind of event packet */
737 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
738 			ptp_v2 = PTP_TCR_TSVER2ENA;
739 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
740 			if (priv->synopsys_id < DWMAC_CORE_4_10)
741 				ts_event_en = PTP_TCR_TSEVNTENA;
742 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
743 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
744 			ptp_over_ethernet = PTP_TCR_TSIPENA;
745 			break;
746 
747 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
748 			/* PTP v2/802.AS1, any layer, Sync packet */
749 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
750 			ptp_v2 = PTP_TCR_TSVER2ENA;
751 			/* take time stamp for SYNC messages only */
752 			ts_event_en = PTP_TCR_TSEVNTENA;
753 
754 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
755 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
756 			ptp_over_ethernet = PTP_TCR_TSIPENA;
757 			break;
758 
759 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
760 			/* PTP v2/802.AS1, any layer, Delay_req packet */
761 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
762 			ptp_v2 = PTP_TCR_TSVER2ENA;
763 			/* take time stamp for Delay_Req messages only */
764 			ts_master_en = PTP_TCR_TSMSTRENA;
765 			ts_event_en = PTP_TCR_TSEVNTENA;
766 
767 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
768 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
769 			ptp_over_ethernet = PTP_TCR_TSIPENA;
770 			break;
771 
772 		case HWTSTAMP_FILTER_NTP_ALL:
773 		case HWTSTAMP_FILTER_ALL:
774 			/* time stamp any incoming packet */
775 			config.rx_filter = HWTSTAMP_FILTER_ALL;
776 			tstamp_all = PTP_TCR_TSENALL;
777 			break;
778 
779 		default:
780 			return -ERANGE;
781 		}
782 	} else {
783 		switch (config.rx_filter) {
784 		case HWTSTAMP_FILTER_NONE:
785 			config.rx_filter = HWTSTAMP_FILTER_NONE;
786 			break;
787 		default:
788 			/* PTP v1, UDP, any kind of event packet */
789 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
790 			break;
791 		}
792 	}
793 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
794 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
795 
796 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
797 
798 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
799 		priv->systime_flags |= tstamp_all | ptp_v2 |
800 				       ptp_over_ethernet | ptp_over_ipv6_udp |
801 				       ptp_over_ipv4_udp | ts_event_en |
802 				       ts_master_en | snap_type_sel;
803 	}
804 
805 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
806 
807 	memcpy(&priv->tstamp_config, &config, sizeof(config));
808 
809 	return copy_to_user(ifr->ifr_data, &config,
810 			    sizeof(config)) ? -EFAULT : 0;
811 }
812 
813 /**
814  *  stmmac_hwtstamp_get - read hardware timestamping.
815  *  @dev: device pointer.
816  *  @ifr: An IOCTL-specific structure that can contain a pointer to
817  *  a proprietary structure used to pass information to the driver.
818  *  Description:
819  *  This function obtains the current hardware timestamping settings
820  *  as requested.
821  */
822 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
823 {
824 	struct stmmac_priv *priv = netdev_priv(dev);
825 	struct hwtstamp_config *config = &priv->tstamp_config;
826 
827 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
828 		return -EOPNOTSUPP;
829 
830 	return copy_to_user(ifr->ifr_data, config,
831 			    sizeof(*config)) ? -EFAULT : 0;
832 }
833 
834 /**
835  * stmmac_init_tstamp_counter - init hardware timestamping counter
836  * @priv: driver private structure
837  * @systime_flags: timestamping flags
838  * Description:
839  * Initialize hardware counter for packet timestamping.
840  * This is valid as long as the interface is open and not suspended.
841  * Will be rerun after resuming from suspend, in which case the timestamping
842  * flags updated by stmmac_hwtstamp_set() also need to be restored.
843  */
844 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
845 {
846 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
847 	struct timespec64 now;
848 	u32 sec_inc = 0;
849 	u64 temp = 0;
850 
851 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
852 		return -EOPNOTSUPP;
853 
854 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
855 	priv->systime_flags = systime_flags;
856 
857 	/* program Sub Second Increment reg */
858 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
859 					   priv->plat->clk_ptp_rate,
860 					   xmac, &sec_inc);
861 	temp = div_u64(1000000000ULL, sec_inc);
862 
863 	/* Store sub second increment for later use */
864 	priv->sub_second_inc = sec_inc;
865 
866 	/* Calculate the default addend value:
867 	 * formula is:
868 	 * addend = (2^32)/freq_div_ratio;
869 	 * where freq_div_ratio = 1e9ns/sec_inc
870 	 */
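	/* For example, with sec_inc = 20ns (a 50 MHz target counter) and a
	 * 62.5 MHz clk_ptp_rate, addend = 2^32 * 50/62.5, i.e. about
	 * 0xCCCCCCCC.
	 */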
871 	temp = (u64)(temp << 32);
872 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
873 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
874 
875 	/* initialize system time */
876 	ktime_get_real_ts64(&now);
877 
878 	/* lower 32 bits of tv_sec are safe until y2106 */
879 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
880 
881 	return 0;
882 }
883 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
884 
885 /**
886  * stmmac_init_ptp - init PTP
887  * @priv: driver private structure
888  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
889  * This is done by looking at the HW cap. register.
890  * This function also registers the ptp driver.
891  */
892 static int stmmac_init_ptp(struct stmmac_priv *priv)
893 {
894 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
895 	int ret;
896 
897 	if (priv->plat->ptp_clk_freq_config)
898 		priv->plat->ptp_clk_freq_config(priv);
899 
900 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
901 	if (ret)
902 		return ret;
903 
904 	priv->adv_ts = 0;
905 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
906 	if (xmac && priv->dma_cap.atime_stamp)
907 		priv->adv_ts = 1;
908 	/* Dwmac 3.x core with extend_desc can support adv_ts */
909 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
910 		priv->adv_ts = 1;
911 
912 	if (priv->dma_cap.time_stamp)
913 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
914 
915 	if (priv->adv_ts)
916 		netdev_info(priv->dev,
917 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
918 
919 	priv->hwts_tx_en = 0;
920 	priv->hwts_rx_en = 0;
921 
922 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
923 		stmmac_hwtstamp_correct_latency(priv, priv);
924 
925 	return 0;
926 }
927 
928 static void stmmac_release_ptp(struct stmmac_priv *priv)
929 {
930 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
931 	stmmac_ptp_unregister(priv);
932 }
933 
934 /**
935  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
936  *  @priv: driver private structure
937  *  @duplex: duplex passed to the next function
938  *  Description: It is used for configuring the flow control in all queues
939  */
940 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
941 {
942 	u32 tx_cnt = priv->plat->tx_queues_to_use;
943 
944 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
945 			priv->pause, tx_cnt);
946 }
947 
948 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
949 					 phy_interface_t interface)
950 {
951 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
952 
953 	/* Refresh the MAC-specific capabilities */
954 	stmmac_mac_update_caps(priv);
955 
956 	config->mac_capabilities = priv->hw->link.caps;
957 
958 	if (priv->plat->max_speed)
959 		phylink_limit_mac_speed(config, priv->plat->max_speed);
960 
961 	return config->mac_capabilities;
962 }
963 
964 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
965 						 phy_interface_t interface)
966 {
967 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
968 	struct phylink_pcs *pcs;
969 
970 	if (priv->plat->select_pcs) {
971 		pcs = priv->plat->select_pcs(priv, interface);
972 		if (!IS_ERR(pcs))
973 			return pcs;
974 	}
975 
976 	return NULL;
977 }
978 
979 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
980 			      const struct phylink_link_state *state)
981 {
982 	/* Nothing to do, xpcs_config() handles everything */
983 }
984 
985 static void stmmac_mac_link_down(struct phylink_config *config,
986 				 unsigned int mode, phy_interface_t interface)
987 {
988 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
989 
990 	stmmac_mac_set(priv, priv->ioaddr, false);
991 	if (priv->dma_cap.eee)
992 		stmmac_set_eee_pls(priv, priv->hw, false);
993 
994 	if (stmmac_fpe_supported(priv))
995 		stmmac_fpe_link_state_handle(priv, false);
996 }
997 
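/* phylink mac_link_up callback: program the MAC speed and duplex bits for
 * the negotiated link, resolve the pause configuration, re-enable the MAC
 * and, where supported, update the EEE PLS and FPE state.
 */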
998 static void stmmac_mac_link_up(struct phylink_config *config,
999 			       struct phy_device *phy,
1000 			       unsigned int mode, phy_interface_t interface,
1001 			       int speed, int duplex,
1002 			       bool tx_pause, bool rx_pause)
1003 {
1004 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1005 	u32 old_ctrl, ctrl;
1006 
1007 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1008 	    priv->plat->serdes_powerup)
1009 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1010 
1011 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1012 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1013 
1014 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1015 		switch (speed) {
1016 		case SPEED_10000:
1017 			ctrl |= priv->hw->link.xgmii.speed10000;
1018 			break;
1019 		case SPEED_5000:
1020 			ctrl |= priv->hw->link.xgmii.speed5000;
1021 			break;
1022 		case SPEED_2500:
1023 			ctrl |= priv->hw->link.xgmii.speed2500;
1024 			break;
1025 		default:
1026 			return;
1027 		}
1028 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1029 		switch (speed) {
1030 		case SPEED_100000:
1031 			ctrl |= priv->hw->link.xlgmii.speed100000;
1032 			break;
1033 		case SPEED_50000:
1034 			ctrl |= priv->hw->link.xlgmii.speed50000;
1035 			break;
1036 		case SPEED_40000:
1037 			ctrl |= priv->hw->link.xlgmii.speed40000;
1038 			break;
1039 		case SPEED_25000:
1040 			ctrl |= priv->hw->link.xlgmii.speed25000;
1041 			break;
1042 		case SPEED_10000:
1043 			ctrl |= priv->hw->link.xgmii.speed10000;
1044 			break;
1045 		case SPEED_2500:
1046 			ctrl |= priv->hw->link.speed2500;
1047 			break;
1048 		case SPEED_1000:
1049 			ctrl |= priv->hw->link.speed1000;
1050 			break;
1051 		default:
1052 			return;
1053 		}
1054 	} else {
1055 		switch (speed) {
1056 		case SPEED_2500:
1057 			ctrl |= priv->hw->link.speed2500;
1058 			break;
1059 		case SPEED_1000:
1060 			ctrl |= priv->hw->link.speed1000;
1061 			break;
1062 		case SPEED_100:
1063 			ctrl |= priv->hw->link.speed100;
1064 			break;
1065 		case SPEED_10:
1066 			ctrl |= priv->hw->link.speed10;
1067 			break;
1068 		default:
1069 			return;
1070 		}
1071 	}
1072 
1073 	priv->speed = speed;
1074 
1075 	if (priv->plat->fix_mac_speed)
1076 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1077 
1078 	if (!duplex)
1079 		ctrl &= ~priv->hw->link.duplex;
1080 	else
1081 		ctrl |= priv->hw->link.duplex;
1082 
1083 	/* Flow Control operation */
1084 	if (rx_pause && tx_pause)
1085 		priv->flow_ctrl = FLOW_AUTO;
1086 	else if (rx_pause && !tx_pause)
1087 		priv->flow_ctrl = FLOW_RX;
1088 	else if (!rx_pause && tx_pause)
1089 		priv->flow_ctrl = FLOW_TX;
1090 	else
1091 		priv->flow_ctrl = FLOW_OFF;
1092 
1093 	stmmac_mac_flow_ctrl(priv, duplex);
1094 
1095 	if (ctrl != old_ctrl)
1096 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1097 
1098 	stmmac_mac_set(priv, priv->ioaddr, true);
1099 	if (priv->dma_cap.eee)
1100 		stmmac_set_eee_pls(priv, priv->hw, true);
1101 
1102 	if (stmmac_fpe_supported(priv))
1103 		stmmac_fpe_link_state_handle(priv, true);
1104 
1105 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1106 		stmmac_hwtstamp_correct_latency(priv, priv);
1107 }
1108 
1109 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1110 {
1111 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1112 
1113 	stmmac_eee_init(priv, false);
1114 }
1115 
1116 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1117 				    bool tx_clk_stop)
1118 {
1119 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1120 
1121 	priv->tx_lpi_timer = timer;
1122 	stmmac_eee_init(priv, true);
1123 
1124 	return 0;
1125 }
1126 
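/* MAC-side phylink operations. PCS selection is delegated to the platform
 * via stmmac_mac_select_pcs() and LPI/EEE is driven through the
 * mac_{enable,disable}_tx_lpi hooks.
 */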
1127 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1128 	.mac_get_caps = stmmac_mac_get_caps,
1129 	.mac_select_pcs = stmmac_mac_select_pcs,
1130 	.mac_config = stmmac_mac_config,
1131 	.mac_link_down = stmmac_mac_link_down,
1132 	.mac_link_up = stmmac_mac_link_up,
1133 	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1134 	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1135 };
1136 
1137 /**
1138  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1139  * @priv: driver private structure
1140  * Description: this verifies whether the HW supports the Physical Coding
1141  * Sublayer (PCS) interface, which can be used when the MAC is
1142  * configured for the TBI, RTBI, or SGMII PHY interface.
1143  */
1144 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1145 {
1146 	int interface = priv->plat->mac_interface;
1147 
1148 	if (priv->dma_cap.pcs) {
1149 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1150 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1151 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1152 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1153 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1154 			priv->hw->pcs = STMMAC_PCS_RGMII;
1155 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1156 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1157 			priv->hw->pcs = STMMAC_PCS_SGMII;
1158 		}
1159 	}
1160 }
1161 
1162 /**
1163  * stmmac_init_phy - PHY initialization
1164  * @dev: net device structure
1165  * Description: it initializes the driver's PHY state, and attaches the PHY
1166  * to the MAC driver.
1167  *  Return value:
1168  *  0 on success
1169  */
1170 static int stmmac_init_phy(struct net_device *dev)
1171 {
1172 	struct stmmac_priv *priv = netdev_priv(dev);
1173 	struct fwnode_handle *phy_fwnode;
1174 	struct fwnode_handle *fwnode;
1175 	int ret;
1176 
1177 	if (!phylink_expects_phy(priv->phylink))
1178 		return 0;
1179 
1180 	fwnode = priv->plat->port_node;
1181 	if (!fwnode)
1182 		fwnode = dev_fwnode(priv->device);
1183 
1184 	if (fwnode)
1185 		phy_fwnode = fwnode_get_phy_node(fwnode);
1186 	else
1187 		phy_fwnode = NULL;
1188 
1189 	/* Some DT bindings do not set up the PHY handle. Let's try to
1190 	 * manually parse it
1191 	 */
1192 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1193 		int addr = priv->plat->phy_addr;
1194 		struct phy_device *phydev;
1195 
1196 		if (addr < 0) {
1197 			netdev_err(priv->dev, "no phy found\n");
1198 			return -ENODEV;
1199 		}
1200 
1201 		phydev = mdiobus_get_phy(priv->mii, addr);
1202 		if (!phydev) {
1203 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1204 			return -ENODEV;
1205 		}
1206 
1207 		ret = phylink_connect_phy(priv->phylink, phydev);
1208 	} else {
1209 		fwnode_handle_put(phy_fwnode);
1210 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1211 	}
1212 
1213 	if (ret == 0) {
1214 		struct ethtool_keee eee;
1215 
1216 		/* Configure phylib's copy of the LPI timer. Normally,
1217 		 * phylink_config.lpi_timer_default would do this, but there is
1218 		 * a chance that userspace could change the eee_timer setting
1219 		 * via sysfs before the first open. Thus, preserve existing
1220 		 * behaviour.
1221 		 */
1222 		if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1223 			eee.tx_lpi_timer = priv->tx_lpi_timer;
1224 			phylink_ethtool_set_eee(priv->phylink, &eee);
1225 		}
1226 	}
1227 
1228 	if (!priv->plat->pmt) {
1229 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1230 
1231 		phylink_ethtool_get_wol(priv->phylink, &wol);
1232 		device_set_wakeup_capable(priv->device, !!wol.supported);
1233 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1234 	}
1235 
1236 	return ret;
1237 }
1238 
1239 static int stmmac_phy_setup(struct stmmac_priv *priv)
1240 {
1241 	struct stmmac_mdio_bus_data *mdio_bus_data;
1242 	int mode = priv->plat->phy_interface;
1243 	struct fwnode_handle *fwnode;
1244 	struct phylink_pcs *pcs;
1245 	struct phylink *phylink;
1246 
1247 	priv->phylink_config.dev = &priv->dev->dev;
1248 	priv->phylink_config.type = PHYLINK_NETDEV;
1249 	priv->phylink_config.mac_managed_pm = true;
1250 
1251 	/* Stmmac always requires an RX clock for hardware initialization */
1252 	priv->phylink_config.mac_requires_rxc = true;
1253 
1254 	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1255 		priv->phylink_config.eee_rx_clk_stop_enable = true;
1256 
1257 	mdio_bus_data = priv->plat->mdio_bus_data;
1258 	if (mdio_bus_data)
1259 		priv->phylink_config.default_an_inband =
1260 			mdio_bus_data->default_an_inband;
1261 
1262 	/* Set the platform/firmware specified interface mode. Note, phylink
1263 	 * deals with the PHY interface mode, not the MAC interface mode.
1264 	 */
1265 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1266 
1267 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1268 	if (priv->hw->xpcs)
1269 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1270 	else
1271 		pcs = priv->hw->phylink_pcs;
1272 
1273 	if (pcs)
1274 		phy_interface_or(priv->phylink_config.supported_interfaces,
1275 				 priv->phylink_config.supported_interfaces,
1276 				 pcs->supported_interfaces);
1277 
1278 	if (priv->dma_cap.eee) {
1279 		/* Assume all supported interfaces also support LPI */
1280 		memcpy(priv->phylink_config.lpi_interfaces,
1281 		       priv->phylink_config.supported_interfaces,
1282 		       sizeof(priv->phylink_config.lpi_interfaces));
1283 
1284 		/* All full duplex speeds above 100Mbps are supported */
1285 		priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) |
1286 							MAC_100FD;
1287 		priv->phylink_config.lpi_timer_default = eee_timer * 1000;
1288 		priv->phylink_config.eee_enabled_default = true;
1289 	}
1290 
1291 	fwnode = priv->plat->port_node;
1292 	if (!fwnode)
1293 		fwnode = dev_fwnode(priv->device);
1294 
1295 	phylink = phylink_create(&priv->phylink_config, fwnode,
1296 				 mode, &stmmac_phylink_mac_ops);
1297 	if (IS_ERR(phylink))
1298 		return PTR_ERR(phylink);
1299 
1300 	priv->phylink = phylink;
1301 	return 0;
1302 }
1303 
1304 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1305 				    struct stmmac_dma_conf *dma_conf)
1306 {
1307 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1308 	unsigned int desc_size;
1309 	void *head_rx;
1310 	u32 queue;
1311 
1312 	/* Display RX rings */
1313 	for (queue = 0; queue < rx_cnt; queue++) {
1314 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1315 
1316 		pr_info("\tRX Queue %u rings\n", queue);
1317 
1318 		if (priv->extend_desc) {
1319 			head_rx = (void *)rx_q->dma_erx;
1320 			desc_size = sizeof(struct dma_extended_desc);
1321 		} else {
1322 			head_rx = (void *)rx_q->dma_rx;
1323 			desc_size = sizeof(struct dma_desc);
1324 		}
1325 
1326 		/* Display RX ring */
1327 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1328 				    rx_q->dma_rx_phy, desc_size);
1329 	}
1330 }
1331 
1332 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1333 				    struct stmmac_dma_conf *dma_conf)
1334 {
1335 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1336 	unsigned int desc_size;
1337 	void *head_tx;
1338 	u32 queue;
1339 
1340 	/* Display TX rings */
1341 	for (queue = 0; queue < tx_cnt; queue++) {
1342 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1343 
1344 		pr_info("\tTX Queue %d rings\n", queue);
1345 
1346 		if (priv->extend_desc) {
1347 			head_tx = (void *)tx_q->dma_etx;
1348 			desc_size = sizeof(struct dma_extended_desc);
1349 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1350 			head_tx = (void *)tx_q->dma_entx;
1351 			desc_size = sizeof(struct dma_edesc);
1352 		} else {
1353 			head_tx = (void *)tx_q->dma_tx;
1354 			desc_size = sizeof(struct dma_desc);
1355 		}
1356 
1357 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1358 				    tx_q->dma_tx_phy, desc_size);
1359 	}
1360 }
1361 
1362 static void stmmac_display_rings(struct stmmac_priv *priv,
1363 				 struct stmmac_dma_conf *dma_conf)
1364 {
1365 	/* Display RX ring */
1366 	stmmac_display_rx_rings(priv, dma_conf);
1367 
1368 	/* Display TX ring */
1369 	stmmac_display_tx_rings(priv, dma_conf);
1370 }
1371 
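/* Headroom reserved in front of every RX buffer: XDP_PACKET_HEADROOM when an
 * XDP program is attached (so it can push headers), NET_SKB_PAD otherwise.
 */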
1372 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1373 {
1374 	if (stmmac_xdp_is_enabled(priv))
1375 		return XDP_PACKET_HEADROOM;
1376 
1377 	return NET_SKB_PAD;
1378 }
1379 
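/* Map the MTU to one of the supported DMA buffer sizes
 * (default 1536 bytes, 2 KiB, 4 KiB, 8 KiB or 16 KiB).
 */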
1380 static int stmmac_set_bfsize(int mtu, int bufsize)
1381 {
1382 	int ret = bufsize;
1383 
1384 	if (mtu >= BUF_SIZE_8KiB)
1385 		ret = BUF_SIZE_16KiB;
1386 	else if (mtu >= BUF_SIZE_4KiB)
1387 		ret = BUF_SIZE_8KiB;
1388 	else if (mtu >= BUF_SIZE_2KiB)
1389 		ret = BUF_SIZE_4KiB;
1390 	else if (mtu > DEFAULT_BUFSIZE)
1391 		ret = BUF_SIZE_2KiB;
1392 	else
1393 		ret = DEFAULT_BUFSIZE;
1394 
1395 	return ret;
1396 }
1397 
1398 /**
1399  * stmmac_clear_rx_descriptors - clear RX descriptors
1400  * @priv: driver private structure
1401  * @dma_conf: structure to take the dma data
1402  * @queue: RX queue index
1403  * Description: this function is called to clear the RX descriptors
1404  * whether basic or extended descriptors are used.
1405  */
1406 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1407 					struct stmmac_dma_conf *dma_conf,
1408 					u32 queue)
1409 {
1410 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1411 	int i;
1412 
1413 	/* Clear the RX descriptors */
1414 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1415 		if (priv->extend_desc)
1416 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1417 					priv->use_riwt, priv->mode,
1418 					(i == dma_conf->dma_rx_size - 1),
1419 					dma_conf->dma_buf_sz);
1420 		else
1421 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1422 					priv->use_riwt, priv->mode,
1423 					(i == dma_conf->dma_rx_size - 1),
1424 					dma_conf->dma_buf_sz);
1425 }
1426 
1427 /**
1428  * stmmac_clear_tx_descriptors - clear tx descriptors
1429  * @priv: driver private structure
1430  * @dma_conf: structure to take the dma data
1431  * @queue: TX queue index.
1432  * Description: this function is called to clear the TX descriptors
1433  * whether basic or extended descriptors are used.
1434  */
1435 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1436 					struct stmmac_dma_conf *dma_conf,
1437 					u32 queue)
1438 {
1439 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1440 	int i;
1441 
1442 	/* Clear the TX descriptors */
1443 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1444 		int last = (i == (dma_conf->dma_tx_size - 1));
1445 		struct dma_desc *p;
1446 
1447 		if (priv->extend_desc)
1448 			p = &tx_q->dma_etx[i].basic;
1449 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1450 			p = &tx_q->dma_entx[i].basic;
1451 		else
1452 			p = &tx_q->dma_tx[i];
1453 
1454 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1455 	}
1456 }
1457 
1458 /**
1459  * stmmac_clear_descriptors - clear descriptors
1460  * @priv: driver private structure
1461  * @dma_conf: structure to take the dma data
1462  * Description: this function is called to clear the TX and RX descriptors
1463  * whether basic or extended descriptors are used.
1464  */
1465 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1466 				     struct stmmac_dma_conf *dma_conf)
1467 {
1468 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1469 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1470 	u32 queue;
1471 
1472 	/* Clear the RX descriptors */
1473 	for (queue = 0; queue < rx_queue_cnt; queue++)
1474 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1475 
1476 	/* Clear the TX descriptors */
1477 	for (queue = 0; queue < tx_queue_cnt; queue++)
1478 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1479 }
1480 
1481 /**
1482  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1483  * @priv: driver private structure
1484  * @dma_conf: structure to take the dma data
1485  * @p: descriptor pointer
1486  * @i: descriptor index
1487  * @flags: gfp flag
1488  * @queue: RX queue index
1489  * Description: this function is called to allocate a receive buffer, perform
1490  * the DMA mapping and init the descriptor.
1491  */
1492 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1493 				  struct stmmac_dma_conf *dma_conf,
1494 				  struct dma_desc *p,
1495 				  int i, gfp_t flags, u32 queue)
1496 {
1497 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1498 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1499 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1500 
1501 	if (priv->dma_cap.host_dma_width <= 32)
1502 		gfp |= GFP_DMA32;
1503 
1504 	if (!buf->page) {
1505 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1506 		if (!buf->page)
1507 			return -ENOMEM;
1508 		buf->page_offset = stmmac_rx_offset(priv);
1509 	}
1510 
1511 	if (priv->sph && !buf->sec_page) {
1512 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1513 		if (!buf->sec_page)
1514 			return -ENOMEM;
1515 
1516 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1517 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1518 	} else {
1519 		buf->sec_page = NULL;
1520 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1521 	}
1522 
1523 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1524 
1525 	stmmac_set_desc_addr(priv, p, buf->addr);
1526 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1527 		stmmac_init_desc3(priv, p);
1528 
1529 	return 0;
1530 }
1531 
1532 /**
1533  * stmmac_free_rx_buffer - free an RX dma buffer
1534  * @priv: private structure
1535  * @rx_q: RX queue
1536  * @i: buffer index.
1537  */
1538 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1539 				  struct stmmac_rx_queue *rx_q,
1540 				  int i)
1541 {
1542 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1543 
1544 	if (buf->page)
1545 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1546 	buf->page = NULL;
1547 
1548 	if (buf->sec_page)
1549 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1550 	buf->sec_page = NULL;
1551 }
1552 
1553 /**
1554  * stmmac_free_tx_buffer - free a TX dma buffer
1555  * @priv: private structure
1556  * @dma_conf: structure to take the dma data
1557  * @queue: TX queue index
1558  * @i: buffer index.
1559  */
1560 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1561 				  struct stmmac_dma_conf *dma_conf,
1562 				  u32 queue, int i)
1563 {
1564 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1565 
1566 	if (tx_q->tx_skbuff_dma[i].buf &&
1567 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1568 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1569 			dma_unmap_page(priv->device,
1570 				       tx_q->tx_skbuff_dma[i].buf,
1571 				       tx_q->tx_skbuff_dma[i].len,
1572 				       DMA_TO_DEVICE);
1573 		else
1574 			dma_unmap_single(priv->device,
1575 					 tx_q->tx_skbuff_dma[i].buf,
1576 					 tx_q->tx_skbuff_dma[i].len,
1577 					 DMA_TO_DEVICE);
1578 	}
1579 
1580 	if (tx_q->xdpf[i] &&
1581 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1582 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1583 		xdp_return_frame(tx_q->xdpf[i]);
1584 		tx_q->xdpf[i] = NULL;
1585 	}
1586 
1587 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1588 		tx_q->xsk_frames_done++;
1589 
1590 	if (tx_q->tx_skbuff[i] &&
1591 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1592 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1593 		tx_q->tx_skbuff[i] = NULL;
1594 	}
1595 
1596 	tx_q->tx_skbuff_dma[i].buf = 0;
1597 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1598 }
1599 
1600 /**
1601  * dma_free_rx_skbufs - free RX dma buffers
1602  * @priv: private structure
1603  * @dma_conf: structure to take the dma data
1604  * @queue: RX queue index
1605  */
1606 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1607 			       struct stmmac_dma_conf *dma_conf,
1608 			       u32 queue)
1609 {
1610 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1611 	int i;
1612 
1613 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1614 		stmmac_free_rx_buffer(priv, rx_q, i);
1615 }
1616 
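/**
 * stmmac_alloc_rx_buffers - allocate page_pool backed RX buffers
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag
 * Description: back every descriptor of the RX ring with a buffer taken
 * from the queue's page_pool and record how many were allocated.
 */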
1617 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1618 				   struct stmmac_dma_conf *dma_conf,
1619 				   u32 queue, gfp_t flags)
1620 {
1621 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1622 	int i;
1623 
1624 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1625 		struct dma_desc *p;
1626 		int ret;
1627 
1628 		if (priv->extend_desc)
1629 			p = &((rx_q->dma_erx + i)->basic);
1630 		else
1631 			p = rx_q->dma_rx + i;
1632 
1633 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1634 					     queue);
1635 		if (ret)
1636 			return ret;
1637 
1638 		rx_q->buf_alloc_num++;
1639 	}
1640 
1641 	return 0;
1642 }
1643 
1644 /**
1645  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1646  * @priv: private structure
1647  * @dma_conf: structure to take the dma data
1648  * @queue: RX queue index
1649  */
1650 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1651 				struct stmmac_dma_conf *dma_conf,
1652 				u32 queue)
1653 {
1654 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1655 	int i;
1656 
1657 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1658 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1659 
1660 		if (!buf->xdp)
1661 			continue;
1662 
1663 		xsk_buff_free(buf->xdp);
1664 		buf->xdp = NULL;
1665 	}
1666 }
1667 
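/**
 * stmmac_alloc_rx_buffers_zc - allocate RX buffers from the XSK pool
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: zero-copy counterpart of stmmac_alloc_rx_buffers(); every
 * descriptor is backed by an xdp_buff taken from the queue's AF_XDP
 * buffer pool instead of the page_pool.
 */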
1668 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1669 				      struct stmmac_dma_conf *dma_conf,
1670 				      u32 queue)
1671 {
1672 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1673 	int i;
1674 
1675 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1676 	 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1677 	 * use this macro to make sure there are no size violations.
1678 	 */
1679 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1680 
1681 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1682 		struct stmmac_rx_buffer *buf;
1683 		dma_addr_t dma_addr;
1684 		struct dma_desc *p;
1685 
1686 		if (priv->extend_desc)
1687 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1688 		else
1689 			p = rx_q->dma_rx + i;
1690 
1691 		buf = &rx_q->buf_pool[i];
1692 
1693 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1694 		if (!buf->xdp)
1695 			return -ENOMEM;
1696 
1697 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1698 		stmmac_set_desc_addr(priv, p, dma_addr);
1699 		rx_q->buf_alloc_num++;
1700 	}
1701 
1702 	return 0;
1703 }
1704 
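/* Return the AF_XDP buffer pool bound to @queue, or NULL when XDP is not
 * enabled or the queue is not in zero-copy mode.
 */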
1705 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1706 {
1707 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1708 		return NULL;
1709 
1710 	return xsk_get_pool_from_qid(priv->dev, queue);
1711 }
1712 
1713 /**
1714  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1715  * @priv: driver private structure
1716  * @dma_conf: structure to take the dma data
1717  * @queue: RX queue index
1718  * @flags: gfp flag.
1719  * Description: this function initializes the DMA RX descriptors
1720  * and allocates the socket buffers. It supports the chained and ring
1721  * modes.
1722  */
1723 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1724 				    struct stmmac_dma_conf *dma_conf,
1725 				    u32 queue, gfp_t flags)
1726 {
1727 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1728 	int ret;
1729 
1730 	netif_dbg(priv, probe, priv->dev,
1731 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1732 		  (u32)rx_q->dma_rx_phy);
1733 
1734 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1735 
1736 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1737 
1738 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1739 
1740 	if (rx_q->xsk_pool) {
1741 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1742 						   MEM_TYPE_XSK_BUFF_POOL,
1743 						   NULL));
1744 		netdev_info(priv->dev,
1745 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1746 			    rx_q->queue_index);
1747 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1748 	} else {
1749 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1750 						   MEM_TYPE_PAGE_POOL,
1751 						   rx_q->page_pool));
1752 		netdev_info(priv->dev,
1753 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1754 			    rx_q->queue_index);
1755 	}
1756 
1757 	if (rx_q->xsk_pool) {
1758 		/* The RX XDP ZC buffer pool may not be populated, e.g. when
1759 		 * xdpsock runs in TX-only mode.
1760 		 */
1761 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1762 	} else {
1763 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1764 		if (ret < 0)
1765 			return -ENOMEM;
1766 	}
1767 
1768 	/* Setup the chained descriptor addresses */
1769 	if (priv->mode == STMMAC_CHAIN_MODE) {
1770 		if (priv->extend_desc)
1771 			stmmac_mode_init(priv, rx_q->dma_erx,
1772 					 rx_q->dma_rx_phy,
1773 					 dma_conf->dma_rx_size, 1);
1774 		else
1775 			stmmac_mode_init(priv, rx_q->dma_rx,
1776 					 rx_q->dma_rx_phy,
1777 					 dma_conf->dma_rx_size, 0);
1778 	}
1779 
1780 	return 0;
1781 }
1782 
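/**
 * init_dma_rx_desc_rings - init the RX descriptor rings of all queues
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: initialize the RX ring of every queue in use; on failure,
 * the buffers of the queues initialized so far are released again.
 */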
1783 static int init_dma_rx_desc_rings(struct net_device *dev,
1784 				  struct stmmac_dma_conf *dma_conf,
1785 				  gfp_t flags)
1786 {
1787 	struct stmmac_priv *priv = netdev_priv(dev);
1788 	u32 rx_count = priv->plat->rx_queues_to_use;
1789 	int queue;
1790 	int ret;
1791 
1792 	/* RX INITIALIZATION */
1793 	netif_dbg(priv, probe, priv->dev,
1794 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1795 
1796 	for (queue = 0; queue < rx_count; queue++) {
1797 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1798 		if (ret)
1799 			goto err_init_rx_buffers;
1800 	}
1801 
1802 	return 0;
1803 
1804 err_init_rx_buffers:
1805 	while (queue >= 0) {
1806 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1807 
1808 		if (rx_q->xsk_pool)
1809 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1810 		else
1811 			dma_free_rx_skbufs(priv, dma_conf, queue);
1812 
1813 		rx_q->buf_alloc_num = 0;
1814 		rx_q->xsk_pool = NULL;
1815 
1816 		queue--;
1817 	}
1818 
1819 	return ret;
1820 }
1821 
1822 /**
1823  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1824  * @priv: driver private structure
1825  * @dma_conf: structure to take the dma data
1826  * @queue: TX queue index
1827  * Description: this function initializes the DMA TX descriptors and
1828  * clears the per-entry TX bookkeeping. It supports the chained and ring
1829  * modes.
1830  */
1831 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1832 				    struct stmmac_dma_conf *dma_conf,
1833 				    u32 queue)
1834 {
1835 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1836 	int i;
1837 
1838 	netif_dbg(priv, probe, priv->dev,
1839 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1840 		  (u32)tx_q->dma_tx_phy);
1841 
1842 	/* Setup the chained descriptor addresses */
1843 	if (priv->mode == STMMAC_CHAIN_MODE) {
1844 		if (priv->extend_desc)
1845 			stmmac_mode_init(priv, tx_q->dma_etx,
1846 					 tx_q->dma_tx_phy,
1847 					 dma_conf->dma_tx_size, 1);
1848 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1849 			stmmac_mode_init(priv, tx_q->dma_tx,
1850 					 tx_q->dma_tx_phy,
1851 					 dma_conf->dma_tx_size, 0);
1852 	}
1853 
1854 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1855 
1856 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1857 		struct dma_desc *p;
1858 
1859 		if (priv->extend_desc)
1860 			p = &((tx_q->dma_etx + i)->basic);
1861 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1862 			p = &((tx_q->dma_entx + i)->basic);
1863 		else
1864 			p = tx_q->dma_tx + i;
1865 
1866 		stmmac_clear_desc(priv, p);
1867 
1868 		tx_q->tx_skbuff_dma[i].buf = 0;
1869 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1870 		tx_q->tx_skbuff_dma[i].len = 0;
1871 		tx_q->tx_skbuff_dma[i].last_segment = false;
1872 		tx_q->tx_skbuff[i] = NULL;
1873 	}
1874 
1875 	return 0;
1876 }
1877 
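/**
 * init_dma_tx_desc_rings - init the TX descriptor rings of all queues
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: initialize the TX descriptor ring of every TX queue in use.
 */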
1878 static int init_dma_tx_desc_rings(struct net_device *dev,
1879 				  struct stmmac_dma_conf *dma_conf)
1880 {
1881 	struct stmmac_priv *priv = netdev_priv(dev);
1882 	u32 tx_queue_cnt;
1883 	u32 queue;
1884 
1885 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1886 
1887 	for (queue = 0; queue < tx_queue_cnt; queue++)
1888 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1889 
1890 	return 0;
1891 }
1892 
1893 /**
1894  * init_dma_desc_rings - init the RX/TX descriptor rings
1895  * @dev: net device structure
1896  * @dma_conf: structure to take the dma data
1897  * @flags: gfp flag.
1898  * Description: this function initializes the DMA RX/TX descriptors
1899  * and allocates the socket buffers. It supports the chained and ring
1900  * modes.
1901  */
1902 static int init_dma_desc_rings(struct net_device *dev,
1903 			       struct stmmac_dma_conf *dma_conf,
1904 			       gfp_t flags)
1905 {
1906 	struct stmmac_priv *priv = netdev_priv(dev);
1907 	int ret;
1908 
1909 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1910 	if (ret)
1911 		return ret;
1912 
1913 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1914 
1915 	stmmac_clear_descriptors(priv, dma_conf);
1916 
1917 	if (netif_msg_hw(priv))
1918 		stmmac_display_rings(priv, dma_conf);
1919 
1920 	return ret;
1921 }
1922 
1923 /**
1924  * dma_free_tx_skbufs - free TX dma buffers
1925  * @priv: private structure
1926  * @dma_conf: structure to take the dma data
1927  * @queue: TX queue index
1928  */
1929 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1930 			       struct stmmac_dma_conf *dma_conf,
1931 			       u32 queue)
1932 {
1933 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1934 	int i;
1935 
1936 	tx_q->xsk_frames_done = 0;
1937 
1938 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1939 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1940 
1941 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1942 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1943 		tx_q->xsk_frames_done = 0;
1944 		tx_q->xsk_pool = NULL;
1945 	}
1946 }
1947 
1948 /**
1949  * stmmac_free_tx_skbufs - free TX skb buffers
1950  * @priv: private structure
1951  */
1952 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1953 {
1954 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1955 	u32 queue;
1956 
1957 	for (queue = 0; queue < tx_queue_cnt; queue++)
1958 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1959 }
1960 
1961 /**
1962  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1963  * @priv: private structure
1964  * @dma_conf: structure to take the dma data
1965  * @queue: RX queue index
1966  */
1967 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1968 					 struct stmmac_dma_conf *dma_conf,
1969 					 u32 queue)
1970 {
1971 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1972 
1973 	/* Release the DMA RX socket buffers */
1974 	if (rx_q->xsk_pool)
1975 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1976 	else
1977 		dma_free_rx_skbufs(priv, dma_conf, queue);
1978 
1979 	rx_q->buf_alloc_num = 0;
1980 	rx_q->xsk_pool = NULL;
1981 
1982 	/* Free DMA regions of consistent memory previously allocated */
1983 	if (!priv->extend_desc)
1984 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1985 				  sizeof(struct dma_desc),
1986 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1987 	else
1988 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1989 				  sizeof(struct dma_extended_desc),
1990 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1991 
1992 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1993 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1994 
1995 	kfree(rx_q->buf_pool);
1996 	if (rx_q->page_pool)
1997 		page_pool_destroy(rx_q->page_pool);
1998 }
1999 
2000 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2001 				       struct stmmac_dma_conf *dma_conf)
2002 {
2003 	u32 rx_count = priv->plat->rx_queues_to_use;
2004 	u32 queue;
2005 
2006 	/* Free RX queue resources */
2007 	for (queue = 0; queue < rx_count; queue++)
2008 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
2009 }
2010 
2011 /**
2012  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2013  * @priv: private structure
2014  * @dma_conf: structure to take the dma data
2015  * @queue: TX queue index
2016  */
2017 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2018 					 struct stmmac_dma_conf *dma_conf,
2019 					 u32 queue)
2020 {
2021 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2022 	size_t size;
2023 	void *addr;
2024 
2025 	/* Release the DMA TX socket buffers */
2026 	dma_free_tx_skbufs(priv, dma_conf, queue);
2027 
2028 	if (priv->extend_desc) {
2029 		size = sizeof(struct dma_extended_desc);
2030 		addr = tx_q->dma_etx;
2031 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2032 		size = sizeof(struct dma_edesc);
2033 		addr = tx_q->dma_entx;
2034 	} else {
2035 		size = sizeof(struct dma_desc);
2036 		addr = tx_q->dma_tx;
2037 	}
2038 
2039 	size *= dma_conf->dma_tx_size;
2040 
2041 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2042 
2043 	kfree(tx_q->tx_skbuff_dma);
2044 	kfree(tx_q->tx_skbuff);
2045 }
2046 
2047 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2048 				       struct stmmac_dma_conf *dma_conf)
2049 {
2050 	u32 tx_count = priv->plat->tx_queues_to_use;
2051 	u32 queue;
2052 
2053 	/* Free TX queue resources */
2054 	for (queue = 0; queue < tx_count; queue++)
2055 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2056 }
2057 
2058 /**
2059  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2060  * @priv: private structure
2061  * @dma_conf: structure to take the dma data
2062  * @queue: RX queue index
2063  * Description: according to which descriptor can be used (extended or basic)
2064  * this function allocates the resources for the RX path of one queue: the
2065  * page pool, the buffer bookkeeping array, the DMA descriptor memory and
2066  * the XDP RX queue info.
2067  */
2068 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2069 					 struct stmmac_dma_conf *dma_conf,
2070 					 u32 queue)
2071 {
2072 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2073 	struct stmmac_channel *ch = &priv->channel[queue];
2074 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2075 	struct page_pool_params pp_params = { 0 };
2076 	unsigned int dma_buf_sz_pad, num_pages;
2077 	unsigned int napi_id;
2078 	int ret;
2079 
2080 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2081 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2082 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2083 
2084 	rx_q->queue_index = queue;
2085 	rx_q->priv_data = priv;
2086 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2087 
2088 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2089 	pp_params.pool_size = dma_conf->dma_rx_size;
2090 	pp_params.order = order_base_2(num_pages);
2091 	pp_params.nid = dev_to_node(priv->device);
2092 	pp_params.dev = priv->device;
2093 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2094 	pp_params.offset = stmmac_rx_offset(priv);
2095 	pp_params.max_len = dma_conf->dma_buf_sz;
2096 
2097 	rx_q->page_pool = page_pool_create(&pp_params);
2098 	if (IS_ERR(rx_q->page_pool)) {
2099 		ret = PTR_ERR(rx_q->page_pool);
2100 		rx_q->page_pool = NULL;
2101 		return ret;
2102 	}
2103 
2104 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2105 				 sizeof(*rx_q->buf_pool),
2106 				 GFP_KERNEL);
2107 	if (!rx_q->buf_pool)
2108 		return -ENOMEM;
2109 
2110 	if (priv->extend_desc) {
2111 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2112 						   dma_conf->dma_rx_size *
2113 						   sizeof(struct dma_extended_desc),
2114 						   &rx_q->dma_rx_phy,
2115 						   GFP_KERNEL);
2116 		if (!rx_q->dma_erx)
2117 			return -ENOMEM;
2118 
2119 	} else {
2120 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2121 						  dma_conf->dma_rx_size *
2122 						  sizeof(struct dma_desc),
2123 						  &rx_q->dma_rx_phy,
2124 						  GFP_KERNEL);
2125 		if (!rx_q->dma_rx)
2126 			return -ENOMEM;
2127 	}
2128 
2129 	if (stmmac_xdp_is_enabled(priv) &&
2130 	    test_bit(queue, priv->af_xdp_zc_qps))
2131 		napi_id = ch->rxtx_napi.napi_id;
2132 	else
2133 		napi_id = ch->rx_napi.napi_id;
2134 
2135 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2136 			       rx_q->queue_index,
2137 			       napi_id);
2138 	if (ret) {
2139 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2140 		return -EINVAL;
2141 	}
2142 
2143 	return 0;
2144 }
2145 
2146 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2147 				       struct stmmac_dma_conf *dma_conf)
2148 {
2149 	u32 rx_count = priv->plat->rx_queues_to_use;
2150 	u32 queue;
2151 	int ret;
2152 
2153 	/* RX queues buffers and DMA */
2154 	for (queue = 0; queue < rx_count; queue++) {
2155 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2156 		if (ret)
2157 			goto err_dma;
2158 	}
2159 
2160 	return 0;
2161 
2162 err_dma:
2163 	free_dma_rx_desc_resources(priv, dma_conf);
2164 
2165 	return ret;
2166 }
2167 
2168 /**
2169  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2170  * @priv: private structure
2171  * @dma_conf: structure to take the dma data
2172  * @queue: TX queue index
2173  * Description: according to which descriptor can be used (extended, basic
2174  * or enhanced for TBS) this function allocates the resources for the TX
2175  * path of one queue: the tx_skbuff bookkeeping arrays and the DMA
2176  * descriptor memory.
2177  */
2178 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2179 					 struct stmmac_dma_conf *dma_conf,
2180 					 u32 queue)
2181 {
2182 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2183 	size_t size;
2184 	void *addr;
2185 
2186 	tx_q->queue_index = queue;
2187 	tx_q->priv_data = priv;
2188 
2189 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2190 				      sizeof(*tx_q->tx_skbuff_dma),
2191 				      GFP_KERNEL);
2192 	if (!tx_q->tx_skbuff_dma)
2193 		return -ENOMEM;
2194 
2195 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2196 				  sizeof(struct sk_buff *),
2197 				  GFP_KERNEL);
2198 	if (!tx_q->tx_skbuff)
2199 		return -ENOMEM;
2200 
2201 	if (priv->extend_desc)
2202 		size = sizeof(struct dma_extended_desc);
2203 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2204 		size = sizeof(struct dma_edesc);
2205 	else
2206 		size = sizeof(struct dma_desc);
2207 
2208 	size *= dma_conf->dma_tx_size;
2209 
2210 	addr = dma_alloc_coherent(priv->device, size,
2211 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2212 	if (!addr)
2213 		return -ENOMEM;
2214 
2215 	if (priv->extend_desc)
2216 		tx_q->dma_etx = addr;
2217 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2218 		tx_q->dma_entx = addr;
2219 	else
2220 		tx_q->dma_tx = addr;
2221 
2222 	return 0;
2223 }
2224 
2225 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2226 				       struct stmmac_dma_conf *dma_conf)
2227 {
2228 	u32 tx_count = priv->plat->tx_queues_to_use;
2229 	u32 queue;
2230 	int ret;
2231 
2232 	/* TX queues buffers and DMA */
2233 	for (queue = 0; queue < tx_count; queue++) {
2234 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2235 		if (ret)
2236 			goto err_dma;
2237 	}
2238 
2239 	return 0;
2240 
2241 err_dma:
2242 	free_dma_tx_desc_resources(priv, dma_conf);
2243 	return ret;
2244 }
2245 
2246 /**
2247  * alloc_dma_desc_resources - alloc TX/RX resources.
2248  * @priv: private structure
2249  * @dma_conf: structure to take the dma data
2250  * Description: according to which descriptor can be used (extended or basic)
2251  * this function allocates the resources for the TX and RX paths. In case of
2252  * reception, for example, it pre-allocates the RX socket buffers in order to
2253  * allow the zero-copy mechanism.
2254  */
2255 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2256 				    struct stmmac_dma_conf *dma_conf)
2257 {
2258 	/* RX Allocation */
2259 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2260 
2261 	if (ret)
2262 		return ret;
2263 
2264 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2265 
2266 	return ret;
2267 }
2268 
2269 /**
2270  * free_dma_desc_resources - free dma desc resources
2271  * @priv: private structure
2272  * @dma_conf: structure to take the dma data
2273  */
2274 static void free_dma_desc_resources(struct stmmac_priv *priv,
2275 				    struct stmmac_dma_conf *dma_conf)
2276 {
2277 	/* Release the DMA TX socket buffers */
2278 	free_dma_tx_desc_resources(priv, dma_conf);
2279 
2280 	/* Release the DMA RX socket buffers later
2281 	 * to ensure all pending XDP_TX buffers are returned.
2282 	 */
2283 	free_dma_rx_desc_resources(priv, dma_conf);
2284 }
2285 
2286 /**
2287  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2288  *  @priv: driver private structure
2289  *  Description: It is used for enabling the rx queues in the MAC
2290  */
2291 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2292 {
2293 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2294 	int queue;
2295 	u8 mode;
2296 
2297 	for (queue = 0; queue < rx_queues_count; queue++) {
2298 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2299 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2300 	}
2301 }
2302 
2303 /**
2304  * stmmac_start_rx_dma - start RX DMA channel
2305  * @priv: driver private structure
2306  * @chan: RX channel index
2307  * Description:
2308  * This starts an RX DMA channel
2309  */
2310 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2311 {
2312 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2313 	stmmac_start_rx(priv, priv->ioaddr, chan);
2314 }
2315 
2316 /**
2317  * stmmac_start_tx_dma - start TX DMA channel
2318  * @priv: driver private structure
2319  * @chan: TX channel index
2320  * Description:
2321  * This starts a TX DMA channel
2322  */
2323 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2324 {
2325 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2326 	stmmac_start_tx(priv, priv->ioaddr, chan);
2327 }
2328 
2329 /**
2330  * stmmac_stop_rx_dma - stop RX DMA channel
2331  * @priv: driver private structure
2332  * @chan: RX channel index
2333  * Description:
2334  * This stops an RX DMA channel
2335  */
2336 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2337 {
2338 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2339 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2340 }
2341 
2342 /**
2343  * stmmac_stop_tx_dma - stop TX DMA channel
2344  * @priv: driver private structure
2345  * @chan: TX channel index
2346  * Description:
2347  * This stops a TX DMA channel
2348  */
2349 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2350 {
2351 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2352 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2353 }
2354 
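/* Enable the RX and TX DMA interrupts of every channel, taking the
 * per-channel lock shared with the NAPI handlers.
 */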
2355 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2356 {
2357 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2358 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2359 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2360 	u32 chan;
2361 
2362 	for (chan = 0; chan < dma_csr_ch; chan++) {
2363 		struct stmmac_channel *ch = &priv->channel[chan];
2364 		unsigned long flags;
2365 
2366 		spin_lock_irqsave(&ch->lock, flags);
2367 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2368 		spin_unlock_irqrestore(&ch->lock, flags);
2369 	}
2370 }
2371 
2372 /**
2373  * stmmac_start_all_dma - start all RX and TX DMA channels
2374  * @priv: driver private structure
2375  * Description:
2376  * This starts all the RX and TX DMA channels
2377  */
2378 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2379 {
2380 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2381 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2382 	u32 chan = 0;
2383 
2384 	for (chan = 0; chan < rx_channels_count; chan++)
2385 		stmmac_start_rx_dma(priv, chan);
2386 
2387 	for (chan = 0; chan < tx_channels_count; chan++)
2388 		stmmac_start_tx_dma(priv, chan);
2389 }
2390 
2391 /**
2392  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2393  * @priv: driver private structure
2394  * Description:
2395  * This stops the RX and TX DMA channels
2396  */
2397 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2398 {
2399 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2400 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2401 	u32 chan = 0;
2402 
2403 	for (chan = 0; chan < rx_channels_count; chan++)
2404 		stmmac_stop_rx_dma(priv, chan);
2405 
2406 	for (chan = 0; chan < tx_channels_count; chan++)
2407 		stmmac_stop_tx_dma(priv, chan);
2408 }
2409 
2410 /**
2411  *  stmmac_dma_operation_mode - HW DMA operation mode
2412  *  @priv: driver private structure
2413  *  Description: it is used for configuring the DMA operation mode register in
2414  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2415  */
2416 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2417 {
2418 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2419 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2420 	int rxfifosz = priv->plat->rx_fifo_size;
2421 	int txfifosz = priv->plat->tx_fifo_size;
2422 	u32 txmode = 0;
2423 	u32 rxmode = 0;
2424 	u32 chan = 0;
2425 	u8 qmode = 0;
2426 
2427 	if (rxfifosz == 0)
2428 		rxfifosz = priv->dma_cap.rx_fifo_size;
2429 	if (txfifosz == 0)
2430 		txfifosz = priv->dma_cap.tx_fifo_size;
2431 
2432 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2433 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2434 		rxfifosz /= rx_channels_count;
2435 		txfifosz /= tx_channels_count;
2436 	}
2437 
2438 	if (priv->plat->force_thresh_dma_mode) {
2439 		txmode = tc;
2440 		rxmode = tc;
2441 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2442 		/*
2443 		 * In case of GMAC, SF mode can be enabled
2444 		 * to perform the TX COE in HW. This depends on:
2445 		 * 1) TX COE being actually supported
2446 		 * 2) there being no buggy Jumbo frame support
2447 		 *    that requires not inserting the csum in the TDES.
2448 		 */
2449 		txmode = SF_DMA_MODE;
2450 		rxmode = SF_DMA_MODE;
2451 		priv->xstats.threshold = SF_DMA_MODE;
2452 	} else {
2453 		txmode = tc;
2454 		rxmode = SF_DMA_MODE;
2455 	}
2456 
2457 	/* configure all channels */
2458 	for (chan = 0; chan < rx_channels_count; chan++) {
2459 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2460 		u32 buf_size;
2461 
2462 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2463 
2464 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2465 				rxfifosz, qmode);
2466 
2467 		if (rx_q->xsk_pool) {
2468 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2469 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2470 					      buf_size,
2471 					      chan);
2472 		} else {
2473 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2474 					      priv->dma_conf.dma_buf_sz,
2475 					      chan);
2476 		}
2477 	}
2478 
2479 	for (chan = 0; chan < tx_channels_count; chan++) {
2480 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2481 
2482 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2483 				txfifosz, qmode);
2484 	}
2485 }
2486 
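/* XSK TX metadata callbacks: request a hardware TX timestamp on a
 * descriptor (forcing the interrupt-on-completion bit) and read the
 * timestamp back on completion, corrected by the CDC error adjustment.
 */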
2487 static void stmmac_xsk_request_timestamp(void *_priv)
2488 {
2489 	struct stmmac_metadata_request *meta_req = _priv;
2490 
2491 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2492 	*meta_req->set_ic = true;
2493 }
2494 
2495 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2496 {
2497 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2498 	struct stmmac_priv *priv = tx_compl->priv;
2499 	struct dma_desc *desc = tx_compl->desc;
2500 	bool found = false;
2501 	u64 ns = 0;
2502 
2503 	if (!priv->hwts_tx_en)
2504 		return 0;
2505 
2506 	/* check tx tstamp status */
2507 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2508 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2509 		found = true;
2510 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2511 		found = true;
2512 	}
2513 
2514 	if (found) {
2515 		ns -= priv->plat->cdc_error_adj;
2516 		return ns_to_ktime(ns);
2517 	}
2518 
2519 	return 0;
2520 }
2521 
2522 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2523 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2524 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2525 };
2526 
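/**
 * stmmac_xdp_xmit_zc - transmit frames from the XSK TX ring (zero-copy)
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of XSK descriptors to submit
 * Description: pull descriptors from the XSK pool and post them on the TX
 * ring shared with the slow path. Returns true when budget is left over
 * and no more XSK frames are pending.
 */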
2527 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2528 {
2529 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2530 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2531 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2532 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2533 	unsigned int entry = tx_q->cur_tx;
2534 	struct dma_desc *tx_desc = NULL;
2535 	struct xdp_desc xdp_desc;
2536 	bool work_done = true;
2537 	u32 tx_set_ic_bit = 0;
2538 
2539 	/* Avoid a TX time-out as we are sharing the queue with the slow path */
2540 	txq_trans_cond_update(nq);
2541 
2542 	budget = min(budget, stmmac_tx_avail(priv, queue));
2543 
2544 	while (budget-- > 0) {
2545 		struct stmmac_metadata_request meta_req;
2546 		struct xsk_tx_metadata *meta = NULL;
2547 		dma_addr_t dma_addr;
2548 		bool set_ic;
2549 
2550 		/* We share the ring with the slow path, so stop XSK TX desc
2551 		 * submission when the available TX ring space is below the threshold.
2552 		 */
2553 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2554 		    !netif_carrier_ok(priv->dev)) {
2555 			work_done = false;
2556 			break;
2557 		}
2558 
2559 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2560 			break;
2561 
2562 		if (priv->est && priv->est->enable &&
2563 		    priv->est->max_sdu[queue] &&
2564 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2565 			priv->xstats.max_sdu_txq_drop[queue]++;
2566 			continue;
2567 		}
2568 
2569 		if (likely(priv->extend_desc))
2570 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2571 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2572 			tx_desc = &tx_q->dma_entx[entry].basic;
2573 		else
2574 			tx_desc = tx_q->dma_tx + entry;
2575 
2576 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2577 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2578 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2579 
2580 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2581 
2582 		/* To return the XDP buffer to the XSK pool, we simply call
2583 		 * xsk_tx_completed(), so we don't need to fill up
2584 		 * 'buf' and 'xdpf'.
2585 		 */
2586 		tx_q->tx_skbuff_dma[entry].buf = 0;
2587 		tx_q->xdpf[entry] = NULL;
2588 
2589 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2590 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2591 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2592 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2593 
2594 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2595 
2596 		tx_q->tx_count_frames++;
2597 
2598 		if (!priv->tx_coal_frames[queue])
2599 			set_ic = false;
2600 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2601 			set_ic = true;
2602 		else
2603 			set_ic = false;
2604 
2605 		meta_req.priv = priv;
2606 		meta_req.tx_desc = tx_desc;
2607 		meta_req.set_ic = &set_ic;
2608 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2609 					&meta_req);
2610 		if (set_ic) {
2611 			tx_q->tx_count_frames = 0;
2612 			stmmac_set_tx_ic(priv, tx_desc);
2613 			tx_set_ic_bit++;
2614 		}
2615 
2616 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2617 				       true, priv->mode, true, true,
2618 				       xdp_desc.len);
2619 
2620 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2621 
2622 		xsk_tx_metadata_to_compl(meta,
2623 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2624 
2625 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2626 		entry = tx_q->cur_tx;
2627 	}
2628 	u64_stats_update_begin(&txq_stats->napi_syncp);
2629 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2630 	u64_stats_update_end(&txq_stats->napi_syncp);
2631 
2632 	if (tx_desc) {
2633 		stmmac_flush_tx_descriptors(priv, queue);
2634 		xsk_tx_release(pool);
2635 	}
2636 
2637 	/* Return true if both of these conditions are met:
2638 	 *  a) TX budget is still available
2639 	 *  b) work_done == true, i.e. the XSK TX desc peek came up empty (no
2640 	 *     more pending XSK TX frames to transmit)
2641 	 */
2642 	return !!budget && work_done;
2643 }
2644 
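/* Raise the TX DMA threshold by 64 after a threshold-related TX error, as
 * long as the channel is not in Store-and-Forward mode and the threshold
 * has not already gone above 256, then reprogram the operation mode.
 */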
2645 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2646 {
2647 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2648 		tc += 64;
2649 
2650 		if (priv->plat->force_thresh_dma_mode)
2651 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2652 		else
2653 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2654 						      chan);
2655 
2656 		priv->xstats.threshold = tc;
2657 	}
2658 }
2659 
2660 /**
2661  * stmmac_tx_clean - to manage the transmission completion
2662  * @priv: driver private structure
2663  * @budget: napi budget limiting this functions packet handling
2664  * @queue: TX queue index
2665  * @pending_packets: signal to arm the TX coal timer
2666  * Description: it reclaims the transmit resources after transmission completes.
2667  * If some packets still need to be handled, due to TX coalescing, set
2668  * pending_packets to true to make NAPI arm the TX coal timer.
2669  */
2670 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2671 			   bool *pending_packets)
2672 {
2673 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2674 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2675 	unsigned int bytes_compl = 0, pkts_compl = 0;
2676 	unsigned int entry, xmits = 0, count = 0;
2677 	u32 tx_packets = 0, tx_errors = 0;
2678 
2679 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2680 
2681 	tx_q->xsk_frames_done = 0;
2682 
2683 	entry = tx_q->dirty_tx;
2684 
2685 	/* Try to clean all completed TX frames in one shot */
2686 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2687 		struct xdp_frame *xdpf;
2688 		struct sk_buff *skb;
2689 		struct dma_desc *p;
2690 		int status;
2691 
2692 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2693 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2694 			xdpf = tx_q->xdpf[entry];
2695 			skb = NULL;
2696 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2697 			xdpf = NULL;
2698 			skb = tx_q->tx_skbuff[entry];
2699 		} else {
2700 			xdpf = NULL;
2701 			skb = NULL;
2702 		}
2703 
2704 		if (priv->extend_desc)
2705 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2706 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2707 			p = &tx_q->dma_entx[entry].basic;
2708 		else
2709 			p = tx_q->dma_tx + entry;
2710 
2711 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2712 		/* Check if the descriptor is owned by the DMA */
2713 		if (unlikely(status & tx_dma_own))
2714 			break;
2715 
2716 		count++;
2717 
2718 		/* Make sure descriptor fields are read after reading
2719 		 * the own bit.
2720 		 */
2721 		dma_rmb();
2722 
2723 		/* Just consider the last segment and ...*/
2724 		if (likely(!(status & tx_not_ls))) {
2725 			/* ... verify the status error condition */
2726 			if (unlikely(status & tx_err)) {
2727 				tx_errors++;
2728 				if (unlikely(status & tx_err_bump_tc))
2729 					stmmac_bump_dma_threshold(priv, queue);
2730 			} else {
2731 				tx_packets++;
2732 			}
2733 			if (skb) {
2734 				stmmac_get_tx_hwtstamp(priv, p, skb);
2735 			} else if (tx_q->xsk_pool &&
2736 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2737 				struct stmmac_xsk_tx_complete tx_compl = {
2738 					.priv = priv,
2739 					.desc = p,
2740 				};
2741 
2742 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2743 							 &stmmac_xsk_tx_metadata_ops,
2744 							 &tx_compl);
2745 			}
2746 		}
2747 
2748 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2749 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2750 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2751 				dma_unmap_page(priv->device,
2752 					       tx_q->tx_skbuff_dma[entry].buf,
2753 					       tx_q->tx_skbuff_dma[entry].len,
2754 					       DMA_TO_DEVICE);
2755 			else
2756 				dma_unmap_single(priv->device,
2757 						 tx_q->tx_skbuff_dma[entry].buf,
2758 						 tx_q->tx_skbuff_dma[entry].len,
2759 						 DMA_TO_DEVICE);
2760 			tx_q->tx_skbuff_dma[entry].buf = 0;
2761 			tx_q->tx_skbuff_dma[entry].len = 0;
2762 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2763 		}
2764 
2765 		stmmac_clean_desc3(priv, tx_q, p);
2766 
2767 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2768 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2769 
2770 		if (xdpf &&
2771 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2772 			xdp_return_frame_rx_napi(xdpf);
2773 			tx_q->xdpf[entry] = NULL;
2774 		}
2775 
2776 		if (xdpf &&
2777 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2778 			xdp_return_frame(xdpf);
2779 			tx_q->xdpf[entry] = NULL;
2780 		}
2781 
2782 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2783 			tx_q->xsk_frames_done++;
2784 
2785 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2786 			if (likely(skb)) {
2787 				pkts_compl++;
2788 				bytes_compl += skb->len;
2789 				dev_consume_skb_any(skb);
2790 				tx_q->tx_skbuff[entry] = NULL;
2791 			}
2792 		}
2793 
2794 		stmmac_release_tx_desc(priv, p, priv->mode);
2795 
2796 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2797 	}
2798 	tx_q->dirty_tx = entry;
2799 
2800 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2801 				  pkts_compl, bytes_compl);
2802 
2803 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2804 								queue))) &&
2805 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2806 
2807 		netif_dbg(priv, tx_done, priv->dev,
2808 			  "%s: restart transmit\n", __func__);
2809 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2810 	}
2811 
2812 	if (tx_q->xsk_pool) {
2813 		bool work_done;
2814 
2815 		if (tx_q->xsk_frames_done)
2816 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2817 
2818 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2819 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2820 
2821 		/* For XSK TX, we try to send as many as possible.
2822 		 * If XSK work done (XSK TX desc empty and budget still
2823 		 * available), return "budget - 1" to reenable TX IRQ.
2824 		 * Else, return "budget" to make NAPI continue polling.
2825 		 */
2826 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2827 					       STMMAC_XSK_TX_BUDGET_MAX);
2828 		if (work_done)
2829 			xmits = budget - 1;
2830 		else
2831 			xmits = budget;
2832 	}
2833 
2834 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2835 		stmmac_restart_sw_lpi_timer(priv);
2836 
2837 	/* We still have pending packets, let's call for a new scheduling */
2838 	if (tx_q->dirty_tx != tx_q->cur_tx)
2839 		*pending_packets = true;
2840 
2841 	u64_stats_update_begin(&txq_stats->napi_syncp);
2842 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2843 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2844 	u64_stats_inc(&txq_stats->napi.tx_clean);
2845 	u64_stats_update_end(&txq_stats->napi_syncp);
2846 
2847 	priv->xstats.tx_errors += tx_errors;
2848 
2849 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2850 
2851 	/* Combine decisions from TX clean and XSK TX */
2852 	return max(count, xmits);
2853 }
2854 
2855 /**
2856  * stmmac_tx_err - to manage the tx error
2857  * @priv: driver private structure
2858  * @chan: channel index
2859  * Description: it cleans the descriptors and restarts the transmission
2860  * in case of transmission errors.
2861  */
2862 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2863 {
2864 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2865 
2866 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2867 
2868 	stmmac_stop_tx_dma(priv, chan);
2869 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2870 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2871 	stmmac_reset_tx_queue(priv, chan);
2872 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2873 			    tx_q->dma_tx_phy, chan);
2874 	stmmac_start_tx_dma(priv, chan);
2875 
2876 	priv->xstats.tx_errors++;
2877 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2878 }
2879 
2880 /**
2881  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2882  *  @priv: driver private structure
2883  *  @txmode: TX operating mode
2884  *  @rxmode: RX operating mode
2885  *  @chan: channel index
2886  *  Description: it is used for configuring the DMA operation mode at
2887  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2888  *  mode.
2889  */
2890 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2891 					  u32 rxmode, u32 chan)
2892 {
2893 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2894 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2895 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2896 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2897 	int rxfifosz = priv->plat->rx_fifo_size;
2898 	int txfifosz = priv->plat->tx_fifo_size;
2899 
2900 	if (rxfifosz == 0)
2901 		rxfifosz = priv->dma_cap.rx_fifo_size;
2902 	if (txfifosz == 0)
2903 		txfifosz = priv->dma_cap.tx_fifo_size;
2904 
2905 	/* Adjust for real per queue fifo size */
2906 	rxfifosz /= rx_channels_count;
2907 	txfifosz /= tx_channels_count;
2908 
2909 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2910 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2911 }
2912 
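/* Check the safety feature IRQ status; if an error is reported, trigger
 * the global error handling and return true.
 */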
2913 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2914 {
2915 	int ret;
2916 
2917 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2918 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2919 	if (ret && (ret != -EINVAL)) {
2920 		stmmac_global_err(priv);
2921 		return true;
2922 	}
2923 
2924 	return false;
2925 }
2926 
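/**
 * stmmac_napi_check - check the DMA status and schedule NAPI
 * @priv: driver private structure
 * @chan: DMA channel index
 * @dir: interrupt direction (RX, TX or both) to check
 * Description: read the DMA interrupt status of the channel and, when RX
 * or TX work is reported, disable the corresponding DMA interrupt and
 * schedule the matching NAPI instance (the combined rxtx NAPI when the
 * queue uses an XSK pool). Returns the raw interrupt status.
 */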
2927 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2928 {
2929 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2930 						 &priv->xstats, chan, dir);
2931 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2932 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2933 	struct stmmac_channel *ch = &priv->channel[chan];
2934 	struct napi_struct *rx_napi;
2935 	struct napi_struct *tx_napi;
2936 	unsigned long flags;
2937 
2938 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2939 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2940 
2941 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2942 		if (napi_schedule_prep(rx_napi)) {
2943 			spin_lock_irqsave(&ch->lock, flags);
2944 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2945 			spin_unlock_irqrestore(&ch->lock, flags);
2946 			__napi_schedule(rx_napi);
2947 		}
2948 	}
2949 
2950 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2951 		if (napi_schedule_prep(tx_napi)) {
2952 			spin_lock_irqsave(&ch->lock, flags);
2953 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2954 			spin_unlock_irqrestore(&ch->lock, flags);
2955 			__napi_schedule(tx_napi);
2956 		}
2957 	}
2958 
2959 	return status;
2960 }
2961 
2962 /**
2963  * stmmac_dma_interrupt - DMA ISR
2964  * @priv: driver private structure
2965  * Description: this is the DMA ISR. It is called by the main ISR.
2966  * It calls the dwmac dma routine and schedules the poll method in case
2967  * some work can be done.
2968  */
2969 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2970 {
2971 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2972 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2973 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2974 				tx_channel_count : rx_channel_count;
2975 	u32 chan;
2976 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2977 
2978 	/* Make sure we never check beyond our status buffer. */
2979 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2980 		channels_to_check = ARRAY_SIZE(status);
2981 
2982 	for (chan = 0; chan < channels_to_check; chan++)
2983 		status[chan] = stmmac_napi_check(priv, chan,
2984 						 DMA_DIR_RXTX);
2985 
2986 	for (chan = 0; chan < tx_channel_count; chan++) {
2987 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2988 			/* Try to bump up the dma threshold on this failure */
2989 			stmmac_bump_dma_threshold(priv, chan);
2990 		} else if (unlikely(status[chan] == tx_hard_error)) {
2991 			stmmac_tx_err(priv, chan);
2992 		}
2993 	}
2994 }
2995 
2996 /**
2997  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2998  * @priv: driver private structure
2999  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
3000  */
3001 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3002 {
3003 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3004 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3005 
3006 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3007 
3008 	if (priv->dma_cap.rmon) {
3009 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3010 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3011 	} else
3012 		netdev_info(priv->dev, "No MAC Management Counters available\n");
3013 }
3014 
3015 /**
3016  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3017  * @priv: driver private structure
3018  * Description:
3019  *  new GMAC chip generations have a dedicated register to indicate the
3020  *  presence of optional features/functions.
3021  *  This can also be used to override the values passed through the
3022  *  platform code, which is necessary for old MAC10/100 and GMAC chips.
3023  */
3024 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3025 {
3026 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3027 }
3028 
3029 /**
3030  * stmmac_check_ether_addr - check if the MAC addr is valid
3031  * @priv: driver private structure
3032  * Description:
3033  * verify that the MAC address is valid; if it is not, try to read it from
3034  * the HW and, failing that, generate a random MAC address.
3035  */
3036 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3037 {
3038 	u8 addr[ETH_ALEN];
3039 
3040 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3041 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3042 		if (is_valid_ether_addr(addr))
3043 			eth_hw_addr_set(priv->dev, addr);
3044 		else
3045 			eth_hw_addr_random(priv->dev);
3046 		dev_info(priv->device, "device MAC address %pM\n",
3047 			 priv->dev->dev_addr);
3048 	}
3049 }
3050 
3051 /**
3052  * stmmac_init_dma_engine - DMA init.
3053  * @priv: driver private structure
3054  * Description:
3055  * It inits the DMA invoking the specific MAC/GMAC callback.
3056  * Some DMA parameters can be passed from the platform;
3057  * if they are not passed, a default is kept for the MAC or GMAC.
3058  */
3059 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3060 {
3061 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3062 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3063 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3064 	struct stmmac_rx_queue *rx_q;
3065 	struct stmmac_tx_queue *tx_q;
3066 	u32 chan = 0;
3067 	int ret = 0;
3068 
3069 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3070 		dev_err(priv->device, "Invalid DMA configuration\n");
3071 		return -EINVAL;
3072 	}
3073 
3074 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3075 		priv->plat->dma_cfg->atds = 1;
3076 
3077 	ret = stmmac_reset(priv, priv->ioaddr);
3078 	if (ret) {
3079 		dev_err(priv->device, "Failed to reset the dma\n");
3080 		return ret;
3081 	}
3082 
3083 	/* DMA Configuration */
3084 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3085 
3086 	if (priv->plat->axi)
3087 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3088 
3089 	/* DMA CSR Channel configuration */
3090 	for (chan = 0; chan < dma_csr_ch; chan++) {
3091 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3092 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3093 	}
3094 
3095 	/* DMA RX Channel Configuration */
3096 	for (chan = 0; chan < rx_channels_count; chan++) {
3097 		rx_q = &priv->dma_conf.rx_queue[chan];
3098 
3099 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3100 				    rx_q->dma_rx_phy, chan);
3101 
3102 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3103 				     (rx_q->buf_alloc_num *
3104 				      sizeof(struct dma_desc));
3105 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3106 				       rx_q->rx_tail_addr, chan);
3107 	}
3108 
3109 	/* DMA TX Channel Configuration */
3110 	for (chan = 0; chan < tx_channels_count; chan++) {
3111 		tx_q = &priv->dma_conf.tx_queue[chan];
3112 
3113 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3114 				    tx_q->dma_tx_phy, chan);
3115 
3116 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3117 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3118 				       tx_q->tx_tail_addr, chan);
3119 	}
3120 
3121 	return ret;
3122 }
3123 
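/* Arm the TX coalescing timer of @queue unless its NAPI instance is
 * already scheduled, in which case any pending timer is cancelled and
 * re-armed by the next NAPI run.
 */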
3124 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3125 {
3126 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3127 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3128 	struct stmmac_channel *ch;
3129 	struct napi_struct *napi;
3130 
3131 	if (!tx_coal_timer)
3132 		return;
3133 
3134 	ch = &priv->channel[tx_q->queue_index];
3135 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3136 
3137 	/* Arm the timer only if napi is not already scheduled.
3138 	 * If napi is scheduled, try to cancel any pending timer; it will be
3139 	 * armed again on the next scheduled napi run.
3140 	 */
3141 	if (unlikely(!napi_is_scheduled(napi)))
3142 		hrtimer_start(&tx_q->txtimer,
3143 			      STMMAC_COAL_TIMER(tx_coal_timer),
3144 			      HRTIMER_MODE_REL);
3145 	else
3146 		hrtimer_try_to_cancel(&tx_q->txtimer);
3147 }
3148 
3149 /**
3150  * stmmac_tx_timer - mitigation sw timer for tx.
3151  * @t: pointer to the hrtimer embedded in the TX queue
3152  * Description:
3153  * This is the timer handler that schedules the TX NAPI to run stmmac_tx_clean.
3154  */
3155 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3156 {
3157 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3158 	struct stmmac_priv *priv = tx_q->priv_data;
3159 	struct stmmac_channel *ch;
3160 	struct napi_struct *napi;
3161 
3162 	ch = &priv->channel[tx_q->queue_index];
3163 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3164 
3165 	if (likely(napi_schedule_prep(napi))) {
3166 		unsigned long flags;
3167 
3168 		spin_lock_irqsave(&ch->lock, flags);
3169 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3170 		spin_unlock_irqrestore(&ch->lock, flags);
3171 		__napi_schedule(napi);
3172 	}
3173 
3174 	return HRTIMER_NORESTART;
3175 }
3176 
3177 /**
3178  * stmmac_init_coalesce - init mitigation options.
3179  * @priv: driver private structure
3180  * Description:
3181  * This inits the coalesce parameters: i.e. timer rate,
3182  * timer handler and default threshold used for enabling the
3183  * interrupt on completion bit.
3184  */
3185 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3186 {
3187 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3188 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3189 	u32 chan;
3190 
3191 	for (chan = 0; chan < tx_channel_count; chan++) {
3192 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3193 
3194 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3195 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3196 
3197 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3198 		tx_q->txtimer.function = stmmac_tx_timer;
3199 	}
3200 
3201 	for (chan = 0; chan < rx_channel_count; chan++)
3202 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3203 }
3204 
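/* Program the TX and RX descriptor ring length of every channel into the
 * DMA.
 */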
3205 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3206 {
3207 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3208 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3209 	u32 chan;
3210 
3211 	/* set TX ring length */
3212 	for (chan = 0; chan < tx_channels_count; chan++)
3213 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3214 				       (priv->dma_conf.dma_tx_size - 1), chan);
3215 
3216 	/* set RX ring length */
3217 	for (chan = 0; chan < rx_channels_count; chan++)
3218 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3219 				       (priv->dma_conf.dma_rx_size - 1), chan);
3220 }
3221 
3222 /**
3223  *  stmmac_set_tx_queue_weight - Set TX queue weight
3224  *  @priv: driver private structure
3225  *  Description: It is used for setting the TX queue weights
3226  */
3227 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3228 {
3229 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3230 	u32 weight;
3231 	u32 queue;
3232 
3233 	for (queue = 0; queue < tx_queues_count; queue++) {
3234 		weight = priv->plat->tx_queues_cfg[queue].weight;
3235 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3236 	}
3237 }
3238 
3239 /**
3240  *  stmmac_configure_cbs - Configure CBS in TX queue
3241  *  @priv: driver private structure
3242  *  Description: It is used for configuring CBS in AVB TX queues
3243  */
3244 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3245 {
3246 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3247 	u32 mode_to_use;
3248 	u32 queue;
3249 
3250 	/* queue 0 is reserved for legacy traffic */
3251 	for (queue = 1; queue < tx_queues_count; queue++) {
3252 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3253 		if (mode_to_use == MTL_QUEUE_DCB)
3254 			continue;
3255 
3256 		stmmac_config_cbs(priv, priv->hw,
3257 				priv->plat->tx_queues_cfg[queue].send_slope,
3258 				priv->plat->tx_queues_cfg[queue].idle_slope,
3259 				priv->plat->tx_queues_cfg[queue].high_credit,
3260 				priv->plat->tx_queues_cfg[queue].low_credit,
3261 				queue);
3262 	}
3263 }
3264 
3265 /**
3266  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3267  *  @priv: driver private structure
3268  *  Description: It is used for mapping RX queues to RX dma channels
3269  */
3270 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3271 {
3272 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3273 	u32 queue;
3274 	u32 chan;
3275 
3276 	for (queue = 0; queue < rx_queues_count; queue++) {
3277 		chan = priv->plat->rx_queues_cfg[queue].chan;
3278 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3279 	}
3280 }
3281 
3282 /**
3283  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3284  *  @priv: driver private structure
3285  *  Description: It is used for configuring the RX Queue Priority
3286  */
3287 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3288 {
3289 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3290 	u32 queue;
3291 	u32 prio;
3292 
3293 	for (queue = 0; queue < rx_queues_count; queue++) {
3294 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3295 			continue;
3296 
3297 		prio = priv->plat->rx_queues_cfg[queue].prio;
3298 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3299 	}
3300 }
3301 
3302 /**
3303  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3304  *  @priv: driver private structure
3305  *  Description: It is used for configuring the TX Queue Priority
3306  */
3307 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3308 {
3309 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3310 	u32 queue;
3311 	u32 prio;
3312 
3313 	for (queue = 0; queue < tx_queues_count; queue++) {
3314 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3315 			continue;
3316 
3317 		prio = priv->plat->tx_queues_cfg[queue].prio;
3318 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3319 	}
3320 }
3321 
3322 /**
3323  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3324  *  @priv: driver private structure
3325  *  Description: It is used for configuring the RX queue routing
3326  */
3327 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3328 {
3329 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3330 	u32 queue;
3331 	u8 packet;
3332 
3333 	for (queue = 0; queue < rx_queues_count; queue++) {
3334 		/* no specific packet type routing specified for the queue */
3335 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3336 			continue;
3337 
3338 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3339 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3340 	}
3341 }
3342 
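/* When the hardware and the platform support RSS, enable it according to
 * NETIF_F_RXHASH and program the RSS configuration for the RX queues in
 * use.
 */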
3343 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3344 {
3345 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3346 		priv->rss.enable = false;
3347 		return;
3348 	}
3349 
3350 	if (priv->dev->features & NETIF_F_RXHASH)
3351 		priv->rss.enable = true;
3352 	else
3353 		priv->rss.enable = false;
3354 
3355 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3356 			     priv->plat->rx_queues_to_use);
3357 }
3358 
3359 /**
3360  *  stmmac_mtl_configuration - Configure MTL
3361  *  @priv: driver private structure
3362  *  Description: It is used for configuring the MTL
3363  */
3364 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3365 {
3366 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3367 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3368 
3369 	if (tx_queues_count > 1)
3370 		stmmac_set_tx_queue_weight(priv);
3371 
3372 	/* Configure MTL RX algorithms */
3373 	if (rx_queues_count > 1)
3374 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3375 				priv->plat->rx_sched_algorithm);
3376 
3377 	/* Configure MTL TX algorithms */
3378 	if (tx_queues_count > 1)
3379 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3380 				priv->plat->tx_sched_algorithm);
3381 
3382 	/* Configure CBS in AVB TX queues */
3383 	if (tx_queues_count > 1)
3384 		stmmac_configure_cbs(priv);
3385 
3386 	/* Map RX MTL to DMA channels */
3387 	stmmac_rx_queue_dma_chan_map(priv);
3388 
3389 	/* Enable MAC RX Queues */
3390 	stmmac_mac_enable_rx_queues(priv);
3391 
3392 	/* Set RX priorities */
3393 	if (rx_queues_count > 1)
3394 		stmmac_mac_config_rx_queues_prio(priv);
3395 
3396 	/* Set TX priorities */
3397 	if (tx_queues_count > 1)
3398 		stmmac_mac_config_tx_queues_prio(priv);
3399 
3400 	/* Set RX routing */
3401 	if (rx_queues_count > 1)
3402 		stmmac_mac_config_rx_queues_routing(priv);
3403 
3404 	/* Receive Side Scaling */
3405 	if (rx_queues_count > 1)
3406 		stmmac_mac_config_rss(priv);
3407 }
3408 
3409 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3410 {
3411 	if (priv->dma_cap.asp) {
3412 		netdev_info(priv->dev, "Enabling Safety Features\n");
3413 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3414 					  priv->plat->safety_feat_cfg);
3415 	} else {
3416 		netdev_info(priv->dev, "No Safety Features support found\n");
3417 	}
3418 }
3419 
3420 /**
3421  * stmmac_hw_setup - set up the MAC in a usable state.
3422  *  @dev: pointer to the device structure.
3423  *  @ptp_register: register PTP if set
3424  *  Description:
3425  *  this is the main function to set up the HW in a usable state: the
3426  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3427  *  checksum features, timers) and the DMA is ready to start receiving
3428  *  and transmitting.
3429  *  Return value:
3430  *  0 on success and an appropriate negative error code, as defined in
3431  *  errno.h, on failure.
3432  */
3433 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3434 {
3435 	struct stmmac_priv *priv = netdev_priv(dev);
3436 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3437 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3438 	bool sph_en;
3439 	u32 chan;
3440 	int ret;
3441 
3442 	/* Make sure RX clock is enabled */
3443 	if (priv->hw->phylink_pcs)
3444 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3445 
3446 	/* DMA initialization and SW reset */
3447 	ret = stmmac_init_dma_engine(priv);
3448 	if (ret < 0) {
3449 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3450 			   __func__);
3451 		return ret;
3452 	}
3453 
3454 	/* Copy the MAC addr into the HW  */
3455 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3456 
3457 	/* PS and related bits will be programmed according to the speed */
3458 	if (priv->hw->pcs) {
3459 		int speed = priv->plat->mac_port_sel_speed;
3460 
3461 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3462 		    (speed == SPEED_1000)) {
3463 			priv->hw->ps = speed;
3464 		} else {
3465 			dev_warn(priv->device, "invalid port speed\n");
3466 			priv->hw->ps = 0;
3467 		}
3468 	}
3469 
3470 	/* Initialize the MAC Core */
3471 	stmmac_core_init(priv, priv->hw, dev);
3472 
3473 	/* Initialize MTL */
3474 	stmmac_mtl_configuration(priv);
3475 
3476 	/* Initialize Safety Features */
3477 	stmmac_safety_feat_configuration(priv);
3478 
3479 	ret = stmmac_rx_ipc(priv, priv->hw);
3480 	if (!ret) {
3481 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3482 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3483 		priv->hw->rx_csum = 0;
3484 	}
3485 
3486 	/* Enable the MAC Rx/Tx */
3487 	stmmac_mac_set(priv, priv->ioaddr, true);
3488 
3489 	/* Set the HW DMA mode and the COE */
3490 	stmmac_dma_operation_mode(priv);
3491 
3492 	stmmac_mmc_setup(priv);
3493 
3494 	if (ptp_register) {
3495 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3496 		if (ret < 0)
3497 			netdev_warn(priv->dev,
3498 				    "failed to enable PTP reference clock: %pe\n",
3499 				    ERR_PTR(ret));
3500 	}
3501 
3502 	ret = stmmac_init_ptp(priv);
3503 	if (ret == -EOPNOTSUPP)
3504 		netdev_info(priv->dev, "PTP not supported by HW\n");
3505 	else if (ret)
3506 		netdev_warn(priv->dev, "PTP init failed\n");
3507 	else if (ptp_register)
3508 		stmmac_ptp_register(priv);
3509 
3510 	if (priv->use_riwt) {
3511 		u32 queue;
3512 
3513 		for (queue = 0; queue < rx_cnt; queue++) {
3514 			if (!priv->rx_riwt[queue])
3515 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3516 
3517 			stmmac_rx_watchdog(priv, priv->ioaddr,
3518 					   priv->rx_riwt[queue], queue);
3519 		}
3520 	}
3521 
3522 	if (priv->hw->pcs)
3523 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3524 
3525 	/* set TX and RX rings length */
3526 	stmmac_set_rings_length(priv);
3527 
3528 	/* Enable TSO */
3529 	if (priv->tso) {
3530 		for (chan = 0; chan < tx_cnt; chan++) {
3531 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3532 
3533 			/* TSO and TBS cannot co-exist */
3534 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3535 				continue;
3536 
3537 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3538 		}
3539 	}
3540 
3541 	/* Enable Split Header */
3542 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3543 	for (chan = 0; chan < rx_cnt; chan++)
3544 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3545 
3546 
3547 	/* VLAN Tag Insertion */
3548 	if (priv->dma_cap.vlins)
3549 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3550 
3551 	/* TBS */
3552 	for (chan = 0; chan < tx_cnt; chan++) {
3553 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3554 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3555 
3556 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3557 	}
3558 
3559 	/* Configure real RX and TX queues */
3560 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3561 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3562 
3563 	/* Start the ball rolling... */
3564 	stmmac_start_all_dma(priv);
3565 
3566 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3567 
3568 	return 0;
3569 }
3570 
3571 static void stmmac_hw_teardown(struct net_device *dev)
3572 {
3573 	struct stmmac_priv *priv = netdev_priv(dev);
3574 
3575 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3576 }
3577 
3578 static void stmmac_free_irq(struct net_device *dev,
3579 			    enum request_irq_err irq_err, int irq_idx)
3580 {
3581 	struct stmmac_priv *priv = netdev_priv(dev);
3582 	int j;
3583 
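	/* The cases below intentionally fall through: each entry point frees
	 * only the IRQs that were successfully requested before the failing
	 * one, while REQ_IRQ_ERR_ALL walks the whole chain starting from the
	 * TX vectors.
	 */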
3584 	switch (irq_err) {
3585 	case REQ_IRQ_ERR_ALL:
3586 		irq_idx = priv->plat->tx_queues_to_use;
3587 		fallthrough;
3588 	case REQ_IRQ_ERR_TX:
3589 		for (j = irq_idx - 1; j >= 0; j--) {
3590 			if (priv->tx_irq[j] > 0) {
3591 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3592 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3593 			}
3594 		}
3595 		irq_idx = priv->plat->rx_queues_to_use;
3596 		fallthrough;
3597 	case REQ_IRQ_ERR_RX:
3598 		for (j = irq_idx - 1; j >= 0; j--) {
3599 			if (priv->rx_irq[j] > 0) {
3600 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3601 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3602 			}
3603 		}
3604 
3605 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3606 			free_irq(priv->sfty_ue_irq, dev);
3607 		fallthrough;
3608 	case REQ_IRQ_ERR_SFTY_UE:
3609 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3610 			free_irq(priv->sfty_ce_irq, dev);
3611 		fallthrough;
3612 	case REQ_IRQ_ERR_SFTY_CE:
3613 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3614 			free_irq(priv->lpi_irq, dev);
3615 		fallthrough;
3616 	case REQ_IRQ_ERR_LPI:
3617 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3618 			free_irq(priv->wol_irq, dev);
3619 		fallthrough;
3620 	case REQ_IRQ_ERR_SFTY:
3621 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3622 			free_irq(priv->sfty_irq, dev);
3623 		fallthrough;
3624 	case REQ_IRQ_ERR_WOL:
3625 		free_irq(dev->irq, dev);
3626 		fallthrough;
3627 	case REQ_IRQ_ERR_MAC:
3628 	case REQ_IRQ_ERR_NO:
3629 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3630 		break;
3631 	}
3632 }
3633 
3634 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3635 {
3636 	struct stmmac_priv *priv = netdev_priv(dev);
3637 	enum request_irq_err irq_err;
3638 	cpumask_t cpu_mask;
3639 	int irq_idx = 0;
3640 	char *int_name;
3641 	int ret;
3642 	int i;
3643 
3644 	/* For common interrupt */
3645 	int_name = priv->int_name_mac;
3646 	sprintf(int_name, "%s:%s", dev->name, "mac");
3647 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3648 			  0, int_name, dev);
3649 	if (unlikely(ret < 0)) {
3650 		netdev_err(priv->dev,
3651 			   "%s: alloc mac MSI %d (error: %d)\n",
3652 			   __func__, dev->irq, ret);
3653 		irq_err = REQ_IRQ_ERR_MAC;
3654 		goto irq_error;
3655 	}
3656 
3657 	/* Request the Wake IRQ in case another line
3658 	 * is used for WoL
3659 	 */
3660 	priv->wol_irq_disabled = true;
3661 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3662 		int_name = priv->int_name_wol;
3663 		sprintf(int_name, "%s:%s", dev->name, "wol");
3664 		ret = request_irq(priv->wol_irq,
3665 				  stmmac_mac_interrupt,
3666 				  0, int_name, dev);
3667 		if (unlikely(ret < 0)) {
3668 			netdev_err(priv->dev,
3669 				   "%s: alloc wol MSI %d (error: %d)\n",
3670 				   __func__, priv->wol_irq, ret);
3671 			irq_err = REQ_IRQ_ERR_WOL;
3672 			goto irq_error;
3673 		}
3674 	}
3675 
3676 	/* Request the LPI IRQ in case another line
3677 	 * is used for LPI
3678 	 */
3679 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3680 		int_name = priv->int_name_lpi;
3681 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3682 		ret = request_irq(priv->lpi_irq,
3683 				  stmmac_mac_interrupt,
3684 				  0, int_name, dev);
3685 		if (unlikely(ret < 0)) {
3686 			netdev_err(priv->dev,
3687 				   "%s: alloc lpi MSI %d (error: %d)\n",
3688 				   __func__, priv->lpi_irq, ret);
3689 			irq_err = REQ_IRQ_ERR_LPI;
3690 			goto irq_error;
3691 		}
3692 	}
3693 
3694 	/* Request the common Safety Feature Correctable/Uncorrectable
3695 	 * Error line in case another line is used
3696 	 */
3697 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3698 		int_name = priv->int_name_sfty;
3699 		sprintf(int_name, "%s:%s", dev->name, "safety");
3700 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3701 				  0, int_name, dev);
3702 		if (unlikely(ret < 0)) {
3703 			netdev_err(priv->dev,
3704 				   "%s: alloc sfty MSI %d (error: %d)\n",
3705 				   __func__, priv->sfty_irq, ret);
3706 			irq_err = REQ_IRQ_ERR_SFTY;
3707 			goto irq_error;
3708 		}
3709 	}
3710 
3711 	/* Request the Safety Feature Correctable Error line in
3712 	 * case another line is used
3713 	 */
3714 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3715 		int_name = priv->int_name_sfty_ce;
3716 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3717 		ret = request_irq(priv->sfty_ce_irq,
3718 				  stmmac_safety_interrupt,
3719 				  0, int_name, dev);
3720 		if (unlikely(ret < 0)) {
3721 			netdev_err(priv->dev,
3722 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3723 				   __func__, priv->sfty_ce_irq, ret);
3724 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3725 			goto irq_error;
3726 		}
3727 	}
3728 
3729 	/* Request the Safety Feature Uncorrectable Error line in
3730 	 * case another line is used
3731 	 */
3732 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3733 		int_name = priv->int_name_sfty_ue;
3734 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3735 		ret = request_irq(priv->sfty_ue_irq,
3736 				  stmmac_safety_interrupt,
3737 				  0, int_name, dev);
3738 		if (unlikely(ret < 0)) {
3739 			netdev_err(priv->dev,
3740 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3741 				   __func__, priv->sfty_ue_irq, ret);
3742 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3743 			goto irq_error;
3744 		}
3745 	}
3746 
3747 	/* Request Rx MSI irq */
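	/* One vector per RX queue; hint each vector's affinity to CPU
	 * (queue index modulo the number of online CPUs) so the queues are
	 * spread across the available CPUs.
	 */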
3748 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3749 		if (i >= MTL_MAX_RX_QUEUES)
3750 			break;
3751 		if (priv->rx_irq[i] == 0)
3752 			continue;
3753 
3754 		int_name = priv->int_name_rx_irq[i];
3755 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3756 		ret = request_irq(priv->rx_irq[i],
3757 				  stmmac_msi_intr_rx,
3758 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3759 		if (unlikely(ret < 0)) {
3760 			netdev_err(priv->dev,
3761 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3762 				   __func__, i, priv->rx_irq[i], ret);
3763 			irq_err = REQ_IRQ_ERR_RX;
3764 			irq_idx = i;
3765 			goto irq_error;
3766 		}
3767 		cpumask_clear(&cpu_mask);
3768 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3769 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3770 	}
3771 
3772 	/* Request Tx MSI irq */
3773 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3774 		if (i >= MTL_MAX_TX_QUEUES)
3775 			break;
3776 		if (priv->tx_irq[i] == 0)
3777 			continue;
3778 
3779 		int_name = priv->int_name_tx_irq[i];
3780 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3781 		ret = request_irq(priv->tx_irq[i],
3782 				  stmmac_msi_intr_tx,
3783 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3784 		if (unlikely(ret < 0)) {
3785 			netdev_err(priv->dev,
3786 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3787 				   __func__, i, priv->tx_irq[i], ret);
3788 			irq_err = REQ_IRQ_ERR_TX;
3789 			irq_idx = i;
3790 			goto irq_error;
3791 		}
3792 		cpumask_clear(&cpu_mask);
3793 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3794 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3795 	}
3796 
3797 	return 0;
3798 
3799 irq_error:
3800 	stmmac_free_irq(dev, irq_err, irq_idx);
3801 	return ret;
3802 }
3803 
3804 static int stmmac_request_irq_single(struct net_device *dev)
3805 {
3806 	struct stmmac_priv *priv = netdev_priv(dev);
3807 	enum request_irq_err irq_err;
3808 	int ret;
3809 
3810 	ret = request_irq(dev->irq, stmmac_interrupt,
3811 			  IRQF_SHARED, dev->name, dev);
3812 	if (unlikely(ret < 0)) {
3813 		netdev_err(priv->dev,
3814 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3815 			   __func__, dev->irq, ret);
3816 		irq_err = REQ_IRQ_ERR_MAC;
3817 		goto irq_error;
3818 	}
3819 
3820 	/* Request the Wake IRQ in case another line
3821 	 * is used for WoL
3822 	 */
3823 	priv->wol_irq_disabled = true;
3824 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3825 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3826 				  IRQF_SHARED, dev->name, dev);
3827 		if (unlikely(ret < 0)) {
3828 			netdev_err(priv->dev,
3829 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3830 				   __func__, priv->wol_irq, ret);
3831 			irq_err = REQ_IRQ_ERR_WOL;
3832 			goto irq_error;
3833 		}
3834 	}
3835 
3836 	/* Request the LPI IRQ in case another line is used for LPI */
3837 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3838 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3839 				  IRQF_SHARED, dev->name, dev);
3840 		if (unlikely(ret < 0)) {
3841 			netdev_err(priv->dev,
3842 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3843 				   __func__, priv->lpi_irq, ret);
3844 			irq_err = REQ_IRQ_ERR_LPI;
3845 			goto irq_error;
3846 		}
3847 	}
3848 
3849 	/* Request the common Safety Feature Correctable/Uncorrectable
3850 	 * Error line in case another line is used
3851 	 */
3852 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3853 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3854 				  IRQF_SHARED, dev->name, dev);
3855 		if (unlikely(ret < 0)) {
3856 			netdev_err(priv->dev,
3857 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3858 				   __func__, priv->sfty_irq, ret);
3859 			irq_err = REQ_IRQ_ERR_SFTY;
3860 			goto irq_error;
3861 		}
3862 	}
3863 
3864 	return 0;
3865 
3866 irq_error:
3867 	stmmac_free_irq(dev, irq_err, 0);
3868 	return ret;
3869 }
3870 
3871 static int stmmac_request_irq(struct net_device *dev)
3872 {
3873 	struct stmmac_priv *priv = netdev_priv(dev);
3874 	int ret;
3875 
3876 	/* Request the IRQ lines */
3877 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3878 		ret = stmmac_request_irq_multi_msi(dev);
3879 	else
3880 		ret = stmmac_request_irq_single(dev);
3881 
3882 	return ret;
3883 }
3884 
3885 /**
3886  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3887  *  @priv: driver private structure
3888  *  @mtu: MTU to setup the dma queue and buf with
3889  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3890  *  Allocate the Tx/Rx DMA queues and initialize them.
3891  *  Return value:
3892  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3893  */
3894 static struct stmmac_dma_conf *
3895 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3896 {
3897 	struct stmmac_dma_conf *dma_conf;
3898 	int chan, bfsize, ret;
3899 
3900 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3901 	if (!dma_conf) {
3902 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3903 			   __func__);
3904 		return ERR_PTR(-ENOMEM);
3905 	}
3906 
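	/* Pick the RX buffer size: try the 16KiB buffer size first and fall
	 * back to a size derived from the MTU when 16KiB buffers are not
	 * used.
	 */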
3907 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3908 	if (bfsize < 0)
3909 		bfsize = 0;
3910 
3911 	if (bfsize < BUF_SIZE_16KiB)
3912 		bfsize = stmmac_set_bfsize(mtu, 0);
3913 
3914 	dma_conf->dma_buf_sz = bfsize;
3915 	/* Choose the tx/rx size from the one already defined in the
3916 	 * priv struct, if any.
3917 	 */
3918 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3919 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3920 
3921 	if (!dma_conf->dma_tx_size)
3922 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3923 	if (!dma_conf->dma_rx_size)
3924 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3925 
3926 	/* Early check for TBS availability */
3927 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3928 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3929 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3930 
3931 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3932 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3933 	}
3934 
3935 	ret = alloc_dma_desc_resources(priv, dma_conf);
3936 	if (ret < 0) {
3937 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3938 			   __func__);
3939 		goto alloc_error;
3940 	}
3941 
3942 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3943 	if (ret < 0) {
3944 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3945 			   __func__);
3946 		goto init_error;
3947 	}
3948 
3949 	return dma_conf;
3950 
3951 init_error:
3952 	free_dma_desc_resources(priv, dma_conf);
3953 alloc_error:
3954 	kfree(dma_conf);
3955 	return ERR_PTR(ret);
3956 }
3957 
3958 /**
3959  *  __stmmac_open - open entry point of the driver
3960  *  @dev : pointer to the device structure.
3961  *  @dma_conf: structure with the DMA configuration and queues to use
3962  *  Description:
3963  *  This function is the open entry point of the driver.
3964  *  Return value:
3965  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3966  *  file on failure.
3967  */
3968 static int __stmmac_open(struct net_device *dev,
3969 			 struct stmmac_dma_conf *dma_conf)
3970 {
3971 	struct stmmac_priv *priv = netdev_priv(dev);
3972 	int mode = priv->plat->phy_interface;
3973 	u32 chan;
3974 	int ret;
3975 
3976 	/* Initialise the tx lpi timer, converting from msec to usec */
3977 	if (!priv->tx_lpi_timer)
3978 		priv->tx_lpi_timer = eee_timer * 1000;
3979 
3980 	ret = pm_runtime_resume_and_get(priv->device);
3981 	if (ret < 0)
3982 		return ret;
3983 
3984 	if ((!priv->hw->xpcs ||
3985 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3986 		ret = stmmac_init_phy(dev);
3987 		if (ret) {
3988 			netdev_err(priv->dev,
3989 				   "%s: Cannot attach to PHY (error: %d)\n",
3990 				   __func__, ret);
3991 			goto init_phy_error;
3992 		}
3993 	}
3994 
3995 	buf_sz = dma_conf->dma_buf_sz;
3996 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3997 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3998 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3999 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4000 
4001 	stmmac_reset_queues_param(priv);
4002 
4003 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
4004 	    priv->plat->serdes_powerup) {
4005 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4006 		if (ret < 0) {
4007 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4008 				   __func__);
4009 			goto init_error;
4010 		}
4011 	}
4012 
4013 	ret = stmmac_hw_setup(dev, true);
4014 	if (ret < 0) {
4015 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4016 		goto init_error;
4017 	}
4018 
4019 	stmmac_init_coalesce(priv);
4020 
4021 	phylink_start(priv->phylink);
4022 	/* We may have called phylink_speed_down before */
4023 	phylink_speed_up(priv->phylink);
4024 
4025 	ret = stmmac_request_irq(dev);
4026 	if (ret)
4027 		goto irq_error;
4028 
4029 	stmmac_enable_all_queues(priv);
4030 	netif_tx_start_all_queues(priv->dev);
4031 	stmmac_enable_all_dma_irq(priv);
4032 
4033 	return 0;
4034 
4035 irq_error:
4036 	phylink_stop(priv->phylink);
4037 
4038 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4039 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4040 
4041 	stmmac_hw_teardown(dev);
4042 init_error:
4043 	phylink_disconnect_phy(priv->phylink);
4044 init_phy_error:
4045 	pm_runtime_put(priv->device);
4046 	return ret;
4047 }
4048 
4049 static int stmmac_open(struct net_device *dev)
4050 {
4051 	struct stmmac_priv *priv = netdev_priv(dev);
4052 	struct stmmac_dma_conf *dma_conf;
4053 	int ret;
4054 
4055 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4056 	if (IS_ERR(dma_conf))
4057 		return PTR_ERR(dma_conf);
4058 
4059 	ret = __stmmac_open(dev, dma_conf);
4060 	if (ret)
4061 		free_dma_desc_resources(priv, dma_conf);
4062 
4063 	kfree(dma_conf);
4064 	return ret;
4065 }
4066 
4067 /**
4068  *  stmmac_release - close entry point of the driver
4069  *  @dev : device pointer.
4070  *  Description:
4071  *  This is the stop entry point of the driver.
4072  */
4073 static int stmmac_release(struct net_device *dev)
4074 {
4075 	struct stmmac_priv *priv = netdev_priv(dev);
4076 	u32 chan;
4077 
4078 	if (device_may_wakeup(priv->device))
4079 		phylink_speed_down(priv->phylink, false);
4080 	/* Stop and disconnect the PHY */
4081 	phylink_stop(priv->phylink);
4082 	phylink_disconnect_phy(priv->phylink);
4083 
4084 	stmmac_disable_all_queues(priv);
4085 
4086 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4087 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4088 
4089 	netif_tx_disable(dev);
4090 
4091 	/* Free the IRQ lines */
4092 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4093 
4094 	/* Stop TX/RX DMA and clear the descriptors */
4095 	stmmac_stop_all_dma(priv);
4096 
4097 	/* Release and free the Rx/Tx resources */
4098 	free_dma_desc_resources(priv, &priv->dma_conf);
4099 
4100 	/* Disable the MAC Rx/Tx */
4101 	stmmac_mac_set(priv, priv->ioaddr, false);
4102 
4103 	/* Powerdown Serdes if there is */
4104 	if (priv->plat->serdes_powerdown)
4105 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4106 
4107 	stmmac_release_ptp(priv);
4108 
4109 	if (stmmac_fpe_supported(priv))
4110 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4111 
4112 	pm_runtime_put(priv->device);
4113 
4114 	return 0;
4115 }
4116 
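/* Insert the VLAN tag of @skb into a dedicated TX descriptor when the core
 * supports in-descriptor VLAN insertion. Returns true when a descriptor was
 * consumed for the tag, false when the tag (if any) must be handled by other
 * means.
 */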
4117 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4118 			       struct stmmac_tx_queue *tx_q)
4119 {
4120 	u16 tag = 0x0, inner_tag = 0x0;
4121 	u32 inner_type = 0x0;
4122 	struct dma_desc *p;
4123 
4124 	if (!priv->dma_cap.vlins)
4125 		return false;
4126 	if (!skb_vlan_tag_present(skb))
4127 		return false;
4128 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4129 		inner_tag = skb_vlan_tag_get(skb);
4130 		inner_type = STMMAC_VLAN_INSERT;
4131 	}
4132 
4133 	tag = skb_vlan_tag_get(skb);
4134 
4135 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4136 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4137 	else
4138 		p = &tx_q->dma_tx[tx_q->cur_tx];
4139 
4140 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4141 		return false;
4142 
4143 	stmmac_set_tx_owner(priv, p);
4144 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4145 	return true;
4146 }
4147 
4148 /**
4149  *  stmmac_tso_allocator - Allocate TX descriptors for a TSO buffer
4150  *  @priv: driver private structure
4151  *  @des: buffer start address
4152  *  @total_len: total length to fill in descriptors
4153  *  @last_segment: condition for the last descriptor
4154  *  @queue: TX queue index
4155  *  Description:
4156  *  This function fills the descriptors and requests new descriptors according
4157  *  to the buffer length to fill
4158  */
4159 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4160 				 int total_len, bool last_segment, u32 queue)
4161 {
4162 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4163 	struct dma_desc *desc;
4164 	u32 buff_size;
4165 	int tmp_len;
4166 
4167 	tmp_len = total_len;
4168 
4169 	while (tmp_len > 0) {
4170 		dma_addr_t curr_addr;
4171 
4172 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4173 						priv->dma_conf.dma_tx_size);
4174 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4175 
4176 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4177 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4178 		else
4179 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4180 
4181 		curr_addr = des + (total_len - tmp_len);
4182 		stmmac_set_desc_addr(priv, desc, curr_addr);
4183 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4184 			    TSO_MAX_BUFF_SIZE : tmp_len;
4185 
4186 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4187 				0, 1,
4188 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4189 				0, 0);
4190 
4191 		tmp_len -= TSO_MAX_BUFF_SIZE;
4192 	}
4193 }
4194 
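/* Advance the TX tail pointer past the last prepared descriptor so that the
 * DMA engine starts (or continues) fetching the newly queued descriptors.
 */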
4195 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4196 {
4197 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4198 	int desc_size;
4199 
4200 	if (likely(priv->extend_desc))
4201 		desc_size = sizeof(struct dma_extended_desc);
4202 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4203 		desc_size = sizeof(struct dma_edesc);
4204 	else
4205 		desc_size = sizeof(struct dma_desc);
4206 
4207 	/* The own bit must be the latest setting done when preparing the
4208 	 * descriptor, and then a barrier is needed to make sure that
4209 	 * everything is coherent before granting ownership to the DMA engine.
4210 	 */
4211 	wmb();
4212 
4213 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4214 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4215 }
4216 
4217 /**
4218  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4219  *  @skb : the socket buffer
4220  *  @dev : device pointer
4221  *  Description: this is the transmit function that is called on TSO frames
4222  *  (support available on GMAC4 and newer chips).
4223  *  The diagram below shows the ring programming in case of TSO frames:
4224  *
4225  *  First Descriptor
4226  *   --------
4227  *   | DES0 |---> buffer1 = L2/L3/L4 header
4228  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4229  *   |      |     width is 32-bit, but we never use it.
4230  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4231  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4232  *   |      |     or 48-bit, and we always use it.
4233  *   | DES2 |---> buffer1 len
4234  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4235  *   --------
4236  *   --------
4237  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4238  *   | DES1 |---> same as the First Descriptor
4239  *   | DES2 |---> buffer1 len
4240  *   | DES3 |
4241  *   --------
4242  *	|
4243  *     ...
4244  *	|
4245  *   --------
4246  *   | DES0 |---> buffer1 = Split TCP Payload
4247  *   | DES1 |---> same as the First Descriptor
4248  *   | DES2 |---> buffer1 len
4249  *   | DES3 |
4250  *   --------
4251  *
4252  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when it changes.
4253  */
4254 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4255 {
4256 	struct dma_desc *desc, *first, *mss_desc = NULL;
4257 	struct stmmac_priv *priv = netdev_priv(dev);
4258 	unsigned int first_entry, tx_packets;
4259 	struct stmmac_txq_stats *txq_stats;
4260 	struct stmmac_tx_queue *tx_q;
4261 	u32 pay_len, mss, queue;
4262 	int i, first_tx, nfrags;
4263 	u8 proto_hdr_len, hdr;
4264 	dma_addr_t des;
4265 	bool set_ic;
4266 
4267 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4268 	 *
4269 	 * Never insert the VLAN tag in HW, since segments split by the
4270 	 * TSO engine would be left un-tagged by mistake.
4271 	 */
4272 	if (skb_vlan_tag_present(skb)) {
4273 		skb = __vlan_hwaccel_push_inside(skb);
4274 		if (unlikely(!skb)) {
4275 			priv->xstats.tx_dropped++;
4276 			return NETDEV_TX_OK;
4277 		}
4278 	}
4279 
4280 	nfrags = skb_shinfo(skb)->nr_frags;
4281 	queue = skb_get_queue_mapping(skb);
4282 
4283 	tx_q = &priv->dma_conf.tx_queue[queue];
4284 	txq_stats = &priv->xstats.txq_stats[queue];
4285 	first_tx = tx_q->cur_tx;
4286 
4287 	/* Compute header lengths */
4288 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4289 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4290 		hdr = sizeof(struct udphdr);
4291 	} else {
4292 		proto_hdr_len = skb_tcp_all_headers(skb);
4293 		hdr = tcp_hdrlen(skb);
4294 	}
4295 
4296 	/* Desc availability based on the threshold should be safe enough */
4297 	if (unlikely(stmmac_tx_avail(priv, queue) <
4298 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4299 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4300 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4301 								queue));
4302 			/* This is a hard error, log it. */
4303 			netdev_err(priv->dev,
4304 				   "%s: Tx Ring full when queue awake\n",
4305 				   __func__);
4306 		}
4307 		return NETDEV_TX_BUSY;
4308 	}
4309 
4310 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4311 
4312 	mss = skb_shinfo(skb)->gso_size;
4313 
4314 	/* set new MSS value if needed */
4315 	if (mss != tx_q->mss) {
4316 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4317 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4318 		else
4319 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4320 
4321 		stmmac_set_mss(priv, mss_desc, mss);
4322 		tx_q->mss = mss;
4323 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4324 						priv->dma_conf.dma_tx_size);
4325 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4326 	}
4327 
4328 	if (netif_msg_tx_queued(priv)) {
4329 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4330 			__func__, hdr, proto_hdr_len, pay_len, mss);
4331 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4332 			skb->data_len);
4333 	}
4334 
4335 	first_entry = tx_q->cur_tx;
4336 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4337 
4338 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4339 		desc = &tx_q->dma_entx[first_entry].basic;
4340 	else
4341 		desc = &tx_q->dma_tx[first_entry];
4342 	first = desc;
4343 
4344 	/* first descriptor: fill Headers on Buf1 */
4345 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4346 			     DMA_TO_DEVICE);
4347 	if (dma_mapping_error(priv->device, des))
4348 		goto dma_map_err;
4349 
4350 	stmmac_set_desc_addr(priv, first, des);
4351 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4352 			     (nfrags == 0), queue);
4353 
4354 	/* In case two or more DMA transmit descriptors are allocated for this
4355 	 * non-paged SKB data, the DMA buffer address should be saved to
4356 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4357 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4358 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4359 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4360 	 * sooner or later.
4361 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4362 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4363 	 * this DMA buffer right after the DMA engine completely finishes the
4364 	 * full buffer transmission.
4365 	 */
4366 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4367 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4368 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4369 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4370 
4371 	/* Prepare fragments */
4372 	for (i = 0; i < nfrags; i++) {
4373 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4374 
4375 		des = skb_frag_dma_map(priv->device, frag, 0,
4376 				       skb_frag_size(frag),
4377 				       DMA_TO_DEVICE);
4378 		if (dma_mapping_error(priv->device, des))
4379 			goto dma_map_err;
4380 
4381 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4382 				     (i == nfrags - 1), queue);
4383 
4384 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4385 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4386 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4387 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4388 	}
4389 
4390 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4391 
4392 	/* Only the last descriptor gets to point to the skb. */
4393 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4394 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4395 
4396 	/* Manage tx mitigation */
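	/* Request an Interrupt-on-Completion when the frame needs a HW
	 * timestamp or when the per-queue frame coalescing threshold is
	 * crossed; otherwise completion handling is left to the TX timer.
	 */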
4397 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4398 	tx_q->tx_count_frames += tx_packets;
4399 
4400 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4401 		set_ic = true;
4402 	else if (!priv->tx_coal_frames[queue])
4403 		set_ic = false;
4404 	else if (tx_packets > priv->tx_coal_frames[queue])
4405 		set_ic = true;
4406 	else if ((tx_q->tx_count_frames %
4407 		  priv->tx_coal_frames[queue]) < tx_packets)
4408 		set_ic = true;
4409 	else
4410 		set_ic = false;
4411 
4412 	if (set_ic) {
4413 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4414 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4415 		else
4416 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4417 
4418 		tx_q->tx_count_frames = 0;
4419 		stmmac_set_tx_ic(priv, desc);
4420 	}
4421 
4422 	/* We've used all descriptors we need for this skb, however,
4423 	 * advance cur_tx so that it references a fresh descriptor.
4424 	 * ndo_start_xmit will fill this descriptor the next time it's
4425 	 * called and stmmac_tx_clean may clean up to this descriptor.
4426 	 */
4427 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4428 
4429 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4430 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4431 			  __func__);
4432 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4433 	}
4434 
4435 	u64_stats_update_begin(&txq_stats->q_syncp);
4436 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4437 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4438 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4439 	if (set_ic)
4440 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4441 	u64_stats_update_end(&txq_stats->q_syncp);
4442 
4443 	if (priv->sarc_type)
4444 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4445 
4446 	skb_tx_timestamp(skb);
4447 
4448 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4449 		     priv->hwts_tx_en)) {
4450 		/* declare that device is doing timestamping */
4451 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4452 		stmmac_enable_tx_timestamp(priv, first);
4453 	}
4454 
4455 	/* Complete the first descriptor before granting the DMA */
4456 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4457 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4458 				   hdr / 4, (skb->len - proto_hdr_len));
4459 
4460 	/* If context desc is used to change MSS */
4461 	if (mss_desc) {
4462 		/* Make sure that the first descriptor has been completely
4463 		 * written, including its own bit. This is because the MSS
4464 		 * descriptor actually sits before the first descriptor, so we
4465 		 * must make sure that its own bit is the last thing written.
4466 		 */
4467 		dma_wmb();
4468 		stmmac_set_tx_owner(priv, mss_desc);
4469 	}
4470 
4471 	if (netif_msg_pktdata(priv)) {
4472 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4473 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4474 			tx_q->cur_tx, first, nfrags);
4475 		pr_info(">>> frame to be transmitted: ");
4476 		print_pkt(skb->data, skb_headlen(skb));
4477 	}
4478 
4479 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4480 
4481 	stmmac_flush_tx_descriptors(priv, queue);
4482 	stmmac_tx_timer_arm(priv, queue);
4483 
4484 	return NETDEV_TX_OK;
4485 
4486 dma_map_err:
4487 	dev_err(priv->device, "Tx dma map failed\n");
4488 	dev_kfree_skb(skb);
4489 	priv->xstats.tx_dropped++;
4490 	return NETDEV_TX_OK;
4491 }
4492 
4493 /**
4494  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4495  * @skb: socket buffer to check
4496  *
4497  * Check if a packet has an ethertype that will trigger the IP header checks
4498  * and IP/TCP checksum engine of the stmmac core.
4499  *
4500  * Return: true if the ethertype can trigger the checksum engine, false
4501  * otherwise
4502  */
4503 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4504 {
4505 	int depth = 0;
4506 	__be16 proto;
4507 
4508 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4509 				    &depth);
4510 
4511 	return (depth <= ETH_HLEN) &&
4512 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4513 }
4514 
4515 /**
4516  *  stmmac_xmit - Tx entry point of the driver
4517  *  @skb : the socket buffer
4518  *  @dev : device pointer
4519  *  Description : this is the tx entry point of the driver.
4520  *  It programs the chain or the ring and supports oversized frames
4521  *  and SG feature.
4522  */
4523 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4524 {
4525 	unsigned int first_entry, tx_packets, enh_desc;
4526 	struct stmmac_priv *priv = netdev_priv(dev);
4527 	unsigned int nopaged_len = skb_headlen(skb);
4528 	int i, csum_insertion = 0, is_jumbo = 0;
4529 	u32 queue = skb_get_queue_mapping(skb);
4530 	int nfrags = skb_shinfo(skb)->nr_frags;
4531 	int gso = skb_shinfo(skb)->gso_type;
4532 	struct stmmac_txq_stats *txq_stats;
4533 	struct dma_edesc *tbs_desc = NULL;
4534 	struct dma_desc *desc, *first;
4535 	struct stmmac_tx_queue *tx_q;
4536 	bool has_vlan, set_ic;
4537 	int entry, first_tx;
4538 	dma_addr_t des;
4539 
4540 	tx_q = &priv->dma_conf.tx_queue[queue];
4541 	txq_stats = &priv->xstats.txq_stats[queue];
4542 	first_tx = tx_q->cur_tx;
4543 
4544 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4545 		stmmac_stop_sw_lpi(priv);
4546 
4547 	/* Manage oversized TCP frames for GMAC4 device */
4548 	if (skb_is_gso(skb) && priv->tso) {
4549 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4550 			return stmmac_tso_xmit(skb, dev);
4551 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4552 			return stmmac_tso_xmit(skb, dev);
4553 	}
4554 
4555 	if (priv->est && priv->est->enable &&
4556 	    priv->est->max_sdu[queue] &&
4557 	    skb->len > priv->est->max_sdu[queue]) {
4558 		priv->xstats.max_sdu_txq_drop[queue]++;
4559 		goto max_sdu_err;
4560 	}
4561 
4562 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4563 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4564 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4565 								queue));
4566 			/* This is a hard error, log it. */
4567 			netdev_err(priv->dev,
4568 				   "%s: Tx Ring full when queue awake\n",
4569 				   __func__);
4570 		}
4571 		return NETDEV_TX_BUSY;
4572 	}
4573 
4574 	/* Check if VLAN can be inserted by HW */
4575 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4576 
4577 	entry = tx_q->cur_tx;
4578 	first_entry = entry;
4579 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4580 
4581 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4582 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4583 	 * queues. In that case, checksum offloading for those queues that don't
4584 	 * support tx coe needs to fallback to software checksum calculation.
4585 	 *
4586 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4587 	 * also have to be checksummed in software.
4588 	 */
4589 	if (csum_insertion &&
4590 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4591 	     !stmmac_has_ip_ethertype(skb))) {
4592 		if (unlikely(skb_checksum_help(skb)))
4593 			goto dma_map_err;
4594 		csum_insertion = !csum_insertion;
4595 	}
4596 
4597 	if (likely(priv->extend_desc))
4598 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4599 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4600 		desc = &tx_q->dma_entx[entry].basic;
4601 	else
4602 		desc = tx_q->dma_tx + entry;
4603 
4604 	first = desc;
4605 
4606 	if (has_vlan)
4607 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4608 
4609 	enh_desc = priv->plat->enh_desc;
4610 	/* To program the descriptors according to the size of the frame */
4611 	if (enh_desc)
4612 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4613 
4614 	if (unlikely(is_jumbo)) {
4615 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4616 		if (unlikely(entry < 0) && (entry != -EINVAL))
4617 			goto dma_map_err;
4618 	}
4619 
4620 	for (i = 0; i < nfrags; i++) {
4621 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4622 		int len = skb_frag_size(frag);
4623 		bool last_segment = (i == (nfrags - 1));
4624 
4625 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4626 		WARN_ON(tx_q->tx_skbuff[entry]);
4627 
4628 		if (likely(priv->extend_desc))
4629 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4630 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4631 			desc = &tx_q->dma_entx[entry].basic;
4632 		else
4633 			desc = tx_q->dma_tx + entry;
4634 
4635 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4636 				       DMA_TO_DEVICE);
4637 		if (dma_mapping_error(priv->device, des))
4638 			goto dma_map_err; /* should reuse desc w/o issues */
4639 
4640 		tx_q->tx_skbuff_dma[entry].buf = des;
4641 
4642 		stmmac_set_desc_addr(priv, desc, des);
4643 
4644 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4645 		tx_q->tx_skbuff_dma[entry].len = len;
4646 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4647 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4648 
4649 		/* Prepare the descriptor and set the own bit too */
4650 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4651 				priv->mode, 1, last_segment, skb->len);
4652 	}
4653 
4654 	/* Only the last descriptor gets to point to the skb. */
4655 	tx_q->tx_skbuff[entry] = skb;
4656 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4657 
4658 	/* According to the coalesce parameter the IC bit for the latest
4659 	 * segment is reset and the timer re-started to clean the tx status.
4660 	 * This approach takes care of the fragments: desc is the first
4661 	 * element in case of no SG.
4662 	 */
4663 	tx_packets = (entry + 1) - first_tx;
4664 	tx_q->tx_count_frames += tx_packets;
4665 
4666 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4667 		set_ic = true;
4668 	else if (!priv->tx_coal_frames[queue])
4669 		set_ic = false;
4670 	else if (tx_packets > priv->tx_coal_frames[queue])
4671 		set_ic = true;
4672 	else if ((tx_q->tx_count_frames %
4673 		  priv->tx_coal_frames[queue]) < tx_packets)
4674 		set_ic = true;
4675 	else
4676 		set_ic = false;
4677 
4678 	if (set_ic) {
4679 		if (likely(priv->extend_desc))
4680 			desc = &tx_q->dma_etx[entry].basic;
4681 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4682 			desc = &tx_q->dma_entx[entry].basic;
4683 		else
4684 			desc = &tx_q->dma_tx[entry];
4685 
4686 		tx_q->tx_count_frames = 0;
4687 		stmmac_set_tx_ic(priv, desc);
4688 	}
4689 
4690 	/* We've used all descriptors we need for this skb, however,
4691 	 * advance cur_tx so that it references a fresh descriptor.
4692 	 * ndo_start_xmit will fill this descriptor the next time it's
4693 	 * called and stmmac_tx_clean may clean up to this descriptor.
4694 	 */
4695 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4696 	tx_q->cur_tx = entry;
4697 
4698 	if (netif_msg_pktdata(priv)) {
4699 		netdev_dbg(priv->dev,
4700 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4701 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4702 			   entry, first, nfrags);
4703 
4704 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4705 		print_pkt(skb->data, skb->len);
4706 	}
4707 
4708 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4709 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4710 			  __func__);
4711 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4712 	}
4713 
4714 	u64_stats_update_begin(&txq_stats->q_syncp);
4715 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4716 	if (set_ic)
4717 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4718 	u64_stats_update_end(&txq_stats->q_syncp);
4719 
4720 	if (priv->sarc_type)
4721 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4722 
4723 	skb_tx_timestamp(skb);
4724 
4725 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4726 	 * problems because all the descriptors are actually ready to be
4727 	 * passed to the DMA engine.
4728 	 */
4729 	if (likely(!is_jumbo)) {
4730 		bool last_segment = (nfrags == 0);
4731 
4732 		des = dma_map_single(priv->device, skb->data,
4733 				     nopaged_len, DMA_TO_DEVICE);
4734 		if (dma_mapping_error(priv->device, des))
4735 			goto dma_map_err;
4736 
4737 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4738 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4739 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4740 
4741 		stmmac_set_desc_addr(priv, first, des);
4742 
4743 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4744 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4745 
4746 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4747 			     priv->hwts_tx_en)) {
4748 			/* declare that device is doing timestamping */
4749 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4750 			stmmac_enable_tx_timestamp(priv, first);
4751 		}
4752 
4753 		/* Prepare the first descriptor setting the OWN bit too */
4754 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4755 				csum_insertion, priv->mode, 0, last_segment,
4756 				skb->len);
4757 	}
4758 
4759 	if (tx_q->tbs & STMMAC_TBS_EN) {
4760 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4761 
4762 		tbs_desc = &tx_q->dma_entx[first_entry];
4763 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4764 	}
4765 
4766 	stmmac_set_tx_owner(priv, first);
4767 
4768 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4769 
4770 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4771 
4772 	stmmac_flush_tx_descriptors(priv, queue);
4773 	stmmac_tx_timer_arm(priv, queue);
4774 
4775 	return NETDEV_TX_OK;
4776 
4777 dma_map_err:
4778 	netdev_err(priv->dev, "Tx DMA map failed\n");
4779 max_sdu_err:
4780 	dev_kfree_skb(skb);
4781 	priv->xstats.tx_dropped++;
4782 	return NETDEV_TX_OK;
4783 }
4784 
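/* Driver-level VLAN tag stripping: pop the tag from the packet data and hand
 * it to the stack through the hwaccel tag field when the corresponding RX
 * VLAN offload feature is enabled.
 */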
4785 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4786 {
4787 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4788 	__be16 vlan_proto = veth->h_vlan_proto;
4789 	u16 vlanid;
4790 
4791 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4792 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4793 	    (vlan_proto == htons(ETH_P_8021AD) &&
4794 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4795 		/* pop the vlan tag */
4796 		vlanid = ntohs(veth->h_vlan_TCI);
4797 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4798 		skb_pull(skb, VLAN_HLEN);
4799 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4800 	}
4801 }
4802 
4803 /**
4804  * stmmac_rx_refill - refill the used RX preallocated buffers
4805  * @priv: driver private structure
4806  * @queue: RX queue index
4807  * Description: this is to reallocate the RX buffers for the reception
4808  * process that is based on zero-copy.
4809  */
4810 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4811 {
4812 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4813 	int dirty = stmmac_rx_dirty(priv, queue);
4814 	unsigned int entry = rx_q->dirty_rx;
4815 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4816 
4817 	if (priv->dma_cap.host_dma_width <= 32)
4818 		gfp |= GFP_DMA32;
4819 
4820 	while (dirty-- > 0) {
4821 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4822 		struct dma_desc *p;
4823 		bool use_rx_wd;
4824 
4825 		if (priv->extend_desc)
4826 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4827 		else
4828 			p = rx_q->dma_rx + entry;
4829 
4830 		if (!buf->page) {
4831 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4832 			if (!buf->page)
4833 				break;
4834 		}
4835 
4836 		if (priv->sph && !buf->sec_page) {
4837 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4838 			if (!buf->sec_page)
4839 				break;
4840 
4841 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4842 		}
4843 
4844 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4845 
4846 		stmmac_set_desc_addr(priv, p, buf->addr);
4847 		if (priv->sph)
4848 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4849 		else
4850 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4851 		stmmac_refill_desc3(priv, rx_q, p);
4852 
4853 		rx_q->rx_count_frames++;
4854 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4855 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4856 			rx_q->rx_count_frames = 0;
4857 
4858 		use_rx_wd = !priv->rx_coal_frames[queue];
4859 		use_rx_wd |= rx_q->rx_count_frames > 0;
4860 		if (!priv->use_riwt)
4861 			use_rx_wd = false;
4862 
4863 		dma_wmb();
4864 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4865 
4866 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4867 	}
4868 	rx_q->dirty_rx = entry;
4869 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4870 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4871 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4872 }
4873 
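/* Compute how many bytes of the received frame landed in buffer 1 of
 * descriptor @p, taking Split Header mode, the not-last-segment status and
 * the configured RX buffer size into account.
 */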
4874 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4875 				       struct dma_desc *p,
4876 				       int status, unsigned int len)
4877 {
4878 	unsigned int plen = 0, hlen = 0;
4879 	int coe = priv->hw->rx_csum;
4880 
4881 	/* Not first descriptor, buffer is always zero */
4882 	if (priv->sph && len)
4883 		return 0;
4884 
4885 	/* First descriptor, get split header length */
4886 	stmmac_get_rx_header_len(priv, p, &hlen);
4887 	if (priv->sph && hlen) {
4888 		priv->xstats.rx_split_hdr_pkt_n++;
4889 		return hlen;
4890 	}
4891 
4892 	/* First descriptor, not last descriptor and not split header */
4893 	if (status & rx_not_ls)
4894 		return priv->dma_conf.dma_buf_sz;
4895 
4896 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4897 
4898 	/* First descriptor and last descriptor and not split header */
4899 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4900 }
4901 
4902 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4903 				       struct dma_desc *p,
4904 				       int status, unsigned int len)
4905 {
4906 	int coe = priv->hw->rx_csum;
4907 	unsigned int plen = 0;
4908 
4909 	/* Not split header, buffer is not available */
4910 	if (!priv->sph)
4911 		return 0;
4912 
4913 	/* Not last descriptor */
4914 	if (status & rx_not_ls)
4915 		return priv->dma_conf.dma_buf_sz;
4916 
4917 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4918 
4919 	/* Last descriptor */
4920 	return plen - len;
4921 }
4922 
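/* Queue an XDP frame for transmission on @queue. When @dma_map is true the
 * frame data is freshly DMA-mapped and marked STMMAC_TXBUF_T_XDP_NDO,
 * otherwise the page pool DMA address is reused and the buffer is marked
 * STMMAC_TXBUF_T_XDP_TX.
 */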
4923 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4924 				struct xdp_frame *xdpf, bool dma_map)
4925 {
4926 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4927 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4928 	unsigned int entry = tx_q->cur_tx;
4929 	struct dma_desc *tx_desc;
4930 	dma_addr_t dma_addr;
4931 	bool set_ic;
4932 
4933 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4934 		return STMMAC_XDP_CONSUMED;
4935 
4936 	if (priv->est && priv->est->enable &&
4937 	    priv->est->max_sdu[queue] &&
4938 	    xdpf->len > priv->est->max_sdu[queue]) {
4939 		priv->xstats.max_sdu_txq_drop[queue]++;
4940 		return STMMAC_XDP_CONSUMED;
4941 	}
4942 
4943 	if (likely(priv->extend_desc))
4944 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4945 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4946 		tx_desc = &tx_q->dma_entx[entry].basic;
4947 	else
4948 		tx_desc = tx_q->dma_tx + entry;
4949 
4950 	if (dma_map) {
4951 		dma_addr = dma_map_single(priv->device, xdpf->data,
4952 					  xdpf->len, DMA_TO_DEVICE);
4953 		if (dma_mapping_error(priv->device, dma_addr))
4954 			return STMMAC_XDP_CONSUMED;
4955 
4956 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4957 	} else {
4958 		struct page *page = virt_to_page(xdpf->data);
4959 
4960 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4961 			   xdpf->headroom;
4962 		dma_sync_single_for_device(priv->device, dma_addr,
4963 					   xdpf->len, DMA_BIDIRECTIONAL);
4964 
4965 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4966 	}
4967 
4968 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4969 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4970 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4971 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4972 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4973 
4974 	tx_q->xdpf[entry] = xdpf;
4975 
4976 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4977 
4978 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4979 			       true, priv->mode, true, true,
4980 			       xdpf->len);
4981 
4982 	tx_q->tx_count_frames++;
4983 
4984 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4985 		set_ic = true;
4986 	else
4987 		set_ic = false;
4988 
4989 	if (set_ic) {
4990 		tx_q->tx_count_frames = 0;
4991 		stmmac_set_tx_ic(priv, tx_desc);
4992 		u64_stats_update_begin(&txq_stats->q_syncp);
4993 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4994 		u64_stats_update_end(&txq_stats->q_syncp);
4995 	}
4996 
4997 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4998 
4999 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5000 	tx_q->cur_tx = entry;
5001 
5002 	return STMMAC_XDP_TX;
5003 }
5004 
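/* Map the current CPU to a TX queue index, wrapping around the number of TX
 * queues in use so that XDP TX can share the queues with the regular
 * transmit path.
 */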
5005 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5006 				   int cpu)
5007 {
5008 	int index = cpu;
5009 
5010 	if (unlikely(index < 0))
5011 		index = 0;
5012 
5013 	while (index >= priv->plat->tx_queues_to_use)
5014 		index -= priv->plat->tx_queues_to_use;
5015 
5016 	return index;
5017 }
5018 
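/* Transmit an XDP_TX buffer on the TX queue mapped to the current CPU; the
 * netdev TX queue lock is taken because the queue is shared with the slow
 * path.
 */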
5019 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5020 				struct xdp_buff *xdp)
5021 {
5022 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5023 	int cpu = smp_processor_id();
5024 	struct netdev_queue *nq;
5025 	int queue;
5026 	int res;
5027 
5028 	if (unlikely(!xdpf))
5029 		return STMMAC_XDP_CONSUMED;
5030 
5031 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5032 	nq = netdev_get_tx_queue(priv->dev, queue);
5033 
5034 	__netif_tx_lock(nq, cpu);
5035 	/* Avoids TX time-out as we are sharing with slow path */
5036 	txq_trans_cond_update(nq);
5037 
5038 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5039 	if (res == STMMAC_XDP_TX)
5040 		stmmac_flush_tx_descriptors(priv, queue);
5041 
5042 	__netif_tx_unlock(nq);
5043 
5044 	return res;
5045 }
5046 
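/* Run the XDP program on the received buffer and translate the XDP verdict
 * into the driver's STMMAC_XDP_* result flags.
 */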
5047 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5048 				 struct bpf_prog *prog,
5049 				 struct xdp_buff *xdp)
5050 {
5051 	u32 act;
5052 	int res;
5053 
5054 	act = bpf_prog_run_xdp(prog, xdp);
5055 	switch (act) {
5056 	case XDP_PASS:
5057 		res = STMMAC_XDP_PASS;
5058 		break;
5059 	case XDP_TX:
5060 		res = stmmac_xdp_xmit_back(priv, xdp);
5061 		break;
5062 	case XDP_REDIRECT:
5063 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5064 			res = STMMAC_XDP_CONSUMED;
5065 		else
5066 			res = STMMAC_XDP_REDIRECT;
5067 		break;
5068 	default:
5069 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5070 		fallthrough;
5071 	case XDP_ABORTED:
5072 		trace_xdp_exception(priv->dev, prog, act);
5073 		fallthrough;
5074 	case XDP_DROP:
5075 		res = STMMAC_XDP_CONSUMED;
5076 		break;
5077 	}
5078 
5079 	return res;
5080 }
5081 
5082 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5083 					   struct xdp_buff *xdp)
5084 {
5085 	struct bpf_prog *prog;
5086 	int res;
5087 
5088 	prog = READ_ONCE(priv->xdp_prog);
5089 	if (!prog) {
5090 		res = STMMAC_XDP_PASS;
5091 		goto out;
5092 	}
5093 
5094 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5095 out:
5096 	return ERR_PTR(-res);
5097 }
5098 
5099 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5100 				   int xdp_status)
5101 {
5102 	int cpu = smp_processor_id();
5103 	int queue;
5104 
5105 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5106 
5107 	if (xdp_status & STMMAC_XDP_TX)
5108 		stmmac_tx_timer_arm(priv, queue);
5109 
5110 	if (xdp_status & STMMAC_XDP_REDIRECT)
5111 		xdp_do_flush();
5112 }
5113 
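/* Build an skb from an XSK zero-copy buffer by copying the frame data and
 * preserving any XDP metadata set by the XDP program.
 */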
5114 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5115 					       struct xdp_buff *xdp)
5116 {
5117 	unsigned int metasize = xdp->data - xdp->data_meta;
5118 	unsigned int datasize = xdp->data_end - xdp->data;
5119 	struct sk_buff *skb;
5120 
5121 	skb = napi_alloc_skb(&ch->rxtx_napi,
5122 			     xdp->data_end - xdp->data_hard_start);
5123 	if (unlikely(!skb))
5124 		return NULL;
5125 
5126 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5127 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5128 	if (metasize)
5129 		skb_metadata_set(skb, metasize);
5130 
5131 	return skb;
5132 }
5133 
5134 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5135 				   struct dma_desc *p, struct dma_desc *np,
5136 				   struct xdp_buff *xdp)
5137 {
5138 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5139 	struct stmmac_channel *ch = &priv->channel[queue];
5140 	unsigned int len = xdp->data_end - xdp->data;
5141 	enum pkt_hash_types hash_type;
5142 	int coe = priv->hw->rx_csum;
5143 	struct sk_buff *skb;
5144 	u32 hash;
5145 
5146 	skb = stmmac_construct_skb_zc(ch, xdp);
5147 	if (!skb) {
5148 		priv->xstats.rx_dropped++;
5149 		return;
5150 	}
5151 
5152 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5153 	if (priv->hw->hw_vlan_en)
5154 		/* MAC level stripping. */
5155 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5156 	else
5157 		/* Driver level stripping. */
5158 		stmmac_rx_vlan(priv->dev, skb);
5159 	skb->protocol = eth_type_trans(skb, priv->dev);
5160 
5161 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5162 		skb_checksum_none_assert(skb);
5163 	else
5164 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5165 
5166 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5167 		skb_set_hash(skb, hash, hash_type);
5168 
5169 	skb_record_rx_queue(skb, queue);
5170 	napi_gro_receive(&ch->rxtx_napi, skb);
5171 
5172 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5173 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5174 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5175 	u64_stats_update_end(&rxq_stats->napi_syncp);
5176 }
5177 
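/* Refill up to @budget dirty RX descriptors with buffers taken from the
 * XSK pool and advance the RX tail pointer. Returns false if the pool
 * ran out of buffers before the refill completed.
 */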
5178 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5179 {
5180 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5181 	unsigned int entry = rx_q->dirty_rx;
5182 	struct dma_desc *rx_desc = NULL;
5183 	bool ret = true;
5184 
5185 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5186 
5187 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5188 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5189 		dma_addr_t dma_addr;
5190 		bool use_rx_wd;
5191 
5192 		if (!buf->xdp) {
5193 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5194 			if (!buf->xdp) {
5195 				ret = false;
5196 				break;
5197 			}
5198 		}
5199 
5200 		if (priv->extend_desc)
5201 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5202 		else
5203 			rx_desc = rx_q->dma_rx + entry;
5204 
5205 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5206 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5207 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5208 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5209 
5210 		rx_q->rx_count_frames++;
5211 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5212 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5213 			rx_q->rx_count_frames = 0;
5214 
5215 		use_rx_wd = !priv->rx_coal_frames[queue];
5216 		use_rx_wd |= rx_q->rx_count_frames > 0;
5217 		if (!priv->use_riwt)
5218 			use_rx_wd = false;
5219 
5220 		dma_wmb();
5221 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5222 
5223 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5224 	}
5225 
5226 	if (rx_desc) {
5227 		rx_q->dirty_rx = entry;
5228 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5229 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5230 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5231 	}
5232 
5233 	return ret;
5234 }
5235 
5236 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5237 {
5238 	/* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
5239 	 * represents the incoming packet, whereas the cb field in the same
5240 	 * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
5241 	 * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
5242 	 */
5243 	return (struct stmmac_xdp_buff *)xdp;
5244 }
5245 
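/**
 * stmmac_rx_zc - manage the receive process in XDP zero-copy mode
 * @priv: driver private structure
 * @limit: NAPI budget
 * @queue: RX queue index.
 * Description: zero-copy counterpart of stmmac_rx(). Buffers come from
 * the XSK pool and each frame is run through the XDP program before
 * being dispatched or recycled.
 */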
5246 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5247 {
5248 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5249 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5250 	unsigned int count = 0, error = 0, len = 0;
5251 	int dirty = stmmac_rx_dirty(priv, queue);
5252 	unsigned int next_entry = rx_q->cur_rx;
5253 	u32 rx_errors = 0, rx_dropped = 0;
5254 	unsigned int desc_size;
5255 	struct bpf_prog *prog;
5256 	bool failure = false;
5257 	int xdp_status = 0;
5258 	int status = 0;
5259 
5260 	if (netif_msg_rx_status(priv)) {
5261 		void *rx_head;
5262 
5263 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5264 		if (priv->extend_desc) {
5265 			rx_head = (void *)rx_q->dma_erx;
5266 			desc_size = sizeof(struct dma_extended_desc);
5267 		} else {
5268 			rx_head = (void *)rx_q->dma_rx;
5269 			desc_size = sizeof(struct dma_desc);
5270 		}
5271 
5272 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5273 				    rx_q->dma_rx_phy, desc_size);
5274 	}
5275 	while (count < limit) {
5276 		struct stmmac_rx_buffer *buf;
5277 		struct stmmac_xdp_buff *ctx;
5278 		unsigned int buf1_len = 0;
5279 		struct dma_desc *np, *p;
5280 		int entry;
5281 		int res;
5282 
5283 		if (!count && rx_q->state_saved) {
5284 			error = rx_q->state.error;
5285 			len = rx_q->state.len;
5286 		} else {
5287 			rx_q->state_saved = false;
5288 			error = 0;
5289 			len = 0;
5290 		}
5291 
5292 		if (count >= limit)
5293 			break;
5294 
5295 read_again:
5296 		buf1_len = 0;
5297 		entry = next_entry;
5298 		buf = &rx_q->buf_pool[entry];
5299 
5300 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5301 			failure = failure ||
5302 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5303 			dirty = 0;
5304 		}
5305 
5306 		if (priv->extend_desc)
5307 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5308 		else
5309 			p = rx_q->dma_rx + entry;
5310 
5311 		/* read the status of the incoming frame */
5312 		status = stmmac_rx_status(priv, &priv->xstats, p);
5313 		/* check if it is still owned by the DMA; otherwise go ahead */
5314 		if (unlikely(status & dma_own))
5315 			break;
5316 
5317 		/* Prefetch the next RX descriptor */
5318 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5319 						priv->dma_conf.dma_rx_size);
5320 		next_entry = rx_q->cur_rx;
5321 
5322 		if (priv->extend_desc)
5323 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5324 		else
5325 			np = rx_q->dma_rx + next_entry;
5326 
5327 		prefetch(np);
5328 
5329 		/* Ensure a valid XSK buffer before proceeding */
5330 		if (!buf->xdp)
5331 			break;
5332 
5333 		if (priv->extend_desc)
5334 			stmmac_rx_extended_status(priv, &priv->xstats,
5335 						  rx_q->dma_erx + entry);
5336 		if (unlikely(status == discard_frame)) {
5337 			xsk_buff_free(buf->xdp);
5338 			buf->xdp = NULL;
5339 			dirty++;
5340 			error = 1;
5341 			if (!priv->hwts_rx_en)
5342 				rx_errors++;
5343 		}
5344 
5345 		if (unlikely(error && (status & rx_not_ls)))
5346 			goto read_again;
5347 		if (unlikely(error)) {
5348 			count++;
5349 			continue;
5350 		}
5351 
5352 		/* The XSK pool expects each RX frame to map 1:1 to an XSK buffer */
5353 		if (likely(status & rx_not_ls)) {
5354 			xsk_buff_free(buf->xdp);
5355 			buf->xdp = NULL;
5356 			dirty++;
5357 			count++;
5358 			goto read_again;
5359 		}
5360 
5361 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5362 		ctx->priv = priv;
5363 		ctx->desc = p;
5364 		ctx->ndesc = np;
5365 
5366 		/* XDP ZC frames only support primary buffers for now */
5367 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5368 		len += buf1_len;
5369 
5370 		/* ACS is disabled; strip manually. */
5371 		if (likely(!(status & rx_not_ls))) {
5372 			buf1_len -= ETH_FCS_LEN;
5373 			len -= ETH_FCS_LEN;
5374 		}
5375 
5376 		/* RX buffer is good and fits into an XSK pool buffer */
5377 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5378 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5379 
5380 		prog = READ_ONCE(priv->xdp_prog);
5381 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5382 
5383 		switch (res) {
5384 		case STMMAC_XDP_PASS:
5385 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5386 			xsk_buff_free(buf->xdp);
5387 			break;
5388 		case STMMAC_XDP_CONSUMED:
5389 			xsk_buff_free(buf->xdp);
5390 			rx_dropped++;
5391 			break;
5392 		case STMMAC_XDP_TX:
5393 		case STMMAC_XDP_REDIRECT:
5394 			xdp_status |= res;
5395 			break;
5396 		}
5397 
5398 		buf->xdp = NULL;
5399 		dirty++;
5400 		count++;
5401 	}
5402 
5403 	if (status & rx_not_ls) {
5404 		rx_q->state_saved = true;
5405 		rx_q->state.error = error;
5406 		rx_q->state.len = len;
5407 	}
5408 
5409 	stmmac_finalize_xdp_rx(priv, xdp_status);
5410 
5411 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5412 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5413 	u64_stats_update_end(&rxq_stats->napi_syncp);
5414 
5415 	priv->xstats.rx_dropped += rx_dropped;
5416 	priv->xstats.rx_errors += rx_errors;
5417 
5418 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5419 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5420 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5421 		else
5422 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5423 
5424 		return (int)count;
5425 	}
5426 
5427 	return failure ? limit : (int)count;
5428 }
5429 
5430 /**
5431  * stmmac_rx - manage the receive process
5432  * @priv: driver private structure
5433  * @limit: NAPI budget
5434  * @queue: RX queue index.
5435  * Description: this is the function called by the NAPI poll method.
5436  * It gets all the frames inside the ring.
5437  */
5438 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5439 {
5440 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5441 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5442 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5443 	struct stmmac_channel *ch = &priv->channel[queue];
5444 	unsigned int count = 0, error = 0, len = 0;
5445 	int status = 0, coe = priv->hw->rx_csum;
5446 	unsigned int next_entry = rx_q->cur_rx;
5447 	enum dma_data_direction dma_dir;
5448 	unsigned int desc_size;
5449 	struct sk_buff *skb = NULL;
5450 	struct stmmac_xdp_buff ctx;
5451 	int xdp_status = 0;
5452 	int buf_sz;
5453 
5454 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5455 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5456 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5457 
5458 	if (netif_msg_rx_status(priv)) {
5459 		void *rx_head;
5460 
5461 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5462 		if (priv->extend_desc) {
5463 			rx_head = (void *)rx_q->dma_erx;
5464 			desc_size = sizeof(struct dma_extended_desc);
5465 		} else {
5466 			rx_head = (void *)rx_q->dma_rx;
5467 			desc_size = sizeof(struct dma_desc);
5468 		}
5469 
5470 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5471 				    rx_q->dma_rx_phy, desc_size);
5472 	}
5473 	while (count < limit) {
5474 		unsigned int buf1_len = 0, buf2_len = 0;
5475 		enum pkt_hash_types hash_type;
5476 		struct stmmac_rx_buffer *buf;
5477 		struct dma_desc *np, *p;
5478 		int entry;
5479 		u32 hash;
5480 
5481 		if (!count && rx_q->state_saved) {
5482 			skb = rx_q->state.skb;
5483 			error = rx_q->state.error;
5484 			len = rx_q->state.len;
5485 		} else {
5486 			rx_q->state_saved = false;
5487 			skb = NULL;
5488 			error = 0;
5489 			len = 0;
5490 		}
5491 
5492 read_again:
5493 		if (count >= limit)
5494 			break;
5495 
5496 		buf1_len = 0;
5497 		buf2_len = 0;
5498 		entry = next_entry;
5499 		buf = &rx_q->buf_pool[entry];
5500 
5501 		if (priv->extend_desc)
5502 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5503 		else
5504 			p = rx_q->dma_rx + entry;
5505 
5506 		/* read the status of the incoming frame */
5507 		status = stmmac_rx_status(priv, &priv->xstats, p);
5508 		/* check if it is still owned by the DMA; otherwise go ahead */
5509 		if (unlikely(status & dma_own))
5510 			break;
5511 
5512 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5513 						priv->dma_conf.dma_rx_size);
5514 		next_entry = rx_q->cur_rx;
5515 
5516 		if (priv->extend_desc)
5517 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5518 		else
5519 			np = rx_q->dma_rx + next_entry;
5520 
5521 		prefetch(np);
5522 
5523 		if (priv->extend_desc)
5524 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5525 		if (unlikely(status == discard_frame)) {
5526 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5527 			buf->page = NULL;
5528 			error = 1;
5529 			if (!priv->hwts_rx_en)
5530 				rx_errors++;
5531 		}
5532 
5533 		if (unlikely(error && (status & rx_not_ls)))
5534 			goto read_again;
5535 		if (unlikely(error)) {
5536 			dev_kfree_skb(skb);
5537 			skb = NULL;
5538 			count++;
5539 			continue;
5540 		}
5541 
5542 		/* Buffer is good. Go on. */
5543 
5544 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5545 		len += buf1_len;
5546 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5547 		len += buf2_len;
5548 
5549 		/* ACS is disabled; strip manually. */
5550 		if (likely(!(status & rx_not_ls))) {
5551 			if (buf2_len) {
5552 				buf2_len -= ETH_FCS_LEN;
5553 				len -= ETH_FCS_LEN;
5554 			} else if (buf1_len) {
5555 				buf1_len -= ETH_FCS_LEN;
5556 				len -= ETH_FCS_LEN;
5557 			}
5558 		}
5559 
5560 		if (!skb) {
5561 			unsigned int pre_len, sync_len;
5562 
5563 			dma_sync_single_for_cpu(priv->device, buf->addr,
5564 						buf1_len, dma_dir);
5565 			net_prefetch(page_address(buf->page) +
5566 				     buf->page_offset);
5567 
5568 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5569 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5570 					 buf->page_offset, buf1_len, true);
5571 
5572 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5573 				  buf->page_offset;
5574 
5575 			ctx.priv = priv;
5576 			ctx.desc = p;
5577 			ctx.ndesc = np;
5578 
5579 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5580 			/* Due to xdp_adjust_tail: the DMA sync for_device
5581 			 * must cover the maximum length the CPU touched.
5582 			 */
5583 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5584 				   buf->page_offset;
5585 			sync_len = max(sync_len, pre_len);
5586 
5587 			/* For any verdict other than XDP_PASS */
5588 			if (IS_ERR(skb)) {
5589 				unsigned int xdp_res = -PTR_ERR(skb);
5590 
5591 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5592 					page_pool_put_page(rx_q->page_pool,
5593 							   virt_to_head_page(ctx.xdp.data),
5594 							   sync_len, true);
5595 					buf->page = NULL;
5596 					rx_dropped++;
5597 
5598 					/* Clear skb as it only carried the
5599 					 * verdict from the XDP program.
5600 					 */
5601 					skb = NULL;
5602 
5603 					if (unlikely((status & rx_not_ls)))
5604 						goto read_again;
5605 
5606 					count++;
5607 					continue;
5608 				} else if (xdp_res & (STMMAC_XDP_TX |
5609 						      STMMAC_XDP_REDIRECT)) {
5610 					xdp_status |= xdp_res;
5611 					buf->page = NULL;
5612 					skb = NULL;
5613 					count++;
5614 					continue;
5615 				}
5616 			}
5617 		}
5618 
5619 		if (!skb) {
5620 			unsigned int head_pad_len;
5621 
5622 			/* XDP program may expand or reduce tail */
5623 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5624 
5625 			skb = napi_build_skb(page_address(buf->page),
5626 					     rx_q->napi_skb_frag_size);
5627 			if (!skb) {
5628 				page_pool_recycle_direct(rx_q->page_pool,
5629 							 buf->page);
5630 				rx_dropped++;
5631 				count++;
5632 				goto drain_data;
5633 			}
5634 
5635 			/* XDP program may adjust header */
5636 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5637 			skb_reserve(skb, head_pad_len);
5638 			skb_put(skb, buf1_len);
5639 			skb_mark_for_recycle(skb);
5640 			buf->page = NULL;
5641 		} else if (buf1_len) {
5642 			dma_sync_single_for_cpu(priv->device, buf->addr,
5643 						buf1_len, dma_dir);
5644 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5645 					buf->page, buf->page_offset, buf1_len,
5646 					priv->dma_conf.dma_buf_sz);
5647 			buf->page = NULL;
5648 		}
5649 
5650 		if (buf2_len) {
5651 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5652 						buf2_len, dma_dir);
5653 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5654 					buf->sec_page, 0, buf2_len,
5655 					priv->dma_conf.dma_buf_sz);
5656 			buf->sec_page = NULL;
5657 		}
5658 
5659 drain_data:
5660 		if (likely(status & rx_not_ls))
5661 			goto read_again;
5662 		if (!skb)
5663 			continue;
5664 
5665 		/* Got entire packet into SKB. Finish it. */
5666 
5667 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5668 
5669 		if (priv->hw->hw_vlan_en)
5670 			/* MAC level stripping. */
5671 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5672 		else
5673 			/* Driver level stripping. */
5674 			stmmac_rx_vlan(priv->dev, skb);
5675 
5676 		skb->protocol = eth_type_trans(skb, priv->dev);
5677 
5678 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5679 			skb_checksum_none_assert(skb);
5680 		else
5681 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5682 
5683 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5684 			skb_set_hash(skb, hash, hash_type);
5685 
5686 		skb_record_rx_queue(skb, queue);
5687 		napi_gro_receive(&ch->rx_napi, skb);
5688 		skb = NULL;
5689 
5690 		rx_packets++;
5691 		rx_bytes += len;
5692 		count++;
5693 	}
5694 
5695 	if (status & rx_not_ls || skb) {
5696 		rx_q->state_saved = true;
5697 		rx_q->state.skb = skb;
5698 		rx_q->state.error = error;
5699 		rx_q->state.len = len;
5700 	}
5701 
5702 	stmmac_finalize_xdp_rx(priv, xdp_status);
5703 
5704 	stmmac_rx_refill(priv, queue);
5705 
5706 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5707 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5708 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5709 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5710 	u64_stats_update_end(&rxq_stats->napi_syncp);
5711 
5712 	priv->xstats.rx_dropped += rx_dropped;
5713 	priv->xstats.rx_errors += rx_errors;
5714 
5715 	return count;
5716 }
5717 
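/* NAPI poll handler for the RX-only path: processes up to @budget frames
 * and re-enables the RX DMA interrupt once all work is done.
 */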
5718 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5719 {
5720 	struct stmmac_channel *ch =
5721 		container_of(napi, struct stmmac_channel, rx_napi);
5722 	struct stmmac_priv *priv = ch->priv_data;
5723 	struct stmmac_rxq_stats *rxq_stats;
5724 	u32 chan = ch->index;
5725 	int work_done;
5726 
5727 	rxq_stats = &priv->xstats.rxq_stats[chan];
5728 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5729 	u64_stats_inc(&rxq_stats->napi.poll);
5730 	u64_stats_update_end(&rxq_stats->napi_syncp);
5731 
5732 	work_done = stmmac_rx(priv, budget, chan);
5733 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5734 		unsigned long flags;
5735 
5736 		spin_lock_irqsave(&ch->lock, flags);
5737 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5738 		spin_unlock_irqrestore(&ch->lock, flags);
5739 	}
5740 
5741 	return work_done;
5742 }
5743 
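/* NAPI poll handler for the TX-only path: reclaims completed TX
 * descriptors, re-enables the TX DMA interrupt when done and re-arms the
 * TX coalescing timer if packets are still pending.
 */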
5744 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5745 {
5746 	struct stmmac_channel *ch =
5747 		container_of(napi, struct stmmac_channel, tx_napi);
5748 	struct stmmac_priv *priv = ch->priv_data;
5749 	struct stmmac_txq_stats *txq_stats;
5750 	bool pending_packets = false;
5751 	u32 chan = ch->index;
5752 	int work_done;
5753 
5754 	txq_stats = &priv->xstats.txq_stats[chan];
5755 	u64_stats_update_begin(&txq_stats->napi_syncp);
5756 	u64_stats_inc(&txq_stats->napi.poll);
5757 	u64_stats_update_end(&txq_stats->napi_syncp);
5758 
5759 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5760 	work_done = min(work_done, budget);
5761 
5762 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5763 		unsigned long flags;
5764 
5765 		spin_lock_irqsave(&ch->lock, flags);
5766 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5767 		spin_unlock_irqrestore(&ch->lock, flags);
5768 	}
5769 
5770 	/* TX still has packets to handle; check if we need to arm the TX timer */
5771 	if (pending_packets)
5772 		stmmac_tx_timer_arm(priv, chan);
5773 
5774 	return work_done;
5775 }
5776 
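/* Combined RX/TX NAPI poll handler used when the channel runs in XDP
 * zero-copy (XSK) mode: cleans the TX ring, runs the zero-copy RX path
 * and re-enables both DMA interrupts once all work is complete.
 */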
5777 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5778 {
5779 	struct stmmac_channel *ch =
5780 		container_of(napi, struct stmmac_channel, rxtx_napi);
5781 	struct stmmac_priv *priv = ch->priv_data;
5782 	bool tx_pending_packets = false;
5783 	int rx_done, tx_done, rxtx_done;
5784 	struct stmmac_rxq_stats *rxq_stats;
5785 	struct stmmac_txq_stats *txq_stats;
5786 	u32 chan = ch->index;
5787 
5788 	rxq_stats = &priv->xstats.rxq_stats[chan];
5789 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5790 	u64_stats_inc(&rxq_stats->napi.poll);
5791 	u64_stats_update_end(&rxq_stats->napi_syncp);
5792 
5793 	txq_stats = &priv->xstats.txq_stats[chan];
5794 	u64_stats_update_begin(&txq_stats->napi_syncp);
5795 	u64_stats_inc(&txq_stats->napi.poll);
5796 	u64_stats_update_end(&txq_stats->napi_syncp);
5797 
5798 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5799 	tx_done = min(tx_done, budget);
5800 
5801 	rx_done = stmmac_rx_zc(priv, budget, chan);
5802 
5803 	rxtx_done = max(tx_done, rx_done);
5804 
5805 	/* If either TX or RX work is not complete, return budget
5806 	 * and keep polling
5807 	 */
5808 	if (rxtx_done >= budget)
5809 		return budget;
5810 
5811 	/* all work done, exit the polling mode */
5812 	if (napi_complete_done(napi, rxtx_done)) {
5813 		unsigned long flags;
5814 
5815 		spin_lock_irqsave(&ch->lock, flags);
5816 		/* Both RX and TX work are complete,
5817 		 * so enable both RX & TX IRQs.
5818 		 */
5819 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5820 		spin_unlock_irqrestore(&ch->lock, flags);
5821 	}
5822 
5823 	/* TX still has packets to handle; check if we need to arm the TX timer */
5824 	if (tx_pending_packets)
5825 		stmmac_tx_timer_arm(priv, chan);
5826 
5827 	return min(rxtx_done, budget - 1);
5828 }
5829 
5830 /**
5831  *  stmmac_tx_timeout
5832  *  @dev : Pointer to net device structure
5833  *  @txqueue: the index of the hanging transmit queue
5834  *  Description: this function is called when a packet transmission fails to
5835  *   complete within a reasonable time. The driver will mark the error in the
5836  *   netdev structure and arrange for the device to be reset to a sane state
5837  *   in order to transmit a new packet.
5838  */
5839 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5840 {
5841 	struct stmmac_priv *priv = netdev_priv(dev);
5842 
5843 	stmmac_global_err(priv);
5844 }
5845 
5846 /**
5847  *  stmmac_set_rx_mode - entry point for multicast addressing
5848  *  @dev : pointer to the device structure
5849  *  Description:
5850  *  This function is a driver entry point which gets called by the kernel
5851  *  whenever multicast addresses must be enabled/disabled.
5852  *  Return value:
5853  *  void.
5854  */
5855 static void stmmac_set_rx_mode(struct net_device *dev)
5856 {
5857 	struct stmmac_priv *priv = netdev_priv(dev);
5858 
5859 	stmmac_set_filter(priv, priv->hw, dev);
5860 }
5861 
5862 /**
5863  *  stmmac_change_mtu - entry point to change MTU size for the device.
5864  *  @dev : device pointer.
5865  *  @new_mtu : the new MTU size for the device.
5866  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5867  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5868  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5869  *  Return value:
5870  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5871  *  file on failure.
5872  */
5873 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5874 {
5875 	struct stmmac_priv *priv = netdev_priv(dev);
5876 	int txfifosz = priv->plat->tx_fifo_size;
5877 	struct stmmac_dma_conf *dma_conf;
5878 	const int mtu = new_mtu;
5879 	int ret;
5880 
5881 	if (txfifosz == 0)
5882 		txfifosz = priv->dma_cap.tx_fifo_size;
5883 
5884 	txfifosz /= priv->plat->tx_queues_to_use;
5885 
5886 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5887 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5888 		return -EINVAL;
5889 	}
5890 
5891 	new_mtu = STMMAC_ALIGN(new_mtu);
5892 
5893 	/* If condition true, FIFO is too small or MTU too large */
5894 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5895 		return -EINVAL;
5896 
5897 	if (netif_running(dev)) {
5898 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5899 		/* Try to allocate the new DMA conf with the new mtu */
5900 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5901 		if (IS_ERR(dma_conf)) {
5902 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5903 				   mtu);
5904 			return PTR_ERR(dma_conf);
5905 		}
5906 
5907 		stmmac_release(dev);
5908 
5909 		ret = __stmmac_open(dev, dma_conf);
5910 		if (ret) {
5911 			free_dma_desc_resources(priv, dma_conf);
5912 			kfree(dma_conf);
5913 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5914 			return ret;
5915 		}
5916 
5917 		kfree(dma_conf);
5918 
5919 		stmmac_set_rx_mode(dev);
5920 	}
5921 
5922 	WRITE_ONCE(dev->mtu, mtu);
5923 	netdev_update_features(dev);
5924 
5925 	return 0;
5926 }
5927 
5928 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5929 					     netdev_features_t features)
5930 {
5931 	struct stmmac_priv *priv = netdev_priv(dev);
5932 
5933 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5934 		features &= ~NETIF_F_RXCSUM;
5935 
5936 	if (!priv->plat->tx_coe)
5937 		features &= ~NETIF_F_CSUM_MASK;
5938 
5939 	/* Some GMAC devices have bugged Jumbo frame support that
5940 	 * requires the TX COE to be disabled for oversized frames
5941 	 * (due to limited buffer sizes). In this case we disable
5942 	 * TX csum insertion in the TDES and do not use SF.
5943 	 */
5944 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5945 		features &= ~NETIF_F_CSUM_MASK;
5946 
5947 	/* Enable or disable TSO as requested via ethtool */
5948 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5949 		if (features & NETIF_F_TSO)
5950 			priv->tso = true;
5951 		else
5952 			priv->tso = false;
5953 	}
5954 
5955 	return features;
5956 }
5957 
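/* Apply the requested netdev features to the hardware: RX checksum
 * offload, Split Header and MAC-level VLAN stripping.
 */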
5958 static int stmmac_set_features(struct net_device *netdev,
5959 			       netdev_features_t features)
5960 {
5961 	struct stmmac_priv *priv = netdev_priv(netdev);
5962 
5963 	/* Keep the COE type if RX checksum offload is supported */
5964 	if (features & NETIF_F_RXCSUM)
5965 		priv->hw->rx_csum = priv->plat->rx_coe;
5966 	else
5967 		priv->hw->rx_csum = 0;
5968 	/* No check needed because rx_coe has been set before and will be
5969 	 * fixed up if there is an issue.
5970 	 */
5971 	stmmac_rx_ipc(priv, priv->hw);
5972 
5973 	if (priv->sph_cap) {
5974 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5975 		u32 chan;
5976 
5977 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5978 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5979 	}
5980 
5981 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5982 		priv->hw->hw_vlan_en = true;
5983 	else
5984 		priv->hw->hw_vlan_en = false;
5985 
5986 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5987 
5988 	return 0;
5989 }
5990 
5991 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5992 {
5993 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5994 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5995 	u32 queues_count;
5996 	u32 queue;
5997 	bool xmac;
5998 
5999 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6000 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6001 
6002 	if (priv->irq_wake)
6003 		pm_wakeup_event(priv->device, 0);
6004 
6005 	if (priv->dma_cap.estsel)
6006 		stmmac_est_irq_status(priv, priv, priv->dev,
6007 				      &priv->xstats, tx_cnt);
6008 
6009 	if (stmmac_fpe_supported(priv))
6010 		stmmac_fpe_irq_status(priv);
6011 
6012 	/* To handle the GMAC's own interrupts */
6013 	if ((priv->plat->has_gmac) || xmac) {
6014 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6015 
6016 		if (unlikely(status)) {
6017 			/* For LPI we need to save the tx status */
6018 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6019 				priv->tx_path_in_lpi_mode = true;
6020 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6021 				priv->tx_path_in_lpi_mode = false;
6022 		}
6023 
6024 		for (queue = 0; queue < queues_count; queue++)
6025 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6026 
6027 		/* PCS link status */
6028 		if (priv->hw->pcs &&
6029 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6030 			if (priv->xstats.pcs_link)
6031 				netif_carrier_on(priv->dev);
6032 			else
6033 				netif_carrier_off(priv->dev);
6034 		}
6035 
6036 		stmmac_timestamp_interrupt(priv, priv);
6037 	}
6038 }
6039 
6040 /**
6041  *  stmmac_interrupt - main ISR
6042  *  @irq: interrupt number.
6043  *  @dev_id: to pass the net device pointer.
6044  *  Description: this is the main driver interrupt service routine.
6045  *  It can call:
6046  *  o DMA service routine (to manage incoming frame reception and transmission
6047  *    status)
6048  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6049  *    interrupts.
6050  */
6051 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6052 {
6053 	struct net_device *dev = (struct net_device *)dev_id;
6054 	struct stmmac_priv *priv = netdev_priv(dev);
6055 
6056 	/* Check if adapter is up */
6057 	if (test_bit(STMMAC_DOWN, &priv->state))
6058 		return IRQ_HANDLED;
6059 
6060 	/* Check ASP error if it isn't delivered via an individual IRQ */
6061 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6062 		return IRQ_HANDLED;
6063 
6064 	/* To handle Common interrupts */
6065 	stmmac_common_interrupt(priv);
6066 
6067 	/* To handle DMA interrupts */
6068 	stmmac_dma_interrupt(priv);
6069 
6070 	return IRQ_HANDLED;
6071 }
6072 
6073 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6074 {
6075 	struct net_device *dev = (struct net_device *)dev_id;
6076 	struct stmmac_priv *priv = netdev_priv(dev);
6077 
6078 	/* Check if adapter is up */
6079 	if (test_bit(STMMAC_DOWN, &priv->state))
6080 		return IRQ_HANDLED;
6081 
6082 	/* To handle Common interrupts */
6083 	stmmac_common_interrupt(priv);
6084 
6085 	return IRQ_HANDLED;
6086 }
6087 
6088 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6089 {
6090 	struct net_device *dev = (struct net_device *)dev_id;
6091 	struct stmmac_priv *priv = netdev_priv(dev);
6092 
6093 	/* Check if adapter is up */
6094 	if (test_bit(STMMAC_DOWN, &priv->state))
6095 		return IRQ_HANDLED;
6096 
6097 	/* Check if a fatal error happened */
6098 	stmmac_safety_feat_interrupt(priv);
6099 
6100 	return IRQ_HANDLED;
6101 }
6102 
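/* Per-channel TX interrupt handler used when dedicated (e.g. MSI) TX
 * vectors are available: schedules NAPI and handles TX DMA errors.
 */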
6103 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6104 {
6105 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6106 	struct stmmac_dma_conf *dma_conf;
6107 	int chan = tx_q->queue_index;
6108 	struct stmmac_priv *priv;
6109 	int status;
6110 
6111 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6112 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6113 
6114 	/* Check if adapter is up */
6115 	if (test_bit(STMMAC_DOWN, &priv->state))
6116 		return IRQ_HANDLED;
6117 
6118 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6119 
6120 	if (unlikely(status & tx_hard_error_bump_tc)) {
6121 		/* Try to bump up the dma threshold on this failure */
6122 		stmmac_bump_dma_threshold(priv, chan);
6123 	} else if (unlikely(status == tx_hard_error)) {
6124 		stmmac_tx_err(priv, chan);
6125 	}
6126 
6127 	return IRQ_HANDLED;
6128 }
6129 
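/* Per-channel RX interrupt handler used when dedicated (e.g. MSI) RX
 * vectors are available: schedules NAPI for the RX channel.
 */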
6130 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6131 {
6132 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6133 	struct stmmac_dma_conf *dma_conf;
6134 	int chan = rx_q->queue_index;
6135 	struct stmmac_priv *priv;
6136 
6137 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6138 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6139 
6140 	/* Check if adapter is up */
6141 	if (test_bit(STMMAC_DOWN, &priv->state))
6142 		return IRQ_HANDLED;
6143 
6144 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6145 
6146 	return IRQ_HANDLED;
6147 }
6148 
6149 /**
6150  *  stmmac_ioctl - Entry point for the Ioctl
6151  *  @dev: Device pointer.
6152  *  @rq: An IOCTL-specific structure that can contain a pointer to
6153  *  a proprietary structure used to pass information to the driver.
6154  *  @cmd: IOCTL command
6155  *  Description:
6156  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6157  */
6158 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6159 {
6160 	struct stmmac_priv *priv = netdev_priv(dev);
6161 	int ret = -EOPNOTSUPP;
6162 
6163 	if (!netif_running(dev))
6164 		return -EINVAL;
6165 
6166 	switch (cmd) {
6167 	case SIOCGMIIPHY:
6168 	case SIOCGMIIREG:
6169 	case SIOCSMIIREG:
6170 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6171 		break;
6172 	case SIOCSHWTSTAMP:
6173 		ret = stmmac_hwtstamp_set(dev, rq);
6174 		break;
6175 	case SIOCGHWTSTAMP:
6176 		ret = stmmac_hwtstamp_get(dev, rq);
6177 		break;
6178 	default:
6179 		break;
6180 	}
6181 
6182 	return ret;
6183 }
6184 
6185 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6186 				    void *cb_priv)
6187 {
6188 	struct stmmac_priv *priv = cb_priv;
6189 	int ret = -EOPNOTSUPP;
6190 
6191 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6192 		return ret;
6193 
6194 	__stmmac_disable_all_queues(priv);
6195 
6196 	switch (type) {
6197 	case TC_SETUP_CLSU32:
6198 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6199 		break;
6200 	case TC_SETUP_CLSFLOWER:
6201 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6202 		break;
6203 	default:
6204 		break;
6205 	}
6206 
6207 	stmmac_enable_all_queues(priv);
6208 	return ret;
6209 }
6210 
6211 static LIST_HEAD(stmmac_block_cb_list);
6212 
6213 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6214 			   void *type_data)
6215 {
6216 	struct stmmac_priv *priv = netdev_priv(ndev);
6217 
6218 	switch (type) {
6219 	case TC_QUERY_CAPS:
6220 		return stmmac_tc_query_caps(priv, priv, type_data);
6221 	case TC_SETUP_QDISC_MQPRIO:
6222 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6223 	case TC_SETUP_BLOCK:
6224 		return flow_block_cb_setup_simple(type_data,
6225 						  &stmmac_block_cb_list,
6226 						  stmmac_setup_tc_block_cb,
6227 						  priv, priv, true);
6228 	case TC_SETUP_QDISC_CBS:
6229 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6230 	case TC_SETUP_QDISC_TAPRIO:
6231 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6232 	case TC_SETUP_QDISC_ETF:
6233 		return stmmac_tc_setup_etf(priv, priv, type_data);
6234 	default:
6235 		return -EOPNOTSUPP;
6236 	}
6237 }
6238 
6239 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6240 			       struct net_device *sb_dev)
6241 {
6242 	int gso = skb_shinfo(skb)->gso_type;
6243 
6244 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6245 		/*
6246 		 * There is no way to determine the number of TSO/USO
6247 		 * capable queues. Let's always use Queue 0
6248 		 * because if TSO/USO is supported then at least this
6249 		 * one will be capable.
6250 		 */
6251 		return 0;
6252 	}
6253 
6254 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6255 }
6256 
6257 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6258 {
6259 	struct stmmac_priv *priv = netdev_priv(ndev);
6260 	int ret = 0;
6261 
6262 	ret = pm_runtime_resume_and_get(priv->device);
6263 	if (ret < 0)
6264 		return ret;
6265 
6266 	ret = eth_mac_addr(ndev, addr);
6267 	if (ret)
6268 		goto set_mac_error;
6269 
6270 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6271 
6272 set_mac_error:
6273 	pm_runtime_put(priv->device);
6274 
6275 	return ret;
6276 }
6277 
6278 #ifdef CONFIG_DEBUG_FS
6279 static struct dentry *stmmac_fs_dir;
6280 
6281 static void sysfs_display_ring(void *head, int size, int extend_desc,
6282 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6283 {
6284 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6285 	struct dma_desc *p = (struct dma_desc *)head;
6286 	unsigned int desc_size;
6287 	dma_addr_t dma_addr;
6288 	int i;
6289 
6290 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6291 	for (i = 0; i < size; i++) {
6292 		dma_addr = dma_phy_addr + i * desc_size;
6293 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6294 				i, &dma_addr,
6295 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6296 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6297 		if (extend_desc)
6298 			p = &(++ep)->basic;
6299 		else
6300 			p++;
6301 	}
6302 }
6303 
6304 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6305 {
6306 	struct net_device *dev = seq->private;
6307 	struct stmmac_priv *priv = netdev_priv(dev);
6308 	u32 rx_count = priv->plat->rx_queues_to_use;
6309 	u32 tx_count = priv->plat->tx_queues_to_use;
6310 	u32 queue;
6311 
6312 	if ((dev->flags & IFF_UP) == 0)
6313 		return 0;
6314 
6315 	for (queue = 0; queue < rx_count; queue++) {
6316 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6317 
6318 		seq_printf(seq, "RX Queue %d:\n", queue);
6319 
6320 		if (priv->extend_desc) {
6321 			seq_printf(seq, "Extended descriptor ring:\n");
6322 			sysfs_display_ring((void *)rx_q->dma_erx,
6323 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6324 		} else {
6325 			seq_printf(seq, "Descriptor ring:\n");
6326 			sysfs_display_ring((void *)rx_q->dma_rx,
6327 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6328 		}
6329 	}
6330 
6331 	for (queue = 0; queue < tx_count; queue++) {
6332 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6333 
6334 		seq_printf(seq, "TX Queue %d:\n", queue);
6335 
6336 		if (priv->extend_desc) {
6337 			seq_printf(seq, "Extended descriptor ring:\n");
6338 			sysfs_display_ring((void *)tx_q->dma_etx,
6339 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6340 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6341 			seq_printf(seq, "Descriptor ring:\n");
6342 			sysfs_display_ring((void *)tx_q->dma_tx,
6343 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6344 		}
6345 	}
6346 
6347 	return 0;
6348 }
6349 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6350 
6351 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6352 {
6353 	static const char * const dwxgmac_timestamp_source[] = {
6354 		"None",
6355 		"Internal",
6356 		"External",
6357 		"Both",
6358 	};
6359 	static const char * const dwxgmac_safety_feature_desc[] = {
6360 		"No",
6361 		"All Safety Features with ECC and Parity",
6362 		"All Safety Features without ECC or Parity",
6363 		"All Safety Features with Parity Only",
6364 		"ECC Only",
6365 		"UNDEFINED",
6366 		"UNDEFINED",
6367 		"UNDEFINED",
6368 	};
6369 	struct net_device *dev = seq->private;
6370 	struct stmmac_priv *priv = netdev_priv(dev);
6371 
6372 	if (!priv->hw_cap_support) {
6373 		seq_printf(seq, "DMA HW features not supported\n");
6374 		return 0;
6375 	}
6376 
6377 	seq_printf(seq, "==============================\n");
6378 	seq_printf(seq, "\tDMA HW features\n");
6379 	seq_printf(seq, "==============================\n");
6380 
6381 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6382 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6383 	seq_printf(seq, "\t1000 Mbps: %s\n",
6384 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6385 	seq_printf(seq, "\tHalf duplex: %s\n",
6386 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6387 	if (priv->plat->has_xgmac) {
6388 		seq_printf(seq,
6389 			   "\tNumber of Additional MAC address registers: %d\n",
6390 			   priv->dma_cap.multi_addr);
6391 	} else {
6392 		seq_printf(seq, "\tHash Filter: %s\n",
6393 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6394 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6395 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6396 	}
6397 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6398 		   (priv->dma_cap.pcs) ? "Y" : "N");
6399 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6400 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6401 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6402 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6403 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6404 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6405 	seq_printf(seq, "\tRMON module: %s\n",
6406 		   (priv->dma_cap.rmon) ? "Y" : "N");
6407 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6408 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6409 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6410 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6411 	if (priv->plat->has_xgmac)
6412 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6413 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6414 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6415 		   (priv->dma_cap.eee) ? "Y" : "N");
6416 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6417 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6418 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6419 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6420 	    priv->plat->has_xgmac) {
6421 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6422 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6423 	} else {
6424 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6425 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6426 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6427 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6428 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6429 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6430 	}
6431 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6432 		   priv->dma_cap.number_rx_channel);
6433 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6434 		   priv->dma_cap.number_tx_channel);
6435 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6436 		   priv->dma_cap.number_rx_queues);
6437 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6438 		   priv->dma_cap.number_tx_queues);
6439 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6440 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6441 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6442 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6443 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6444 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6445 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6446 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6447 		   priv->dma_cap.pps_out_num);
6448 	seq_printf(seq, "\tSafety Features: %s\n",
6449 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6450 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6451 		   priv->dma_cap.frpsel ? "Y" : "N");
6452 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6453 		   priv->dma_cap.host_dma_width);
6454 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6455 		   priv->dma_cap.rssen ? "Y" : "N");
6456 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6457 		   priv->dma_cap.vlhash ? "Y" : "N");
6458 	seq_printf(seq, "\tSplit Header: %s\n",
6459 		   priv->dma_cap.sphen ? "Y" : "N");
6460 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6461 		   priv->dma_cap.vlins ? "Y" : "N");
6462 	seq_printf(seq, "\tDouble VLAN: %s\n",
6463 		   priv->dma_cap.dvlan ? "Y" : "N");
6464 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6465 		   priv->dma_cap.l3l4fnum);
6466 	seq_printf(seq, "\tARP Offloading: %s\n",
6467 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6468 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6469 		   priv->dma_cap.estsel ? "Y" : "N");
6470 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6471 		   priv->dma_cap.fpesel ? "Y" : "N");
6472 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6473 		   priv->dma_cap.tbssel ? "Y" : "N");
6474 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6475 		   priv->dma_cap.tbs_ch_num);
6476 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6477 		   priv->dma_cap.sgfsel ? "Y" : "N");
6478 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6479 		   BIT(priv->dma_cap.ttsfd) >> 1);
6480 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6481 		   priv->dma_cap.numtc);
6482 	seq_printf(seq, "\tDCB Feature: %s\n",
6483 		   priv->dma_cap.dcben ? "Y" : "N");
6484 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6485 		   priv->dma_cap.advthword ? "Y" : "N");
6486 	seq_printf(seq, "\tPTP Offload: %s\n",
6487 		   priv->dma_cap.ptoen ? "Y" : "N");
6488 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6489 		   priv->dma_cap.osten ? "Y" : "N");
6490 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6491 		   priv->dma_cap.pfcen ? "Y" : "N");
6492 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6493 		   BIT(priv->dma_cap.frpes) << 6);
6494 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6495 		   BIT(priv->dma_cap.frpbs) << 6);
6496 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6497 		   priv->dma_cap.frppipe_num);
6498 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6499 		   priv->dma_cap.nrvf_num ?
6500 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6501 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6502 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6503 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6504 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6505 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6506 		   priv->dma_cap.cbtisel ? "Y" : "N");
6507 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6508 		   priv->dma_cap.aux_snapshot_n);
6509 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6510 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6511 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6512 		   priv->dma_cap.edma ? "Y" : "N");
6513 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6514 		   priv->dma_cap.ediffc ? "Y" : "N");
6515 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6516 		   priv->dma_cap.vxn ? "Y" : "N");
6517 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6518 		   priv->dma_cap.dbgmem ? "Y" : "N");
6519 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6520 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6521 	return 0;
6522 }
6523 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6524 
6525 /* Use network device events to rename debugfs file entries.
6526  */
6527 static int stmmac_device_event(struct notifier_block *unused,
6528 			       unsigned long event, void *ptr)
6529 {
6530 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6531 	struct stmmac_priv *priv = netdev_priv(dev);
6532 
6533 	if (dev->netdev_ops != &stmmac_netdev_ops)
6534 		goto done;
6535 
6536 	switch (event) {
6537 	case NETDEV_CHANGENAME:
6538 		if (priv->dbgfs_dir)
6539 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6540 							 priv->dbgfs_dir,
6541 							 stmmac_fs_dir,
6542 							 dev->name);
6543 		break;
6544 	}
6545 done:
6546 	return NOTIFY_DONE;
6547 }
6548 
6549 static struct notifier_block stmmac_notifier = {
6550 	.notifier_call = stmmac_device_event,
6551 };
6552 
6553 static void stmmac_init_fs(struct net_device *dev)
6554 {
6555 	struct stmmac_priv *priv = netdev_priv(dev);
6556 
6557 	rtnl_lock();
6558 
6559 	/* Create per netdev entries */
6560 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6561 
6562 	/* Entry to report DMA RX/TX rings */
6563 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6564 			    &stmmac_rings_status_fops);
6565 
6566 	/* Entry to report the DMA HW features */
6567 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6568 			    &stmmac_dma_cap_fops);
6569 
6570 	rtnl_unlock();
6571 }
6572 
6573 static void stmmac_exit_fs(struct net_device *dev)
6574 {
6575 	struct stmmac_priv *priv = netdev_priv(dev);
6576 
6577 	debugfs_remove_recursive(priv->dbgfs_dir);
6578 }
6579 #endif /* CONFIG_DEBUG_FS */
6580 
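/* Compute the CRC-32 (polynomial 0xedb88320, LSB first) of a 12-bit VLAN
 * ID; stmmac_vlan_update() uses the top four bits of the bit-reversed
 * complement as the index into the VLAN hash filter.
 */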
6581 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6582 {
6583 	unsigned char *data = (unsigned char *)&vid_le;
6584 	unsigned char data_byte = 0;
6585 	u32 crc = ~0x0;
6586 	u32 temp = 0;
6587 	int i, bits;
6588 
6589 	bits = get_bitmask_order(VLAN_VID_MASK);
6590 	for (i = 0; i < bits; i++) {
6591 		if ((i % 8) == 0)
6592 			data_byte = data[i / 8];
6593 
6594 		temp = ((crc & 1) ^ data_byte) & 1;
6595 		crc >>= 1;
6596 		data_byte >>= 1;
6597 
6598 		if (temp)
6599 			crc ^= 0xedb88320;
6600 	}
6601 
6602 	return crc;
6603 }
6604 
6605 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6606 {
6607 	u32 crc, hash = 0;
6608 	u16 pmatch = 0;
6609 	int count = 0;
6610 	u16 vid = 0;
6611 
6612 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6613 		__le16 vid_le = cpu_to_le16(vid);
6614 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6615 		hash |= (1 << crc);
6616 		count++;
6617 	}
6618 
6619 	if (!priv->dma_cap.vlhash) {
6620 		if (count > 2) /* VID = 0 always passes filter */
6621 			return -EOPNOTSUPP;
6622 
6623 		pmatch = vid;
6624 		hash = 0;
6625 	}
6626 
6627 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6628 }
6629 
6630 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6631 {
6632 	struct stmmac_priv *priv = netdev_priv(ndev);
6633 	bool is_double = false;
6634 	int ret;
6635 
6636 	ret = pm_runtime_resume_and_get(priv->device);
6637 	if (ret < 0)
6638 		return ret;
6639 
6640 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6641 		is_double = true;
6642 
6643 	set_bit(vid, priv->active_vlans);
6644 	ret = stmmac_vlan_update(priv, is_double);
6645 	if (ret) {
6646 		clear_bit(vid, priv->active_vlans);
6647 		goto err_pm_put;
6648 	}
6649 
6650 	if (priv->hw->num_vlan) {
6651 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6652 		if (ret)
6653 			goto err_pm_put;
6654 	}
6655 err_pm_put:
6656 	pm_runtime_put(priv->device);
6657 
6658 	return ret;
6659 }
6660 
6661 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6662 {
6663 	struct stmmac_priv *priv = netdev_priv(ndev);
6664 	bool is_double = false;
6665 	int ret;
6666 
6667 	ret = pm_runtime_resume_and_get(priv->device);
6668 	if (ret < 0)
6669 		return ret;
6670 
6671 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6672 		is_double = true;
6673 
6674 	clear_bit(vid, priv->active_vlans);
6675 
6676 	if (priv->hw->num_vlan) {
6677 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6678 		if (ret)
6679 			goto del_vlan_error;
6680 	}
6681 
6682 	ret = stmmac_vlan_update(priv, is_double);
6683 
6684 del_vlan_error:
6685 	pm_runtime_put(priv->device);
6686 
6687 	return ret;
6688 }
6689 
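/* ndo_bpf callback: attach/detach an XDP program or set up an XSK
 * (AF_XDP) buffer pool on a given queue.
 */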
6690 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6691 {
6692 	struct stmmac_priv *priv = netdev_priv(dev);
6693 
6694 	switch (bpf->command) {
6695 	case XDP_SETUP_PROG:
6696 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6697 	case XDP_SETUP_XSK_POOL:
6698 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6699 					     bpf->xsk.queue_id);
6700 	default:
6701 		return -EOPNOTSUPP;
6702 	}
6703 }
6704 
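/* ndo_xdp_xmit callback: transmit XDP frames on the per-CPU XDP TX
 * queue, sharing the netdev TX queue lock with the regular xmit path.
 */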
6705 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6706 			   struct xdp_frame **frames, u32 flags)
6707 {
6708 	struct stmmac_priv *priv = netdev_priv(dev);
6709 	int cpu = smp_processor_id();
6710 	struct netdev_queue *nq;
6711 	int i, nxmit = 0;
6712 	int queue;
6713 
6714 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6715 		return -ENETDOWN;
6716 
6717 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6718 		return -EINVAL;
6719 
6720 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6721 	nq = netdev_get_tx_queue(priv->dev, queue);
6722 
6723 	__netif_tx_lock(nq, cpu);
6724 	/* Avoid TX time-out as we are sharing with the slow path */
6725 	txq_trans_cond_update(nq);
6726 
6727 	for (i = 0; i < num_frames; i++) {
6728 		int res;
6729 
6730 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6731 		if (res == STMMAC_XDP_CONSUMED)
6732 			break;
6733 
6734 		nxmit++;
6735 	}
6736 
6737 	if (flags & XDP_XMIT_FLUSH) {
6738 		stmmac_flush_tx_descriptors(priv, queue);
6739 		stmmac_tx_timer_arm(priv, queue);
6740 	}
6741 
6742 	__netif_tx_unlock(nq);
6743 
6744 	return nxmit;
6745 }
6746 
6747 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6748 {
6749 	struct stmmac_channel *ch = &priv->channel[queue];
6750 	unsigned long flags;
6751 
6752 	spin_lock_irqsave(&ch->lock, flags);
6753 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6754 	spin_unlock_irqrestore(&ch->lock, flags);
6755 
6756 	stmmac_stop_rx_dma(priv, queue);
6757 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6758 }
6759 
6760 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6761 {
6762 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6763 	struct stmmac_channel *ch = &priv->channel[queue];
6764 	unsigned long flags;
6765 	u32 buf_size;
6766 	int ret;
6767 
6768 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6769 	if (ret) {
6770 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6771 		return;
6772 	}
6773 
6774 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6775 	if (ret) {
6776 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6777 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6778 		return;
6779 	}
6780 
6781 	stmmac_reset_rx_queue(priv, queue);
6782 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6783 
6784 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6785 			    rx_q->dma_rx_phy, rx_q->queue_index);
6786 
6787 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6788 			     sizeof(struct dma_desc));
6789 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6790 			       rx_q->rx_tail_addr, rx_q->queue_index);
6791 
6792 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6793 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6794 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6795 				      buf_size,
6796 				      rx_q->queue_index);
6797 	} else {
6798 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6799 				      priv->dma_conf.dma_buf_sz,
6800 				      rx_q->queue_index);
6801 	}
6802 
6803 	stmmac_start_rx_dma(priv, queue);
6804 
6805 	spin_lock_irqsave(&ch->lock, flags);
6806 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6807 	spin_unlock_irqrestore(&ch->lock, flags);
6808 }
6809 
6810 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6811 {
6812 	struct stmmac_channel *ch = &priv->channel[queue];
6813 	unsigned long flags;
6814 
6815 	spin_lock_irqsave(&ch->lock, flags);
6816 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6817 	spin_unlock_irqrestore(&ch->lock, flags);
6818 
6819 	stmmac_stop_tx_dma(priv, queue);
6820 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6821 }
6822 
6823 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6824 {
6825 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6826 	struct stmmac_channel *ch = &priv->channel[queue];
6827 	unsigned long flags;
6828 	int ret;
6829 
6830 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6831 	if (ret) {
6832 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6833 		return;
6834 	}
6835 
6836 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6837 	if (ret) {
6838 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6839 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6840 		return;
6841 	}
6842 
6843 	stmmac_reset_tx_queue(priv, queue);
6844 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6845 
6846 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6847 			    tx_q->dma_tx_phy, tx_q->queue_index);
6848 
6849 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6850 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6851 
6852 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6853 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6854 			       tx_q->tx_tail_addr, tx_q->queue_index);
6855 
6856 	stmmac_start_tx_dma(priv, queue);
6857 
6858 	spin_lock_irqsave(&ch->lock, flags);
6859 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6860 	spin_unlock_irqrestore(&ch->lock, flags);
6861 }
6862 
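/* Tear down the data path before an XDP reconfiguration: stop NAPI, TX
 * timers, IRQs and DMA, free the descriptor resources and disable the MAC.
 */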
6863 void stmmac_xdp_release(struct net_device *dev)
6864 {
6865 	struct stmmac_priv *priv = netdev_priv(dev);
6866 	u32 chan;
6867 
6868 	/* Ensure tx function is not running */
6869 	netif_tx_disable(dev);
6870 
6871 	/* Disable NAPI process */
6872 	stmmac_disable_all_queues(priv);
6873 
6874 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6875 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6876 
6877 	/* Free the IRQ lines */
6878 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6879 
6880 	/* Stop TX/RX DMA channels */
6881 	stmmac_stop_all_dma(priv);
6882 
6883 	/* Release and free the Rx/Tx resources */
6884 	free_dma_desc_resources(priv, &priv->dma_conf);
6885 
6886 	/* Disable the MAC Rx/Tx */
6887 	stmmac_mac_set(priv, priv->ioaddr, false);
6888 
6889 	/* set trans_start so we don't get spurious
6890 	 * watchdogs during reset
6891 	 */
6892 	netif_trans_update(dev);
6893 	netif_carrier_off(dev);
6894 }
6895 
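/* Bring the data path back up after an XDP reconfiguration: reallocate
 * and reinitialize the descriptor rings, reprogram the DMA channels
 * (honoring any XSK pool buffer size), request IRQs and restart NAPI.
 */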
6896 int stmmac_xdp_open(struct net_device *dev)
6897 {
6898 	struct stmmac_priv *priv = netdev_priv(dev);
6899 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6900 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6901 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6902 	struct stmmac_rx_queue *rx_q;
6903 	struct stmmac_tx_queue *tx_q;
6904 	u32 buf_size;
6905 	bool sph_en;
6906 	u32 chan;
6907 	int ret;
6908 
6909 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6910 	if (ret < 0) {
6911 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6912 			   __func__);
6913 		goto dma_desc_error;
6914 	}
6915 
6916 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6917 	if (ret < 0) {
6918 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6919 			   __func__);
6920 		goto init_error;
6921 	}
6922 
6923 	stmmac_reset_queues_param(priv);
6924 
6925 	/* DMA CSR Channel configuration */
6926 	for (chan = 0; chan < dma_csr_ch; chan++) {
6927 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6928 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6929 	}
6930 
6931 	/* Adjust Split header */
6932 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6933 
6934 	/* DMA RX Channel Configuration */
6935 	for (chan = 0; chan < rx_cnt; chan++) {
6936 		rx_q = &priv->dma_conf.rx_queue[chan];
6937 
6938 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6939 				    rx_q->dma_rx_phy, chan);
6940 
6941 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6942 				     (rx_q->buf_alloc_num *
6943 				      sizeof(struct dma_desc));
6944 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6945 				       rx_q->rx_tail_addr, chan);
6946 
6947 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6948 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6949 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6950 					      buf_size,
6951 					      rx_q->queue_index);
6952 		} else {
6953 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6954 					      priv->dma_conf.dma_buf_sz,
6955 					      rx_q->queue_index);
6956 		}
6957 
6958 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6959 	}
6960 
6961 	/* DMA TX Channel Configuration */
6962 	for (chan = 0; chan < tx_cnt; chan++) {
6963 		tx_q = &priv->dma_conf.tx_queue[chan];
6964 
6965 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6966 				    tx_q->dma_tx_phy, chan);
6967 
6968 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6969 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6970 				       tx_q->tx_tail_addr, chan);
6971 
6972 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6973 		tx_q->txtimer.function = stmmac_tx_timer;
6974 	}
6975 
6976 	/* Enable the MAC Rx/Tx */
6977 	stmmac_mac_set(priv, priv->ioaddr, true);
6978 
6979 	/* Start Rx & Tx DMA Channels */
6980 	stmmac_start_all_dma(priv);
6981 
6982 	ret = stmmac_request_irq(dev);
6983 	if (ret)
6984 		goto irq_error;
6985 
6986 	/* Enable NAPI process */
6987 	stmmac_enable_all_queues(priv);
6988 	netif_carrier_on(dev);
6989 	netif_tx_start_all_queues(dev);
6990 	stmmac_enable_all_dma_irq(priv);
6991 
6992 	return 0;
6993 
6994 irq_error:
6995 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6996 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6997 
6998 	stmmac_hw_teardown(dev);
6999 init_error:
7000 	free_dma_desc_resources(priv, &priv->dma_conf);
7001 dma_desc_error:
7002 	return ret;
7003 }
7004 
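/**
 * stmmac_xsk_wakeup - .ndo_xsk_wakeup handler for AF_XDP
 * @dev: network device pointer
 * @queue: queue index to kick
 * @flags: XDP_WAKEUP_* flags (unused here)
 * Description: validate the device state, the queue index and that an XSK
 * pool is bound, then schedule the combined RX/TX NAPI unless it is
 * already running.
 * Return: 0 on success, a negative errno otherwise.
 */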
7005 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7006 {
7007 	struct stmmac_priv *priv = netdev_priv(dev);
7008 	struct stmmac_rx_queue *rx_q;
7009 	struct stmmac_tx_queue *tx_q;
7010 	struct stmmac_channel *ch;
7011 
7012 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7013 	    !netif_carrier_ok(priv->dev))
7014 		return -ENETDOWN;
7015 
7016 	if (!stmmac_xdp_is_enabled(priv))
7017 		return -EINVAL;
7018 
7019 	if (queue >= priv->plat->rx_queues_to_use ||
7020 	    queue >= priv->plat->tx_queues_to_use)
7021 		return -EINVAL;
7022 
7023 	rx_q = &priv->dma_conf.rx_queue[queue];
7024 	tx_q = &priv->dma_conf.tx_queue[queue];
7025 	ch = &priv->channel[queue];
7026 
7027 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7028 		return -EINVAL;
7029 
7030 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7031 		/* EQoS does not have per-DMA channel SW interrupt,
7032 		 * so we schedule RX Napi straight-away.
7033 		 * so we schedule the RX NAPI straight away.
7034 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7035 			__napi_schedule(&ch->rxtx_napi);
7036 	}
7037 
7038 	return 0;
7039 }
7040 
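/* .ndo_get_stats64: aggregate the per-queue TX/RX packet and byte counters
 * under their u64_stats sync points (so 64-bit counters are read
 * consistently on 32-bit systems), then copy the error counters collected
 * in priv->xstats.
 */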
7041 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7042 {
7043 	struct stmmac_priv *priv = netdev_priv(dev);
7044 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7045 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7046 	unsigned int start;
7047 	int q;
7048 
7049 	for (q = 0; q < tx_cnt; q++) {
7050 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7051 		u64 tx_packets;
7052 		u64 tx_bytes;
7053 
7054 		do {
7055 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7056 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7057 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7058 		do {
7059 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7060 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7061 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7062 
7063 		stats->tx_packets += tx_packets;
7064 		stats->tx_bytes += tx_bytes;
7065 	}
7066 
7067 	for (q = 0; q < rx_cnt; q++) {
7068 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7069 		u64 rx_packets;
7070 		u64 rx_bytes;
7071 
7072 		do {
7073 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7074 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7075 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7076 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7077 
7078 		stats->rx_packets += rx_packets;
7079 		stats->rx_bytes += rx_bytes;
7080 	}
7081 
7082 	stats->rx_dropped = priv->xstats.rx_dropped;
7083 	stats->rx_errors = priv->xstats.rx_errors;
7084 	stats->tx_dropped = priv->xstats.tx_dropped;
7085 	stats->tx_errors = priv->xstats.tx_errors;
7086 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7087 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7088 	stats->rx_length_errors = priv->xstats.rx_length;
7089 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7090 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7091 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7092 }
7093 
7094 static const struct net_device_ops stmmac_netdev_ops = {
7095 	.ndo_open = stmmac_open,
7096 	.ndo_start_xmit = stmmac_xmit,
7097 	.ndo_stop = stmmac_release,
7098 	.ndo_change_mtu = stmmac_change_mtu,
7099 	.ndo_fix_features = stmmac_fix_features,
7100 	.ndo_set_features = stmmac_set_features,
7101 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7102 	.ndo_tx_timeout = stmmac_tx_timeout,
7103 	.ndo_eth_ioctl = stmmac_ioctl,
7104 	.ndo_get_stats64 = stmmac_get_stats64,
7105 	.ndo_setup_tc = stmmac_setup_tc,
7106 	.ndo_select_queue = stmmac_select_queue,
7107 	.ndo_set_mac_address = stmmac_set_mac_address,
7108 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7109 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7110 	.ndo_bpf = stmmac_bpf,
7111 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7112 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7113 };
7114 
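/* If a reset has been requested (e.g. by the TX watchdog) and the interface
 * is not already going down, bounce the interface under the rtnl lock to
 * recover, serialized by the STMMAC_RESETING state bit.
 */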
7115 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7116 {
7117 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7118 		return;
7119 	if (test_bit(STMMAC_DOWN, &priv->state))
7120 		return;
7121 
7122 	netdev_err(priv->dev, "Reset adapter.\n");
7123 
7124 	rtnl_lock();
7125 	netif_trans_update(priv->dev);
7126 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7127 		usleep_range(1000, 2000);
7128 
7129 	set_bit(STMMAC_DOWN, &priv->state);
7130 	dev_close(priv->dev);
7131 	dev_open(priv->dev, NULL);
7132 	clear_bit(STMMAC_DOWN, &priv->state);
7133 	clear_bit(STMMAC_RESETING, &priv->state);
7134 	rtnl_unlock();
7135 }
7136 
7137 static void stmmac_service_task(struct work_struct *work)
7138 {
7139 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7140 			service_task);
7141 
7142 	stmmac_reset_subtask(priv);
7143 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7144 }
7145 
7146 /**
7147  *  stmmac_hw_init - Init the MAC device
7148  *  @priv: driver private structure
7149  *  Description: this function configures the MAC device according to
7150  *  some platform parameters or the HW capability register. It prepares the
7151  *  driver to use either ring or chain mode and to set up either enhanced or
7152  *  normal descriptors.
7153  */
7154 static int stmmac_hw_init(struct stmmac_priv *priv)
7155 {
7156 	int ret;
7157 
7158 	/* dwmac-sun8i only works in chain mode */
7159 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7160 		chain_mode = 1;
7161 	priv->chain_mode = chain_mode;
7162 
7163 	/* Initialize HW Interface */
7164 	ret = stmmac_hwif_init(priv);
7165 	if (ret)
7166 		return ret;
7167 
7168 	/* Get the HW capability (only on GMAC cores newer than 3.50a) */
7169 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7170 	if (priv->hw_cap_support) {
7171 		dev_info(priv->device, "DMA HW capability register supported\n");
7172 
7173 		/* Override some GMAC/DMA configuration fields (e.g.
7174 		 * enh_desc, tx_coe) that are passed through the
7175 		 * platform with the values from the HW capability
7176 		 * register (if supported).
7177 		 */
7178 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7179 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7180 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7181 		priv->hw->pmt = priv->plat->pmt;
7182 		if (priv->dma_cap.hash_tb_sz) {
7183 			priv->hw->multicast_filter_bins =
7184 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7185 			priv->hw->mcast_bits_log2 =
7186 					ilog2(priv->hw->multicast_filter_bins);
7187 		}
7188 
7189 		/* TXCOE doesn't work in thresh DMA mode */
7190 		if (priv->plat->force_thresh_dma_mode)
7191 			priv->plat->tx_coe = 0;
7192 		else
7193 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7194 
7195 		/* In case of GMAC4 rx_coe is from HW cap register. */
7196 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7197 
7198 		if (priv->dma_cap.rx_coe_type2)
7199 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7200 		else if (priv->dma_cap.rx_coe_type1)
7201 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7202 
7203 	} else {
7204 		dev_info(priv->device, "No HW DMA feature register supported\n");
7205 	}
7206 
7207 	if (priv->plat->rx_coe) {
7208 		priv->hw->rx_csum = priv->plat->rx_coe;
7209 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7210 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7211 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7212 	}
7213 	if (priv->plat->tx_coe)
7214 		dev_info(priv->device, "TX Checksum insertion supported\n");
7215 
7216 	if (priv->plat->pmt) {
7217 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7218 		device_set_wakeup_capable(priv->device, 1);
7219 	}
7220 
7221 	if (priv->dma_cap.tsoen)
7222 		dev_info(priv->device, "TSO supported\n");
7223 
7224 	priv->hw->vlan_fail_q_en =
7225 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7226 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7227 
7228 	/* Run HW quirks, if any */
7229 	if (priv->hwif_quirks) {
7230 		ret = priv->hwif_quirks(priv);
7231 		if (ret)
7232 			return ret;
7233 	}
7234 
7235 	/* Rx Watchdog is available in cores newer than 3.40.
7236 	 * In some cases, for example on buggy HW, this feature
7237 	 * has to be disabled, which can be done by setting the
7238 	 * riwt_off field in the platform data.
7239 	 */
7240 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7241 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7242 		priv->use_riwt = 1;
7243 		dev_info(priv->device,
7244 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7245 	}
7246 
7247 	return 0;
7248 }
7249 
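/* Set up one stmmac_channel per DMA channel: RX channels get an rx_napi,
 * TX channels a tx_napi, and channels that have both also get the combined
 * rxtx_napi used, for instance, by the XSK wakeup path above.
 */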
7250 static void stmmac_napi_add(struct net_device *dev)
7251 {
7252 	struct stmmac_priv *priv = netdev_priv(dev);
7253 	u32 queue, maxq;
7254 
7255 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7256 
7257 	for (queue = 0; queue < maxq; queue++) {
7258 		struct stmmac_channel *ch = &priv->channel[queue];
7259 
7260 		ch->priv_data = priv;
7261 		ch->index = queue;
7262 		spin_lock_init(&ch->lock);
7263 
7264 		if (queue < priv->plat->rx_queues_to_use) {
7265 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7266 		}
7267 		if (queue < priv->plat->tx_queues_to_use) {
7268 			netif_napi_add_tx(dev, &ch->tx_napi,
7269 					  stmmac_napi_poll_tx);
7270 		}
7271 		if (queue < priv->plat->rx_queues_to_use &&
7272 		    queue < priv->plat->tx_queues_to_use) {
7273 			netif_napi_add(dev, &ch->rxtx_napi,
7274 				       stmmac_napi_poll_rxtx);
7275 		}
7276 	}
7277 }
7278 
7279 static void stmmac_napi_del(struct net_device *dev)
7280 {
7281 	struct stmmac_priv *priv = netdev_priv(dev);
7282 	u32 queue, maxq;
7283 
7284 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7285 
7286 	for (queue = 0; queue < maxq; queue++) {
7287 		struct stmmac_channel *ch = &priv->channel[queue];
7288 
7289 		if (queue < priv->plat->rx_queues_to_use)
7290 			netif_napi_del(&ch->rx_napi);
7291 		if (queue < priv->plat->tx_queues_to_use)
7292 			netif_napi_del(&ch->tx_napi);
7293 		if (queue < priv->plat->rx_queues_to_use &&
7294 		    queue < priv->plat->tx_queues_to_use) {
7295 			netif_napi_del(&ch->rxtx_napi);
7296 		}
7297 	}
7298 }
7299 
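/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: network device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: close the interface if it is running, recreate the NAPI
 * contexts for the new queue counts, refresh the default RSS indirection
 * table unless it was configured by the user, and reopen the interface.
 * Return: 0 on success, a negative errno otherwise.
 */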
7300 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7301 {
7302 	struct stmmac_priv *priv = netdev_priv(dev);
7303 	int ret = 0, i;
7304 
7305 	if (netif_running(dev))
7306 		stmmac_release(dev);
7307 
7308 	stmmac_napi_del(dev);
7309 
7310 	priv->plat->rx_queues_to_use = rx_cnt;
7311 	priv->plat->tx_queues_to_use = tx_cnt;
7312 	if (!netif_is_rxfh_configured(dev))
7313 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7314 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7315 									rx_cnt);
7316 
7317 	stmmac_napi_add(dev);
7318 
7319 	if (netif_running(dev))
7320 		ret = stmmac_open(dev);
7321 
7322 	return ret;
7323 }
7324 
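/**
 * stmmac_reinit_ringparam - change the RX/TX descriptor ring sizes
 * @dev: network device pointer
 * @rx_size: new RX ring size
 * @tx_size: new TX ring size
 * Description: close the interface if it is running, record the new ring
 * sizes in the DMA configuration and reopen the interface so the rings are
 * reallocated with the new sizes.
 * Return: 0 on success, a negative errno otherwise.
 */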
7325 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7326 {
7327 	struct stmmac_priv *priv = netdev_priv(dev);
7328 	int ret = 0;
7329 
7330 	if (netif_running(dev))
7331 		stmmac_release(dev);
7332 
7333 	priv->dma_conf.dma_rx_size = rx_size;
7334 	priv->dma_conf.dma_tx_size = tx_size;
7335 
7336 	if (netif_running(dev))
7337 		ret = stmmac_open(dev);
7338 
7339 	return ret;
7340 }
7341 
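/* XDP RX metadata hook (xmo_rx_timestamp): report the HW RX timestamp to
 * an XDP program. On GMAC4/XGMAC the timestamp is carried by the context
 * (next) descriptor; the CDC error adjustment is subtracted before
 * returning the value. Returns -ENODATA when RX timestamping is disabled
 * or no timestamp is available.
 */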
7342 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7343 {
7344 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7345 	struct dma_desc *desc_contains_ts = ctx->desc;
7346 	struct stmmac_priv *priv = ctx->priv;
7347 	struct dma_desc *ndesc = ctx->ndesc;
7348 	struct dma_desc *desc = ctx->desc;
7349 	u64 ns = 0;
7350 
7351 	if (!priv->hwts_rx_en)
7352 		return -ENODATA;
7353 
7354 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7355 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7356 		desc_contains_ts = ndesc;
7357 
7358 	/* Check if timestamp is available */
7359 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7360 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7361 		ns -= priv->plat->cdc_error_adj;
7362 		*timestamp = ns_to_ktime(ns);
7363 		return 0;
7364 	}
7365 
7366 	return -ENODATA;
7367 }
7368 
7369 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7370 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7371 };
7372 
7373 /**
7374  * stmmac_dvr_probe
7375  * @device: device pointer
7376  * @plat_dat: platform data pointer
7377  * @res: stmmac resource pointer
7378  * Description: this is the main probe function; it calls
7379  * alloc_etherdev and allocates the driver private structure.
7380  * Return:
7381  * 0 on success, otherwise a negative errno.
7382  */
7383 int stmmac_dvr_probe(struct device *device,
7384 		     struct plat_stmmacenet_data *plat_dat,
7385 		     struct stmmac_resources *res)
7386 {
7387 	struct net_device *ndev = NULL;
7388 	struct stmmac_priv *priv;
7389 	u32 rxq;
7390 	int i, ret = 0;
7391 
7392 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7393 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7394 	if (!ndev)
7395 		return -ENOMEM;
7396 
7397 	SET_NETDEV_DEV(ndev, device);
7398 
7399 	priv = netdev_priv(ndev);
7400 	priv->device = device;
7401 	priv->dev = ndev;
7402 
7403 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7404 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7405 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7406 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7407 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7408 	}
7409 
7410 	priv->xstats.pcpu_stats =
7411 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7412 	if (!priv->xstats.pcpu_stats)
7413 		return -ENOMEM;
7414 
7415 	stmmac_set_ethtool_ops(ndev);
7416 	priv->pause = pause;
7417 	priv->plat = plat_dat;
7418 	priv->ioaddr = res->addr;
7419 	priv->dev->base_addr = (unsigned long)res->addr;
7420 	priv->plat->dma_cfg->multi_msi_en =
7421 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7422 
7423 	priv->dev->irq = res->irq;
7424 	priv->wol_irq = res->wol_irq;
7425 	priv->lpi_irq = res->lpi_irq;
7426 	priv->sfty_irq = res->sfty_irq;
7427 	priv->sfty_ce_irq = res->sfty_ce_irq;
7428 	priv->sfty_ue_irq = res->sfty_ue_irq;
7429 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7430 		priv->rx_irq[i] = res->rx_irq[i];
7431 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7432 		priv->tx_irq[i] = res->tx_irq[i];
7433 
7434 	if (!is_zero_ether_addr(res->mac))
7435 		eth_hw_addr_set(priv->dev, res->mac);
7436 
7437 	dev_set_drvdata(device, priv->dev);
7438 
7439 	/* Verify driver arguments */
7440 	stmmac_verify_args();
7441 
7442 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7443 	if (!priv->af_xdp_zc_qps)
7444 		return -ENOMEM;
7445 
7446 	/* Allocate workqueue */
7447 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7448 	if (!priv->wq) {
7449 		dev_err(priv->device, "failed to create workqueue\n");
7450 		ret = -ENOMEM;
7451 		goto error_wq_init;
7452 	}
7453 
7454 	INIT_WORK(&priv->service_task, stmmac_service_task);
7455 
7456 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7457 
7458 	/* Override with kernel parameters if supplied. XXX CRS XXX:
7459 	 * this needs to support multiple instances.
7460 	 */
7461 	if ((phyaddr >= 0) && (phyaddr <= 31))
7462 		priv->plat->phy_addr = phyaddr;
7463 
7464 	if (priv->plat->stmmac_rst) {
7465 		ret = reset_control_assert(priv->plat->stmmac_rst);
7466 		reset_control_deassert(priv->plat->stmmac_rst);
7467 		/* Some reset controllers have only reset callback instead of
7468 		 * assert + deassert callbacks pair.
7469 		 */
7470 		if (ret == -ENOTSUPP)
7471 			reset_control_reset(priv->plat->stmmac_rst);
7472 	}
7473 
7474 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7475 	if (ret == -ENOTSUPP)
7476 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7477 			ERR_PTR(ret));
7478 
7479 	/* Wait a bit for the reset to take effect */
7480 	udelay(10);
7481 
7482 	/* Init MAC and get the capabilities */
7483 	ret = stmmac_hw_init(priv);
7484 	if (ret)
7485 		goto error_hw_init;
7486 
7487 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7488 	 */
7489 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7490 		priv->plat->dma_cfg->dche = false;
7491 
7492 	stmmac_check_ether_addr(priv);
7493 
7494 	ndev->netdev_ops = &stmmac_netdev_ops;
7495 
7496 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7497 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7498 
7499 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7500 			    NETIF_F_RXCSUM;
7501 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7502 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7503 
7504 	ret = stmmac_tc_init(priv, priv);
7505 	if (!ret) {
7506 		ndev->hw_features |= NETIF_F_HW_TC;
7507 	}
7508 
7509 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7510 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7511 		if (priv->plat->has_gmac4)
7512 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7513 		priv->tso = true;
7514 		dev_info(priv->device, "TSO feature enabled\n");
7515 	}
7516 
7517 	if (priv->dma_cap.sphen &&
7518 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7519 		ndev->hw_features |= NETIF_F_GRO;
7520 		priv->sph_cap = true;
7521 		priv->sph = priv->sph_cap;
7522 		dev_info(priv->device, "SPH feature enabled\n");
7523 	}
7524 
7525 	/* Ideally our host DMA address width is the same as for the
7526 	 * device. However, it may differ and then we have to use our
7527 	 * host DMA width for allocation and the device DMA width for
7528 	 * register handling.
7529 	 */
7530 	if (priv->plat->host_dma_width)
7531 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7532 	else
7533 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7534 
7535 	if (priv->dma_cap.host_dma_width) {
7536 		ret = dma_set_mask_and_coherent(device,
7537 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7538 		if (!ret) {
7539 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7540 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7541 
7542 			/*
7543 			 * If more than 32 bits can be addressed, make sure to
7544 			 * enable enhanced addressing mode.
7545 			 */
7546 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7547 				priv->plat->dma_cfg->eame = true;
7548 		} else {
7549 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7550 			if (ret) {
7551 				dev_err(priv->device, "Failed to set DMA Mask\n");
7552 				goto error_hw_init;
7553 			}
7554 
7555 			priv->dma_cap.host_dma_width = 32;
7556 		}
7557 	}
7558 
7559 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7560 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7561 #ifdef STMMAC_VLAN_TAG_USED
7562 	/* Both mac100 and gmac support receive VLAN tag detection */
7563 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7564 	if (priv->plat->has_gmac4) {
7565 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7566 		priv->hw->hw_vlan_en = true;
7567 	}
7568 	if (priv->dma_cap.vlhash) {
7569 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7570 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7571 	}
7572 	if (priv->dma_cap.vlins) {
7573 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7574 		if (priv->dma_cap.dvlan)
7575 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7576 	}
7577 #endif
7578 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7579 
7580 	priv->xstats.threshold = tc;
7581 
7582 	/* Initialize RSS */
7583 	rxq = priv->plat->rx_queues_to_use;
7584 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7585 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7586 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7587 
7588 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7589 		ndev->features |= NETIF_F_RXHASH;
7590 
7591 	ndev->vlan_features |= ndev->features;
7592 
7593 	/* MTU range: 46 - hw-specific max */
7594 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7595 	if (priv->plat->has_xgmac)
7596 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7597 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7598 		ndev->max_mtu = JUMBO_LEN;
7599 	else
7600 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7601 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7602 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7603 	 */
7604 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7605 	    (priv->plat->maxmtu >= ndev->min_mtu))
7606 		ndev->max_mtu = priv->plat->maxmtu;
7607 	else if (priv->plat->maxmtu < ndev->min_mtu)
7608 		dev_warn(priv->device,
7609 			 "%s: warning: maxmtu has an invalid value (%d)\n",
7610 			 __func__, priv->plat->maxmtu);
7611 
7612 	if (flow_ctrl)
7613 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7614 
7615 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7616 
7617 	/* Setup channels NAPI */
7618 	stmmac_napi_add(ndev);
7619 
7620 	mutex_init(&priv->lock);
7621 
7622 	stmmac_fpe_init(priv);
7623 
7624 	/* If a specific clk_csr value is passed from the platform,
7625 	 * the CSR Clock Range selection cannot be changed at run-time
7626 	 * and it is fixed. Otherwise, the driver tries to set the MDC
7627 	 * clock dynamically according to the actual clock input of
7628 	 * the CSR.
7629 	 */
7630 	if (priv->plat->clk_csr >= 0)
7631 		priv->clk_csr = priv->plat->clk_csr;
7632 	else
7633 		stmmac_clk_csr_set(priv);
7634 
7635 	stmmac_check_pcs_mode(priv);
7636 
7637 	pm_runtime_get_noresume(device);
7638 	pm_runtime_set_active(device);
7639 	if (!pm_runtime_enabled(device))
7640 		pm_runtime_enable(device);
7641 
7642 	ret = stmmac_mdio_register(ndev);
7643 	if (ret < 0) {
7644 		dev_err_probe(priv->device, ret,
7645 			      "MDIO bus (id: %d) registration failed\n",
7646 			      priv->plat->bus_id);
7647 		goto error_mdio_register;
7648 	}
7649 
7650 	if (priv->plat->speed_mode_2500)
7651 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7652 
7653 	ret = stmmac_pcs_setup(ndev);
7654 	if (ret)
7655 		goto error_pcs_setup;
7656 
7657 	ret = stmmac_phy_setup(priv);
7658 	if (ret) {
7659 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7660 		goto error_phy_setup;
7661 	}
7662 
7663 	ret = register_netdev(ndev);
7664 	if (ret) {
7665 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7666 			__func__, ret);
7667 		goto error_netdev_register;
7668 	}
7669 
7670 #ifdef CONFIG_DEBUG_FS
7671 	stmmac_init_fs(ndev);
7672 #endif
7673 
7674 	if (priv->plat->dump_debug_regs)
7675 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7676 
7677 	/* Let pm_runtime_put() disable the clocks.
7678 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7679 	 */
7680 	pm_runtime_put(device);
7681 
7682 	return ret;
7683 
7684 error_netdev_register:
7685 	phylink_destroy(priv->phylink);
7686 error_phy_setup:
7687 	stmmac_pcs_clean(ndev);
7688 error_pcs_setup:
7689 	stmmac_mdio_unregister(ndev);
7690 error_mdio_register:
7691 	stmmac_napi_del(ndev);
7692 error_hw_init:
7693 	destroy_workqueue(priv->wq);
7694 error_wq_init:
7695 	bitmap_free(priv->af_xdp_zc_qps);
7696 
7697 	return ret;
7698 }
7699 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
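
/* Illustrative only: a platform glue driver is expected to fill a
 * struct stmmac_resources and a struct plat_stmmacenet_data (typically
 * via the DT parsing helpers in stmmac_platform.c) and then call:
 *
 *	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *
 * The exact helpers used to obtain plat_dat and stmmac_res vary between
 * glue drivers and are not shown here.
 */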
7700 
7701 /**
7702  * stmmac_dvr_remove
7703  * @dev: device pointer
7704  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7705  * changes the link status and releases the DMA descriptor rings.
7706  */
7707 void stmmac_dvr_remove(struct device *dev)
7708 {
7709 	struct net_device *ndev = dev_get_drvdata(dev);
7710 	struct stmmac_priv *priv = netdev_priv(ndev);
7711 
7712 	netdev_info(priv->dev, "%s: removing driver", __func__);
7713 
7714 	pm_runtime_get_sync(dev);
7715 
7716 	stmmac_stop_all_dma(priv);
7717 	stmmac_mac_set(priv, priv->ioaddr, false);
7718 	unregister_netdev(ndev);
7719 
7720 #ifdef CONFIG_DEBUG_FS
7721 	stmmac_exit_fs(ndev);
7722 #endif
7723 	phylink_destroy(priv->phylink);
7724 	if (priv->plat->stmmac_rst)
7725 		reset_control_assert(priv->plat->stmmac_rst);
7726 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7727 
7728 	stmmac_pcs_clean(ndev);
7729 	stmmac_mdio_unregister(ndev);
7730 
7731 	destroy_workqueue(priv->wq);
7732 	mutex_destroy(&priv->lock);
7733 	bitmap_free(priv->af_xdp_zc_qps);
7734 
7735 	pm_runtime_disable(dev);
7736 	pm_runtime_put_noidle(dev);
7737 }
7738 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7739 
7740 /**
7741  * stmmac_suspend - suspend callback
7742  * @dev: device pointer
7743  * Description: this is the function to suspend the device; it is called
7744  * by the platform driver to stop the network queues, program the PMT
7745  * register (for WoL) and clean up and release the driver resources.
7746  */
7747 int stmmac_suspend(struct device *dev)
7748 {
7749 	struct net_device *ndev = dev_get_drvdata(dev);
7750 	struct stmmac_priv *priv = netdev_priv(ndev);
7751 	u32 chan;
7752 
7753 	if (!ndev || !netif_running(ndev))
7754 		return 0;
7755 
7756 	mutex_lock(&priv->lock);
7757 
7758 	netif_device_detach(ndev);
7759 
7760 	stmmac_disable_all_queues(priv);
7761 
7762 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7763 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7764 
7765 	if (priv->eee_sw_timer_en) {
7766 		priv->tx_path_in_lpi_mode = false;
7767 		del_timer_sync(&priv->eee_ctrl_timer);
7768 	}
7769 
7770 	/* Stop TX/RX DMA */
7771 	stmmac_stop_all_dma(priv);
7772 
7773 	if (priv->plat->serdes_powerdown)
7774 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7775 
7776 	/* Enable Power down mode by programming the PMT regs */
7777 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7778 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7779 		priv->irq_wake = 1;
7780 	} else {
7781 		stmmac_mac_set(priv, priv->ioaddr, false);
7782 		pinctrl_pm_select_sleep_state(priv->device);
7783 	}
7784 
7785 	mutex_unlock(&priv->lock);
7786 
7787 	rtnl_lock();
7788 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7789 		phylink_suspend(priv->phylink, true);
7790 	} else {
7791 		if (device_may_wakeup(priv->device))
7792 			phylink_speed_down(priv->phylink, false);
7793 		phylink_suspend(priv->phylink, false);
7794 	}
7795 	rtnl_unlock();
7796 
7797 	if (stmmac_fpe_supported(priv))
7798 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7799 
7800 	priv->speed = SPEED_UNKNOWN;
7801 	return 0;
7802 }
7803 EXPORT_SYMBOL_GPL(stmmac_suspend);
7804 
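/* Reset the software state of the RX ring (consumer/producer indices);
 * the TX counterpart below additionally clears the cached MSS and the
 * BQL state of the queue.
 */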
7805 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7806 {
7807 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7808 
7809 	rx_q->cur_rx = 0;
7810 	rx_q->dirty_rx = 0;
7811 }
7812 
7813 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7814 {
7815 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7816 
7817 	tx_q->cur_tx = 0;
7818 	tx_q->dirty_tx = 0;
7819 	tx_q->mss = 0;
7820 
7821 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7822 }
7823 
7824 /**
7825  * stmmac_reset_queues_param - reset queue parameters
7826  * @priv: driver private structure
7827  */
7828 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7829 {
7830 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7831 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7832 	u32 queue;
7833 
7834 	for (queue = 0; queue < rx_cnt; queue++)
7835 		stmmac_reset_rx_queue(priv, queue);
7836 
7837 	for (queue = 0; queue < tx_cnt; queue++)
7838 		stmmac_reset_tx_queue(priv, queue);
7839 }
7840 
7841 /**
7842  * stmmac_resume - resume callback
7843  * @dev: device pointer
7844  * Description: on resume, this function is invoked to set up the DMA and the
7845  * core in a usable state.
7846  */
7847 int stmmac_resume(struct device *dev)
7848 {
7849 	struct net_device *ndev = dev_get_drvdata(dev);
7850 	struct stmmac_priv *priv = netdev_priv(ndev);
7851 	int ret;
7852 
7853 	if (!netif_running(ndev))
7854 		return 0;
7855 
7856 	/* The Power Down bit in the PMT register is cleared
7857 	 * automatically as soon as a magic packet or a Wake-up frame
7858 	 * is received. Nevertheless, it's better to clear this bit
7859 	 * manually because it can cause problems when resuming
7860 	 * from other devices (e.g. a serial console).
7861 	 */
7862 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7863 		mutex_lock(&priv->lock);
7864 		stmmac_pmt(priv, priv->hw, 0);
7865 		mutex_unlock(&priv->lock);
7866 		priv->irq_wake = 0;
7867 	} else {
7868 		pinctrl_pm_select_default_state(priv->device);
7869 		/* reset the phy so that it's ready */
7870 		if (priv->mii)
7871 			stmmac_mdio_reset(priv->mii);
7872 	}
7873 
7874 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7875 	    priv->plat->serdes_powerup) {
7876 		ret = priv->plat->serdes_powerup(ndev,
7877 						 priv->plat->bsp_priv);
7878 
7879 		if (ret < 0)
7880 			return ret;
7881 	}
7882 
7883 	rtnl_lock();
7884 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7885 		phylink_resume(priv->phylink);
7886 	} else {
7887 		phylink_resume(priv->phylink);
7888 		if (device_may_wakeup(priv->device))
7889 			phylink_speed_up(priv->phylink);
7890 	}
7891 	rtnl_unlock();
7892 
7893 	rtnl_lock();
7894 	mutex_lock(&priv->lock);
7895 
7896 	stmmac_reset_queues_param(priv);
7897 
7898 	stmmac_free_tx_skbufs(priv);
7899 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7900 
7901 	stmmac_hw_setup(ndev, false);
7902 	stmmac_init_coalesce(priv);
7903 	stmmac_set_rx_mode(ndev);
7904 
7905 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7906 
7907 	stmmac_enable_all_queues(priv);
7908 	stmmac_enable_all_dma_irq(priv);
7909 
7910 	mutex_unlock(&priv->lock);
7911 	rtnl_unlock();
7912 
7913 	netif_device_attach(ndev);
7914 
7915 	return 0;
7916 }
7917 EXPORT_SYMBOL_GPL(stmmac_resume);
7918 
7919 #ifndef MODULE
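/* Parse the "stmmaceth=" kernel command line when the driver is built in.
 * The options mirror the module parameters, e.g. (illustrative values):
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 */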
7920 static int __init stmmac_cmdline_opt(char *str)
7921 {
7922 	char *opt;
7923 
7924 	if (!str || !*str)
7925 		return 1;
7926 	while ((opt = strsep(&str, ",")) != NULL) {
7927 		if (!strncmp(opt, "debug:", 6)) {
7928 			if (kstrtoint(opt + 6, 0, &debug))
7929 				goto err;
7930 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7931 			if (kstrtoint(opt + 8, 0, &phyaddr))
7932 				goto err;
7933 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7934 			if (kstrtoint(opt + 7, 0, &buf_sz))
7935 				goto err;
7936 		} else if (!strncmp(opt, "tc:", 3)) {
7937 			if (kstrtoint(opt + 3, 0, &tc))
7938 				goto err;
7939 		} else if (!strncmp(opt, "watchdog:", 9)) {
7940 			if (kstrtoint(opt + 9, 0, &watchdog))
7941 				goto err;
7942 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7943 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7944 				goto err;
7945 		} else if (!strncmp(opt, "pause:", 6)) {
7946 			if (kstrtoint(opt + 6, 0, &pause))
7947 				goto err;
7948 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7949 			if (kstrtoint(opt + 10, 0, &eee_timer))
7950 				goto err;
7951 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7952 			if (kstrtoint(opt + 11, 0, &chain_mode))
7953 				goto err;
7954 		}
7955 	}
7956 	return 1;
7957 
7958 err:
7959 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7960 	return 1;
7961 }
7962 
7963 __setup("stmmaceth=", stmmac_cmdline_opt);
7964 #endif /* MODULE */
7965 
7966 static int __init stmmac_init(void)
7967 {
7968 #ifdef CONFIG_DEBUG_FS
7969 	/* Create debugfs main directory if it doesn't exist yet */
7970 	if (!stmmac_fs_dir)
7971 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7972 	register_netdevice_notifier(&stmmac_notifier);
7973 #endif
7974 
7975 	return 0;
7976 }
7977 
7978 static void __exit stmmac_exit(void)
7979 {
7980 #ifdef CONFIG_DEBUG_FS
7981 	unregister_netdevice_notifier(&stmmac_notifier);
7982 	debugfs_remove_recursive(stmmac_fs_dir);
7983 #endif
7984 }
7985 
7986 module_init(stmmac_init)
7987 module_exit(stmmac_exit)
7988 
7989 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7990 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7991 MODULE_LICENSE("GPL");
7992