xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 5de6c855e23e99d76c143ee2a29766e7f7f9fe65)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/pm_wakeirq.h>
33 #include <linux/prefetch.h>
34 #include <linux/pinctrl/consumer.h>
35 #ifdef CONFIG_DEBUG_FS
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38 #endif /* CONFIG_DEBUG_FS */
39 #include <linux/net_tstamp.h>
40 #include <linux/phylink.h>
41 #include <linux/udp.h>
42 #include <linux/bpf_trace.h>
43 #include <net/page_pool/helpers.h>
44 #include <net/pkt_cls.h>
45 #include <net/xdp_sock_drv.h>
46 #include "stmmac_ptp.h"
47 #include "stmmac_fpe.h"
48 #include "stmmac.h"
49 #include "stmmac_xdp.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 #include "dwxgmac2.h"
54 #include "hwif.h"
55 
56 /* As long as the interface is active, we keep the timestamping counter enabled
57  * with fine resolution and binary rollover. This avoid non-monotonic behavior
58  * (clock jumps) when changing timestamping settings at runtime.
59  */
60 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
61 				 PTP_TCR_TSCTRLSSR)
62 
63 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
64 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
65 
66 /* Module parameters */
67 #define TX_TIMEO	5000
68 static int watchdog = TX_TIMEO;
69 module_param(watchdog, int, 0644);
70 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
71 
72 static int debug = -1;
73 module_param(debug, int, 0644);
74 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
75 
76 static int phyaddr = -1;
77 module_param(phyaddr, int, 0444);
78 MODULE_PARM_DESC(phyaddr, "Physical device address");
79 
80 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
81 
82 /* Limit to make sure XDP TX and slow path can coexist */
83 #define STMMAC_XSK_TX_BUDGET_MAX	256
84 #define STMMAC_TX_XSK_AVAIL		16
85 #define STMMAC_RX_FILL_BATCH		16
86 
87 #define STMMAC_XDP_PASS		0
88 #define STMMAC_XDP_CONSUMED	BIT(0)
89 #define STMMAC_XDP_TX		BIT(1)
90 #define STMMAC_XDP_REDIRECT	BIT(2)
91 
92 static int flow_ctrl = 0xdead;
93 module_param(flow_ctrl, int, 0644);
94 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
95 
96 static int pause = PAUSE_TIME;
97 module_param(pause, int, 0644);
98 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
99 
100 #define TC_DEFAULT 64
101 static int tc = TC_DEFAULT;
102 module_param(tc, int, 0644);
103 MODULE_PARM_DESC(tc, "DMA threshold control value");
104 
105 /* This is unused */
106 #define	DEFAULT_BUFSIZE	1536
107 static int buf_sz = DEFAULT_BUFSIZE;
108 module_param(buf_sz, int, 0644);
109 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, uint, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allow user to force to use the chain instead of the ring
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
184  * @bsp_priv: BSP private data structure (unused)
185  * @clk_tx_i: the transmit clock
186  * @interface: the selected interface mode
187  * @speed: the speed that the MAC will be operating at
188  *
189  * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
190  * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
191  * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
192  * the plat_data->set_clk_tx_rate method directly, call it via their own
193  * implementation, or implement their own method should they have more
194  * complex requirements. It is intended to only be used in this method.
195  *
196  * plat_data->clk_tx_i must be filled in.
197  */
198 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
199 			   phy_interface_t interface, int speed)
200 {
201 	long rate = rgmii_clock(speed);
202 
203 	/* Silently ignore unsupported speeds as rgmii_clock() only
204 	 * supports 10, 100 and 1000Mbps. We do not want to spit
205 	 * errors for 2500 and higher speeds here.
206 	 */
207 	if (rate < 0)
208 		return 0;
209 
210 	return clk_set_rate(clk_tx_i, rate);
211 }
212 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
213 
214 /**
215  * stmmac_verify_args - verify the driver parameters.
216  * Description: it checks the driver parameters and set a default in case of
217  * errors.
218  */
219 static void stmmac_verify_args(void)
220 {
221 	if (unlikely(watchdog < 0))
222 		watchdog = TX_TIMEO;
223 	if (unlikely((pause < 0) || (pause > 0xffff)))
224 		pause = PAUSE_TIME;
225 
226 	if (flow_ctrl != 0xdead)
227 		pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
228 }
229 
230 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
234 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
235 	u32 queue;
236 
237 	for (queue = 0; queue < maxq; queue++) {
238 		struct stmmac_channel *ch = &priv->channel[queue];
239 
240 		if (stmmac_xdp_is_enabled(priv) &&
241 		    test_bit(queue, priv->af_xdp_zc_qps)) {
242 			napi_disable(&ch->rxtx_napi);
243 			continue;
244 		}
245 
246 		if (queue < rx_queues_cnt)
247 			napi_disable(&ch->rx_napi);
248 		if (queue < tx_queues_cnt)
249 			napi_disable(&ch->tx_napi);
250 	}
251 }
252 
253 /**
254  * stmmac_disable_all_queues - Disable all queues
255  * @priv: driver private structure
256  */
257 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
258 {
259 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
260 	struct stmmac_rx_queue *rx_q;
261 	u32 queue;
262 
263 	/* synchronize_rcu() needed for pending XDP buffers to drain */
264 	for (queue = 0; queue < rx_queues_cnt; queue++) {
265 		rx_q = &priv->dma_conf.rx_queue[queue];
266 		if (rx_q->xsk_pool) {
267 			synchronize_rcu();
268 			break;
269 		}
270 	}
271 
272 	__stmmac_disable_all_queues(priv);
273 }
274 
275 /**
276  * stmmac_enable_all_queues - Enable all queues
277  * @priv: driver private structure
278  */
279 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
280 {
281 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
282 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
283 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
284 	u32 queue;
285 
286 	for (queue = 0; queue < maxq; queue++) {
287 		struct stmmac_channel *ch = &priv->channel[queue];
288 
289 		if (stmmac_xdp_is_enabled(priv) &&
290 		    test_bit(queue, priv->af_xdp_zc_qps)) {
291 			napi_enable(&ch->rxtx_napi);
292 			continue;
293 		}
294 
295 		if (queue < rx_queues_cnt)
296 			napi_enable(&ch->rx_napi);
297 		if (queue < tx_queues_cnt)
298 			napi_enable(&ch->tx_napi);
299 	}
300 }
301 
302 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
303 {
304 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
305 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
306 		queue_work(priv->wq, &priv->service_task);
307 }
308 
309 static void stmmac_global_err(struct stmmac_priv *priv)
310 {
311 	netif_carrier_off(priv->dev);
312 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
313 	stmmac_service_event_schedule(priv);
314 }
315 
316 /**
317  * stmmac_clk_csr_set - dynamically set the MDC clock
318  * @priv: driver private structure
319  * Description: this is to dynamically set the MDC clock according to the csr
320  * clock input.
321  * Note:
322  *	If a specific clk_csr value is passed from the platform
323  *	this means that the CSR Clock Range selection cannot be
324  *	changed at run-time and it is fixed (as reported in the driver
325  *	documentation). Viceversa the driver will try to set the MDC
326  *	clock dynamically according to the actual clock input.
327  */
328 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
329 {
330 	unsigned long clk_rate;
331 
332 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
333 
334 	/* Platform provided default clk_csr would be assumed valid
335 	 * for all other cases except for the below mentioned ones.
336 	 * For values higher than the IEEE 802.3 specified frequency
337 	 * we can not estimate the proper divider as it is not known
338 	 * the frequency of clk_csr_i. So we do not change the default
339 	 * divider.
340 	 */
341 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
342 		if (clk_rate < CSR_F_35M)
343 			priv->clk_csr = STMMAC_CSR_20_35M;
344 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
345 			priv->clk_csr = STMMAC_CSR_35_60M;
346 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
347 			priv->clk_csr = STMMAC_CSR_60_100M;
348 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
349 			priv->clk_csr = STMMAC_CSR_100_150M;
350 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
351 			priv->clk_csr = STMMAC_CSR_150_250M;
352 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
353 			priv->clk_csr = STMMAC_CSR_250_300M;
354 		else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
355 			priv->clk_csr = STMMAC_CSR_300_500M;
356 		else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
357 			priv->clk_csr = STMMAC_CSR_500_800M;
358 	}
359 
360 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
361 		if (clk_rate > 160000000)
362 			priv->clk_csr = 0x03;
363 		else if (clk_rate > 80000000)
364 			priv->clk_csr = 0x02;
365 		else if (clk_rate > 40000000)
366 			priv->clk_csr = 0x01;
367 		else
368 			priv->clk_csr = 0;
369 	}
370 
371 	if (priv->plat->has_xgmac) {
372 		if (clk_rate > 400000000)
373 			priv->clk_csr = 0x5;
374 		else if (clk_rate > 350000000)
375 			priv->clk_csr = 0x4;
376 		else if (clk_rate > 300000000)
377 			priv->clk_csr = 0x3;
378 		else if (clk_rate > 250000000)
379 			priv->clk_csr = 0x2;
380 		else if (clk_rate > 150000000)
381 			priv->clk_csr = 0x1;
382 		else
383 			priv->clk_csr = 0x0;
384 	}
385 }
386 
387 static void print_pkt(unsigned char *buf, int len)
388 {
389 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
390 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
391 }
392 
393 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
394 {
395 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
396 	u32 avail;
397 
398 	if (tx_q->dirty_tx > tx_q->cur_tx)
399 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
400 	else
401 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
402 
403 	return avail;
404 }
405 
406 /**
407  * stmmac_rx_dirty - Get RX queue dirty
408  * @priv: driver private structure
409  * @queue: RX queue index
410  */
411 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
412 {
413 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
414 	u32 dirty;
415 
416 	if (rx_q->dirty_rx <= rx_q->cur_rx)
417 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
418 	else
419 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
420 
421 	return dirty;
422 }
423 
424 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
425 {
426 	u32 tx_cnt = priv->plat->tx_queues_to_use;
427 	u32 queue;
428 
429 	/* check if all TX queues have the work finished */
430 	for (queue = 0; queue < tx_cnt; queue++) {
431 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
432 
433 		if (tx_q->dirty_tx != tx_q->cur_tx)
434 			return true; /* still unfinished work */
435 	}
436 
437 	return false;
438 }
439 
440 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
441 {
442 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
443 }
444 
445 /**
446  * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
447  * @priv: driver private structure
448  * Description: this function is to verify and enter in LPI mode in case of
449  * EEE.
450  */
451 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
452 {
453 	if (stmmac_eee_tx_busy(priv)) {
454 		stmmac_restart_sw_lpi_timer(priv);
455 		return;
456 	}
457 
458 	/* Check and enter in LPI mode */
459 	if (!priv->tx_path_in_lpi_mode)
460 		stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
461 				    priv->tx_lpi_clk_stop, 0);
462 }
463 
464 /**
465  * stmmac_stop_sw_lpi - stop transmitting LPI
466  * @priv: driver private structure
467  * Description: When using software-controlled LPI, stop transmitting LPI state.
468  */
469 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
470 {
471 	timer_delete_sync(&priv->eee_ctrl_timer);
472 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
473 	priv->tx_path_in_lpi_mode = false;
474 }
475 
476 /**
477  * stmmac_eee_ctrl_timer - EEE TX SW timer.
478  * @t:  timer_list struct containing private info
479  * Description:
480  *  if there is no data transfer and if we are not in LPI state,
481  *  then MAC Transmitter can be moved to LPI state.
482  */
483 static void stmmac_eee_ctrl_timer(struct timer_list *t)
484 {
485 	struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
486 
487 	stmmac_try_to_start_sw_lpi(priv);
488 }
489 
490 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
491  * @priv: driver private structure
492  * @p : descriptor pointer
493  * @skb : the socket buffer
494  * Description :
495  * This function will read timestamp from the descriptor & pass it to stack.
496  * and also perform some sanity checks.
497  */
498 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
499 				   struct dma_desc *p, struct sk_buff *skb)
500 {
501 	struct skb_shared_hwtstamps shhwtstamp;
502 	bool found = false;
503 	u64 ns = 0;
504 
505 	if (!priv->hwts_tx_en)
506 		return;
507 
508 	/* exit if skb doesn't support hw tstamp */
509 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
510 		return;
511 
512 	/* check tx tstamp status */
513 	if (stmmac_get_tx_timestamp_status(priv, p)) {
514 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
515 		found = true;
516 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
517 		found = true;
518 	}
519 
520 	if (found) {
521 		ns -= priv->plat->cdc_error_adj;
522 
523 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
524 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
525 
526 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
527 		/* pass tstamp to stack */
528 		skb_tstamp_tx(skb, &shhwtstamp);
529 	}
530 }
531 
532 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
533  * @priv: driver private structure
534  * @p : descriptor pointer
535  * @np : next descriptor pointer
536  * @skb : the socket buffer
537  * Description :
538  * This function will read received packet's timestamp from the descriptor
539  * and pass it to stack. It also perform some sanity checks.
540  */
541 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
542 				   struct dma_desc *np, struct sk_buff *skb)
543 {
544 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
545 	struct dma_desc *desc = p;
546 	u64 ns = 0;
547 
548 	if (!priv->hwts_rx_en)
549 		return;
550 	/* For GMAC4, the valid timestamp is from CTX next desc. */
551 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
552 		desc = np;
553 
554 	/* Check if timestamp is available */
555 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
556 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
557 
558 		ns -= priv->plat->cdc_error_adj;
559 
560 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
561 		shhwtstamp = skb_hwtstamps(skb);
562 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
563 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
564 	} else  {
565 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
566 	}
567 }
568 
569 /**
570  *  stmmac_hwtstamp_set - control hardware timestamping.
571  *  @dev: device pointer.
572  *  @config: the timestamping configuration.
573  *  @extack: netlink extended ack structure for error reporting.
574  *  Description:
575  *  This function configures the MAC to enable/disable both outgoing(TX)
576  *  and incoming(RX) packets time stamping based on user input.
577  *  Return Value:
578  *  0 on success and an appropriate -ve integer on failure.
579  */
580 static int stmmac_hwtstamp_set(struct net_device *dev,
581 			       struct kernel_hwtstamp_config *config,
582 			       struct netlink_ext_ack *extack)
583 {
584 	struct stmmac_priv *priv = netdev_priv(dev);
585 	u32 ptp_v2 = 0;
586 	u32 tstamp_all = 0;
587 	u32 ptp_over_ipv4_udp = 0;
588 	u32 ptp_over_ipv6_udp = 0;
589 	u32 ptp_over_ethernet = 0;
590 	u32 snap_type_sel = 0;
591 	u32 ts_master_en = 0;
592 	u32 ts_event_en = 0;
593 
594 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
595 		NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
596 		priv->hwts_tx_en = 0;
597 		priv->hwts_rx_en = 0;
598 
599 		return -EOPNOTSUPP;
600 	}
601 
602 	if (!netif_running(dev)) {
603 		NL_SET_ERR_MSG_MOD(extack,
604 				   "Cannot change timestamping configuration while down");
605 		return -ENODEV;
606 	}
607 
608 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
609 		   __func__, config->flags, config->tx_type, config->rx_filter);
610 
611 	if (config->tx_type != HWTSTAMP_TX_OFF &&
612 	    config->tx_type != HWTSTAMP_TX_ON)
613 		return -ERANGE;
614 
615 	if (priv->adv_ts) {
616 		switch (config->rx_filter) {
617 		case HWTSTAMP_FILTER_NONE:
618 			/* time stamp no incoming packet at all */
619 			config->rx_filter = HWTSTAMP_FILTER_NONE;
620 			break;
621 
622 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
623 			/* PTP v1, UDP, any kind of event packet */
624 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
625 			/* 'xmac' hardware can support Sync, Pdelay_Req and
626 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
627 			 * This leaves Delay_Req timestamps out.
628 			 * Enable all events *and* general purpose message
629 			 * timestamping
630 			 */
631 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
632 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
633 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
634 			break;
635 
636 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
637 			/* PTP v1, UDP, Sync packet */
638 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
639 			/* take time stamp for SYNC messages only */
640 			ts_event_en = PTP_TCR_TSEVNTENA;
641 
642 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
643 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
644 			break;
645 
646 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
647 			/* PTP v1, UDP, Delay_req packet */
648 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
649 			/* take time stamp for Delay_Req messages only */
650 			ts_master_en = PTP_TCR_TSMSTRENA;
651 			ts_event_en = PTP_TCR_TSEVNTENA;
652 
653 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
654 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
658 			/* PTP v2, UDP, any kind of event packet */
659 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
660 			ptp_v2 = PTP_TCR_TSVER2ENA;
661 			/* take time stamp for all event messages */
662 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
663 
664 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
665 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
666 			break;
667 
668 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
669 			/* PTP v2, UDP, Sync packet */
670 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
671 			ptp_v2 = PTP_TCR_TSVER2ENA;
672 			/* take time stamp for SYNC messages only */
673 			ts_event_en = PTP_TCR_TSEVNTENA;
674 
675 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
676 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
677 			break;
678 
679 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
680 			/* PTP v2, UDP, Delay_req packet */
681 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
682 			ptp_v2 = PTP_TCR_TSVER2ENA;
683 			/* take time stamp for Delay_Req messages only */
684 			ts_master_en = PTP_TCR_TSMSTRENA;
685 			ts_event_en = PTP_TCR_TSEVNTENA;
686 
687 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
688 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
689 			break;
690 
691 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
692 			/* PTP v2/802.AS1 any layer, any kind of event packet */
693 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
694 			ptp_v2 = PTP_TCR_TSVER2ENA;
695 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
696 			if (priv->synopsys_id < DWMAC_CORE_4_10)
697 				ts_event_en = PTP_TCR_TSEVNTENA;
698 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
699 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
700 			ptp_over_ethernet = PTP_TCR_TSIPENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
704 			/* PTP v2/802.AS1, any layer, Sync packet */
705 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			ptp_over_ethernet = PTP_TCR_TSIPENA;
713 			break;
714 
715 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
716 			/* PTP v2/802.AS1, any layer, Delay_req packet */
717 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
718 			ptp_v2 = PTP_TCR_TSVER2ENA;
719 			/* take time stamp for Delay_Req messages only */
720 			ts_master_en = PTP_TCR_TSMSTRENA;
721 			ts_event_en = PTP_TCR_TSEVNTENA;
722 
723 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
724 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
725 			ptp_over_ethernet = PTP_TCR_TSIPENA;
726 			break;
727 
728 		case HWTSTAMP_FILTER_NTP_ALL:
729 		case HWTSTAMP_FILTER_ALL:
730 			/* time stamp any incoming packet */
731 			config->rx_filter = HWTSTAMP_FILTER_ALL;
732 			tstamp_all = PTP_TCR_TSENALL;
733 			break;
734 
735 		default:
736 			return -ERANGE;
737 		}
738 	} else {
739 		switch (config->rx_filter) {
740 		case HWTSTAMP_FILTER_NONE:
741 			config->rx_filter = HWTSTAMP_FILTER_NONE;
742 			break;
743 		default:
744 			/* PTP v1, UDP, any kind of event packet */
745 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
746 			break;
747 		}
748 	}
749 	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
750 	priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
751 
752 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
753 
754 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
755 		priv->systime_flags |= tstamp_all | ptp_v2 |
756 				       ptp_over_ethernet | ptp_over_ipv6_udp |
757 				       ptp_over_ipv4_udp | ts_event_en |
758 				       ts_master_en | snap_type_sel;
759 	}
760 
761 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
762 
763 	priv->tstamp_config = *config;
764 
765 	return 0;
766 }
767 
768 /**
769  *  stmmac_hwtstamp_get - read hardware timestamping.
770  *  @dev: device pointer.
771  *  @config: the timestamping configuration.
772  *  Description:
773  *  This function obtain the current hardware timestamping settings
774  *  as requested.
775  */
776 static int stmmac_hwtstamp_get(struct net_device *dev,
777 			       struct kernel_hwtstamp_config *config)
778 {
779 	struct stmmac_priv *priv = netdev_priv(dev);
780 
781 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
782 		return -EOPNOTSUPP;
783 
784 	*config = priv->tstamp_config;
785 
786 	return 0;
787 }
788 
789 /**
790  * stmmac_init_tstamp_counter - init hardware timestamping counter
791  * @priv: driver private structure
792  * @systime_flags: timestamping flags
793  * Description:
794  * Initialize hardware counter for packet timestamping.
795  * This is valid as long as the interface is open and not suspended.
796  * Will be rerun after resuming from suspend, case in which the timestamping
797  * flags updated by stmmac_hwtstamp_set() also need to be restored.
798  */
799 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
800 {
801 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
802 	struct timespec64 now;
803 	u32 sec_inc = 0;
804 	u64 temp = 0;
805 
806 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
807 		return -EOPNOTSUPP;
808 
809 	if (!priv->plat->clk_ptp_rate) {
810 		netdev_err(priv->dev, "Invalid PTP clock rate");
811 		return -EINVAL;
812 	}
813 
814 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
815 	priv->systime_flags = systime_flags;
816 
817 	/* program Sub Second Increment reg */
818 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
819 					   priv->plat->clk_ptp_rate,
820 					   xmac, &sec_inc);
821 	temp = div_u64(1000000000ULL, sec_inc);
822 
823 	/* Store sub second increment for later use */
824 	priv->sub_second_inc = sec_inc;
825 
826 	/* calculate default added value:
827 	 * formula is :
828 	 * addend = (2^32)/freq_div_ratio;
829 	 * where, freq_div_ratio = 1e9ns/sec_inc
830 	 */
831 	temp = (u64)(temp << 32);
832 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
833 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
834 
835 	/* initialize system time */
836 	ktime_get_real_ts64(&now);
837 
838 	/* lower 32 bits of tv_sec are safe until y2106 */
839 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
840 
841 	return 0;
842 }
843 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
844 
845 /**
846  * stmmac_init_ptp - init PTP
847  * @priv: driver private structure
848  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
849  * This is done by looking at the HW cap. register.
850  * This function also registers the ptp driver.
851  */
852 static int stmmac_init_ptp(struct stmmac_priv *priv)
853 {
854 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
855 	int ret;
856 
857 	if (priv->plat->ptp_clk_freq_config)
858 		priv->plat->ptp_clk_freq_config(priv);
859 
860 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
861 	if (ret)
862 		return ret;
863 
864 	priv->adv_ts = 0;
865 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
866 	if (xmac && priv->dma_cap.atime_stamp)
867 		priv->adv_ts = 1;
868 	/* Dwmac 3.x core with extend_desc can support adv_ts */
869 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
870 		priv->adv_ts = 1;
871 
872 	if (priv->dma_cap.time_stamp)
873 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
874 
875 	if (priv->adv_ts)
876 		netdev_info(priv->dev,
877 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
878 
879 	priv->hwts_tx_en = 0;
880 	priv->hwts_rx_en = 0;
881 
882 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
883 		stmmac_hwtstamp_correct_latency(priv, priv);
884 
885 	return 0;
886 }
887 
888 static void stmmac_release_ptp(struct stmmac_priv *priv)
889 {
890 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
891 	stmmac_ptp_unregister(priv);
892 }
893 
894 /**
895  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
896  *  @priv: driver private structure
897  *  @duplex: duplex passed to the next function
898  *  @flow_ctrl: desired flow control modes
899  *  Description: It is used for configuring the flow control in all queues
900  */
901 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
902 				 unsigned int flow_ctrl)
903 {
904 	u32 tx_cnt = priv->plat->tx_queues_to_use;
905 
906 	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
907 			 tx_cnt);
908 }
909 
910 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
911 					 phy_interface_t interface)
912 {
913 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
914 
915 	/* Refresh the MAC-specific capabilities */
916 	stmmac_mac_update_caps(priv);
917 
918 	config->mac_capabilities = priv->hw->link.caps;
919 
920 	if (priv->plat->max_speed)
921 		phylink_limit_mac_speed(config, priv->plat->max_speed);
922 
923 	return config->mac_capabilities;
924 }
925 
926 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
927 						 phy_interface_t interface)
928 {
929 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
930 	struct phylink_pcs *pcs;
931 
932 	if (priv->plat->select_pcs) {
933 		pcs = priv->plat->select_pcs(priv, interface);
934 		if (!IS_ERR(pcs))
935 			return pcs;
936 	}
937 
938 	return NULL;
939 }
940 
941 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
942 			      const struct phylink_link_state *state)
943 {
944 	/* Nothing to do, xpcs_config() handles everything */
945 }
946 
947 static void stmmac_mac_link_down(struct phylink_config *config,
948 				 unsigned int mode, phy_interface_t interface)
949 {
950 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
951 
952 	stmmac_mac_set(priv, priv->ioaddr, false);
953 	if (priv->dma_cap.eee)
954 		stmmac_set_eee_pls(priv, priv->hw, false);
955 
956 	if (stmmac_fpe_supported(priv))
957 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
958 }
959 
960 static void stmmac_mac_link_up(struct phylink_config *config,
961 			       struct phy_device *phy,
962 			       unsigned int mode, phy_interface_t interface,
963 			       int speed, int duplex,
964 			       bool tx_pause, bool rx_pause)
965 {
966 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
967 	unsigned int flow_ctrl;
968 	u32 old_ctrl, ctrl;
969 	int ret;
970 
971 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
972 	    priv->plat->serdes_powerup)
973 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
974 
975 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
976 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
977 
978 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
979 		switch (speed) {
980 		case SPEED_10000:
981 			ctrl |= priv->hw->link.xgmii.speed10000;
982 			break;
983 		case SPEED_5000:
984 			ctrl |= priv->hw->link.xgmii.speed5000;
985 			break;
986 		case SPEED_2500:
987 			ctrl |= priv->hw->link.xgmii.speed2500;
988 			break;
989 		default:
990 			return;
991 		}
992 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
993 		switch (speed) {
994 		case SPEED_100000:
995 			ctrl |= priv->hw->link.xlgmii.speed100000;
996 			break;
997 		case SPEED_50000:
998 			ctrl |= priv->hw->link.xlgmii.speed50000;
999 			break;
1000 		case SPEED_40000:
1001 			ctrl |= priv->hw->link.xlgmii.speed40000;
1002 			break;
1003 		case SPEED_25000:
1004 			ctrl |= priv->hw->link.xlgmii.speed25000;
1005 			break;
1006 		case SPEED_10000:
1007 			ctrl |= priv->hw->link.xgmii.speed10000;
1008 			break;
1009 		case SPEED_2500:
1010 			ctrl |= priv->hw->link.speed2500;
1011 			break;
1012 		case SPEED_1000:
1013 			ctrl |= priv->hw->link.speed1000;
1014 			break;
1015 		default:
1016 			return;
1017 		}
1018 	} else {
1019 		switch (speed) {
1020 		case SPEED_2500:
1021 			ctrl |= priv->hw->link.speed2500;
1022 			break;
1023 		case SPEED_1000:
1024 			ctrl |= priv->hw->link.speed1000;
1025 			break;
1026 		case SPEED_100:
1027 			ctrl |= priv->hw->link.speed100;
1028 			break;
1029 		case SPEED_10:
1030 			ctrl |= priv->hw->link.speed10;
1031 			break;
1032 		default:
1033 			return;
1034 		}
1035 	}
1036 
1037 	if (priv->plat->fix_mac_speed)
1038 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1039 
1040 	if (!duplex)
1041 		ctrl &= ~priv->hw->link.duplex;
1042 	else
1043 		ctrl |= priv->hw->link.duplex;
1044 
1045 	/* Flow Control operation */
1046 	if (rx_pause && tx_pause)
1047 		flow_ctrl = FLOW_AUTO;
1048 	else if (rx_pause && !tx_pause)
1049 		flow_ctrl = FLOW_RX;
1050 	else if (!rx_pause && tx_pause)
1051 		flow_ctrl = FLOW_TX;
1052 	else
1053 		flow_ctrl = FLOW_OFF;
1054 
1055 	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
1056 
1057 	if (ctrl != old_ctrl)
1058 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1059 
1060 	if (priv->plat->set_clk_tx_rate) {
1061 		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
1062 						priv->plat->clk_tx_i,
1063 						interface, speed);
1064 		if (ret < 0)
1065 			netdev_err(priv->dev,
1066 				   "failed to configure %s transmit clock for %dMbps: %pe\n",
1067 				   phy_modes(interface), speed, ERR_PTR(ret));
1068 	}
1069 
1070 	stmmac_mac_set(priv, priv->ioaddr, true);
1071 	if (priv->dma_cap.eee)
1072 		stmmac_set_eee_pls(priv, priv->hw, true);
1073 
1074 	if (stmmac_fpe_supported(priv))
1075 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
1076 
1077 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1078 		stmmac_hwtstamp_correct_latency(priv, priv);
1079 }
1080 
1081 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1082 {
1083 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1084 
1085 	priv->eee_active = false;
1086 
1087 	mutex_lock(&priv->lock);
1088 
1089 	priv->eee_enabled = false;
1090 
1091 	netdev_dbg(priv->dev, "disable EEE\n");
1092 	priv->eee_sw_timer_en = false;
1093 	timer_delete_sync(&priv->eee_ctrl_timer);
1094 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1095 	priv->tx_path_in_lpi_mode = false;
1096 
1097 	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1098 	mutex_unlock(&priv->lock);
1099 }
1100 
1101 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1102 				    bool tx_clk_stop)
1103 {
1104 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1105 	int ret;
1106 
1107 	priv->tx_lpi_timer = timer;
1108 	priv->eee_active = true;
1109 
1110 	mutex_lock(&priv->lock);
1111 
1112 	priv->eee_enabled = true;
1113 
1114 	/* Update the transmit clock stop according to PHY capability if
1115 	 * the platform allows
1116 	 */
1117 	if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1118 		priv->tx_lpi_clk_stop = tx_clk_stop;
1119 
1120 	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1121 			     STMMAC_DEFAULT_TWT_LS);
1122 
1123 	/* Try to cnfigure the hardware timer. */
1124 	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1125 				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1126 
1127 	if (ret) {
1128 		/* Hardware timer mode not supported, or value out of range.
1129 		 * Fall back to using software LPI mode
1130 		 */
1131 		priv->eee_sw_timer_en = true;
1132 		stmmac_restart_sw_lpi_timer(priv);
1133 	}
1134 
1135 	mutex_unlock(&priv->lock);
1136 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1137 
1138 	return 0;
1139 }
1140 
1141 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
1142 			     phy_interface_t interface)
1143 {
1144 	struct net_device *ndev = to_net_dev(config->dev);
1145 	struct stmmac_priv *priv = netdev_priv(ndev);
1146 
1147 	if (priv->plat->mac_finish)
1148 		priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
1149 
1150 	return 0;
1151 }
1152 
1153 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1154 	.mac_get_caps = stmmac_mac_get_caps,
1155 	.mac_select_pcs = stmmac_mac_select_pcs,
1156 	.mac_config = stmmac_mac_config,
1157 	.mac_link_down = stmmac_mac_link_down,
1158 	.mac_link_up = stmmac_mac_link_up,
1159 	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1160 	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1161 	.mac_finish = stmmac_mac_finish,
1162 };
1163 
1164 /**
1165  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1166  * @priv: driver private structure
1167  * Description: this is to verify if the HW supports the PCS.
1168  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1169  * configured for the TBI, RTBI, or SGMII PHY interface.
1170  */
1171 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1172 {
1173 	int interface = priv->plat->mac_interface;
1174 
1175 	if (priv->dma_cap.pcs) {
1176 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1177 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1178 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1179 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1180 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1181 			priv->hw->pcs = STMMAC_PCS_RGMII;
1182 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1183 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1184 			priv->hw->pcs = STMMAC_PCS_SGMII;
1185 		}
1186 	}
1187 }
1188 
1189 /**
1190  * stmmac_init_phy - PHY initialization
1191  * @dev: net device structure
1192  * Description: it initializes the driver's PHY state, and attaches the PHY
1193  * to the mac driver.
1194  *  Return value:
1195  *  0 on success
1196  */
1197 static int stmmac_init_phy(struct net_device *dev)
1198 {
1199 	struct stmmac_priv *priv = netdev_priv(dev);
1200 	struct fwnode_handle *phy_fwnode;
1201 	struct fwnode_handle *fwnode;
1202 	int ret;
1203 
1204 	if (!phylink_expects_phy(priv->phylink))
1205 		return 0;
1206 
1207 	fwnode = priv->plat->port_node;
1208 	if (!fwnode)
1209 		fwnode = dev_fwnode(priv->device);
1210 
1211 	if (fwnode)
1212 		phy_fwnode = fwnode_get_phy_node(fwnode);
1213 	else
1214 		phy_fwnode = NULL;
1215 
1216 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1217 	 * manually parse it
1218 	 */
1219 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1220 		int addr = priv->plat->phy_addr;
1221 		struct phy_device *phydev;
1222 
1223 		if (addr < 0) {
1224 			netdev_err(priv->dev, "no phy found\n");
1225 			return -ENODEV;
1226 		}
1227 
1228 		phydev = mdiobus_get_phy(priv->mii, addr);
1229 		if (!phydev) {
1230 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1231 			return -ENODEV;
1232 		}
1233 
1234 		ret = phylink_connect_phy(priv->phylink, phydev);
1235 	} else {
1236 		fwnode_handle_put(phy_fwnode);
1237 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1238 	}
1239 
1240 	if (ret == 0) {
1241 		struct ethtool_keee eee;
1242 
1243 		/* Configure phylib's copy of the LPI timer. Normally,
1244 		 * phylink_config.lpi_timer_default would do this, but there is
1245 		 * a chance that userspace could change the eee_timer setting
1246 		 * via sysfs before the first open. Thus, preserve existing
1247 		 * behaviour.
1248 		 */
1249 		if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1250 			eee.tx_lpi_timer = priv->tx_lpi_timer;
1251 			phylink_ethtool_set_eee(priv->phylink, &eee);
1252 		}
1253 	}
1254 
1255 	if (!priv->plat->pmt) {
1256 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1257 
1258 		phylink_ethtool_get_wol(priv->phylink, &wol);
1259 		device_set_wakeup_capable(priv->device, !!wol.supported);
1260 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1261 	}
1262 
1263 	return ret;
1264 }
1265 
1266 static int stmmac_phy_setup(struct stmmac_priv *priv)
1267 {
1268 	struct stmmac_mdio_bus_data *mdio_bus_data;
1269 	struct phylink_config *config;
1270 	struct fwnode_handle *fwnode;
1271 	struct phylink_pcs *pcs;
1272 	struct phylink *phylink;
1273 
1274 	config = &priv->phylink_config;
1275 
1276 	config->dev = &priv->dev->dev;
1277 	config->type = PHYLINK_NETDEV;
1278 	config->mac_managed_pm = true;
1279 
1280 	/* Stmmac always requires an RX clock for hardware initialization */
1281 	config->mac_requires_rxc = true;
1282 
1283 	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1284 		config->eee_rx_clk_stop_enable = true;
1285 
1286 	/* Set the default transmit clock stop bit based on the platform glue */
1287 	priv->tx_lpi_clk_stop = priv->plat->flags &
1288 				STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1289 
1290 	mdio_bus_data = priv->plat->mdio_bus_data;
1291 	if (mdio_bus_data)
1292 		config->default_an_inband = mdio_bus_data->default_an_inband;
1293 
1294 	/* Get the PHY interface modes (at the PHY end of the link) that
1295 	 * are supported by the platform.
1296 	 */
1297 	if (priv->plat->get_interfaces)
1298 		priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
1299 					   config->supported_interfaces);
1300 
1301 	/* Set the platform/firmware specified interface mode if the
1302 	 * supported interfaces have not already been provided using
1303 	 * phy_interface as a last resort.
1304 	 */
1305 	if (phy_interface_empty(config->supported_interfaces))
1306 		__set_bit(priv->plat->phy_interface,
1307 			  config->supported_interfaces);
1308 
1309 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1310 	if (priv->hw->xpcs)
1311 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1312 	else
1313 		pcs = priv->hw->phylink_pcs;
1314 
1315 	if (pcs)
1316 		phy_interface_or(config->supported_interfaces,
1317 				 config->supported_interfaces,
1318 				 pcs->supported_interfaces);
1319 
1320 	if (priv->dma_cap.eee) {
1321 		/* Assume all supported interfaces also support LPI */
1322 		memcpy(config->lpi_interfaces, config->supported_interfaces,
1323 		       sizeof(config->lpi_interfaces));
1324 
1325 		/* All full duplex speeds above 100Mbps are supported */
1326 		config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
1327 		config->lpi_timer_default = eee_timer * 1000;
1328 		config->eee_enabled_default = true;
1329 	}
1330 
1331 	fwnode = priv->plat->port_node;
1332 	if (!fwnode)
1333 		fwnode = dev_fwnode(priv->device);
1334 
1335 	phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
1336 				 &stmmac_phylink_mac_ops);
1337 	if (IS_ERR(phylink))
1338 		return PTR_ERR(phylink);
1339 
1340 	priv->phylink = phylink;
1341 	return 0;
1342 }
1343 
1344 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1345 				    struct stmmac_dma_conf *dma_conf)
1346 {
1347 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1348 	unsigned int desc_size;
1349 	void *head_rx;
1350 	u32 queue;
1351 
1352 	/* Display RX rings */
1353 	for (queue = 0; queue < rx_cnt; queue++) {
1354 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1355 
1356 		pr_info("\tRX Queue %u rings\n", queue);
1357 
1358 		if (priv->extend_desc) {
1359 			head_rx = (void *)rx_q->dma_erx;
1360 			desc_size = sizeof(struct dma_extended_desc);
1361 		} else {
1362 			head_rx = (void *)rx_q->dma_rx;
1363 			desc_size = sizeof(struct dma_desc);
1364 		}
1365 
1366 		/* Display RX ring */
1367 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1368 				    rx_q->dma_rx_phy, desc_size);
1369 	}
1370 }
1371 
1372 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1373 				    struct stmmac_dma_conf *dma_conf)
1374 {
1375 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1376 	unsigned int desc_size;
1377 	void *head_tx;
1378 	u32 queue;
1379 
1380 	/* Display TX rings */
1381 	for (queue = 0; queue < tx_cnt; queue++) {
1382 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1383 
1384 		pr_info("\tTX Queue %d rings\n", queue);
1385 
1386 		if (priv->extend_desc) {
1387 			head_tx = (void *)tx_q->dma_etx;
1388 			desc_size = sizeof(struct dma_extended_desc);
1389 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1390 			head_tx = (void *)tx_q->dma_entx;
1391 			desc_size = sizeof(struct dma_edesc);
1392 		} else {
1393 			head_tx = (void *)tx_q->dma_tx;
1394 			desc_size = sizeof(struct dma_desc);
1395 		}
1396 
1397 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1398 				    tx_q->dma_tx_phy, desc_size);
1399 	}
1400 }
1401 
1402 static void stmmac_display_rings(struct stmmac_priv *priv,
1403 				 struct stmmac_dma_conf *dma_conf)
1404 {
1405 	/* Display RX ring */
1406 	stmmac_display_rx_rings(priv, dma_conf);
1407 
1408 	/* Display TX ring */
1409 	stmmac_display_tx_rings(priv, dma_conf);
1410 }
1411 
1412 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1413 {
1414 	if (stmmac_xdp_is_enabled(priv))
1415 		return XDP_PACKET_HEADROOM;
1416 
1417 	return NET_SKB_PAD;
1418 }
1419 
1420 static int stmmac_set_bfsize(int mtu, int bufsize)
1421 {
1422 	int ret = bufsize;
1423 
1424 	if (mtu >= BUF_SIZE_8KiB)
1425 		ret = BUF_SIZE_16KiB;
1426 	else if (mtu >= BUF_SIZE_4KiB)
1427 		ret = BUF_SIZE_8KiB;
1428 	else if (mtu >= BUF_SIZE_2KiB)
1429 		ret = BUF_SIZE_4KiB;
1430 	else if (mtu > DEFAULT_BUFSIZE)
1431 		ret = BUF_SIZE_2KiB;
1432 	else
1433 		ret = DEFAULT_BUFSIZE;
1434 
1435 	return ret;
1436 }
1437 
1438 /**
1439  * stmmac_clear_rx_descriptors - clear RX descriptors
1440  * @priv: driver private structure
1441  * @dma_conf: structure to take the dma data
1442  * @queue: RX queue index
1443  * Description: this function is called to clear the RX descriptors
1444  * in case of both basic and extended descriptors are used.
1445  */
1446 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1447 					struct stmmac_dma_conf *dma_conf,
1448 					u32 queue)
1449 {
1450 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1451 	int i;
1452 
1453 	/* Clear the RX descriptors */
1454 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1455 		if (priv->extend_desc)
1456 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1457 					priv->use_riwt, priv->mode,
1458 					(i == dma_conf->dma_rx_size - 1),
1459 					dma_conf->dma_buf_sz);
1460 		else
1461 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1462 					priv->use_riwt, priv->mode,
1463 					(i == dma_conf->dma_rx_size - 1),
1464 					dma_conf->dma_buf_sz);
1465 }
1466 
1467 /**
1468  * stmmac_clear_tx_descriptors - clear tx descriptors
1469  * @priv: driver private structure
1470  * @dma_conf: structure to take the dma data
1471  * @queue: TX queue index.
1472  * Description: this function is called to clear the TX descriptors
1473  * in case of both basic and extended descriptors are used.
1474  */
1475 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1476 					struct stmmac_dma_conf *dma_conf,
1477 					u32 queue)
1478 {
1479 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1480 	int i;
1481 
1482 	/* Clear the TX descriptors */
1483 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1484 		int last = (i == (dma_conf->dma_tx_size - 1));
1485 		struct dma_desc *p;
1486 
1487 		if (priv->extend_desc)
1488 			p = &tx_q->dma_etx[i].basic;
1489 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1490 			p = &tx_q->dma_entx[i].basic;
1491 		else
1492 			p = &tx_q->dma_tx[i];
1493 
1494 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1495 	}
1496 }
1497 
1498 /**
1499  * stmmac_clear_descriptors - clear descriptors
1500  * @priv: driver private structure
1501  * @dma_conf: structure to take the dma data
1502  * Description: this function is called to clear the TX and RX descriptors
1503  * in case of both basic and extended descriptors are used.
1504  */
1505 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1506 				     struct stmmac_dma_conf *dma_conf)
1507 {
1508 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1509 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1510 	u32 queue;
1511 
1512 	/* Clear the RX descriptors */
1513 	for (queue = 0; queue < rx_queue_cnt; queue++)
1514 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1515 
1516 	/* Clear the TX descriptors */
1517 	for (queue = 0; queue < tx_queue_cnt; queue++)
1518 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1519 }
1520 
1521 /**
1522  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1523  * @priv: driver private structure
1524  * @dma_conf: structure to take the dma data
1525  * @p: descriptor pointer
1526  * @i: descriptor index
1527  * @flags: gfp flag
1528  * @queue: RX queue index
1529  * Description: this function is called to allocate a receive buffer, perform
1530  * the DMA mapping and init the descriptor.
1531  */
1532 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1533 				  struct stmmac_dma_conf *dma_conf,
1534 				  struct dma_desc *p,
1535 				  int i, gfp_t flags, u32 queue)
1536 {
1537 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1538 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1539 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1540 
1541 	if (priv->dma_cap.host_dma_width <= 32)
1542 		gfp |= GFP_DMA32;
1543 
1544 	if (!buf->page) {
1545 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1546 		if (!buf->page)
1547 			return -ENOMEM;
1548 		buf->page_offset = stmmac_rx_offset(priv);
1549 	}
1550 
1551 	if (priv->sph && !buf->sec_page) {
1552 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1553 		if (!buf->sec_page)
1554 			return -ENOMEM;
1555 
1556 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1557 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1558 	} else {
1559 		buf->sec_page = NULL;
1560 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1561 	}
1562 
1563 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1564 
1565 	stmmac_set_desc_addr(priv, p, buf->addr);
1566 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1567 		stmmac_init_desc3(priv, p);
1568 
1569 	return 0;
1570 }
1571 
1572 /**
1573  * stmmac_free_rx_buffer - free RX dma buffers
1574  * @priv: private structure
1575  * @rx_q: RX queue
1576  * @i: buffer index.
1577  */
1578 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1579 				  struct stmmac_rx_queue *rx_q,
1580 				  int i)
1581 {
1582 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1583 
1584 	if (buf->page)
1585 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1586 	buf->page = NULL;
1587 
1588 	if (buf->sec_page)
1589 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1590 	buf->sec_page = NULL;
1591 }
1592 
1593 /**
1594  * stmmac_free_tx_buffer - free RX dma buffers
1595  * @priv: private structure
1596  * @dma_conf: structure to take the dma data
1597  * @queue: RX queue index
1598  * @i: buffer index.
1599  */
1600 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1601 				  struct stmmac_dma_conf *dma_conf,
1602 				  u32 queue, int i)
1603 {
1604 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1605 
1606 	if (tx_q->tx_skbuff_dma[i].buf &&
1607 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1608 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1609 			dma_unmap_page(priv->device,
1610 				       tx_q->tx_skbuff_dma[i].buf,
1611 				       tx_q->tx_skbuff_dma[i].len,
1612 				       DMA_TO_DEVICE);
1613 		else
1614 			dma_unmap_single(priv->device,
1615 					 tx_q->tx_skbuff_dma[i].buf,
1616 					 tx_q->tx_skbuff_dma[i].len,
1617 					 DMA_TO_DEVICE);
1618 	}
1619 
1620 	if (tx_q->xdpf[i] &&
1621 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1622 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1623 		xdp_return_frame(tx_q->xdpf[i]);
1624 		tx_q->xdpf[i] = NULL;
1625 	}
1626 
1627 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1628 		tx_q->xsk_frames_done++;
1629 
1630 	if (tx_q->tx_skbuff[i] &&
1631 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1632 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1633 		tx_q->tx_skbuff[i] = NULL;
1634 	}
1635 
1636 	tx_q->tx_skbuff_dma[i].buf = 0;
1637 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1638 }
1639 
1640 /**
1641  * dma_free_rx_skbufs - free RX dma buffers
1642  * @priv: private structure
1643  * @dma_conf: structure to take the dma data
1644  * @queue: RX queue index
1645  */
1646 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1647 			       struct stmmac_dma_conf *dma_conf,
1648 			       u32 queue)
1649 {
1650 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1651 	int i;
1652 
1653 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1654 		stmmac_free_rx_buffer(priv, rx_q, i);
1655 }
1656 
1657 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1658 				   struct stmmac_dma_conf *dma_conf,
1659 				   u32 queue, gfp_t flags)
1660 {
1661 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1662 	int i;
1663 
1664 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1665 		struct dma_desc *p;
1666 		int ret;
1667 
1668 		if (priv->extend_desc)
1669 			p = &((rx_q->dma_erx + i)->basic);
1670 		else
1671 			p = rx_q->dma_rx + i;
1672 
1673 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1674 					     queue);
1675 		if (ret)
1676 			return ret;
1677 
1678 		rx_q->buf_alloc_num++;
1679 	}
1680 
1681 	return 0;
1682 }
1683 
1684 /**
1685  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1686  * @priv: private structure
1687  * @dma_conf: structure to take the dma data
1688  * @queue: RX queue index
1689  */
1690 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1691 				struct stmmac_dma_conf *dma_conf,
1692 				u32 queue)
1693 {
1694 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1695 	int i;
1696 
1697 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1698 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1699 
1700 		if (!buf->xdp)
1701 			continue;
1702 
1703 		xsk_buff_free(buf->xdp);
1704 		buf->xdp = NULL;
1705 	}
1706 }
1707 
1708 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1709 				      struct stmmac_dma_conf *dma_conf,
1710 				      u32 queue)
1711 {
1712 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1713 	int i;
1714 
1715 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1716 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1717 	 * use this macro to make sure no size violations.
1718 	 */
1719 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1720 
1721 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1722 		struct stmmac_rx_buffer *buf;
1723 		dma_addr_t dma_addr;
1724 		struct dma_desc *p;
1725 
1726 		if (priv->extend_desc)
1727 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1728 		else
1729 			p = rx_q->dma_rx + i;
1730 
1731 		buf = &rx_q->buf_pool[i];
1732 
1733 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1734 		if (!buf->xdp)
1735 			return -ENOMEM;
1736 
1737 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1738 		stmmac_set_desc_addr(priv, p, dma_addr);
1739 		rx_q->buf_alloc_num++;
1740 	}
1741 
1742 	return 0;
1743 }
1744 
1745 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1746 {
1747 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1748 		return NULL;
1749 
1750 	return xsk_get_pool_from_qid(priv->dev, queue);
1751 }
1752 
1753 /**
1754  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1755  * @priv: driver private structure
1756  * @dma_conf: structure to take the dma data
1757  * @queue: RX queue index
1758  * @flags: gfp flag.
1759  * Description: this function initializes the DMA RX descriptors
1760  * and allocates the socket buffers. It supports the chained and ring
1761  * modes.
1762  */
1763 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1764 				    struct stmmac_dma_conf *dma_conf,
1765 				    u32 queue, gfp_t flags)
1766 {
1767 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1768 	int ret;
1769 
1770 	netif_dbg(priv, probe, priv->dev,
1771 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1772 		  (u32)rx_q->dma_rx_phy);
1773 
1774 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1775 
1776 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1777 
1778 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1779 
1780 	if (rx_q->xsk_pool) {
1781 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1782 						   MEM_TYPE_XSK_BUFF_POOL,
1783 						   NULL));
1784 		netdev_info(priv->dev,
1785 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1786 			    rx_q->queue_index);
1787 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1788 	} else {
1789 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1790 						   MEM_TYPE_PAGE_POOL,
1791 						   rx_q->page_pool));
1792 		netdev_info(priv->dev,
1793 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1794 			    rx_q->queue_index);
1795 	}
1796 
1797 	if (rx_q->xsk_pool) {
1798 		/* RX XDP ZC buffer pool may not be populated, e.g.
1799 		 * xdpsock TX-only.
1800 		 */
1801 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1802 	} else {
1803 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1804 		if (ret < 0)
1805 			return -ENOMEM;
1806 	}
1807 
1808 	/* Setup the chained descriptor addresses */
1809 	if (priv->mode == STMMAC_CHAIN_MODE) {
1810 		if (priv->extend_desc)
1811 			stmmac_mode_init(priv, rx_q->dma_erx,
1812 					 rx_q->dma_rx_phy,
1813 					 dma_conf->dma_rx_size, 1);
1814 		else
1815 			stmmac_mode_init(priv, rx_q->dma_rx,
1816 					 rx_q->dma_rx_phy,
1817 					 dma_conf->dma_rx_size, 0);
1818 	}
1819 
1820 	return 0;
1821 }
1822 
1823 static int init_dma_rx_desc_rings(struct net_device *dev,
1824 				  struct stmmac_dma_conf *dma_conf,
1825 				  gfp_t flags)
1826 {
1827 	struct stmmac_priv *priv = netdev_priv(dev);
1828 	u32 rx_count = priv->plat->rx_queues_to_use;
1829 	int queue;
1830 	int ret;
1831 
1832 	/* RX INITIALIZATION */
1833 	netif_dbg(priv, probe, priv->dev,
1834 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1835 
1836 	for (queue = 0; queue < rx_count; queue++) {
1837 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1838 		if (ret)
1839 			goto err_init_rx_buffers;
1840 	}
1841 
1842 	return 0;
1843 
1844 err_init_rx_buffers:
1845 	while (queue >= 0) {
1846 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1847 
1848 		if (rx_q->xsk_pool)
1849 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1850 		else
1851 			dma_free_rx_skbufs(priv, dma_conf, queue);
1852 
1853 		rx_q->buf_alloc_num = 0;
1854 		rx_q->xsk_pool = NULL;
1855 
1856 		queue--;
1857 	}
1858 
1859 	return ret;
1860 }
1861 
1862 /**
1863  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1864  * @priv: driver private structure
1865  * @dma_conf: structure to take the dma data
1866  * @queue: TX queue index
1867  * Description: this function initializes the DMA TX descriptors
1868  * and allocates the socket buffers. It supports the chained and ring
1869  * modes.
1870  */
1871 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1872 				    struct stmmac_dma_conf *dma_conf,
1873 				    u32 queue)
1874 {
1875 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1876 	int i;
1877 
1878 	netif_dbg(priv, probe, priv->dev,
1879 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1880 		  (u32)tx_q->dma_tx_phy);
1881 
1882 	/* Setup the chained descriptor addresses */
1883 	if (priv->mode == STMMAC_CHAIN_MODE) {
1884 		if (priv->extend_desc)
1885 			stmmac_mode_init(priv, tx_q->dma_etx,
1886 					 tx_q->dma_tx_phy,
1887 					 dma_conf->dma_tx_size, 1);
1888 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1889 			stmmac_mode_init(priv, tx_q->dma_tx,
1890 					 tx_q->dma_tx_phy,
1891 					 dma_conf->dma_tx_size, 0);
1892 	}
1893 
1894 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1895 
1896 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1897 		struct dma_desc *p;
1898 
1899 		if (priv->extend_desc)
1900 			p = &((tx_q->dma_etx + i)->basic);
1901 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1902 			p = &((tx_q->dma_entx + i)->basic);
1903 		else
1904 			p = tx_q->dma_tx + i;
1905 
1906 		stmmac_clear_desc(priv, p);
1907 
1908 		tx_q->tx_skbuff_dma[i].buf = 0;
1909 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1910 		tx_q->tx_skbuff_dma[i].len = 0;
1911 		tx_q->tx_skbuff_dma[i].last_segment = false;
1912 		tx_q->tx_skbuff[i] = NULL;
1913 	}
1914 
1915 	return 0;
1916 }
1917 
1918 static int init_dma_tx_desc_rings(struct net_device *dev,
1919 				  struct stmmac_dma_conf *dma_conf)
1920 {
1921 	struct stmmac_priv *priv = netdev_priv(dev);
1922 	u32 tx_queue_cnt;
1923 	u32 queue;
1924 
1925 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1926 
1927 	for (queue = 0; queue < tx_queue_cnt; queue++)
1928 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1929 
1930 	return 0;
1931 }
1932 
1933 /**
1934  * init_dma_desc_rings - init the RX/TX descriptor rings
1935  * @dev: net device structure
1936  * @dma_conf: structure to take the dma data
1937  * @flags: gfp flag.
1938  * Description: this function initializes the DMA RX/TX descriptors
1939  * and allocates the socket buffers. It supports the chained and ring
1940  * modes.
1941  */
1942 static int init_dma_desc_rings(struct net_device *dev,
1943 			       struct stmmac_dma_conf *dma_conf,
1944 			       gfp_t flags)
1945 {
1946 	struct stmmac_priv *priv = netdev_priv(dev);
1947 	int ret;
1948 
1949 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1950 	if (ret)
1951 		return ret;
1952 
1953 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1954 
1955 	stmmac_clear_descriptors(priv, dma_conf);
1956 
1957 	if (netif_msg_hw(priv))
1958 		stmmac_display_rings(priv, dma_conf);
1959 
1960 	return ret;
1961 }
1962 
1963 /**
1964  * dma_free_tx_skbufs - free TX dma buffers
1965  * @priv: private structure
1966  * @dma_conf: structure to take the dma data
1967  * @queue: TX queue index
1968  */
1969 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1970 			       struct stmmac_dma_conf *dma_conf,
1971 			       u32 queue)
1972 {
1973 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1974 	int i;
1975 
1976 	tx_q->xsk_frames_done = 0;
1977 
1978 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1979 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1980 
1981 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1982 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1983 		tx_q->xsk_frames_done = 0;
1984 		tx_q->xsk_pool = NULL;
1985 	}
1986 }
1987 
1988 /**
1989  * stmmac_free_tx_skbufs - free TX skb buffers
1990  * @priv: private structure
1991  */
1992 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1993 {
1994 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1995 	u32 queue;
1996 
1997 	for (queue = 0; queue < tx_queue_cnt; queue++)
1998 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1999 }
2000 
2001 /**
2002  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
2003  * @priv: private structure
2004  * @dma_conf: structure to take the dma data
2005  * @queue: RX queue index
2006  */
2007 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
2008 					 struct stmmac_dma_conf *dma_conf,
2009 					 u32 queue)
2010 {
2011 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2012 
2013 	/* Release the DMA RX socket buffers */
2014 	if (rx_q->xsk_pool)
2015 		dma_free_rx_xskbufs(priv, dma_conf, queue);
2016 	else
2017 		dma_free_rx_skbufs(priv, dma_conf, queue);
2018 
2019 	rx_q->buf_alloc_num = 0;
2020 	rx_q->xsk_pool = NULL;
2021 
2022 	/* Free DMA regions of consistent memory previously allocated */
2023 	if (!priv->extend_desc)
2024 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2025 				  sizeof(struct dma_desc),
2026 				  rx_q->dma_rx, rx_q->dma_rx_phy);
2027 	else
2028 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2029 				  sizeof(struct dma_extended_desc),
2030 				  rx_q->dma_erx, rx_q->dma_rx_phy);
2031 
2032 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
2033 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
2034 
2035 	kfree(rx_q->buf_pool);
2036 	if (rx_q->page_pool)
2037 		page_pool_destroy(rx_q->page_pool);
2038 }
2039 
2040 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2041 				       struct stmmac_dma_conf *dma_conf)
2042 {
2043 	u32 rx_count = priv->plat->rx_queues_to_use;
2044 	u32 queue;
2045 
2046 	/* Free RX queue resources */
2047 	for (queue = 0; queue < rx_count; queue++)
2048 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
2049 }
2050 
2051 /**
2052  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2053  * @priv: private structure
2054  * @dma_conf: structure to take the dma data
2055  * @queue: TX queue index
2056  */
2057 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2058 					 struct stmmac_dma_conf *dma_conf,
2059 					 u32 queue)
2060 {
2061 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2062 	size_t size;
2063 	void *addr;
2064 
2065 	/* Release the DMA TX socket buffers */
2066 	dma_free_tx_skbufs(priv, dma_conf, queue);
2067 
2068 	if (priv->extend_desc) {
2069 		size = sizeof(struct dma_extended_desc);
2070 		addr = tx_q->dma_etx;
2071 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2072 		size = sizeof(struct dma_edesc);
2073 		addr = tx_q->dma_entx;
2074 	} else {
2075 		size = sizeof(struct dma_desc);
2076 		addr = tx_q->dma_tx;
2077 	}
2078 
2079 	size *= dma_conf->dma_tx_size;
2080 
2081 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2082 
2083 	kfree(tx_q->tx_skbuff_dma);
2084 	kfree(tx_q->tx_skbuff);
2085 }
2086 
2087 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2088 				       struct stmmac_dma_conf *dma_conf)
2089 {
2090 	u32 tx_count = priv->plat->tx_queues_to_use;
2091 	u32 queue;
2092 
2093 	/* Free TX queue resources */
2094 	for (queue = 0; queue < tx_count; queue++)
2095 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2096 }
2097 
2098 /**
2099  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2100  * @priv: private structure
2101  * @dma_conf: structure to take the dma data
2102  * @queue: RX queue index
2103  * Description: according to which descriptor can be used (extend or basic)
2104  * this function allocates the resources for TX and RX paths. In case of
2105  * reception, for example, it pre-allocated the RX socket buffer in order to
2106  * allow zero-copy mechanism.
2107  */
2108 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2109 					 struct stmmac_dma_conf *dma_conf,
2110 					 u32 queue)
2111 {
2112 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2113 	struct stmmac_channel *ch = &priv->channel[queue];
2114 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2115 	struct page_pool_params pp_params = { 0 };
2116 	unsigned int dma_buf_sz_pad, num_pages;
2117 	unsigned int napi_id;
2118 	int ret;
2119 
2120 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2121 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2122 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2123 
2124 	rx_q->queue_index = queue;
2125 	rx_q->priv_data = priv;
2126 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2127 
2128 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2129 	pp_params.pool_size = dma_conf->dma_rx_size;
2130 	pp_params.order = order_base_2(num_pages);
2131 	pp_params.nid = dev_to_node(priv->device);
2132 	pp_params.dev = priv->device;
2133 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2134 	pp_params.offset = stmmac_rx_offset(priv);
2135 	pp_params.max_len = dma_conf->dma_buf_sz;
2136 
2137 	if (priv->sph) {
2138 		pp_params.offset = 0;
2139 		pp_params.max_len += stmmac_rx_offset(priv);
2140 	}
2141 
2142 	rx_q->page_pool = page_pool_create(&pp_params);
2143 	if (IS_ERR(rx_q->page_pool)) {
2144 		ret = PTR_ERR(rx_q->page_pool);
2145 		rx_q->page_pool = NULL;
2146 		return ret;
2147 	}
2148 
2149 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2150 				 sizeof(*rx_q->buf_pool),
2151 				 GFP_KERNEL);
2152 	if (!rx_q->buf_pool)
2153 		return -ENOMEM;
2154 
2155 	if (priv->extend_desc) {
2156 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2157 						   dma_conf->dma_rx_size *
2158 						   sizeof(struct dma_extended_desc),
2159 						   &rx_q->dma_rx_phy,
2160 						   GFP_KERNEL);
2161 		if (!rx_q->dma_erx)
2162 			return -ENOMEM;
2163 
2164 	} else {
2165 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2166 						  dma_conf->dma_rx_size *
2167 						  sizeof(struct dma_desc),
2168 						  &rx_q->dma_rx_phy,
2169 						  GFP_KERNEL);
2170 		if (!rx_q->dma_rx)
2171 			return -ENOMEM;
2172 	}
2173 
2174 	if (stmmac_xdp_is_enabled(priv) &&
2175 	    test_bit(queue, priv->af_xdp_zc_qps))
2176 		napi_id = ch->rxtx_napi.napi_id;
2177 	else
2178 		napi_id = ch->rx_napi.napi_id;
2179 
2180 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2181 			       rx_q->queue_index,
2182 			       napi_id);
2183 	if (ret) {
2184 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2185 		return -EINVAL;
2186 	}
2187 
2188 	return 0;
2189 }
2190 
2191 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2192 				       struct stmmac_dma_conf *dma_conf)
2193 {
2194 	u32 rx_count = priv->plat->rx_queues_to_use;
2195 	u32 queue;
2196 	int ret;
2197 
2198 	/* RX queues buffers and DMA */
2199 	for (queue = 0; queue < rx_count; queue++) {
2200 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2201 		if (ret)
2202 			goto err_dma;
2203 	}
2204 
2205 	return 0;
2206 
2207 err_dma:
2208 	free_dma_rx_desc_resources(priv, dma_conf);
2209 
2210 	return ret;
2211 }
2212 
2213 /**
2214  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2215  * @priv: private structure
2216  * @dma_conf: structure to take the dma data
2217  * @queue: TX queue index
2218  * Description: according to which descriptor can be used (extend or basic)
2219  * this function allocates the resources for TX and RX paths. In case of
2220  * reception, for example, it pre-allocated the RX socket buffer in order to
2221  * allow zero-copy mechanism.
2222  */
2223 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2224 					 struct stmmac_dma_conf *dma_conf,
2225 					 u32 queue)
2226 {
2227 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2228 	size_t size;
2229 	void *addr;
2230 
2231 	tx_q->queue_index = queue;
2232 	tx_q->priv_data = priv;
2233 
2234 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2235 				      sizeof(*tx_q->tx_skbuff_dma),
2236 				      GFP_KERNEL);
2237 	if (!tx_q->tx_skbuff_dma)
2238 		return -ENOMEM;
2239 
2240 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2241 				  sizeof(struct sk_buff *),
2242 				  GFP_KERNEL);
2243 	if (!tx_q->tx_skbuff)
2244 		return -ENOMEM;
2245 
2246 	if (priv->extend_desc)
2247 		size = sizeof(struct dma_extended_desc);
2248 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2249 		size = sizeof(struct dma_edesc);
2250 	else
2251 		size = sizeof(struct dma_desc);
2252 
2253 	size *= dma_conf->dma_tx_size;
2254 
2255 	addr = dma_alloc_coherent(priv->device, size,
2256 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2257 	if (!addr)
2258 		return -ENOMEM;
2259 
2260 	if (priv->extend_desc)
2261 		tx_q->dma_etx = addr;
2262 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2263 		tx_q->dma_entx = addr;
2264 	else
2265 		tx_q->dma_tx = addr;
2266 
2267 	return 0;
2268 }
2269 
2270 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2271 				       struct stmmac_dma_conf *dma_conf)
2272 {
2273 	u32 tx_count = priv->plat->tx_queues_to_use;
2274 	u32 queue;
2275 	int ret;
2276 
2277 	/* TX queues buffers and DMA */
2278 	for (queue = 0; queue < tx_count; queue++) {
2279 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2280 		if (ret)
2281 			goto err_dma;
2282 	}
2283 
2284 	return 0;
2285 
2286 err_dma:
2287 	free_dma_tx_desc_resources(priv, dma_conf);
2288 	return ret;
2289 }
2290 
2291 /**
2292  * alloc_dma_desc_resources - alloc TX/RX resources.
2293  * @priv: private structure
2294  * @dma_conf: structure to take the dma data
2295  * Description: according to which descriptor can be used (extend or basic)
2296  * this function allocates the resources for TX and RX paths. In case of
2297  * reception, for example, it pre-allocated the RX socket buffer in order to
2298  * allow zero-copy mechanism.
2299  */
2300 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2301 				    struct stmmac_dma_conf *dma_conf)
2302 {
2303 	/* RX Allocation */
2304 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2305 
2306 	if (ret)
2307 		return ret;
2308 
2309 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2310 
2311 	return ret;
2312 }
2313 
2314 /**
2315  * free_dma_desc_resources - free dma desc resources
2316  * @priv: private structure
2317  * @dma_conf: structure to take the dma data
2318  */
2319 static void free_dma_desc_resources(struct stmmac_priv *priv,
2320 				    struct stmmac_dma_conf *dma_conf)
2321 {
2322 	/* Release the DMA TX socket buffers */
2323 	free_dma_tx_desc_resources(priv, dma_conf);
2324 
2325 	/* Release the DMA RX socket buffers later
2326 	 * to ensure all pending XDP_TX buffers are returned.
2327 	 */
2328 	free_dma_rx_desc_resources(priv, dma_conf);
2329 }
2330 
2331 /**
2332  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2333  *  @priv: driver private structure
2334  *  Description: It is used for enabling the rx queues in the MAC
2335  */
2336 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2337 {
2338 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2339 	int queue;
2340 	u8 mode;
2341 
2342 	for (queue = 0; queue < rx_queues_count; queue++) {
2343 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2344 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2345 	}
2346 }
2347 
2348 /**
2349  * stmmac_start_rx_dma - start RX DMA channel
2350  * @priv: driver private structure
2351  * @chan: RX channel index
2352  * Description:
2353  * This starts a RX DMA channel
2354  */
2355 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2356 {
2357 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2358 	stmmac_start_rx(priv, priv->ioaddr, chan);
2359 }
2360 
2361 /**
2362  * stmmac_start_tx_dma - start TX DMA channel
2363  * @priv: driver private structure
2364  * @chan: TX channel index
2365  * Description:
2366  * This starts a TX DMA channel
2367  */
2368 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2369 {
2370 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2371 	stmmac_start_tx(priv, priv->ioaddr, chan);
2372 }
2373 
2374 /**
2375  * stmmac_stop_rx_dma - stop RX DMA channel
2376  * @priv: driver private structure
2377  * @chan: RX channel index
2378  * Description:
2379  * This stops a RX DMA channel
2380  */
2381 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2382 {
2383 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2384 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2385 }
2386 
2387 /**
2388  * stmmac_stop_tx_dma - stop TX DMA channel
2389  * @priv: driver private structure
2390  * @chan: TX channel index
2391  * Description:
2392  * This stops a TX DMA channel
2393  */
2394 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2395 {
2396 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2397 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2398 }
2399 
2400 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2401 {
2402 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2403 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2404 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2405 	u32 chan;
2406 
2407 	for (chan = 0; chan < dma_csr_ch; chan++) {
2408 		struct stmmac_channel *ch = &priv->channel[chan];
2409 		unsigned long flags;
2410 
2411 		spin_lock_irqsave(&ch->lock, flags);
2412 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2413 		spin_unlock_irqrestore(&ch->lock, flags);
2414 	}
2415 }
2416 
2417 /**
2418  * stmmac_start_all_dma - start all RX and TX DMA channels
2419  * @priv: driver private structure
2420  * Description:
2421  * This starts all the RX and TX DMA channels
2422  */
2423 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2424 {
2425 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2426 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2427 	u32 chan = 0;
2428 
2429 	for (chan = 0; chan < rx_channels_count; chan++)
2430 		stmmac_start_rx_dma(priv, chan);
2431 
2432 	for (chan = 0; chan < tx_channels_count; chan++)
2433 		stmmac_start_tx_dma(priv, chan);
2434 }
2435 
2436 /**
2437  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2438  * @priv: driver private structure
2439  * Description:
2440  * This stops the RX and TX DMA channels
2441  */
2442 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2443 {
2444 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2445 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2446 	u32 chan = 0;
2447 
2448 	for (chan = 0; chan < rx_channels_count; chan++)
2449 		stmmac_stop_rx_dma(priv, chan);
2450 
2451 	for (chan = 0; chan < tx_channels_count; chan++)
2452 		stmmac_stop_tx_dma(priv, chan);
2453 }
2454 
2455 /**
2456  *  stmmac_dma_operation_mode - HW DMA operation mode
2457  *  @priv: driver private structure
2458  *  Description: it is used for configuring the DMA operation mode register in
2459  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2460  */
2461 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2462 {
2463 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2464 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2465 	int rxfifosz = priv->plat->rx_fifo_size;
2466 	int txfifosz = priv->plat->tx_fifo_size;
2467 	u32 txmode = 0;
2468 	u32 rxmode = 0;
2469 	u32 chan = 0;
2470 	u8 qmode = 0;
2471 
2472 	if (rxfifosz == 0)
2473 		rxfifosz = priv->dma_cap.rx_fifo_size;
2474 	if (txfifosz == 0)
2475 		txfifosz = priv->dma_cap.tx_fifo_size;
2476 
2477 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2478 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2479 		rxfifosz /= rx_channels_count;
2480 		txfifosz /= tx_channels_count;
2481 	}
2482 
2483 	if (priv->plat->force_thresh_dma_mode) {
2484 		txmode = tc;
2485 		rxmode = tc;
2486 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2487 		/*
2488 		 * In case of GMAC, SF mode can be enabled
2489 		 * to perform the TX COE in HW. This depends on:
2490 		 * 1) TX COE if actually supported
2491 		 * 2) There is no bugged Jumbo frame support
2492 		 *    that needs to not insert csum in the TDES.
2493 		 */
2494 		txmode = SF_DMA_MODE;
2495 		rxmode = SF_DMA_MODE;
2496 		priv->xstats.threshold = SF_DMA_MODE;
2497 	} else {
2498 		txmode = tc;
2499 		rxmode = SF_DMA_MODE;
2500 	}
2501 
2502 	/* configure all channels */
2503 	for (chan = 0; chan < rx_channels_count; chan++) {
2504 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2505 		u32 buf_size;
2506 
2507 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2508 
2509 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2510 				rxfifosz, qmode);
2511 
2512 		if (rx_q->xsk_pool) {
2513 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2514 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2515 					      buf_size,
2516 					      chan);
2517 		} else {
2518 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2519 					      priv->dma_conf.dma_buf_sz,
2520 					      chan);
2521 		}
2522 	}
2523 
2524 	for (chan = 0; chan < tx_channels_count; chan++) {
2525 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2526 
2527 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2528 				txfifosz, qmode);
2529 	}
2530 }
2531 
2532 static void stmmac_xsk_request_timestamp(void *_priv)
2533 {
2534 	struct stmmac_metadata_request *meta_req = _priv;
2535 
2536 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2537 	*meta_req->set_ic = true;
2538 }
2539 
2540 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2541 {
2542 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2543 	struct stmmac_priv *priv = tx_compl->priv;
2544 	struct dma_desc *desc = tx_compl->desc;
2545 	bool found = false;
2546 	u64 ns = 0;
2547 
2548 	if (!priv->hwts_tx_en)
2549 		return 0;
2550 
2551 	/* check tx tstamp status */
2552 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2553 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2554 		found = true;
2555 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2556 		found = true;
2557 	}
2558 
2559 	if (found) {
2560 		ns -= priv->plat->cdc_error_adj;
2561 		return ns_to_ktime(ns);
2562 	}
2563 
2564 	return 0;
2565 }
2566 
2567 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2568 {
2569 	struct timespec64 ts = ns_to_timespec64(launch_time);
2570 	struct stmmac_metadata_request *meta_req = _priv;
2571 
2572 	if (meta_req->tbs & STMMAC_TBS_EN)
2573 		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2574 				    ts.tv_nsec);
2575 }
2576 
2577 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2578 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2579 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2580 	.tmo_request_launch_time	= stmmac_xsk_request_launch_time,
2581 };
2582 
2583 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2584 {
2585 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2586 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2587 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2588 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2589 	unsigned int entry = tx_q->cur_tx;
2590 	struct dma_desc *tx_desc = NULL;
2591 	struct xdp_desc xdp_desc;
2592 	bool work_done = true;
2593 	u32 tx_set_ic_bit = 0;
2594 
2595 	/* Avoids TX time-out as we are sharing with slow path */
2596 	txq_trans_cond_update(nq);
2597 
2598 	budget = min(budget, stmmac_tx_avail(priv, queue));
2599 
2600 	for (; budget > 0; budget--) {
2601 		struct stmmac_metadata_request meta_req;
2602 		struct xsk_tx_metadata *meta = NULL;
2603 		dma_addr_t dma_addr;
2604 		bool set_ic;
2605 
2606 		/* We are sharing with slow path and stop XSK TX desc submission when
2607 		 * available TX ring is less than threshold.
2608 		 */
2609 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2610 		    !netif_carrier_ok(priv->dev)) {
2611 			work_done = false;
2612 			break;
2613 		}
2614 
2615 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2616 			break;
2617 
2618 		if (priv->est && priv->est->enable &&
2619 		    priv->est->max_sdu[queue] &&
2620 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2621 			priv->xstats.max_sdu_txq_drop[queue]++;
2622 			continue;
2623 		}
2624 
2625 		if (likely(priv->extend_desc))
2626 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2627 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2628 			tx_desc = &tx_q->dma_entx[entry].basic;
2629 		else
2630 			tx_desc = tx_q->dma_tx + entry;
2631 
2632 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2633 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2634 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2635 
2636 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2637 
2638 		/* To return XDP buffer to XSK pool, we simple call
2639 		 * xsk_tx_completed(), so we don't need to fill up
2640 		 * 'buf' and 'xdpf'.
2641 		 */
2642 		tx_q->tx_skbuff_dma[entry].buf = 0;
2643 		tx_q->xdpf[entry] = NULL;
2644 
2645 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2646 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2647 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2648 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2649 
2650 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2651 
2652 		tx_q->tx_count_frames++;
2653 
2654 		if (!priv->tx_coal_frames[queue])
2655 			set_ic = false;
2656 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2657 			set_ic = true;
2658 		else
2659 			set_ic = false;
2660 
2661 		meta_req.priv = priv;
2662 		meta_req.tx_desc = tx_desc;
2663 		meta_req.set_ic = &set_ic;
2664 		meta_req.tbs = tx_q->tbs;
2665 		meta_req.edesc = &tx_q->dma_entx[entry];
2666 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2667 					&meta_req);
2668 		if (set_ic) {
2669 			tx_q->tx_count_frames = 0;
2670 			stmmac_set_tx_ic(priv, tx_desc);
2671 			tx_set_ic_bit++;
2672 		}
2673 
2674 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2675 				       true, priv->mode, true, true,
2676 				       xdp_desc.len);
2677 
2678 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2679 
2680 		xsk_tx_metadata_to_compl(meta,
2681 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2682 
2683 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2684 		entry = tx_q->cur_tx;
2685 	}
2686 	u64_stats_update_begin(&txq_stats->napi_syncp);
2687 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2688 	u64_stats_update_end(&txq_stats->napi_syncp);
2689 
2690 	if (tx_desc) {
2691 		stmmac_flush_tx_descriptors(priv, queue);
2692 		xsk_tx_release(pool);
2693 	}
2694 
2695 	/* Return true if all of the 3 conditions are met
2696 	 *  a) TX Budget is still available
2697 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2698 	 *     pending XSK TX for transmission)
2699 	 */
2700 	return !!budget && work_done;
2701 }
2702 
2703 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2704 {
2705 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2706 		tc += 64;
2707 
2708 		if (priv->plat->force_thresh_dma_mode)
2709 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2710 		else
2711 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2712 						      chan);
2713 
2714 		priv->xstats.threshold = tc;
2715 	}
2716 }
2717 
2718 /**
2719  * stmmac_tx_clean - to manage the transmission completion
2720  * @priv: driver private structure
2721  * @budget: napi budget limiting this functions packet handling
2722  * @queue: TX queue index
2723  * @pending_packets: signal to arm the TX coal timer
2724  * Description: it reclaims the transmit resources after transmission completes.
2725  * If some packets still needs to be handled, due to TX coalesce, set
2726  * pending_packets to true to make NAPI arm the TX coal timer.
2727  */
2728 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2729 			   bool *pending_packets)
2730 {
2731 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2732 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2733 	unsigned int bytes_compl = 0, pkts_compl = 0;
2734 	unsigned int entry, xmits = 0, count = 0;
2735 	u32 tx_packets = 0, tx_errors = 0;
2736 
2737 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2738 
2739 	tx_q->xsk_frames_done = 0;
2740 
2741 	entry = tx_q->dirty_tx;
2742 
2743 	/* Try to clean all TX complete frame in 1 shot */
2744 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2745 		struct xdp_frame *xdpf;
2746 		struct sk_buff *skb;
2747 		struct dma_desc *p;
2748 		int status;
2749 
2750 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2751 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2752 			xdpf = tx_q->xdpf[entry];
2753 			skb = NULL;
2754 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2755 			xdpf = NULL;
2756 			skb = tx_q->tx_skbuff[entry];
2757 		} else {
2758 			xdpf = NULL;
2759 			skb = NULL;
2760 		}
2761 
2762 		if (priv->extend_desc)
2763 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2764 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2765 			p = &tx_q->dma_entx[entry].basic;
2766 		else
2767 			p = tx_q->dma_tx + entry;
2768 
2769 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2770 		/* Check if the descriptor is owned by the DMA */
2771 		if (unlikely(status & tx_dma_own))
2772 			break;
2773 
2774 		count++;
2775 
2776 		/* Make sure descriptor fields are read after reading
2777 		 * the own bit.
2778 		 */
2779 		dma_rmb();
2780 
2781 		/* Just consider the last segment and ...*/
2782 		if (likely(!(status & tx_not_ls))) {
2783 			/* ... verify the status error condition */
2784 			if (unlikely(status & tx_err)) {
2785 				tx_errors++;
2786 				if (unlikely(status & tx_err_bump_tc))
2787 					stmmac_bump_dma_threshold(priv, queue);
2788 			} else {
2789 				tx_packets++;
2790 			}
2791 			if (skb) {
2792 				stmmac_get_tx_hwtstamp(priv, p, skb);
2793 			} else if (tx_q->xsk_pool &&
2794 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2795 				struct stmmac_xsk_tx_complete tx_compl = {
2796 					.priv = priv,
2797 					.desc = p,
2798 				};
2799 
2800 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2801 							 &stmmac_xsk_tx_metadata_ops,
2802 							 &tx_compl);
2803 			}
2804 		}
2805 
2806 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2807 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2808 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2809 				dma_unmap_page(priv->device,
2810 					       tx_q->tx_skbuff_dma[entry].buf,
2811 					       tx_q->tx_skbuff_dma[entry].len,
2812 					       DMA_TO_DEVICE);
2813 			else
2814 				dma_unmap_single(priv->device,
2815 						 tx_q->tx_skbuff_dma[entry].buf,
2816 						 tx_q->tx_skbuff_dma[entry].len,
2817 						 DMA_TO_DEVICE);
2818 			tx_q->tx_skbuff_dma[entry].buf = 0;
2819 			tx_q->tx_skbuff_dma[entry].len = 0;
2820 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2821 		}
2822 
2823 		stmmac_clean_desc3(priv, tx_q, p);
2824 
2825 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2826 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2827 
2828 		if (xdpf &&
2829 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2830 			xdp_return_frame_rx_napi(xdpf);
2831 			tx_q->xdpf[entry] = NULL;
2832 		}
2833 
2834 		if (xdpf &&
2835 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2836 			xdp_return_frame(xdpf);
2837 			tx_q->xdpf[entry] = NULL;
2838 		}
2839 
2840 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2841 			tx_q->xsk_frames_done++;
2842 
2843 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2844 			if (likely(skb)) {
2845 				pkts_compl++;
2846 				bytes_compl += skb->len;
2847 				dev_consume_skb_any(skb);
2848 				tx_q->tx_skbuff[entry] = NULL;
2849 			}
2850 		}
2851 
2852 		stmmac_release_tx_desc(priv, p, priv->mode);
2853 
2854 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2855 	}
2856 	tx_q->dirty_tx = entry;
2857 
2858 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2859 				  pkts_compl, bytes_compl);
2860 
2861 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2862 								queue))) &&
2863 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2864 
2865 		netif_dbg(priv, tx_done, priv->dev,
2866 			  "%s: restart transmit\n", __func__);
2867 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2868 	}
2869 
2870 	if (tx_q->xsk_pool) {
2871 		bool work_done;
2872 
2873 		if (tx_q->xsk_frames_done)
2874 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2875 
2876 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2877 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2878 
2879 		/* For XSK TX, we try to send as many as possible.
2880 		 * If XSK work done (XSK TX desc empty and budget still
2881 		 * available), return "budget - 1" to reenable TX IRQ.
2882 		 * Else, return "budget" to make NAPI continue polling.
2883 		 */
2884 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2885 					       STMMAC_XSK_TX_BUDGET_MAX);
2886 		if (work_done)
2887 			xmits = budget - 1;
2888 		else
2889 			xmits = budget;
2890 	}
2891 
2892 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2893 		stmmac_restart_sw_lpi_timer(priv);
2894 
2895 	/* We still have pending packets, let's call for a new scheduling */
2896 	if (tx_q->dirty_tx != tx_q->cur_tx)
2897 		*pending_packets = true;
2898 
2899 	u64_stats_update_begin(&txq_stats->napi_syncp);
2900 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2901 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2902 	u64_stats_inc(&txq_stats->napi.tx_clean);
2903 	u64_stats_update_end(&txq_stats->napi_syncp);
2904 
2905 	priv->xstats.tx_errors += tx_errors;
2906 
2907 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2908 
2909 	/* Combine decisions from TX clean and XSK TX */
2910 	return max(count, xmits);
2911 }
2912 
2913 /**
2914  * stmmac_tx_err - to manage the tx error
2915  * @priv: driver private structure
2916  * @chan: channel index
2917  * Description: it cleans the descriptors and restarts the transmission
2918  * in case of transmission errors.
2919  */
2920 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2921 {
2922 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2923 
2924 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2925 
2926 	stmmac_stop_tx_dma(priv, chan);
2927 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2928 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2929 	stmmac_reset_tx_queue(priv, chan);
2930 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2931 			    tx_q->dma_tx_phy, chan);
2932 	stmmac_start_tx_dma(priv, chan);
2933 
2934 	priv->xstats.tx_errors++;
2935 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2936 }
2937 
2938 /**
2939  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2940  *  @priv: driver private structure
2941  *  @txmode: TX operating mode
2942  *  @rxmode: RX operating mode
2943  *  @chan: channel index
2944  *  Description: it is used for configuring of the DMA operation mode in
2945  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2946  *  mode.
2947  */
2948 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2949 					  u32 rxmode, u32 chan)
2950 {
2951 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2952 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2953 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2954 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2955 	int rxfifosz = priv->plat->rx_fifo_size;
2956 	int txfifosz = priv->plat->tx_fifo_size;
2957 
2958 	if (rxfifosz == 0)
2959 		rxfifosz = priv->dma_cap.rx_fifo_size;
2960 	if (txfifosz == 0)
2961 		txfifosz = priv->dma_cap.tx_fifo_size;
2962 
2963 	/* Adjust for real per queue fifo size */
2964 	rxfifosz /= rx_channels_count;
2965 	txfifosz /= tx_channels_count;
2966 
2967 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2968 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2969 }
2970 
2971 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2972 {
2973 	int ret;
2974 
2975 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2976 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2977 	if (ret && (ret != -EINVAL)) {
2978 		stmmac_global_err(priv);
2979 		return true;
2980 	}
2981 
2982 	return false;
2983 }
2984 
2985 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2986 {
2987 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2988 						 &priv->xstats, chan, dir);
2989 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2990 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2991 	struct stmmac_channel *ch = &priv->channel[chan];
2992 	struct napi_struct *rx_napi;
2993 	struct napi_struct *tx_napi;
2994 	unsigned long flags;
2995 
2996 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2997 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2998 
2999 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
3000 		if (napi_schedule_prep(rx_napi)) {
3001 			spin_lock_irqsave(&ch->lock, flags);
3002 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
3003 			spin_unlock_irqrestore(&ch->lock, flags);
3004 			__napi_schedule(rx_napi);
3005 		}
3006 	}
3007 
3008 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
3009 		if (napi_schedule_prep(tx_napi)) {
3010 			spin_lock_irqsave(&ch->lock, flags);
3011 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3012 			spin_unlock_irqrestore(&ch->lock, flags);
3013 			__napi_schedule(tx_napi);
3014 		}
3015 	}
3016 
3017 	return status;
3018 }
3019 
3020 /**
3021  * stmmac_dma_interrupt - DMA ISR
3022  * @priv: driver private structure
3023  * Description: this is the DMA ISR. It is called by the main ISR.
3024  * It calls the dwmac dma routine and schedule poll method in case of some
3025  * work can be done.
3026  */
3027 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
3028 {
3029 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3030 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3031 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
3032 				tx_channel_count : rx_channel_count;
3033 	u32 chan;
3034 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
3035 
3036 	/* Make sure we never check beyond our status buffer. */
3037 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
3038 		channels_to_check = ARRAY_SIZE(status);
3039 
3040 	for (chan = 0; chan < channels_to_check; chan++)
3041 		status[chan] = stmmac_napi_check(priv, chan,
3042 						 DMA_DIR_RXTX);
3043 
3044 	for (chan = 0; chan < tx_channel_count; chan++) {
3045 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
3046 			/* Try to bump up the dma threshold on this failure */
3047 			stmmac_bump_dma_threshold(priv, chan);
3048 		} else if (unlikely(status[chan] == tx_hard_error)) {
3049 			stmmac_tx_err(priv, chan);
3050 		}
3051 	}
3052 }
3053 
3054 /**
3055  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
3056  * @priv: driver private structure
3057  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
3058  */
3059 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3060 {
3061 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3062 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3063 
3064 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3065 
3066 	if (priv->dma_cap.rmon) {
3067 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3068 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3069 	} else
3070 		netdev_info(priv->dev, "No MAC Management Counters available\n");
3071 }
3072 
3073 /**
3074  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3075  * @priv: driver private structure
3076  * Description:
3077  *  new GMAC chip generations have a new register to indicate the
3078  *  presence of the optional feature/functions.
3079  *  This can be also used to override the value passed through the
3080  *  platform and necessary for old MAC10/100 and GMAC chips.
3081  */
3082 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3083 {
3084 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3085 }
3086 
3087 /**
3088  * stmmac_check_ether_addr - check if the MAC addr is valid
3089  * @priv: driver private structure
3090  * Description:
3091  * it is to verify if the MAC address is valid, in case of failures it
3092  * generates a random MAC address
3093  */
3094 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3095 {
3096 	u8 addr[ETH_ALEN];
3097 
3098 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3099 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3100 		if (is_valid_ether_addr(addr))
3101 			eth_hw_addr_set(priv->dev, addr);
3102 		else
3103 			eth_hw_addr_random(priv->dev);
3104 		dev_info(priv->device, "device MAC address %pM\n",
3105 			 priv->dev->dev_addr);
3106 	}
3107 }
3108 
3109 /**
3110  * stmmac_init_dma_engine - DMA init.
3111  * @priv: driver private structure
3112  * Description:
3113  * It inits the DMA invoking the specific MAC/GMAC callback.
3114  * Some DMA parameters can be passed from the platform;
3115  * in case of these are not passed a default is kept for the MAC or GMAC.
3116  */
3117 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3118 {
3119 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3120 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3121 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3122 	struct stmmac_rx_queue *rx_q;
3123 	struct stmmac_tx_queue *tx_q;
3124 	u32 chan = 0;
3125 	int ret = 0;
3126 
3127 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3128 		netdev_err(priv->dev, "Invalid DMA configuration\n");
3129 		return -EINVAL;
3130 	}
3131 
3132 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3133 		priv->plat->dma_cfg->atds = 1;
3134 
3135 	ret = stmmac_reset(priv, priv->ioaddr);
3136 	if (ret) {
3137 		netdev_err(priv->dev, "Failed to reset the dma\n");
3138 		return ret;
3139 	}
3140 
3141 	/* DMA Configuration */
3142 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3143 
3144 	if (priv->plat->axi)
3145 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3146 
3147 	/* DMA CSR Channel configuration */
3148 	for (chan = 0; chan < dma_csr_ch; chan++) {
3149 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3150 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3151 	}
3152 
3153 	/* DMA RX Channel Configuration */
3154 	for (chan = 0; chan < rx_channels_count; chan++) {
3155 		rx_q = &priv->dma_conf.rx_queue[chan];
3156 
3157 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3158 				    rx_q->dma_rx_phy, chan);
3159 
3160 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3161 				     (rx_q->buf_alloc_num *
3162 				      sizeof(struct dma_desc));
3163 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3164 				       rx_q->rx_tail_addr, chan);
3165 	}
3166 
3167 	/* DMA TX Channel Configuration */
3168 	for (chan = 0; chan < tx_channels_count; chan++) {
3169 		tx_q = &priv->dma_conf.tx_queue[chan];
3170 
3171 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3172 				    tx_q->dma_tx_phy, chan);
3173 
3174 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3175 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3176 				       tx_q->tx_tail_addr, chan);
3177 	}
3178 
3179 	return ret;
3180 }
3181 
3182 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3183 {
3184 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3185 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3186 	struct stmmac_channel *ch;
3187 	struct napi_struct *napi;
3188 
3189 	if (!tx_coal_timer)
3190 		return;
3191 
3192 	ch = &priv->channel[tx_q->queue_index];
3193 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3194 
3195 	/* Arm timer only if napi is not already scheduled.
3196 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3197 	 * again in the next scheduled napi.
3198 	 */
3199 	if (unlikely(!napi_is_scheduled(napi)))
3200 		hrtimer_start(&tx_q->txtimer,
3201 			      STMMAC_COAL_TIMER(tx_coal_timer),
3202 			      HRTIMER_MODE_REL);
3203 	else
3204 		hrtimer_try_to_cancel(&tx_q->txtimer);
3205 }
3206 
3207 /**
3208  * stmmac_tx_timer - mitigation sw timer for tx.
3209  * @t: data pointer
3210  * Description:
3211  * This is the timer handler to directly invoke the stmmac_tx_clean.
3212  */
3213 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3214 {
3215 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3216 	struct stmmac_priv *priv = tx_q->priv_data;
3217 	struct stmmac_channel *ch;
3218 	struct napi_struct *napi;
3219 
3220 	ch = &priv->channel[tx_q->queue_index];
3221 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3222 
3223 	if (likely(napi_schedule_prep(napi))) {
3224 		unsigned long flags;
3225 
3226 		spin_lock_irqsave(&ch->lock, flags);
3227 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3228 		spin_unlock_irqrestore(&ch->lock, flags);
3229 		__napi_schedule(napi);
3230 	}
3231 
3232 	return HRTIMER_NORESTART;
3233 }
3234 
3235 /**
3236  * stmmac_init_coalesce - init mitigation options.
3237  * @priv: driver private structure
3238  * Description:
3239  * This inits the coalesce parameters: i.e. timer rate,
3240  * timer handler and default threshold used for enabling the
3241  * interrupt on completion bit.
3242  */
3243 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3244 {
3245 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3246 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3247 	u32 chan;
3248 
3249 	for (chan = 0; chan < tx_channel_count; chan++) {
3250 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3251 
3252 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3253 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3254 
3255 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3256 	}
3257 
3258 	for (chan = 0; chan < rx_channel_count; chan++)
3259 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3260 }
3261 
3262 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3263 {
3264 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3265 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3266 	u32 chan;
3267 
3268 	/* set TX ring length */
3269 	for (chan = 0; chan < tx_channels_count; chan++)
3270 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3271 				       (priv->dma_conf.dma_tx_size - 1), chan);
3272 
3273 	/* set RX ring length */
3274 	for (chan = 0; chan < rx_channels_count; chan++)
3275 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3276 				       (priv->dma_conf.dma_rx_size - 1), chan);
3277 }
3278 
3279 /**
3280  *  stmmac_set_tx_queue_weight - Set TX queue weight
3281  *  @priv: driver private structure
3282  *  Description: It is used for setting TX queues weight
3283  */
3284 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3285 {
3286 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3287 	u32 weight;
3288 	u32 queue;
3289 
3290 	for (queue = 0; queue < tx_queues_count; queue++) {
3291 		weight = priv->plat->tx_queues_cfg[queue].weight;
3292 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3293 	}
3294 }
3295 
3296 /**
3297  *  stmmac_configure_cbs - Configure CBS in TX queue
3298  *  @priv: driver private structure
3299  *  Description: It is used for configuring CBS in AVB TX queues
3300  */
3301 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3302 {
3303 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3304 	u32 mode_to_use;
3305 	u32 queue;
3306 
3307 	/* queue 0 is reserved for legacy traffic */
3308 	for (queue = 1; queue < tx_queues_count; queue++) {
3309 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3310 		if (mode_to_use == MTL_QUEUE_DCB)
3311 			continue;
3312 
3313 		stmmac_config_cbs(priv, priv->hw,
3314 				priv->plat->tx_queues_cfg[queue].send_slope,
3315 				priv->plat->tx_queues_cfg[queue].idle_slope,
3316 				priv->plat->tx_queues_cfg[queue].high_credit,
3317 				priv->plat->tx_queues_cfg[queue].low_credit,
3318 				queue);
3319 	}
3320 }
3321 
3322 /**
3323  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3324  *  @priv: driver private structure
3325  *  Description: It is used for mapping RX queues to RX dma channels
3326  */
3327 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3328 {
3329 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3330 	u32 queue;
3331 	u32 chan;
3332 
3333 	for (queue = 0; queue < rx_queues_count; queue++) {
3334 		chan = priv->plat->rx_queues_cfg[queue].chan;
3335 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3336 	}
3337 }
3338 
3339 /**
3340  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3341  *  @priv: driver private structure
3342  *  Description: It is used for configuring the RX Queue Priority
3343  */
3344 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3345 {
3346 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3347 	u32 queue;
3348 	u32 prio;
3349 
3350 	for (queue = 0; queue < rx_queues_count; queue++) {
3351 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3352 			continue;
3353 
3354 		prio = priv->plat->rx_queues_cfg[queue].prio;
3355 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3356 	}
3357 }
3358 
3359 /**
3360  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3361  *  @priv: driver private structure
3362  *  Description: It is used for configuring the TX Queue Priority
3363  */
3364 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3365 {
3366 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3367 	u32 queue;
3368 	u32 prio;
3369 
3370 	for (queue = 0; queue < tx_queues_count; queue++) {
3371 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3372 			continue;
3373 
3374 		prio = priv->plat->tx_queues_cfg[queue].prio;
3375 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3376 	}
3377 }
3378 
3379 /**
3380  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3381  *  @priv: driver private structure
3382  *  Description: It is used for configuring the RX queue routing
3383  */
3384 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3385 {
3386 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3387 	u32 queue;
3388 	u8 packet;
3389 
3390 	for (queue = 0; queue < rx_queues_count; queue++) {
3391 		/* no specific packet type routing specified for the queue */
3392 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3393 			continue;
3394 
3395 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3396 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3397 	}
3398 }
3399 
3400 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3401 {
3402 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3403 		priv->rss.enable = false;
3404 		return;
3405 	}
3406 
3407 	if (priv->dev->features & NETIF_F_RXHASH)
3408 		priv->rss.enable = true;
3409 	else
3410 		priv->rss.enable = false;
3411 
3412 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3413 			     priv->plat->rx_queues_to_use);
3414 }
3415 
3416 /**
3417  *  stmmac_mtl_configuration - Configure MTL
3418  *  @priv: driver private structure
3419  *  Description: It is used for configurring MTL
3420  */
3421 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3422 {
3423 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3424 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3425 
3426 	if (tx_queues_count > 1)
3427 		stmmac_set_tx_queue_weight(priv);
3428 
3429 	/* Configure MTL RX algorithms */
3430 	if (rx_queues_count > 1)
3431 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3432 				priv->plat->rx_sched_algorithm);
3433 
3434 	/* Configure MTL TX algorithms */
3435 	if (tx_queues_count > 1)
3436 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3437 				priv->plat->tx_sched_algorithm);
3438 
3439 	/* Configure CBS in AVB TX queues */
3440 	if (tx_queues_count > 1)
3441 		stmmac_configure_cbs(priv);
3442 
3443 	/* Map RX MTL to DMA channels */
3444 	stmmac_rx_queue_dma_chan_map(priv);
3445 
3446 	/* Enable MAC RX Queues */
3447 	stmmac_mac_enable_rx_queues(priv);
3448 
3449 	/* Set RX priorities */
3450 	if (rx_queues_count > 1)
3451 		stmmac_mac_config_rx_queues_prio(priv);
3452 
3453 	/* Set TX priorities */
3454 	if (tx_queues_count > 1)
3455 		stmmac_mac_config_tx_queues_prio(priv);
3456 
3457 	/* Set RX routing */
3458 	if (rx_queues_count > 1)
3459 		stmmac_mac_config_rx_queues_routing(priv);
3460 
3461 	/* Receive Side Scaling */
3462 	if (rx_queues_count > 1)
3463 		stmmac_mac_config_rss(priv);
3464 }
3465 
3466 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3467 {
3468 	if (priv->dma_cap.asp) {
3469 		netdev_info(priv->dev, "Enabling Safety Features\n");
3470 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3471 					  priv->plat->safety_feat_cfg);
3472 	} else {
3473 		netdev_info(priv->dev, "No Safety Features support found\n");
3474 	}
3475 }
3476 
3477 /**
3478  * stmmac_hw_setup - setup mac in a usable state.
3479  *  @dev : pointer to the device structure.
3480  *  @ptp_register: register PTP if set
3481  *  Description:
3482  *  this is the main function to setup the HW in a usable state because the
3483  *  dma engine is reset, the core registers are configured (e.g. AXI,
3484  *  Checksum features, timers). The DMA is ready to start receiving and
3485  *  transmitting.
3486  *  Return value:
3487  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3488  *  file on failure.
3489  */
3490 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3491 {
3492 	struct stmmac_priv *priv = netdev_priv(dev);
3493 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3494 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3495 	bool sph_en;
3496 	u32 chan;
3497 	int ret;
3498 
3499 	/* Make sure RX clock is enabled */
3500 	if (priv->hw->phylink_pcs)
3501 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3502 
3503 	/* Note that clk_rx_i must be running for reset to complete. This
3504 	 * clock may also be required when setting the MAC address.
3505 	 *
3506 	 * Block the receive clock stop for LPI mode at the PHY in case
3507 	 * the link is established with EEE mode active.
3508 	 */
3509 	phylink_rx_clk_stop_block(priv->phylink);
3510 
3511 	/* DMA initialization and SW reset */
3512 	ret = stmmac_init_dma_engine(priv);
3513 	if (ret < 0) {
3514 		phylink_rx_clk_stop_unblock(priv->phylink);
3515 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3516 			   __func__);
3517 		return ret;
3518 	}
3519 
3520 	/* Copy the MAC addr into the HW  */
3521 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3522 	phylink_rx_clk_stop_unblock(priv->phylink);
3523 
3524 	/* PS and related bits will be programmed according to the speed */
3525 	if (priv->hw->pcs) {
3526 		int speed = priv->plat->mac_port_sel_speed;
3527 
3528 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3529 		    (speed == SPEED_1000)) {
3530 			priv->hw->ps = speed;
3531 		} else {
3532 			dev_warn(priv->device, "invalid port speed\n");
3533 			priv->hw->ps = 0;
3534 		}
3535 	}
3536 
3537 	/* Initialize the MAC Core */
3538 	stmmac_core_init(priv, priv->hw, dev);
3539 
3540 	/* Initialize MTL*/
3541 	stmmac_mtl_configuration(priv);
3542 
3543 	/* Initialize Safety Features */
3544 	stmmac_safety_feat_configuration(priv);
3545 
3546 	ret = stmmac_rx_ipc(priv, priv->hw);
3547 	if (!ret) {
3548 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3549 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3550 		priv->hw->rx_csum = 0;
3551 	}
3552 
3553 	/* Enable the MAC Rx/Tx */
3554 	stmmac_mac_set(priv, priv->ioaddr, true);
3555 
3556 	/* Set the HW DMA mode and the COE */
3557 	stmmac_dma_operation_mode(priv);
3558 
3559 	stmmac_mmc_setup(priv);
3560 
3561 	if (ptp_register) {
3562 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3563 		if (ret < 0)
3564 			netdev_warn(priv->dev,
3565 				    "failed to enable PTP reference clock: %pe\n",
3566 				    ERR_PTR(ret));
3567 	}
3568 
3569 	ret = stmmac_init_ptp(priv);
3570 	if (ret == -EOPNOTSUPP)
3571 		netdev_info(priv->dev, "PTP not supported by HW\n");
3572 	else if (ret)
3573 		netdev_warn(priv->dev, "PTP init failed\n");
3574 	else if (ptp_register)
3575 		stmmac_ptp_register(priv);
3576 
3577 	if (priv->use_riwt) {
3578 		u32 queue;
3579 
3580 		for (queue = 0; queue < rx_cnt; queue++) {
3581 			if (!priv->rx_riwt[queue])
3582 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3583 
3584 			stmmac_rx_watchdog(priv, priv->ioaddr,
3585 					   priv->rx_riwt[queue], queue);
3586 		}
3587 	}
3588 
3589 	if (priv->hw->pcs)
3590 		stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
3591 
3592 	/* set TX and RX rings length */
3593 	stmmac_set_rings_length(priv);
3594 
3595 	/* Enable TSO */
3596 	if (priv->tso) {
3597 		for (chan = 0; chan < tx_cnt; chan++) {
3598 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3599 
3600 			/* TSO and TBS cannot co-exist */
3601 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3602 				continue;
3603 
3604 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3605 		}
3606 	}
3607 
3608 	/* Enable Split Header */
3609 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3610 	for (chan = 0; chan < rx_cnt; chan++)
3611 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3612 
3613 
3614 	/* VLAN Tag Insertion */
3615 	if (priv->dma_cap.vlins)
3616 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3617 
3618 	/* TBS */
3619 	for (chan = 0; chan < tx_cnt; chan++) {
3620 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3621 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3622 
3623 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3624 	}
3625 
3626 	/* Configure real RX and TX queues */
3627 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3628 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3629 
3630 	/* Start the ball rolling... */
3631 	stmmac_start_all_dma(priv);
3632 
3633 	phylink_rx_clk_stop_block(priv->phylink);
3634 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3635 	phylink_rx_clk_stop_unblock(priv->phylink);
3636 
3637 	return 0;
3638 }
3639 
3640 static void stmmac_hw_teardown(struct net_device *dev)
3641 {
3642 	struct stmmac_priv *priv = netdev_priv(dev);
3643 
3644 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3645 }
3646 
3647 static void stmmac_free_irq(struct net_device *dev,
3648 			    enum request_irq_err irq_err, int irq_idx)
3649 {
3650 	struct stmmac_priv *priv = netdev_priv(dev);
3651 	int j;
3652 
3653 	switch (irq_err) {
3654 	case REQ_IRQ_ERR_ALL:
3655 		irq_idx = priv->plat->tx_queues_to_use;
3656 		fallthrough;
3657 	case REQ_IRQ_ERR_TX:
3658 		for (j = irq_idx - 1; j >= 0; j--) {
3659 			if (priv->tx_irq[j] > 0) {
3660 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3661 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3662 			}
3663 		}
3664 		irq_idx = priv->plat->rx_queues_to_use;
3665 		fallthrough;
3666 	case REQ_IRQ_ERR_RX:
3667 		for (j = irq_idx - 1; j >= 0; j--) {
3668 			if (priv->rx_irq[j] > 0) {
3669 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3670 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3671 			}
3672 		}
3673 
3674 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3675 			free_irq(priv->sfty_ue_irq, dev);
3676 		fallthrough;
3677 	case REQ_IRQ_ERR_SFTY_UE:
3678 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3679 			free_irq(priv->sfty_ce_irq, dev);
3680 		fallthrough;
3681 	case REQ_IRQ_ERR_SFTY_CE:
3682 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3683 			free_irq(priv->lpi_irq, dev);
3684 		fallthrough;
3685 	case REQ_IRQ_ERR_LPI:
3686 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3687 			free_irq(priv->wol_irq, dev);
3688 		fallthrough;
3689 	case REQ_IRQ_ERR_SFTY:
3690 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3691 			free_irq(priv->sfty_irq, dev);
3692 		fallthrough;
3693 	case REQ_IRQ_ERR_WOL:
3694 		free_irq(dev->irq, dev);
3695 		fallthrough;
3696 	case REQ_IRQ_ERR_MAC:
3697 	case REQ_IRQ_ERR_NO:
3698 		/* If MAC IRQ request error, no more IRQ to free */
3699 		break;
3700 	}
3701 }
3702 
3703 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3704 {
3705 	struct stmmac_priv *priv = netdev_priv(dev);
3706 	enum request_irq_err irq_err;
3707 	int irq_idx = 0;
3708 	char *int_name;
3709 	int ret;
3710 	int i;
3711 
3712 	/* For common interrupt */
3713 	int_name = priv->int_name_mac;
3714 	sprintf(int_name, "%s:%s", dev->name, "mac");
3715 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3716 			  0, int_name, dev);
3717 	if (unlikely(ret < 0)) {
3718 		netdev_err(priv->dev,
3719 			   "%s: alloc mac MSI %d (error: %d)\n",
3720 			   __func__, dev->irq, ret);
3721 		irq_err = REQ_IRQ_ERR_MAC;
3722 		goto irq_error;
3723 	}
3724 
3725 	/* Request the Wake IRQ in case of another line
3726 	 * is used for WoL
3727 	 */
3728 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3729 		int_name = priv->int_name_wol;
3730 		sprintf(int_name, "%s:%s", dev->name, "wol");
3731 		ret = request_irq(priv->wol_irq,
3732 				  stmmac_mac_interrupt,
3733 				  0, int_name, dev);
3734 		if (unlikely(ret < 0)) {
3735 			netdev_err(priv->dev,
3736 				   "%s: alloc wol MSI %d (error: %d)\n",
3737 				   __func__, priv->wol_irq, ret);
3738 			irq_err = REQ_IRQ_ERR_WOL;
3739 			goto irq_error;
3740 		}
3741 	}
3742 
3743 	/* Request the LPI IRQ in case of another line
3744 	 * is used for LPI
3745 	 */
3746 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3747 		int_name = priv->int_name_lpi;
3748 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3749 		ret = request_irq(priv->lpi_irq,
3750 				  stmmac_mac_interrupt,
3751 				  0, int_name, dev);
3752 		if (unlikely(ret < 0)) {
3753 			netdev_err(priv->dev,
3754 				   "%s: alloc lpi MSI %d (error: %d)\n",
3755 				   __func__, priv->lpi_irq, ret);
3756 			irq_err = REQ_IRQ_ERR_LPI;
3757 			goto irq_error;
3758 		}
3759 	}
3760 
3761 	/* Request the common Safety Feature Correctible/Uncorrectible
3762 	 * Error line in case of another line is used
3763 	 */
3764 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3765 		int_name = priv->int_name_sfty;
3766 		sprintf(int_name, "%s:%s", dev->name, "safety");
3767 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3768 				  0, int_name, dev);
3769 		if (unlikely(ret < 0)) {
3770 			netdev_err(priv->dev,
3771 				   "%s: alloc sfty MSI %d (error: %d)\n",
3772 				   __func__, priv->sfty_irq, ret);
3773 			irq_err = REQ_IRQ_ERR_SFTY;
3774 			goto irq_error;
3775 		}
3776 	}
3777 
3778 	/* Request the Safety Feature Correctible Error line in
3779 	 * case of another line is used
3780 	 */
3781 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3782 		int_name = priv->int_name_sfty_ce;
3783 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3784 		ret = request_irq(priv->sfty_ce_irq,
3785 				  stmmac_safety_interrupt,
3786 				  0, int_name, dev);
3787 		if (unlikely(ret < 0)) {
3788 			netdev_err(priv->dev,
3789 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3790 				   __func__, priv->sfty_ce_irq, ret);
3791 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3792 			goto irq_error;
3793 		}
3794 	}
3795 
3796 	/* Request the Safety Feature Uncorrectible Error line in
3797 	 * case of another line is used
3798 	 */
3799 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3800 		int_name = priv->int_name_sfty_ue;
3801 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3802 		ret = request_irq(priv->sfty_ue_irq,
3803 				  stmmac_safety_interrupt,
3804 				  0, int_name, dev);
3805 		if (unlikely(ret < 0)) {
3806 			netdev_err(priv->dev,
3807 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3808 				   __func__, priv->sfty_ue_irq, ret);
3809 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3810 			goto irq_error;
3811 		}
3812 	}
3813 
3814 	/* Request Rx MSI irq */
3815 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3816 		if (i >= MTL_MAX_RX_QUEUES)
3817 			break;
3818 		if (priv->rx_irq[i] == 0)
3819 			continue;
3820 
3821 		int_name = priv->int_name_rx_irq[i];
3822 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3823 		ret = request_irq(priv->rx_irq[i],
3824 				  stmmac_msi_intr_rx,
3825 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3826 		if (unlikely(ret < 0)) {
3827 			netdev_err(priv->dev,
3828 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3829 				   __func__, i, priv->rx_irq[i], ret);
3830 			irq_err = REQ_IRQ_ERR_RX;
3831 			irq_idx = i;
3832 			goto irq_error;
3833 		}
3834 		irq_set_affinity_hint(priv->rx_irq[i],
3835 				      cpumask_of(i % num_online_cpus()));
3836 	}
3837 
3838 	/* Request Tx MSI irq */
3839 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3840 		if (i >= MTL_MAX_TX_QUEUES)
3841 			break;
3842 		if (priv->tx_irq[i] == 0)
3843 			continue;
3844 
3845 		int_name = priv->int_name_tx_irq[i];
3846 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3847 		ret = request_irq(priv->tx_irq[i],
3848 				  stmmac_msi_intr_tx,
3849 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3850 		if (unlikely(ret < 0)) {
3851 			netdev_err(priv->dev,
3852 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3853 				   __func__, i, priv->tx_irq[i], ret);
3854 			irq_err = REQ_IRQ_ERR_TX;
3855 			irq_idx = i;
3856 			goto irq_error;
3857 		}
3858 		irq_set_affinity_hint(priv->tx_irq[i],
3859 				      cpumask_of(i % num_online_cpus()));
3860 	}
3861 
3862 	return 0;
3863 
3864 irq_error:
3865 	stmmac_free_irq(dev, irq_err, irq_idx);
3866 	return ret;
3867 }
3868 
3869 static int stmmac_request_irq_single(struct net_device *dev)
3870 {
3871 	struct stmmac_priv *priv = netdev_priv(dev);
3872 	enum request_irq_err irq_err;
3873 	int ret;
3874 
3875 	ret = request_irq(dev->irq, stmmac_interrupt,
3876 			  IRQF_SHARED, dev->name, dev);
3877 	if (unlikely(ret < 0)) {
3878 		netdev_err(priv->dev,
3879 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3880 			   __func__, dev->irq, ret);
3881 		irq_err = REQ_IRQ_ERR_MAC;
3882 		goto irq_error;
3883 	}
3884 
3885 	/* Request the Wake IRQ in case of another line
3886 	 * is used for WoL
3887 	 */
3888 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3889 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3890 				  IRQF_SHARED, dev->name, dev);
3891 		if (unlikely(ret < 0)) {
3892 			netdev_err(priv->dev,
3893 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3894 				   __func__, priv->wol_irq, ret);
3895 			irq_err = REQ_IRQ_ERR_WOL;
3896 			goto irq_error;
3897 		}
3898 	}
3899 
3900 	/* Request the IRQ lines */
3901 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3902 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3903 				  IRQF_SHARED, dev->name, dev);
3904 		if (unlikely(ret < 0)) {
3905 			netdev_err(priv->dev,
3906 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3907 				   __func__, priv->lpi_irq, ret);
3908 			irq_err = REQ_IRQ_ERR_LPI;
3909 			goto irq_error;
3910 		}
3911 	}
3912 
3913 	/* Request the common Safety Feature Correctible/Uncorrectible
3914 	 * Error line in case of another line is used
3915 	 */
3916 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3917 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3918 				  IRQF_SHARED, dev->name, dev);
3919 		if (unlikely(ret < 0)) {
3920 			netdev_err(priv->dev,
3921 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3922 				   __func__, priv->sfty_irq, ret);
3923 			irq_err = REQ_IRQ_ERR_SFTY;
3924 			goto irq_error;
3925 		}
3926 	}
3927 
3928 	return 0;
3929 
3930 irq_error:
3931 	stmmac_free_irq(dev, irq_err, 0);
3932 	return ret;
3933 }
3934 
3935 static int stmmac_request_irq(struct net_device *dev)
3936 {
3937 	struct stmmac_priv *priv = netdev_priv(dev);
3938 	int ret;
3939 
3940 	/* Request the IRQ lines */
3941 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3942 		ret = stmmac_request_irq_multi_msi(dev);
3943 	else
3944 		ret = stmmac_request_irq_single(dev);
3945 
3946 	return ret;
3947 }
3948 
3949 /**
3950  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3951  *  @priv: driver private structure
3952  *  @mtu: MTU to setup the dma queue and buf with
3953  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3954  *  Allocate the Tx/Rx DMA queue and init them.
3955  *  Return value:
3956  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3957  */
3958 static struct stmmac_dma_conf *
3959 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3960 {
3961 	struct stmmac_dma_conf *dma_conf;
3962 	int chan, bfsize, ret;
3963 
3964 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3965 	if (!dma_conf) {
3966 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3967 			   __func__);
3968 		return ERR_PTR(-ENOMEM);
3969 	}
3970 
3971 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3972 	if (bfsize < 0)
3973 		bfsize = 0;
3974 
3975 	if (bfsize < BUF_SIZE_16KiB)
3976 		bfsize = stmmac_set_bfsize(mtu, 0);
3977 
3978 	dma_conf->dma_buf_sz = bfsize;
3979 	/* Chose the tx/rx size from the already defined one in the
3980 	 * priv struct. (if defined)
3981 	 */
3982 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3983 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3984 
3985 	if (!dma_conf->dma_tx_size)
3986 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3987 	if (!dma_conf->dma_rx_size)
3988 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3989 
3990 	/* Earlier check for TBS */
3991 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3992 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3993 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3994 
3995 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3996 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3997 	}
3998 
3999 	ret = alloc_dma_desc_resources(priv, dma_conf);
4000 	if (ret < 0) {
4001 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
4002 			   __func__);
4003 		goto alloc_error;
4004 	}
4005 
4006 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
4007 	if (ret < 0) {
4008 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
4009 			   __func__);
4010 		goto init_error;
4011 	}
4012 
4013 	return dma_conf;
4014 
4015 init_error:
4016 	free_dma_desc_resources(priv, dma_conf);
4017 alloc_error:
4018 	kfree(dma_conf);
4019 	return ERR_PTR(ret);
4020 }
4021 
4022 /**
4023  *  __stmmac_open - open entry point of the driver
4024  *  @dev : pointer to the device structure.
4025  *  @dma_conf :  structure to take the dma data
4026  *  Description:
4027  *  This function is the open entry point of the driver.
4028  *  Return value:
4029  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4030  *  file on failure.
4031  */
4032 static int __stmmac_open(struct net_device *dev,
4033 			 struct stmmac_dma_conf *dma_conf)
4034 {
4035 	struct stmmac_priv *priv = netdev_priv(dev);
4036 	int mode = priv->plat->phy_interface;
4037 	u32 chan;
4038 	int ret;
4039 
4040 	/* Initialise the tx lpi timer, converting from msec to usec */
4041 	if (!priv->tx_lpi_timer)
4042 		priv->tx_lpi_timer = eee_timer * 1000;
4043 
4044 	ret = pm_runtime_resume_and_get(priv->device);
4045 	if (ret < 0)
4046 		return ret;
4047 
4048 	if ((!priv->hw->xpcs ||
4049 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
4050 		ret = stmmac_init_phy(dev);
4051 		if (ret) {
4052 			netdev_err(priv->dev,
4053 				   "%s: Cannot attach to PHY (error: %d)\n",
4054 				   __func__, ret);
4055 			goto init_phy_error;
4056 		}
4057 	}
4058 
4059 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
4060 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
4061 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
4062 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4063 
4064 	stmmac_reset_queues_param(priv);
4065 
4066 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
4067 	    priv->plat->serdes_powerup) {
4068 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4069 		if (ret < 0) {
4070 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4071 				   __func__);
4072 			goto init_error;
4073 		}
4074 	}
4075 
4076 	ret = stmmac_hw_setup(dev, true);
4077 	if (ret < 0) {
4078 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4079 		goto init_error;
4080 	}
4081 
4082 	stmmac_init_coalesce(priv);
4083 
4084 	phylink_start(priv->phylink);
4085 	/* We may have called phylink_speed_down before */
4086 	phylink_speed_up(priv->phylink);
4087 
4088 	ret = stmmac_request_irq(dev);
4089 	if (ret)
4090 		goto irq_error;
4091 
4092 	stmmac_enable_all_queues(priv);
4093 	netif_tx_start_all_queues(priv->dev);
4094 	stmmac_enable_all_dma_irq(priv);
4095 
4096 	return 0;
4097 
4098 irq_error:
4099 	phylink_stop(priv->phylink);
4100 
4101 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4102 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4103 
4104 	stmmac_hw_teardown(dev);
4105 init_error:
4106 	phylink_disconnect_phy(priv->phylink);
4107 init_phy_error:
4108 	pm_runtime_put(priv->device);
4109 	return ret;
4110 }
4111 
4112 static int stmmac_open(struct net_device *dev)
4113 {
4114 	struct stmmac_priv *priv = netdev_priv(dev);
4115 	struct stmmac_dma_conf *dma_conf;
4116 	int ret;
4117 
4118 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4119 	if (IS_ERR(dma_conf))
4120 		return PTR_ERR(dma_conf);
4121 
4122 	ret = __stmmac_open(dev, dma_conf);
4123 	if (ret)
4124 		free_dma_desc_resources(priv, dma_conf);
4125 
4126 	kfree(dma_conf);
4127 	return ret;
4128 }
4129 
4130 /**
4131  *  stmmac_release - close entry point of the driver
4132  *  @dev : device pointer.
4133  *  Description:
4134  *  This is the stop entry point of the driver.
4135  */
4136 static int stmmac_release(struct net_device *dev)
4137 {
4138 	struct stmmac_priv *priv = netdev_priv(dev);
4139 	u32 chan;
4140 
4141 	/* If the PHY or MAC has WoL enabled, then the PHY will not be
4142 	 * suspended when phylink_stop() is called below. Set the PHY
4143 	 * to its slowest speed to save power.
4144 	 */
4145 	if (device_may_wakeup(priv->device))
4146 		phylink_speed_down(priv->phylink, false);
4147 
4148 	/* Stop and disconnect the PHY */
4149 	phylink_stop(priv->phylink);
4150 	phylink_disconnect_phy(priv->phylink);
4151 
4152 	stmmac_disable_all_queues(priv);
4153 
4154 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4155 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4156 
4157 	netif_tx_disable(dev);
4158 
4159 	/* Free the IRQ lines */
4160 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4161 
4162 	/* Stop TX/RX DMA and clear the descriptors */
4163 	stmmac_stop_all_dma(priv);
4164 
4165 	/* Release and free the Rx/Tx resources */
4166 	free_dma_desc_resources(priv, &priv->dma_conf);
4167 
4168 	/* Powerdown Serdes if there is */
4169 	if (priv->plat->serdes_powerdown)
4170 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4171 
4172 	stmmac_release_ptp(priv);
4173 
4174 	if (stmmac_fpe_supported(priv))
4175 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
4176 
4177 	pm_runtime_put(priv->device);
4178 
4179 	return 0;
4180 }
4181 
4182 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4183 			       struct stmmac_tx_queue *tx_q)
4184 {
4185 	u16 tag = 0x0, inner_tag = 0x0;
4186 	u32 inner_type = 0x0;
4187 	struct dma_desc *p;
4188 
4189 	if (!priv->dma_cap.vlins)
4190 		return false;
4191 	if (!skb_vlan_tag_present(skb))
4192 		return false;
4193 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4194 		inner_tag = skb_vlan_tag_get(skb);
4195 		inner_type = STMMAC_VLAN_INSERT;
4196 	}
4197 
4198 	tag = skb_vlan_tag_get(skb);
4199 
4200 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4201 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4202 	else
4203 		p = &tx_q->dma_tx[tx_q->cur_tx];
4204 
4205 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4206 		return false;
4207 
4208 	stmmac_set_tx_owner(priv, p);
4209 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4210 	return true;
4211 }
4212 
4213 /**
4214  *  stmmac_tso_allocator - close entry point of the driver
4215  *  @priv: driver private structure
4216  *  @des: buffer start address
4217  *  @total_len: total length to fill in descriptors
4218  *  @last_segment: condition for the last descriptor
4219  *  @queue: TX queue index
4220  *  Description:
4221  *  This function fills descriptor and request new descriptors according to
4222  *  buffer length to fill
4223  */
4224 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4225 				 int total_len, bool last_segment, u32 queue)
4226 {
4227 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4228 	struct dma_desc *desc;
4229 	u32 buff_size;
4230 	int tmp_len;
4231 
4232 	tmp_len = total_len;
4233 
4234 	while (tmp_len > 0) {
4235 		dma_addr_t curr_addr;
4236 
4237 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4238 						priv->dma_conf.dma_tx_size);
4239 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4240 
4241 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4242 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4243 		else
4244 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4245 
4246 		curr_addr = des + (total_len - tmp_len);
4247 		stmmac_set_desc_addr(priv, desc, curr_addr);
4248 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4249 			    TSO_MAX_BUFF_SIZE : tmp_len;
4250 
4251 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4252 				0, 1,
4253 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4254 				0, 0);
4255 
4256 		tmp_len -= TSO_MAX_BUFF_SIZE;
4257 	}
4258 }
4259 
4260 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4261 {
4262 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4263 	int desc_size;
4264 
4265 	if (likely(priv->extend_desc))
4266 		desc_size = sizeof(struct dma_extended_desc);
4267 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4268 		desc_size = sizeof(struct dma_edesc);
4269 	else
4270 		desc_size = sizeof(struct dma_desc);
4271 
4272 	/* The own bit must be the latest setting done when prepare the
4273 	 * descriptor and then barrier is needed to make sure that
4274 	 * all is coherent before granting the DMA engine.
4275 	 */
4276 	wmb();
4277 
4278 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4279 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4280 }
4281 
4282 /**
4283  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4284  *  @skb : the socket buffer
4285  *  @dev : device pointer
4286  *  Description: this is the transmit function that is called on TSO frames
4287  *  (support available on GMAC4 and newer chips).
4288  *  Diagram below show the ring programming in case of TSO frames:
4289  *
4290  *  First Descriptor
4291  *   --------
4292  *   | DES0 |---> buffer1 = L2/L3/L4 header
4293  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4294  *   |      |     width is 32-bit, but we never use it.
4295  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4296  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4297  *   |      |     or 48-bit, and we always use it.
4298  *   | DES2 |---> buffer1 len
4299  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4300  *   --------
4301  *   --------
4302  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4303  *   | DES1 |---> same as the First Descriptor
4304  *   | DES2 |---> buffer1 len
4305  *   | DES3 |
4306  *   --------
4307  *	|
4308  *     ...
4309  *	|
4310  *   --------
4311  *   | DES0 |---> buffer1 = Split TCP Payload
4312  *   | DES1 |---> same as the First Descriptor
4313  *   | DES2 |---> buffer1 len
4314  *   | DES3 |
4315  *   --------
4316  *
4317  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4318  */
4319 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4320 {
4321 	struct dma_desc *desc, *first, *mss_desc = NULL;
4322 	struct stmmac_priv *priv = netdev_priv(dev);
4323 	unsigned int first_entry, tx_packets;
4324 	struct stmmac_txq_stats *txq_stats;
4325 	struct stmmac_tx_queue *tx_q;
4326 	u32 pay_len, mss, queue;
4327 	int i, first_tx, nfrags;
4328 	u8 proto_hdr_len, hdr;
4329 	dma_addr_t des;
4330 	bool set_ic;
4331 
4332 	/* Always insert VLAN tag to SKB payload for TSO frames.
4333 	 *
4334 	 * Never insert VLAN tag by HW, since segments splited by
4335 	 * TSO engine will be un-tagged by mistake.
4336 	 */
4337 	if (skb_vlan_tag_present(skb)) {
4338 		skb = __vlan_hwaccel_push_inside(skb);
4339 		if (unlikely(!skb)) {
4340 			priv->xstats.tx_dropped++;
4341 			return NETDEV_TX_OK;
4342 		}
4343 	}
4344 
4345 	nfrags = skb_shinfo(skb)->nr_frags;
4346 	queue = skb_get_queue_mapping(skb);
4347 
4348 	tx_q = &priv->dma_conf.tx_queue[queue];
4349 	txq_stats = &priv->xstats.txq_stats[queue];
4350 	first_tx = tx_q->cur_tx;
4351 
4352 	/* Compute header lengths */
4353 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4354 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4355 		hdr = sizeof(struct udphdr);
4356 	} else {
4357 		proto_hdr_len = skb_tcp_all_headers(skb);
4358 		hdr = tcp_hdrlen(skb);
4359 	}
4360 
4361 	/* Desc availability based on threshold should be enough safe */
4362 	if (unlikely(stmmac_tx_avail(priv, queue) <
4363 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4364 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4365 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4366 								queue));
4367 			/* This is a hard error, log it. */
4368 			netdev_err(priv->dev,
4369 				   "%s: Tx Ring full when queue awake\n",
4370 				   __func__);
4371 		}
4372 		return NETDEV_TX_BUSY;
4373 	}
4374 
4375 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4376 
4377 	mss = skb_shinfo(skb)->gso_size;
4378 
4379 	/* set new MSS value if needed */
4380 	if (mss != tx_q->mss) {
4381 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4382 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4383 		else
4384 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4385 
4386 		stmmac_set_mss(priv, mss_desc, mss);
4387 		tx_q->mss = mss;
4388 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4389 						priv->dma_conf.dma_tx_size);
4390 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4391 	}
4392 
4393 	if (netif_msg_tx_queued(priv)) {
4394 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4395 			__func__, hdr, proto_hdr_len, pay_len, mss);
4396 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4397 			skb->data_len);
4398 	}
4399 
4400 	first_entry = tx_q->cur_tx;
4401 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4402 
4403 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4404 		desc = &tx_q->dma_entx[first_entry].basic;
4405 	else
4406 		desc = &tx_q->dma_tx[first_entry];
4407 	first = desc;
4408 
4409 	/* first descriptor: fill Headers on Buf1 */
4410 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4411 			     DMA_TO_DEVICE);
4412 	if (dma_mapping_error(priv->device, des))
4413 		goto dma_map_err;
4414 
4415 	stmmac_set_desc_addr(priv, first, des);
4416 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4417 			     (nfrags == 0), queue);
4418 
4419 	/* In case two or more DMA transmit descriptors are allocated for this
4420 	 * non-paged SKB data, the DMA buffer address should be saved to
4421 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4422 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4423 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4424 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4425 	 * sooner or later.
4426 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4427 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4428 	 * this DMA buffer right after the DMA engine completely finishes the
4429 	 * full buffer transmission.
4430 	 */
4431 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4432 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4433 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4434 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4435 
4436 	/* Prepare fragments */
4437 	for (i = 0; i < nfrags; i++) {
4438 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4439 
4440 		des = skb_frag_dma_map(priv->device, frag, 0,
4441 				       skb_frag_size(frag),
4442 				       DMA_TO_DEVICE);
4443 		if (dma_mapping_error(priv->device, des))
4444 			goto dma_map_err;
4445 
4446 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4447 				     (i == nfrags - 1), queue);
4448 
4449 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4450 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4451 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4452 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4453 	}
4454 
4455 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4456 
4457 	/* Only the last descriptor gets to point to the skb. */
4458 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4459 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4460 
4461 	/* Manage tx mitigation */
4462 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4463 	tx_q->tx_count_frames += tx_packets;
4464 
4465 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4466 		set_ic = true;
4467 	else if (!priv->tx_coal_frames[queue])
4468 		set_ic = false;
4469 	else if (tx_packets > priv->tx_coal_frames[queue])
4470 		set_ic = true;
4471 	else if ((tx_q->tx_count_frames %
4472 		  priv->tx_coal_frames[queue]) < tx_packets)
4473 		set_ic = true;
4474 	else
4475 		set_ic = false;
4476 
4477 	if (set_ic) {
4478 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4479 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4480 		else
4481 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4482 
4483 		tx_q->tx_count_frames = 0;
4484 		stmmac_set_tx_ic(priv, desc);
4485 	}
4486 
4487 	/* We've used all descriptors we need for this skb, however,
4488 	 * advance cur_tx so that it references a fresh descriptor.
4489 	 * ndo_start_xmit will fill this descriptor the next time it's
4490 	 * called and stmmac_tx_clean may clean up to this descriptor.
4491 	 */
4492 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4493 
4494 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4495 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4496 			  __func__);
4497 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4498 	}
4499 
4500 	u64_stats_update_begin(&txq_stats->q_syncp);
4501 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4502 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4503 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4504 	if (set_ic)
4505 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4506 	u64_stats_update_end(&txq_stats->q_syncp);
4507 
4508 	if (priv->sarc_type)
4509 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4510 
4511 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4512 		     priv->hwts_tx_en)) {
4513 		/* declare that device is doing timestamping */
4514 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4515 		stmmac_enable_tx_timestamp(priv, first);
4516 	}
4517 
4518 	/* Complete the first descriptor before granting the DMA */
4519 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4520 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4521 				   hdr / 4, (skb->len - proto_hdr_len));
4522 
4523 	/* If context desc is used to change MSS */
4524 	if (mss_desc) {
4525 		/* Make sure that first descriptor has been completely
4526 		 * written, including its own bit. This is because MSS is
4527 		 * actually before first descriptor, so we need to make
4528 		 * sure that MSS's own bit is the last thing written.
4529 		 */
4530 		dma_wmb();
4531 		stmmac_set_tx_owner(priv, mss_desc);
4532 	}
4533 
4534 	if (netif_msg_pktdata(priv)) {
4535 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4536 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4537 			tx_q->cur_tx, first, nfrags);
4538 		pr_info(">>> frame to be transmitted: ");
4539 		print_pkt(skb->data, skb_headlen(skb));
4540 	}
4541 
4542 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4543 	skb_tx_timestamp(skb);
4544 
4545 	stmmac_flush_tx_descriptors(priv, queue);
4546 	stmmac_tx_timer_arm(priv, queue);
4547 
4548 	return NETDEV_TX_OK;
4549 
4550 dma_map_err:
4551 	dev_err(priv->device, "Tx dma map failed\n");
4552 	dev_kfree_skb(skb);
4553 	priv->xstats.tx_dropped++;
4554 	return NETDEV_TX_OK;
4555 }
4556 
4557 /**
4558  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4559  * @skb: socket buffer to check
4560  *
4561  * Check if a packet has an ethertype that will trigger the IP header checks
4562  * and IP/TCP checksum engine of the stmmac core.
4563  *
4564  * Return: true if the ethertype can trigger the checksum engine, false
4565  * otherwise
4566  */
4567 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4568 {
4569 	int depth = 0;
4570 	__be16 proto;
4571 
4572 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4573 				    &depth);
4574 
4575 	return (depth <= ETH_HLEN) &&
4576 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4577 }
4578 
4579 /**
4580  *  stmmac_xmit - Tx entry point of the driver
4581  *  @skb : the socket buffer
4582  *  @dev : device pointer
4583  *  Description : this is the tx entry point of the driver.
4584  *  It programs the chain or the ring and supports oversized frames
4585  *  and SG feature.
4586  */
4587 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4588 {
4589 	unsigned int first_entry, tx_packets, enh_desc;
4590 	struct stmmac_priv *priv = netdev_priv(dev);
4591 	unsigned int nopaged_len = skb_headlen(skb);
4592 	int i, csum_insertion = 0, is_jumbo = 0;
4593 	u32 queue = skb_get_queue_mapping(skb);
4594 	int nfrags = skb_shinfo(skb)->nr_frags;
4595 	int gso = skb_shinfo(skb)->gso_type;
4596 	struct stmmac_txq_stats *txq_stats;
4597 	struct dma_edesc *tbs_desc = NULL;
4598 	struct dma_desc *desc, *first;
4599 	struct stmmac_tx_queue *tx_q;
4600 	bool has_vlan, set_ic;
4601 	int entry, first_tx;
4602 	dma_addr_t des;
4603 
4604 	tx_q = &priv->dma_conf.tx_queue[queue];
4605 	txq_stats = &priv->xstats.txq_stats[queue];
4606 	first_tx = tx_q->cur_tx;
4607 
4608 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4609 		stmmac_stop_sw_lpi(priv);
4610 
4611 	/* Manage oversized TCP frames for GMAC4 device */
4612 	if (skb_is_gso(skb) && priv->tso) {
4613 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4614 			return stmmac_tso_xmit(skb, dev);
4615 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4616 			return stmmac_tso_xmit(skb, dev);
4617 	}
4618 
4619 	if (priv->est && priv->est->enable &&
4620 	    priv->est->max_sdu[queue] &&
4621 	    skb->len > priv->est->max_sdu[queue]){
4622 		priv->xstats.max_sdu_txq_drop[queue]++;
4623 		goto max_sdu_err;
4624 	}
4625 
4626 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4627 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4628 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4629 								queue));
4630 			/* This is a hard error, log it. */
4631 			netdev_err(priv->dev,
4632 				   "%s: Tx Ring full when queue awake\n",
4633 				   __func__);
4634 		}
4635 		return NETDEV_TX_BUSY;
4636 	}
4637 
4638 	/* Check if VLAN can be inserted by HW */
4639 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4640 
4641 	entry = tx_q->cur_tx;
4642 	first_entry = entry;
4643 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4644 
4645 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4646 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4647 	 * queues. In that case, checksum offloading for those queues that don't
4648 	 * support tx coe needs to fallback to software checksum calculation.
4649 	 *
4650 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4651 	 * also have to be checksummed in software.
4652 	 */
4653 	if (csum_insertion &&
4654 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4655 	     !stmmac_has_ip_ethertype(skb))) {
4656 		if (unlikely(skb_checksum_help(skb)))
4657 			goto dma_map_err;
4658 		csum_insertion = !csum_insertion;
4659 	}
4660 
4661 	if (likely(priv->extend_desc))
4662 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4663 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4664 		desc = &tx_q->dma_entx[entry].basic;
4665 	else
4666 		desc = tx_q->dma_tx + entry;
4667 
4668 	first = desc;
4669 
4670 	if (has_vlan)
4671 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4672 
4673 	enh_desc = priv->plat->enh_desc;
4674 	/* To program the descriptors according to the size of the frame */
4675 	if (enh_desc)
4676 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4677 
4678 	if (unlikely(is_jumbo)) {
4679 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4680 		if (unlikely(entry < 0) && (entry != -EINVAL))
4681 			goto dma_map_err;
4682 	}
4683 
4684 	for (i = 0; i < nfrags; i++) {
4685 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4686 		int len = skb_frag_size(frag);
4687 		bool last_segment = (i == (nfrags - 1));
4688 
4689 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4690 		WARN_ON(tx_q->tx_skbuff[entry]);
4691 
4692 		if (likely(priv->extend_desc))
4693 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4694 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4695 			desc = &tx_q->dma_entx[entry].basic;
4696 		else
4697 			desc = tx_q->dma_tx + entry;
4698 
4699 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4700 				       DMA_TO_DEVICE);
4701 		if (dma_mapping_error(priv->device, des))
4702 			goto dma_map_err; /* should reuse desc w/o issues */
4703 
4704 		tx_q->tx_skbuff_dma[entry].buf = des;
4705 
4706 		stmmac_set_desc_addr(priv, desc, des);
4707 
4708 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4709 		tx_q->tx_skbuff_dma[entry].len = len;
4710 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4711 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4712 
4713 		/* Prepare the descriptor and set the own bit too */
4714 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4715 				priv->mode, 1, last_segment, skb->len);
4716 	}
4717 
4718 	/* Only the last descriptor gets to point to the skb. */
4719 	tx_q->tx_skbuff[entry] = skb;
4720 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4721 
4722 	/* According to the coalesce parameter the IC bit for the latest
4723 	 * segment is reset and the timer re-started to clean the tx status.
4724 	 * This approach takes care about the fragments: desc is the first
4725 	 * element in case of no SG.
4726 	 */
4727 	tx_packets = (entry + 1) - first_tx;
4728 	tx_q->tx_count_frames += tx_packets;
4729 
4730 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4731 		set_ic = true;
4732 	else if (!priv->tx_coal_frames[queue])
4733 		set_ic = false;
4734 	else if (tx_packets > priv->tx_coal_frames[queue])
4735 		set_ic = true;
4736 	else if ((tx_q->tx_count_frames %
4737 		  priv->tx_coal_frames[queue]) < tx_packets)
4738 		set_ic = true;
4739 	else
4740 		set_ic = false;
4741 
4742 	if (set_ic) {
4743 		if (likely(priv->extend_desc))
4744 			desc = &tx_q->dma_etx[entry].basic;
4745 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4746 			desc = &tx_q->dma_entx[entry].basic;
4747 		else
4748 			desc = &tx_q->dma_tx[entry];
4749 
4750 		tx_q->tx_count_frames = 0;
4751 		stmmac_set_tx_ic(priv, desc);
4752 	}
4753 
4754 	/* We've used all descriptors we need for this skb, however,
4755 	 * advance cur_tx so that it references a fresh descriptor.
4756 	 * ndo_start_xmit will fill this descriptor the next time it's
4757 	 * called and stmmac_tx_clean may clean up to this descriptor.
4758 	 */
4759 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4760 	tx_q->cur_tx = entry;
4761 
4762 	if (netif_msg_pktdata(priv)) {
4763 		netdev_dbg(priv->dev,
4764 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4765 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4766 			   entry, first, nfrags);
4767 
4768 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4769 		print_pkt(skb->data, skb->len);
4770 	}
4771 
4772 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4773 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4774 			  __func__);
4775 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4776 	}
4777 
4778 	u64_stats_update_begin(&txq_stats->q_syncp);
4779 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4780 	if (set_ic)
4781 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4782 	u64_stats_update_end(&txq_stats->q_syncp);
4783 
4784 	if (priv->sarc_type)
4785 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4786 
4787 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4788 	 * problems because all the descriptors are actually ready to be
4789 	 * passed to the DMA engine.
4790 	 */
4791 	if (likely(!is_jumbo)) {
4792 		bool last_segment = (nfrags == 0);
4793 
4794 		des = dma_map_single(priv->device, skb->data,
4795 				     nopaged_len, DMA_TO_DEVICE);
4796 		if (dma_mapping_error(priv->device, des))
4797 			goto dma_map_err;
4798 
4799 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4800 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4801 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4802 
4803 		stmmac_set_desc_addr(priv, first, des);
4804 
4805 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4806 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4807 
4808 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4809 			     priv->hwts_tx_en)) {
4810 			/* declare that device is doing timestamping */
4811 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4812 			stmmac_enable_tx_timestamp(priv, first);
4813 		}
4814 
4815 		/* Prepare the first descriptor setting the OWN bit too */
4816 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4817 				csum_insertion, priv->mode, 0, last_segment,
4818 				skb->len);
4819 	}
4820 
4821 	if (tx_q->tbs & STMMAC_TBS_EN) {
4822 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4823 
4824 		tbs_desc = &tx_q->dma_entx[first_entry];
4825 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4826 	}
4827 
4828 	stmmac_set_tx_owner(priv, first);
4829 
4830 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4831 
4832 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4833 	skb_tx_timestamp(skb);
4834 	stmmac_flush_tx_descriptors(priv, queue);
4835 	stmmac_tx_timer_arm(priv, queue);
4836 
4837 	return NETDEV_TX_OK;
4838 
4839 dma_map_err:
4840 	netdev_err(priv->dev, "Tx DMA map failed\n");
4841 max_sdu_err:
4842 	dev_kfree_skb(skb);
4843 	priv->xstats.tx_dropped++;
4844 	return NETDEV_TX_OK;
4845 }
4846 
4847 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4848 {
4849 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4850 	__be16 vlan_proto = veth->h_vlan_proto;
4851 	u16 vlanid;
4852 
4853 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4854 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4855 	    (vlan_proto == htons(ETH_P_8021AD) &&
4856 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4857 		/* pop the vlan tag */
4858 		vlanid = ntohs(veth->h_vlan_TCI);
4859 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4860 		skb_pull(skb, VLAN_HLEN);
4861 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4862 	}
4863 }
4864 
4865 /**
4866  * stmmac_rx_refill - refill used skb preallocated buffers
4867  * @priv: driver private structure
4868  * @queue: RX queue index
4869  * Description : this is to reallocate the skb for the reception process
4870  * that is based on zero-copy.
4871  */
4872 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4873 {
4874 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4875 	int dirty = stmmac_rx_dirty(priv, queue);
4876 	unsigned int entry = rx_q->dirty_rx;
4877 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4878 
4879 	if (priv->dma_cap.host_dma_width <= 32)
4880 		gfp |= GFP_DMA32;
4881 
4882 	while (dirty-- > 0) {
4883 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4884 		struct dma_desc *p;
4885 		bool use_rx_wd;
4886 
4887 		if (priv->extend_desc)
4888 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4889 		else
4890 			p = rx_q->dma_rx + entry;
4891 
4892 		if (!buf->page) {
4893 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4894 			if (!buf->page)
4895 				break;
4896 		}
4897 
4898 		if (priv->sph && !buf->sec_page) {
4899 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4900 			if (!buf->sec_page)
4901 				break;
4902 
4903 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4904 		}
4905 
4906 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4907 
4908 		stmmac_set_desc_addr(priv, p, buf->addr);
4909 		if (priv->sph)
4910 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4911 		else
4912 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4913 		stmmac_refill_desc3(priv, rx_q, p);
4914 
4915 		rx_q->rx_count_frames++;
4916 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4917 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4918 			rx_q->rx_count_frames = 0;
4919 
4920 		use_rx_wd = !priv->rx_coal_frames[queue];
4921 		use_rx_wd |= rx_q->rx_count_frames > 0;
4922 		if (!priv->use_riwt)
4923 			use_rx_wd = false;
4924 
4925 		dma_wmb();
4926 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4927 
4928 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4929 	}
4930 	rx_q->dirty_rx = entry;
4931 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4932 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4933 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4934 }
4935 
4936 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4937 				       struct dma_desc *p,
4938 				       int status, unsigned int len)
4939 {
4940 	unsigned int plen = 0, hlen = 0;
4941 	int coe = priv->hw->rx_csum;
4942 
4943 	/* Not first descriptor, buffer is always zero */
4944 	if (priv->sph && len)
4945 		return 0;
4946 
4947 	/* First descriptor, get split header length */
4948 	stmmac_get_rx_header_len(priv, p, &hlen);
4949 	if (priv->sph && hlen) {
4950 		priv->xstats.rx_split_hdr_pkt_n++;
4951 		return hlen;
4952 	}
4953 
4954 	/* First descriptor, not last descriptor and not split header */
4955 	if (status & rx_not_ls)
4956 		return priv->dma_conf.dma_buf_sz;
4957 
4958 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4959 
4960 	/* First descriptor and last descriptor and not split header */
4961 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4962 }
4963 
4964 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4965 				       struct dma_desc *p,
4966 				       int status, unsigned int len)
4967 {
4968 	int coe = priv->hw->rx_csum;
4969 	unsigned int plen = 0;
4970 
4971 	/* Not split header, buffer is not available */
4972 	if (!priv->sph)
4973 		return 0;
4974 
4975 	/* Not last descriptor */
4976 	if (status & rx_not_ls)
4977 		return priv->dma_conf.dma_buf_sz;
4978 
4979 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4980 
4981 	/* Last descriptor */
4982 	return plen - len;
4983 }
4984 
4985 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4986 				struct xdp_frame *xdpf, bool dma_map)
4987 {
4988 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4989 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4990 	unsigned int entry = tx_q->cur_tx;
4991 	struct dma_desc *tx_desc;
4992 	dma_addr_t dma_addr;
4993 	bool set_ic;
4994 
4995 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4996 		return STMMAC_XDP_CONSUMED;
4997 
4998 	if (priv->est && priv->est->enable &&
4999 	    priv->est->max_sdu[queue] &&
5000 	    xdpf->len > priv->est->max_sdu[queue]) {
5001 		priv->xstats.max_sdu_txq_drop[queue]++;
5002 		return STMMAC_XDP_CONSUMED;
5003 	}
5004 
5005 	if (likely(priv->extend_desc))
5006 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
5007 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
5008 		tx_desc = &tx_q->dma_entx[entry].basic;
5009 	else
5010 		tx_desc = tx_q->dma_tx + entry;
5011 
5012 	if (dma_map) {
5013 		dma_addr = dma_map_single(priv->device, xdpf->data,
5014 					  xdpf->len, DMA_TO_DEVICE);
5015 		if (dma_mapping_error(priv->device, dma_addr))
5016 			return STMMAC_XDP_CONSUMED;
5017 
5018 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
5019 	} else {
5020 		struct page *page = virt_to_page(xdpf->data);
5021 
5022 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
5023 			   xdpf->headroom;
5024 		dma_sync_single_for_device(priv->device, dma_addr,
5025 					   xdpf->len, DMA_BIDIRECTIONAL);
5026 
5027 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
5028 	}
5029 
5030 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
5031 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
5032 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
5033 	tx_q->tx_skbuff_dma[entry].last_segment = true;
5034 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
5035 
5036 	tx_q->xdpf[entry] = xdpf;
5037 
5038 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
5039 
5040 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
5041 			       true, priv->mode, true, true,
5042 			       xdpf->len);
5043 
5044 	tx_q->tx_count_frames++;
5045 
5046 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
5047 		set_ic = true;
5048 	else
5049 		set_ic = false;
5050 
5051 	if (set_ic) {
5052 		tx_q->tx_count_frames = 0;
5053 		stmmac_set_tx_ic(priv, tx_desc);
5054 		u64_stats_update_begin(&txq_stats->q_syncp);
5055 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
5056 		u64_stats_update_end(&txq_stats->q_syncp);
5057 	}
5058 
5059 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
5060 
5061 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5062 	tx_q->cur_tx = entry;
5063 
5064 	return STMMAC_XDP_TX;
5065 }
5066 
5067 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5068 				   int cpu)
5069 {
5070 	int index = cpu;
5071 
5072 	if (unlikely(index < 0))
5073 		index = 0;
5074 
5075 	while (index >= priv->plat->tx_queues_to_use)
5076 		index -= priv->plat->tx_queues_to_use;
5077 
5078 	return index;
5079 }
5080 
5081 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5082 				struct xdp_buff *xdp)
5083 {
5084 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5085 	int cpu = smp_processor_id();
5086 	struct netdev_queue *nq;
5087 	int queue;
5088 	int res;
5089 
5090 	if (unlikely(!xdpf))
5091 		return STMMAC_XDP_CONSUMED;
5092 
5093 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5094 	nq = netdev_get_tx_queue(priv->dev, queue);
5095 
5096 	__netif_tx_lock(nq, cpu);
5097 	/* Avoids TX time-out as we are sharing with slow path */
5098 	txq_trans_cond_update(nq);
5099 
5100 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5101 	if (res == STMMAC_XDP_TX)
5102 		stmmac_flush_tx_descriptors(priv, queue);
5103 
5104 	__netif_tx_unlock(nq);
5105 
5106 	return res;
5107 }
5108 
5109 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5110 				 struct bpf_prog *prog,
5111 				 struct xdp_buff *xdp)
5112 {
5113 	u32 act;
5114 	int res;
5115 
5116 	act = bpf_prog_run_xdp(prog, xdp);
5117 	switch (act) {
5118 	case XDP_PASS:
5119 		res = STMMAC_XDP_PASS;
5120 		break;
5121 	case XDP_TX:
5122 		res = stmmac_xdp_xmit_back(priv, xdp);
5123 		break;
5124 	case XDP_REDIRECT:
5125 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5126 			res = STMMAC_XDP_CONSUMED;
5127 		else
5128 			res = STMMAC_XDP_REDIRECT;
5129 		break;
5130 	default:
5131 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5132 		fallthrough;
5133 	case XDP_ABORTED:
5134 		trace_xdp_exception(priv->dev, prog, act);
5135 		fallthrough;
5136 	case XDP_DROP:
5137 		res = STMMAC_XDP_CONSUMED;
5138 		break;
5139 	}
5140 
5141 	return res;
5142 }
5143 
5144 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5145 					   struct xdp_buff *xdp)
5146 {
5147 	struct bpf_prog *prog;
5148 	int res;
5149 
5150 	prog = READ_ONCE(priv->xdp_prog);
5151 	if (!prog) {
5152 		res = STMMAC_XDP_PASS;
5153 		goto out;
5154 	}
5155 
5156 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5157 out:
5158 	return ERR_PTR(-res);
5159 }
5160 
5161 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5162 				   int xdp_status)
5163 {
5164 	int cpu = smp_processor_id();
5165 	int queue;
5166 
5167 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5168 
5169 	if (xdp_status & STMMAC_XDP_TX)
5170 		stmmac_tx_timer_arm(priv, queue);
5171 
5172 	if (xdp_status & STMMAC_XDP_REDIRECT)
5173 		xdp_do_flush();
5174 }
5175 
5176 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5177 					       struct xdp_buff *xdp)
5178 {
5179 	unsigned int metasize = xdp->data - xdp->data_meta;
5180 	unsigned int datasize = xdp->data_end - xdp->data;
5181 	struct sk_buff *skb;
5182 
5183 	skb = napi_alloc_skb(&ch->rxtx_napi,
5184 			     xdp->data_end - xdp->data_hard_start);
5185 	if (unlikely(!skb))
5186 		return NULL;
5187 
5188 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5189 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5190 	if (metasize)
5191 		skb_metadata_set(skb, metasize);
5192 
5193 	return skb;
5194 }
5195 
5196 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5197 				   struct dma_desc *p, struct dma_desc *np,
5198 				   struct xdp_buff *xdp)
5199 {
5200 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5201 	struct stmmac_channel *ch = &priv->channel[queue];
5202 	unsigned int len = xdp->data_end - xdp->data;
5203 	enum pkt_hash_types hash_type;
5204 	int coe = priv->hw->rx_csum;
5205 	struct sk_buff *skb;
5206 	u32 hash;
5207 
5208 	skb = stmmac_construct_skb_zc(ch, xdp);
5209 	if (!skb) {
5210 		priv->xstats.rx_dropped++;
5211 		return;
5212 	}
5213 
5214 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5215 	if (priv->hw->hw_vlan_en)
5216 		/* MAC level stripping. */
5217 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5218 	else
5219 		/* Driver level stripping. */
5220 		stmmac_rx_vlan(priv->dev, skb);
5221 	skb->protocol = eth_type_trans(skb, priv->dev);
5222 
5223 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5224 		skb_checksum_none_assert(skb);
5225 	else
5226 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5227 
5228 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5229 		skb_set_hash(skb, hash, hash_type);
5230 
5231 	skb_record_rx_queue(skb, queue);
5232 	napi_gro_receive(&ch->rxtx_napi, skb);
5233 
5234 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5235 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5236 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5237 	u64_stats_update_end(&rxq_stats->napi_syncp);
5238 }
5239 
5240 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5241 {
5242 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5243 	unsigned int entry = rx_q->dirty_rx;
5244 	struct dma_desc *rx_desc = NULL;
5245 	bool ret = true;
5246 
5247 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5248 
5249 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5250 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5251 		dma_addr_t dma_addr;
5252 		bool use_rx_wd;
5253 
5254 		if (!buf->xdp) {
5255 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5256 			if (!buf->xdp) {
5257 				ret = false;
5258 				break;
5259 			}
5260 		}
5261 
5262 		if (priv->extend_desc)
5263 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5264 		else
5265 			rx_desc = rx_q->dma_rx + entry;
5266 
5267 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5268 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5269 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5270 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5271 
5272 		rx_q->rx_count_frames++;
5273 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5274 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5275 			rx_q->rx_count_frames = 0;
5276 
5277 		use_rx_wd = !priv->rx_coal_frames[queue];
5278 		use_rx_wd |= rx_q->rx_count_frames > 0;
5279 		if (!priv->use_riwt)
5280 			use_rx_wd = false;
5281 
5282 		dma_wmb();
5283 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5284 
5285 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5286 	}
5287 
5288 	if (rx_desc) {
5289 		rx_q->dirty_rx = entry;
5290 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5291 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5292 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5293 	}
5294 
5295 	return ret;
5296 }
5297 
5298 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5299 {
5300 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5301 	 * to represent incoming packet, whereas cb field in the same structure
5302 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5303 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5304 	 */
5305 	return (struct stmmac_xdp_buff *)xdp;
5306 }
5307 
5308 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5309 {
5310 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5311 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5312 	unsigned int count = 0, error = 0, len = 0;
5313 	int dirty = stmmac_rx_dirty(priv, queue);
5314 	unsigned int next_entry = rx_q->cur_rx;
5315 	u32 rx_errors = 0, rx_dropped = 0;
5316 	unsigned int desc_size;
5317 	struct bpf_prog *prog;
5318 	bool failure = false;
5319 	int xdp_status = 0;
5320 	int status = 0;
5321 
5322 	if (netif_msg_rx_status(priv)) {
5323 		void *rx_head;
5324 
5325 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5326 		if (priv->extend_desc) {
5327 			rx_head = (void *)rx_q->dma_erx;
5328 			desc_size = sizeof(struct dma_extended_desc);
5329 		} else {
5330 			rx_head = (void *)rx_q->dma_rx;
5331 			desc_size = sizeof(struct dma_desc);
5332 		}
5333 
5334 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5335 				    rx_q->dma_rx_phy, desc_size);
5336 	}
5337 	while (count < limit) {
5338 		struct stmmac_rx_buffer *buf;
5339 		struct stmmac_xdp_buff *ctx;
5340 		unsigned int buf1_len = 0;
5341 		struct dma_desc *np, *p;
5342 		int entry;
5343 		int res;
5344 
5345 		if (!count && rx_q->state_saved) {
5346 			error = rx_q->state.error;
5347 			len = rx_q->state.len;
5348 		} else {
5349 			rx_q->state_saved = false;
5350 			error = 0;
5351 			len = 0;
5352 		}
5353 
5354 		if (count >= limit)
5355 			break;
5356 
5357 read_again:
5358 		buf1_len = 0;
5359 		entry = next_entry;
5360 		buf = &rx_q->buf_pool[entry];
5361 
5362 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5363 			failure = failure ||
5364 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5365 			dirty = 0;
5366 		}
5367 
5368 		if (priv->extend_desc)
5369 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5370 		else
5371 			p = rx_q->dma_rx + entry;
5372 
5373 		/* read the status of the incoming frame */
5374 		status = stmmac_rx_status(priv, &priv->xstats, p);
5375 		/* check if managed by the DMA otherwise go ahead */
5376 		if (unlikely(status & dma_own))
5377 			break;
5378 
5379 		/* Prefetch the next RX descriptor */
5380 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5381 						priv->dma_conf.dma_rx_size);
5382 		next_entry = rx_q->cur_rx;
5383 
5384 		if (priv->extend_desc)
5385 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5386 		else
5387 			np = rx_q->dma_rx + next_entry;
5388 
5389 		prefetch(np);
5390 
5391 		/* Ensure a valid XSK buffer before proceed */
5392 		if (!buf->xdp)
5393 			break;
5394 
5395 		if (priv->extend_desc)
5396 			stmmac_rx_extended_status(priv, &priv->xstats,
5397 						  rx_q->dma_erx + entry);
5398 		if (unlikely(status == discard_frame)) {
5399 			xsk_buff_free(buf->xdp);
5400 			buf->xdp = NULL;
5401 			dirty++;
5402 			error = 1;
5403 			if (!priv->hwts_rx_en)
5404 				rx_errors++;
5405 		}
5406 
5407 		if (unlikely(error && (status & rx_not_ls)))
5408 			goto read_again;
5409 		if (unlikely(error)) {
5410 			count++;
5411 			continue;
5412 		}
5413 
5414 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5415 		if (likely(status & rx_not_ls)) {
5416 			xsk_buff_free(buf->xdp);
5417 			buf->xdp = NULL;
5418 			dirty++;
5419 			count++;
5420 			goto read_again;
5421 		}
5422 
5423 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5424 		ctx->priv = priv;
5425 		ctx->desc = p;
5426 		ctx->ndesc = np;
5427 
5428 		/* XDP ZC Frame only support primary buffers for now */
5429 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5430 		len += buf1_len;
5431 
5432 		/* ACS is disabled; strip manually. */
5433 		if (likely(!(status & rx_not_ls))) {
5434 			buf1_len -= ETH_FCS_LEN;
5435 			len -= ETH_FCS_LEN;
5436 		}
5437 
5438 		/* RX buffer is good and fit into a XSK pool buffer */
5439 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5440 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5441 
5442 		prog = READ_ONCE(priv->xdp_prog);
5443 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5444 
5445 		switch (res) {
5446 		case STMMAC_XDP_PASS:
5447 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5448 			xsk_buff_free(buf->xdp);
5449 			break;
5450 		case STMMAC_XDP_CONSUMED:
5451 			xsk_buff_free(buf->xdp);
5452 			rx_dropped++;
5453 			break;
5454 		case STMMAC_XDP_TX:
5455 		case STMMAC_XDP_REDIRECT:
5456 			xdp_status |= res;
5457 			break;
5458 		}
5459 
5460 		buf->xdp = NULL;
5461 		dirty++;
5462 		count++;
5463 	}
5464 
5465 	if (status & rx_not_ls) {
5466 		rx_q->state_saved = true;
5467 		rx_q->state.error = error;
5468 		rx_q->state.len = len;
5469 	}
5470 
5471 	stmmac_finalize_xdp_rx(priv, xdp_status);
5472 
5473 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5474 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5475 	u64_stats_update_end(&rxq_stats->napi_syncp);
5476 
5477 	priv->xstats.rx_dropped += rx_dropped;
5478 	priv->xstats.rx_errors += rx_errors;
5479 
5480 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5481 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5482 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5483 		else
5484 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5485 
5486 		return (int)count;
5487 	}
5488 
5489 	return failure ? limit : (int)count;
5490 }
5491 
5492 /**
5493  * stmmac_rx - manage the receive process
5494  * @priv: driver private structure
5495  * @limit: napi bugget
5496  * @queue: RX queue index.
5497  * Description :  this the function called by the napi poll method.
5498  * It gets all the frames inside the ring.
5499  */
5500 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5501 {
5502 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5503 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5504 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5505 	struct stmmac_channel *ch = &priv->channel[queue];
5506 	unsigned int count = 0, error = 0, len = 0;
5507 	int status = 0, coe = priv->hw->rx_csum;
5508 	unsigned int next_entry = rx_q->cur_rx;
5509 	enum dma_data_direction dma_dir;
5510 	unsigned int desc_size;
5511 	struct sk_buff *skb = NULL;
5512 	struct stmmac_xdp_buff ctx;
5513 	int xdp_status = 0;
5514 	int bufsz;
5515 
5516 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5517 	bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5518 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5519 
5520 	if (netif_msg_rx_status(priv)) {
5521 		void *rx_head;
5522 
5523 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5524 		if (priv->extend_desc) {
5525 			rx_head = (void *)rx_q->dma_erx;
5526 			desc_size = sizeof(struct dma_extended_desc);
5527 		} else {
5528 			rx_head = (void *)rx_q->dma_rx;
5529 			desc_size = sizeof(struct dma_desc);
5530 		}
5531 
5532 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5533 				    rx_q->dma_rx_phy, desc_size);
5534 	}
5535 	while (count < limit) {
5536 		unsigned int buf1_len = 0, buf2_len = 0;
5537 		enum pkt_hash_types hash_type;
5538 		struct stmmac_rx_buffer *buf;
5539 		struct dma_desc *np, *p;
5540 		int entry;
5541 		u32 hash;
5542 
5543 		if (!count && rx_q->state_saved) {
5544 			skb = rx_q->state.skb;
5545 			error = rx_q->state.error;
5546 			len = rx_q->state.len;
5547 		} else {
5548 			rx_q->state_saved = false;
5549 			skb = NULL;
5550 			error = 0;
5551 			len = 0;
5552 		}
5553 
5554 read_again:
5555 		if (count >= limit)
5556 			break;
5557 
5558 		buf1_len = 0;
5559 		buf2_len = 0;
5560 		entry = next_entry;
5561 		buf = &rx_q->buf_pool[entry];
5562 
5563 		if (priv->extend_desc)
5564 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5565 		else
5566 			p = rx_q->dma_rx + entry;
5567 
5568 		/* read the status of the incoming frame */
5569 		status = stmmac_rx_status(priv, &priv->xstats, p);
5570 		/* check if managed by the DMA otherwise go ahead */
5571 		if (unlikely(status & dma_own))
5572 			break;
5573 
5574 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5575 						priv->dma_conf.dma_rx_size);
5576 		next_entry = rx_q->cur_rx;
5577 
5578 		if (priv->extend_desc)
5579 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5580 		else
5581 			np = rx_q->dma_rx + next_entry;
5582 
5583 		prefetch(np);
5584 
5585 		if (priv->extend_desc)
5586 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5587 		if (unlikely(status == discard_frame)) {
5588 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5589 			buf->page = NULL;
5590 			error = 1;
5591 			if (!priv->hwts_rx_en)
5592 				rx_errors++;
5593 		}
5594 
5595 		if (unlikely(error && (status & rx_not_ls)))
5596 			goto read_again;
5597 		if (unlikely(error)) {
5598 			dev_kfree_skb(skb);
5599 			skb = NULL;
5600 			count++;
5601 			continue;
5602 		}
5603 
5604 		/* Buffer is good. Go on. */
5605 
5606 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5607 		len += buf1_len;
5608 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5609 		len += buf2_len;
5610 
5611 		/* ACS is disabled; strip manually. */
5612 		if (likely(!(status & rx_not_ls))) {
5613 			if (buf2_len) {
5614 				buf2_len -= ETH_FCS_LEN;
5615 				len -= ETH_FCS_LEN;
5616 			} else if (buf1_len) {
5617 				buf1_len -= ETH_FCS_LEN;
5618 				len -= ETH_FCS_LEN;
5619 			}
5620 		}
5621 
5622 		if (!skb) {
5623 			unsigned int pre_len, sync_len;
5624 
5625 			dma_sync_single_for_cpu(priv->device, buf->addr,
5626 						buf1_len, dma_dir);
5627 			net_prefetch(page_address(buf->page) +
5628 				     buf->page_offset);
5629 
5630 			xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5631 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5632 					 buf->page_offset, buf1_len, true);
5633 
5634 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5635 				  buf->page_offset;
5636 
5637 			ctx.priv = priv;
5638 			ctx.desc = p;
5639 			ctx.ndesc = np;
5640 
5641 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5642 			/* Due xdp_adjust_tail: DMA sync for_device
5643 			 * cover max len CPU touch
5644 			 */
5645 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5646 				   buf->page_offset;
5647 			sync_len = max(sync_len, pre_len);
5648 
5649 			/* For Not XDP_PASS verdict */
5650 			if (IS_ERR(skb)) {
5651 				unsigned int xdp_res = -PTR_ERR(skb);
5652 
5653 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5654 					page_pool_put_page(rx_q->page_pool,
5655 							   virt_to_head_page(ctx.xdp.data),
5656 							   sync_len, true);
5657 					buf->page = NULL;
5658 					rx_dropped++;
5659 
5660 					/* Clear skb as it was set as
5661 					 * status by XDP program.
5662 					 */
5663 					skb = NULL;
5664 
5665 					if (unlikely((status & rx_not_ls)))
5666 						goto read_again;
5667 
5668 					count++;
5669 					continue;
5670 				} else if (xdp_res & (STMMAC_XDP_TX |
5671 						      STMMAC_XDP_REDIRECT)) {
5672 					xdp_status |= xdp_res;
5673 					buf->page = NULL;
5674 					skb = NULL;
5675 					count++;
5676 					continue;
5677 				}
5678 			}
5679 		}
5680 
5681 		if (!skb) {
5682 			unsigned int head_pad_len;
5683 
5684 			/* XDP program may expand or reduce tail */
5685 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5686 
5687 			skb = napi_build_skb(page_address(buf->page),
5688 					     rx_q->napi_skb_frag_size);
5689 			if (!skb) {
5690 				page_pool_recycle_direct(rx_q->page_pool,
5691 							 buf->page);
5692 				rx_dropped++;
5693 				count++;
5694 				goto drain_data;
5695 			}
5696 
5697 			/* XDP program may adjust header */
5698 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5699 			skb_reserve(skb, head_pad_len);
5700 			skb_put(skb, buf1_len);
5701 			skb_mark_for_recycle(skb);
5702 			buf->page = NULL;
5703 		} else if (buf1_len) {
5704 			dma_sync_single_for_cpu(priv->device, buf->addr,
5705 						buf1_len, dma_dir);
5706 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5707 					buf->page, buf->page_offset, buf1_len,
5708 					priv->dma_conf.dma_buf_sz);
5709 			buf->page = NULL;
5710 		}
5711 
5712 		if (buf2_len) {
5713 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5714 						buf2_len, dma_dir);
5715 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5716 					buf->sec_page, 0, buf2_len,
5717 					priv->dma_conf.dma_buf_sz);
5718 			buf->sec_page = NULL;
5719 		}
5720 
5721 drain_data:
5722 		if (likely(status & rx_not_ls))
5723 			goto read_again;
5724 		if (!skb)
5725 			continue;
5726 
5727 		/* Got entire packet into SKB. Finish it. */
5728 
5729 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5730 
5731 		if (priv->hw->hw_vlan_en)
5732 			/* MAC level stripping. */
5733 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5734 		else
5735 			/* Driver level stripping. */
5736 			stmmac_rx_vlan(priv->dev, skb);
5737 
5738 		skb->protocol = eth_type_trans(skb, priv->dev);
5739 
5740 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
5741 		    (status & csum_none))
5742 			skb_checksum_none_assert(skb);
5743 		else
5744 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5745 
5746 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5747 			skb_set_hash(skb, hash, hash_type);
5748 
5749 		skb_record_rx_queue(skb, queue);
5750 		napi_gro_receive(&ch->rx_napi, skb);
5751 		skb = NULL;
5752 
5753 		rx_packets++;
5754 		rx_bytes += len;
5755 		count++;
5756 	}
5757 
5758 	if (status & rx_not_ls || skb) {
5759 		rx_q->state_saved = true;
5760 		rx_q->state.skb = skb;
5761 		rx_q->state.error = error;
5762 		rx_q->state.len = len;
5763 	}
5764 
5765 	stmmac_finalize_xdp_rx(priv, xdp_status);
5766 
5767 	stmmac_rx_refill(priv, queue);
5768 
5769 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5770 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5771 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5772 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5773 	u64_stats_update_end(&rxq_stats->napi_syncp);
5774 
5775 	priv->xstats.rx_dropped += rx_dropped;
5776 	priv->xstats.rx_errors += rx_errors;
5777 
5778 	return count;
5779 }
5780 
5781 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5782 {
5783 	struct stmmac_channel *ch =
5784 		container_of(napi, struct stmmac_channel, rx_napi);
5785 	struct stmmac_priv *priv = ch->priv_data;
5786 	struct stmmac_rxq_stats *rxq_stats;
5787 	u32 chan = ch->index;
5788 	int work_done;
5789 
5790 	rxq_stats = &priv->xstats.rxq_stats[chan];
5791 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5792 	u64_stats_inc(&rxq_stats->napi.poll);
5793 	u64_stats_update_end(&rxq_stats->napi_syncp);
5794 
5795 	work_done = stmmac_rx(priv, budget, chan);
5796 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5797 		unsigned long flags;
5798 
5799 		spin_lock_irqsave(&ch->lock, flags);
5800 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5801 		spin_unlock_irqrestore(&ch->lock, flags);
5802 	}
5803 
5804 	return work_done;
5805 }
5806 
5807 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5808 {
5809 	struct stmmac_channel *ch =
5810 		container_of(napi, struct stmmac_channel, tx_napi);
5811 	struct stmmac_priv *priv = ch->priv_data;
5812 	struct stmmac_txq_stats *txq_stats;
5813 	bool pending_packets = false;
5814 	u32 chan = ch->index;
5815 	int work_done;
5816 
5817 	txq_stats = &priv->xstats.txq_stats[chan];
5818 	u64_stats_update_begin(&txq_stats->napi_syncp);
5819 	u64_stats_inc(&txq_stats->napi.poll);
5820 	u64_stats_update_end(&txq_stats->napi_syncp);
5821 
5822 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5823 	work_done = min(work_done, budget);
5824 
5825 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5826 		unsigned long flags;
5827 
5828 		spin_lock_irqsave(&ch->lock, flags);
5829 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5830 		spin_unlock_irqrestore(&ch->lock, flags);
5831 	}
5832 
5833 	/* TX still have packet to handle, check if we need to arm tx timer */
5834 	if (pending_packets)
5835 		stmmac_tx_timer_arm(priv, chan);
5836 
5837 	return work_done;
5838 }
5839 
5840 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5841 {
5842 	struct stmmac_channel *ch =
5843 		container_of(napi, struct stmmac_channel, rxtx_napi);
5844 	struct stmmac_priv *priv = ch->priv_data;
5845 	bool tx_pending_packets = false;
5846 	int rx_done, tx_done, rxtx_done;
5847 	struct stmmac_rxq_stats *rxq_stats;
5848 	struct stmmac_txq_stats *txq_stats;
5849 	u32 chan = ch->index;
5850 
5851 	rxq_stats = &priv->xstats.rxq_stats[chan];
5852 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5853 	u64_stats_inc(&rxq_stats->napi.poll);
5854 	u64_stats_update_end(&rxq_stats->napi_syncp);
5855 
5856 	txq_stats = &priv->xstats.txq_stats[chan];
5857 	u64_stats_update_begin(&txq_stats->napi_syncp);
5858 	u64_stats_inc(&txq_stats->napi.poll);
5859 	u64_stats_update_end(&txq_stats->napi_syncp);
5860 
5861 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5862 	tx_done = min(tx_done, budget);
5863 
5864 	rx_done = stmmac_rx_zc(priv, budget, chan);
5865 
5866 	rxtx_done = max(tx_done, rx_done);
5867 
5868 	/* If either TX or RX work is not complete, return budget
5869 	 * and keep pooling
5870 	 */
5871 	if (rxtx_done >= budget)
5872 		return budget;
5873 
5874 	/* all work done, exit the polling mode */
5875 	if (napi_complete_done(napi, rxtx_done)) {
5876 		unsigned long flags;
5877 
5878 		spin_lock_irqsave(&ch->lock, flags);
5879 		/* Both RX and TX work done are compelte,
5880 		 * so enable both RX & TX IRQs.
5881 		 */
5882 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5883 		spin_unlock_irqrestore(&ch->lock, flags);
5884 	}
5885 
5886 	/* TX still have packet to handle, check if we need to arm tx timer */
5887 	if (tx_pending_packets)
5888 		stmmac_tx_timer_arm(priv, chan);
5889 
5890 	return min(rxtx_done, budget - 1);
5891 }
5892 
5893 /**
5894  *  stmmac_tx_timeout
5895  *  @dev : Pointer to net device structure
5896  *  @txqueue: the index of the hanging transmit queue
5897  *  Description: this function is called when a packet transmission fails to
5898  *   complete within a reasonable time. The driver will mark the error in the
5899  *   netdev structure and arrange for the device to be reset to a sane state
5900  *   in order to transmit a new packet.
5901  */
5902 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5903 {
5904 	struct stmmac_priv *priv = netdev_priv(dev);
5905 
5906 	stmmac_global_err(priv);
5907 }
5908 
5909 /**
5910  *  stmmac_set_rx_mode - entry point for multicast addressing
5911  *  @dev : pointer to the device structure
5912  *  Description:
5913  *  This function is a driver entry point which gets called by the kernel
5914  *  whenever multicast addresses must be enabled/disabled.
5915  *  Return value:
5916  *  void.
5917  *
5918  *  FIXME: This may need RXC to be running, but it may be called with BH
5919  *  disabled, which means we can't call phylink_rx_clk_stop*().
5920  */
5921 static void stmmac_set_rx_mode(struct net_device *dev)
5922 {
5923 	struct stmmac_priv *priv = netdev_priv(dev);
5924 
5925 	stmmac_set_filter(priv, priv->hw, dev);
5926 }
5927 
5928 /**
5929  *  stmmac_change_mtu - entry point to change MTU size for the device.
5930  *  @dev : device pointer.
5931  *  @new_mtu : the new MTU size for the device.
5932  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5933  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5934  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5935  *  Return value:
5936  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5937  *  file on failure.
5938  */
5939 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5940 {
5941 	struct stmmac_priv *priv = netdev_priv(dev);
5942 	int txfifosz = priv->plat->tx_fifo_size;
5943 	struct stmmac_dma_conf *dma_conf;
5944 	const int mtu = new_mtu;
5945 	int ret;
5946 
5947 	if (txfifosz == 0)
5948 		txfifosz = priv->dma_cap.tx_fifo_size;
5949 
5950 	txfifosz /= priv->plat->tx_queues_to_use;
5951 
5952 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5953 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5954 		return -EINVAL;
5955 	}
5956 
5957 	new_mtu = STMMAC_ALIGN(new_mtu);
5958 
5959 	/* If condition true, FIFO is too small or MTU too large */
5960 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5961 		return -EINVAL;
5962 
5963 	if (netif_running(dev)) {
5964 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5965 		/* Try to allocate the new DMA conf with the new mtu */
5966 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5967 		if (IS_ERR(dma_conf)) {
5968 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5969 				   mtu);
5970 			return PTR_ERR(dma_conf);
5971 		}
5972 
5973 		stmmac_release(dev);
5974 
5975 		ret = __stmmac_open(dev, dma_conf);
5976 		if (ret) {
5977 			free_dma_desc_resources(priv, dma_conf);
5978 			kfree(dma_conf);
5979 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5980 			return ret;
5981 		}
5982 
5983 		kfree(dma_conf);
5984 
5985 		stmmac_set_rx_mode(dev);
5986 	}
5987 
5988 	WRITE_ONCE(dev->mtu, mtu);
5989 	netdev_update_features(dev);
5990 
5991 	return 0;
5992 }
5993 
5994 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5995 					     netdev_features_t features)
5996 {
5997 	struct stmmac_priv *priv = netdev_priv(dev);
5998 
5999 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
6000 		features &= ~NETIF_F_RXCSUM;
6001 
6002 	if (!priv->plat->tx_coe)
6003 		features &= ~NETIF_F_CSUM_MASK;
6004 
6005 	/* Some GMAC devices have a bugged Jumbo frame support that
6006 	 * needs to have the Tx COE disabled for oversized frames
6007 	 * (due to limited buffer sizes). In this case we disable
6008 	 * the TX csum insertion in the TDES and not use SF.
6009 	 */
6010 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
6011 		features &= ~NETIF_F_CSUM_MASK;
6012 
6013 	/* Disable tso if asked by ethtool */
6014 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
6015 		if (features & NETIF_F_TSO)
6016 			priv->tso = true;
6017 		else
6018 			priv->tso = false;
6019 	}
6020 
6021 	return features;
6022 }
6023 
6024 static int stmmac_set_features(struct net_device *netdev,
6025 			       netdev_features_t features)
6026 {
6027 	struct stmmac_priv *priv = netdev_priv(netdev);
6028 
6029 	/* Keep the COE Type in case of csum is supporting */
6030 	if (features & NETIF_F_RXCSUM)
6031 		priv->hw->rx_csum = priv->plat->rx_coe;
6032 	else
6033 		priv->hw->rx_csum = 0;
6034 	/* No check needed because rx_coe has been set before and it will be
6035 	 * fixed in case of issue.
6036 	 */
6037 	stmmac_rx_ipc(priv, priv->hw);
6038 
6039 	if (priv->sph_cap) {
6040 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6041 		u32 chan;
6042 
6043 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
6044 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6045 	}
6046 
6047 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
6048 		priv->hw->hw_vlan_en = true;
6049 	else
6050 		priv->hw->hw_vlan_en = false;
6051 
6052 	phylink_rx_clk_stop_block(priv->phylink);
6053 	stmmac_set_hw_vlan_mode(priv, priv->hw);
6054 	phylink_rx_clk_stop_unblock(priv->phylink);
6055 
6056 	return 0;
6057 }
6058 
6059 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6060 {
6061 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6062 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6063 	u32 queues_count;
6064 	u32 queue;
6065 	bool xmac;
6066 
6067 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6068 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6069 
6070 	if (priv->irq_wake)
6071 		pm_wakeup_event(priv->device, 0);
6072 
6073 	if (priv->dma_cap.estsel)
6074 		stmmac_est_irq_status(priv, priv, priv->dev,
6075 				      &priv->xstats, tx_cnt);
6076 
6077 	if (stmmac_fpe_supported(priv))
6078 		stmmac_fpe_irq_status(priv);
6079 
6080 	/* To handle GMAC own interrupts */
6081 	if ((priv->plat->has_gmac) || xmac) {
6082 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6083 
6084 		if (unlikely(status)) {
6085 			/* For LPI we need to save the tx status */
6086 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6087 				priv->tx_path_in_lpi_mode = true;
6088 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6089 				priv->tx_path_in_lpi_mode = false;
6090 		}
6091 
6092 		for (queue = 0; queue < queues_count; queue++)
6093 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6094 
6095 		/* PCS link status */
6096 		if (priv->hw->pcs &&
6097 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6098 			if (priv->xstats.pcs_link)
6099 				netif_carrier_on(priv->dev);
6100 			else
6101 				netif_carrier_off(priv->dev);
6102 		}
6103 
6104 		stmmac_timestamp_interrupt(priv, priv);
6105 	}
6106 }
6107 
6108 /**
6109  *  stmmac_interrupt - main ISR
6110  *  @irq: interrupt number.
6111  *  @dev_id: to pass the net device pointer.
6112  *  Description: this is the main driver interrupt service routine.
6113  *  It can call:
6114  *  o DMA service routine (to manage incoming frame reception and transmission
6115  *    status)
6116  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6117  *    interrupts.
6118  */
6119 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6120 {
6121 	struct net_device *dev = (struct net_device *)dev_id;
6122 	struct stmmac_priv *priv = netdev_priv(dev);
6123 
6124 	/* Check if adapter is up */
6125 	if (test_bit(STMMAC_DOWN, &priv->state))
6126 		return IRQ_HANDLED;
6127 
6128 	/* Check ASP error if it isn't delivered via an individual IRQ */
6129 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6130 		return IRQ_HANDLED;
6131 
6132 	/* To handle Common interrupts */
6133 	stmmac_common_interrupt(priv);
6134 
6135 	/* To handle DMA interrupts */
6136 	stmmac_dma_interrupt(priv);
6137 
6138 	return IRQ_HANDLED;
6139 }
6140 
6141 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6142 {
6143 	struct net_device *dev = (struct net_device *)dev_id;
6144 	struct stmmac_priv *priv = netdev_priv(dev);
6145 
6146 	/* Check if adapter is up */
6147 	if (test_bit(STMMAC_DOWN, &priv->state))
6148 		return IRQ_HANDLED;
6149 
6150 	/* To handle Common interrupts */
6151 	stmmac_common_interrupt(priv);
6152 
6153 	return IRQ_HANDLED;
6154 }
6155 
6156 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6157 {
6158 	struct net_device *dev = (struct net_device *)dev_id;
6159 	struct stmmac_priv *priv = netdev_priv(dev);
6160 
6161 	/* Check if adapter is up */
6162 	if (test_bit(STMMAC_DOWN, &priv->state))
6163 		return IRQ_HANDLED;
6164 
6165 	/* Check if a fatal error happened */
6166 	stmmac_safety_feat_interrupt(priv);
6167 
6168 	return IRQ_HANDLED;
6169 }
6170 
6171 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6172 {
6173 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6174 	struct stmmac_dma_conf *dma_conf;
6175 	int chan = tx_q->queue_index;
6176 	struct stmmac_priv *priv;
6177 	int status;
6178 
6179 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6180 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6181 
6182 	/* Check if adapter is up */
6183 	if (test_bit(STMMAC_DOWN, &priv->state))
6184 		return IRQ_HANDLED;
6185 
6186 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6187 
6188 	if (unlikely(status & tx_hard_error_bump_tc)) {
6189 		/* Try to bump up the dma threshold on this failure */
6190 		stmmac_bump_dma_threshold(priv, chan);
6191 	} else if (unlikely(status == tx_hard_error)) {
6192 		stmmac_tx_err(priv, chan);
6193 	}
6194 
6195 	return IRQ_HANDLED;
6196 }
6197 
6198 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6199 {
6200 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6201 	struct stmmac_dma_conf *dma_conf;
6202 	int chan = rx_q->queue_index;
6203 	struct stmmac_priv *priv;
6204 
6205 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6206 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6207 
6208 	/* Check if adapter is up */
6209 	if (test_bit(STMMAC_DOWN, &priv->state))
6210 		return IRQ_HANDLED;
6211 
6212 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6213 
6214 	return IRQ_HANDLED;
6215 }
6216 
6217 /**
6218  *  stmmac_ioctl - Entry point for the Ioctl
6219  *  @dev: Device pointer.
6220  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6221  *  a proprietary structure used to pass information to the driver.
6222  *  @cmd: IOCTL command
6223  *  Description:
6224  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6225  */
6226 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6227 {
6228 	struct stmmac_priv *priv = netdev_priv (dev);
6229 	int ret = -EOPNOTSUPP;
6230 
6231 	if (!netif_running(dev))
6232 		return -EINVAL;
6233 
6234 	switch (cmd) {
6235 	case SIOCGMIIPHY:
6236 	case SIOCGMIIREG:
6237 	case SIOCSMIIREG:
6238 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6239 		break;
6240 	default:
6241 		break;
6242 	}
6243 
6244 	return ret;
6245 }
6246 
6247 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6248 				    void *cb_priv)
6249 {
6250 	struct stmmac_priv *priv = cb_priv;
6251 	int ret = -EOPNOTSUPP;
6252 
6253 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6254 		return ret;
6255 
6256 	__stmmac_disable_all_queues(priv);
6257 
6258 	switch (type) {
6259 	case TC_SETUP_CLSU32:
6260 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6261 		break;
6262 	case TC_SETUP_CLSFLOWER:
6263 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6264 		break;
6265 	default:
6266 		break;
6267 	}
6268 
6269 	stmmac_enable_all_queues(priv);
6270 	return ret;
6271 }
6272 
6273 static LIST_HEAD(stmmac_block_cb_list);
6274 
6275 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6276 			   void *type_data)
6277 {
6278 	struct stmmac_priv *priv = netdev_priv(ndev);
6279 
6280 	switch (type) {
6281 	case TC_QUERY_CAPS:
6282 		return stmmac_tc_query_caps(priv, priv, type_data);
6283 	case TC_SETUP_QDISC_MQPRIO:
6284 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6285 	case TC_SETUP_BLOCK:
6286 		return flow_block_cb_setup_simple(type_data,
6287 						  &stmmac_block_cb_list,
6288 						  stmmac_setup_tc_block_cb,
6289 						  priv, priv, true);
6290 	case TC_SETUP_QDISC_CBS:
6291 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6292 	case TC_SETUP_QDISC_TAPRIO:
6293 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6294 	case TC_SETUP_QDISC_ETF:
6295 		return stmmac_tc_setup_etf(priv, priv, type_data);
6296 	default:
6297 		return -EOPNOTSUPP;
6298 	}
6299 }
6300 
6301 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6302 			       struct net_device *sb_dev)
6303 {
6304 	int gso = skb_shinfo(skb)->gso_type;
6305 
6306 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6307 		/*
6308 		 * There is no way to determine the number of TSO/USO
6309 		 * capable Queues. Let's use always the Queue 0
6310 		 * because if TSO/USO is supported then at least this
6311 		 * one will be capable.
6312 		 */
6313 		return 0;
6314 	}
6315 
6316 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6317 }
6318 
6319 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6320 {
6321 	struct stmmac_priv *priv = netdev_priv(ndev);
6322 	int ret = 0;
6323 
6324 	ret = pm_runtime_resume_and_get(priv->device);
6325 	if (ret < 0)
6326 		return ret;
6327 
6328 	ret = eth_mac_addr(ndev, addr);
6329 	if (ret)
6330 		goto set_mac_error;
6331 
6332 	phylink_rx_clk_stop_block(priv->phylink);
6333 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6334 	phylink_rx_clk_stop_unblock(priv->phylink);
6335 
6336 set_mac_error:
6337 	pm_runtime_put(priv->device);
6338 
6339 	return ret;
6340 }
6341 
6342 #ifdef CONFIG_DEBUG_FS
6343 static struct dentry *stmmac_fs_dir;
6344 
6345 static void sysfs_display_ring(void *head, int size, int extend_desc,
6346 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6347 {
6348 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6349 	struct dma_desc *p = (struct dma_desc *)head;
6350 	unsigned int desc_size;
6351 	dma_addr_t dma_addr;
6352 	int i;
6353 
6354 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6355 	for (i = 0; i < size; i++) {
6356 		dma_addr = dma_phy_addr + i * desc_size;
6357 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6358 				i, &dma_addr,
6359 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6360 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6361 		if (extend_desc)
6362 			p = &(++ep)->basic;
6363 		else
6364 			p++;
6365 	}
6366 }
6367 
6368 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6369 {
6370 	struct net_device *dev = seq->private;
6371 	struct stmmac_priv *priv = netdev_priv(dev);
6372 	u32 rx_count = priv->plat->rx_queues_to_use;
6373 	u32 tx_count = priv->plat->tx_queues_to_use;
6374 	u32 queue;
6375 
6376 	if ((dev->flags & IFF_UP) == 0)
6377 		return 0;
6378 
6379 	for (queue = 0; queue < rx_count; queue++) {
6380 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6381 
6382 		seq_printf(seq, "RX Queue %d:\n", queue);
6383 
6384 		if (priv->extend_desc) {
6385 			seq_printf(seq, "Extended descriptor ring:\n");
6386 			sysfs_display_ring((void *)rx_q->dma_erx,
6387 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6388 		} else {
6389 			seq_printf(seq, "Descriptor ring:\n");
6390 			sysfs_display_ring((void *)rx_q->dma_rx,
6391 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6392 		}
6393 	}
6394 
6395 	for (queue = 0; queue < tx_count; queue++) {
6396 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6397 
6398 		seq_printf(seq, "TX Queue %d:\n", queue);
6399 
6400 		if (priv->extend_desc) {
6401 			seq_printf(seq, "Extended descriptor ring:\n");
6402 			sysfs_display_ring((void *)tx_q->dma_etx,
6403 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6404 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6405 			seq_printf(seq, "Descriptor ring:\n");
6406 			sysfs_display_ring((void *)tx_q->dma_tx,
6407 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6408 		}
6409 	}
6410 
6411 	return 0;
6412 }
6413 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6414 
6415 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6416 {
6417 	static const char * const dwxgmac_timestamp_source[] = {
6418 		"None",
6419 		"Internal",
6420 		"External",
6421 		"Both",
6422 	};
6423 	static const char * const dwxgmac_safety_feature_desc[] = {
6424 		"No",
6425 		"All Safety Features with ECC and Parity",
6426 		"All Safety Features without ECC or Parity",
6427 		"All Safety Features with Parity Only",
6428 		"ECC Only",
6429 		"UNDEFINED",
6430 		"UNDEFINED",
6431 		"UNDEFINED",
6432 	};
6433 	struct net_device *dev = seq->private;
6434 	struct stmmac_priv *priv = netdev_priv(dev);
6435 
6436 	if (!priv->hw_cap_support) {
6437 		seq_printf(seq, "DMA HW features not supported\n");
6438 		return 0;
6439 	}
6440 
6441 	seq_printf(seq, "==============================\n");
6442 	seq_printf(seq, "\tDMA HW features\n");
6443 	seq_printf(seq, "==============================\n");
6444 
6445 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6446 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6447 	seq_printf(seq, "\t1000 Mbps: %s\n",
6448 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6449 	seq_printf(seq, "\tHalf duplex: %s\n",
6450 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6451 	if (priv->plat->has_xgmac) {
6452 		seq_printf(seq,
6453 			   "\tNumber of Additional MAC address registers: %d\n",
6454 			   priv->dma_cap.multi_addr);
6455 	} else {
6456 		seq_printf(seq, "\tHash Filter: %s\n",
6457 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6458 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6459 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6460 	}
6461 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6462 		   (priv->dma_cap.pcs) ? "Y" : "N");
6463 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6464 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6465 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6466 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6467 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6468 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6469 	seq_printf(seq, "\tRMON module: %s\n",
6470 		   (priv->dma_cap.rmon) ? "Y" : "N");
6471 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6472 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6473 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6474 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6475 	if (priv->plat->has_xgmac)
6476 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6477 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6478 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6479 		   (priv->dma_cap.eee) ? "Y" : "N");
6480 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6481 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6482 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6483 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6484 	    priv->plat->has_xgmac) {
6485 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6486 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6487 	} else {
6488 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6489 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6490 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6491 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6492 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6493 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6494 	}
6495 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6496 		   priv->dma_cap.number_rx_channel);
6497 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6498 		   priv->dma_cap.number_tx_channel);
6499 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6500 		   priv->dma_cap.number_rx_queues);
6501 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6502 		   priv->dma_cap.number_tx_queues);
6503 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6504 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6505 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6506 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6507 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6508 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6509 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6510 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6511 		   priv->dma_cap.pps_out_num);
6512 	seq_printf(seq, "\tSafety Features: %s\n",
6513 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6514 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6515 		   priv->dma_cap.frpsel ? "Y" : "N");
6516 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6517 		   priv->dma_cap.host_dma_width);
6518 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6519 		   priv->dma_cap.rssen ? "Y" : "N");
6520 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6521 		   priv->dma_cap.vlhash ? "Y" : "N");
6522 	seq_printf(seq, "\tSplit Header: %s\n",
6523 		   priv->dma_cap.sphen ? "Y" : "N");
6524 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6525 		   priv->dma_cap.vlins ? "Y" : "N");
6526 	seq_printf(seq, "\tDouble VLAN: %s\n",
6527 		   priv->dma_cap.dvlan ? "Y" : "N");
6528 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6529 		   priv->dma_cap.l3l4fnum);
6530 	seq_printf(seq, "\tARP Offloading: %s\n",
6531 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6532 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6533 		   priv->dma_cap.estsel ? "Y" : "N");
6534 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6535 		   priv->dma_cap.fpesel ? "Y" : "N");
6536 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6537 		   priv->dma_cap.tbssel ? "Y" : "N");
6538 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6539 		   priv->dma_cap.tbs_ch_num);
6540 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6541 		   priv->dma_cap.sgfsel ? "Y" : "N");
6542 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6543 		   BIT(priv->dma_cap.ttsfd) >> 1);
6544 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6545 		   priv->dma_cap.numtc);
6546 	seq_printf(seq, "\tDCB Feature: %s\n",
6547 		   priv->dma_cap.dcben ? "Y" : "N");
6548 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6549 		   priv->dma_cap.advthword ? "Y" : "N");
6550 	seq_printf(seq, "\tPTP Offload: %s\n",
6551 		   priv->dma_cap.ptoen ? "Y" : "N");
6552 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6553 		   priv->dma_cap.osten ? "Y" : "N");
6554 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6555 		   priv->dma_cap.pfcen ? "Y" : "N");
6556 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6557 		   BIT(priv->dma_cap.frpes) << 6);
6558 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6559 		   BIT(priv->dma_cap.frpbs) << 6);
6560 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6561 		   priv->dma_cap.frppipe_num);
6562 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6563 		   priv->dma_cap.nrvf_num ?
6564 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6565 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6566 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6567 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6568 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6569 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6570 		   priv->dma_cap.cbtisel ? "Y" : "N");
6571 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6572 		   priv->dma_cap.aux_snapshot_n);
6573 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6574 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6575 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6576 		   priv->dma_cap.edma ? "Y" : "N");
6577 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6578 		   priv->dma_cap.ediffc ? "Y" : "N");
6579 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6580 		   priv->dma_cap.vxn ? "Y" : "N");
6581 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6582 		   priv->dma_cap.dbgmem ? "Y" : "N");
6583 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6584 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6585 	return 0;
6586 }
6587 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6588 
6589 /* Use network device events to rename debugfs file entries.
6590  */
6591 static int stmmac_device_event(struct notifier_block *unused,
6592 			       unsigned long event, void *ptr)
6593 {
6594 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6595 	struct stmmac_priv *priv = netdev_priv(dev);
6596 
6597 	if (dev->netdev_ops != &stmmac_netdev_ops)
6598 		goto done;
6599 
6600 	switch (event) {
6601 	case NETDEV_CHANGENAME:
6602 		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6603 		break;
6604 	}
6605 done:
6606 	return NOTIFY_DONE;
6607 }
6608 
6609 static struct notifier_block stmmac_notifier = {
6610 	.notifier_call = stmmac_device_event,
6611 };
6612 
6613 static void stmmac_init_fs(struct net_device *dev)
6614 {
6615 	struct stmmac_priv *priv = netdev_priv(dev);
6616 
6617 	rtnl_lock();
6618 
6619 	/* Create per netdev entries */
6620 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6621 
6622 	/* Entry to report DMA RX/TX rings */
6623 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6624 			    &stmmac_rings_status_fops);
6625 
6626 	/* Entry to report the DMA HW features */
6627 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6628 			    &stmmac_dma_cap_fops);
6629 
6630 	rtnl_unlock();
6631 }
6632 
6633 static void stmmac_exit_fs(struct net_device *dev)
6634 {
6635 	struct stmmac_priv *priv = netdev_priv(dev);
6636 
6637 	debugfs_remove_recursive(priv->dbgfs_dir);
6638 }
6639 #endif /* CONFIG_DEBUG_FS */
6640 
6641 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6642 {
6643 	unsigned char *data = (unsigned char *)&vid_le;
6644 	unsigned char data_byte = 0;
6645 	u32 crc = ~0x0;
6646 	u32 temp = 0;
6647 	int i, bits;
6648 
6649 	bits = get_bitmask_order(VLAN_VID_MASK);
6650 	for (i = 0; i < bits; i++) {
6651 		if ((i % 8) == 0)
6652 			data_byte = data[i / 8];
6653 
6654 		temp = ((crc & 1) ^ data_byte) & 1;
6655 		crc >>= 1;
6656 		data_byte >>= 1;
6657 
6658 		if (temp)
6659 			crc ^= 0xedb88320;
6660 	}
6661 
6662 	return crc;
6663 }
6664 
6665 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6666 {
6667 	u32 crc, hash = 0;
6668 	u16 pmatch = 0;
6669 	int count = 0;
6670 	u16 vid = 0;
6671 
6672 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6673 		__le16 vid_le = cpu_to_le16(vid);
6674 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6675 		hash |= (1 << crc);
6676 		count++;
6677 	}
6678 
6679 	if (!priv->dma_cap.vlhash) {
6680 		if (count > 2) /* VID = 0 always passes filter */
6681 			return -EOPNOTSUPP;
6682 
6683 		pmatch = vid;
6684 		hash = 0;
6685 	}
6686 
6687 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6688 }
6689 
6690 /* FIXME: This may need RXC to be running, but it may be called with BH
6691  * disabled, which means we can't call phylink_rx_clk_stop*().
6692  */
6693 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6694 {
6695 	struct stmmac_priv *priv = netdev_priv(ndev);
6696 	bool is_double = false;
6697 	int ret;
6698 
6699 	ret = pm_runtime_resume_and_get(priv->device);
6700 	if (ret < 0)
6701 		return ret;
6702 
6703 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6704 		is_double = true;
6705 
6706 	set_bit(vid, priv->active_vlans);
6707 	ret = stmmac_vlan_update(priv, is_double);
6708 	if (ret) {
6709 		clear_bit(vid, priv->active_vlans);
6710 		goto err_pm_put;
6711 	}
6712 
6713 	if (priv->hw->num_vlan) {
6714 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6715 		if (ret)
6716 			goto err_pm_put;
6717 	}
6718 err_pm_put:
6719 	pm_runtime_put(priv->device);
6720 
6721 	return ret;
6722 }
6723 
6724 /* FIXME: This may need RXC to be running, but it may be called with BH
6725  * disabled, which means we can't call phylink_rx_clk_stop*().
6726  */
6727 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6728 {
6729 	struct stmmac_priv *priv = netdev_priv(ndev);
6730 	bool is_double = false;
6731 	int ret;
6732 
6733 	ret = pm_runtime_resume_and_get(priv->device);
6734 	if (ret < 0)
6735 		return ret;
6736 
6737 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6738 		is_double = true;
6739 
6740 	clear_bit(vid, priv->active_vlans);
6741 
6742 	if (priv->hw->num_vlan) {
6743 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6744 		if (ret)
6745 			goto del_vlan_error;
6746 	}
6747 
6748 	ret = stmmac_vlan_update(priv, is_double);
6749 
6750 del_vlan_error:
6751 	pm_runtime_put(priv->device);
6752 
6753 	return ret;
6754 }
6755 
6756 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6757 {
6758 	struct stmmac_priv *priv = netdev_priv(dev);
6759 
6760 	switch (bpf->command) {
6761 	case XDP_SETUP_PROG:
6762 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6763 	case XDP_SETUP_XSK_POOL:
6764 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6765 					     bpf->xsk.queue_id);
6766 	default:
6767 		return -EOPNOTSUPP;
6768 	}
6769 }
6770 
6771 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6772 			   struct xdp_frame **frames, u32 flags)
6773 {
6774 	struct stmmac_priv *priv = netdev_priv(dev);
6775 	int cpu = smp_processor_id();
6776 	struct netdev_queue *nq;
6777 	int i, nxmit = 0;
6778 	int queue;
6779 
6780 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6781 		return -ENETDOWN;
6782 
6783 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6784 		return -EINVAL;
6785 
6786 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6787 	nq = netdev_get_tx_queue(priv->dev, queue);
6788 
6789 	__netif_tx_lock(nq, cpu);
6790 	/* Avoids TX time-out as we are sharing with slow path */
6791 	txq_trans_cond_update(nq);
6792 
6793 	for (i = 0; i < num_frames; i++) {
6794 		int res;
6795 
6796 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6797 		if (res == STMMAC_XDP_CONSUMED)
6798 			break;
6799 
6800 		nxmit++;
6801 	}
6802 
6803 	if (flags & XDP_XMIT_FLUSH) {
6804 		stmmac_flush_tx_descriptors(priv, queue);
6805 		stmmac_tx_timer_arm(priv, queue);
6806 	}
6807 
6808 	__netif_tx_unlock(nq);
6809 
6810 	return nxmit;
6811 }
6812 
6813 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6814 {
6815 	struct stmmac_channel *ch = &priv->channel[queue];
6816 	unsigned long flags;
6817 
6818 	spin_lock_irqsave(&ch->lock, flags);
6819 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6820 	spin_unlock_irqrestore(&ch->lock, flags);
6821 
6822 	stmmac_stop_rx_dma(priv, queue);
6823 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6824 }
6825 
6826 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6827 {
6828 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6829 	struct stmmac_channel *ch = &priv->channel[queue];
6830 	unsigned long flags;
6831 	u32 buf_size;
6832 	int ret;
6833 
6834 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6835 	if (ret) {
6836 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6837 		return;
6838 	}
6839 
6840 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6841 	if (ret) {
6842 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6843 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6844 		return;
6845 	}
6846 
6847 	stmmac_reset_rx_queue(priv, queue);
6848 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6849 
6850 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6851 			    rx_q->dma_rx_phy, rx_q->queue_index);
6852 
6853 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6854 			     sizeof(struct dma_desc));
6855 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6856 			       rx_q->rx_tail_addr, rx_q->queue_index);
6857 
6858 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6859 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6860 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6861 				      buf_size,
6862 				      rx_q->queue_index);
6863 	} else {
6864 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6865 				      priv->dma_conf.dma_buf_sz,
6866 				      rx_q->queue_index);
6867 	}
6868 
6869 	stmmac_start_rx_dma(priv, queue);
6870 
6871 	spin_lock_irqsave(&ch->lock, flags);
6872 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6873 	spin_unlock_irqrestore(&ch->lock, flags);
6874 }
6875 
6876 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6877 {
6878 	struct stmmac_channel *ch = &priv->channel[queue];
6879 	unsigned long flags;
6880 
6881 	spin_lock_irqsave(&ch->lock, flags);
6882 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6883 	spin_unlock_irqrestore(&ch->lock, flags);
6884 
6885 	stmmac_stop_tx_dma(priv, queue);
6886 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6887 }
6888 
6889 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6890 {
6891 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6892 	struct stmmac_channel *ch = &priv->channel[queue];
6893 	unsigned long flags;
6894 	int ret;
6895 
6896 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6897 	if (ret) {
6898 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6899 		return;
6900 	}
6901 
6902 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6903 	if (ret) {
6904 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6905 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6906 		return;
6907 	}
6908 
6909 	stmmac_reset_tx_queue(priv, queue);
6910 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6911 
6912 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6913 			    tx_q->dma_tx_phy, tx_q->queue_index);
6914 
6915 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6916 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6917 
6918 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6919 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6920 			       tx_q->tx_tail_addr, tx_q->queue_index);
6921 
6922 	stmmac_start_tx_dma(priv, queue);
6923 
6924 	spin_lock_irqsave(&ch->lock, flags);
6925 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6926 	spin_unlock_irqrestore(&ch->lock, flags);
6927 }
6928 
6929 void stmmac_xdp_release(struct net_device *dev)
6930 {
6931 	struct stmmac_priv *priv = netdev_priv(dev);
6932 	u32 chan;
6933 
6934 	/* Ensure tx function is not running */
6935 	netif_tx_disable(dev);
6936 
6937 	/* Disable NAPI process */
6938 	stmmac_disable_all_queues(priv);
6939 
6940 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6941 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6942 
6943 	/* Free the IRQ lines */
6944 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6945 
6946 	/* Stop TX/RX DMA channels */
6947 	stmmac_stop_all_dma(priv);
6948 
6949 	/* Release and free the Rx/Tx resources */
6950 	free_dma_desc_resources(priv, &priv->dma_conf);
6951 
6952 	/* Disable the MAC Rx/Tx */
6953 	stmmac_mac_set(priv, priv->ioaddr, false);
6954 
6955 	/* set trans_start so we don't get spurious
6956 	 * watchdogs during reset
6957 	 */
6958 	netif_trans_update(dev);
6959 	netif_carrier_off(dev);
6960 }
6961 
6962 int stmmac_xdp_open(struct net_device *dev)
6963 {
6964 	struct stmmac_priv *priv = netdev_priv(dev);
6965 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6966 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6967 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6968 	struct stmmac_rx_queue *rx_q;
6969 	struct stmmac_tx_queue *tx_q;
6970 	u32 buf_size;
6971 	bool sph_en;
6972 	u32 chan;
6973 	int ret;
6974 
6975 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6976 	if (ret < 0) {
6977 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6978 			   __func__);
6979 		goto dma_desc_error;
6980 	}
6981 
6982 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6983 	if (ret < 0) {
6984 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6985 			   __func__);
6986 		goto init_error;
6987 	}
6988 
6989 	stmmac_reset_queues_param(priv);
6990 
6991 	/* DMA CSR Channel configuration */
6992 	for (chan = 0; chan < dma_csr_ch; chan++) {
6993 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6994 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6995 	}
6996 
6997 	/* Adjust Split header */
6998 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6999 
7000 	/* DMA RX Channel Configuration */
7001 	for (chan = 0; chan < rx_cnt; chan++) {
7002 		rx_q = &priv->dma_conf.rx_queue[chan];
7003 
7004 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7005 				    rx_q->dma_rx_phy, chan);
7006 
7007 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
7008 				     (rx_q->buf_alloc_num *
7009 				      sizeof(struct dma_desc));
7010 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
7011 				       rx_q->rx_tail_addr, chan);
7012 
7013 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
7014 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
7015 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7016 					      buf_size,
7017 					      rx_q->queue_index);
7018 		} else {
7019 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7020 					      priv->dma_conf.dma_buf_sz,
7021 					      rx_q->queue_index);
7022 		}
7023 
7024 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
7025 	}
7026 
7027 	/* DMA TX Channel Configuration */
7028 	for (chan = 0; chan < tx_cnt; chan++) {
7029 		tx_q = &priv->dma_conf.tx_queue[chan];
7030 
7031 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7032 				    tx_q->dma_tx_phy, chan);
7033 
7034 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7035 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7036 				       tx_q->tx_tail_addr, chan);
7037 
7038 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7039 	}
7040 
7041 	/* Enable the MAC Rx/Tx */
7042 	stmmac_mac_set(priv, priv->ioaddr, true);
7043 
7044 	/* Start Rx & Tx DMA Channels */
7045 	stmmac_start_all_dma(priv);
7046 
7047 	ret = stmmac_request_irq(dev);
7048 	if (ret)
7049 		goto irq_error;
7050 
7051 	/* Enable NAPI process*/
7052 	stmmac_enable_all_queues(priv);
7053 	netif_carrier_on(dev);
7054 	netif_tx_start_all_queues(dev);
7055 	stmmac_enable_all_dma_irq(priv);
7056 
7057 	return 0;
7058 
7059 irq_error:
7060 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7061 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7062 
7063 	stmmac_hw_teardown(dev);
7064 init_error:
7065 	free_dma_desc_resources(priv, &priv->dma_conf);
7066 dma_desc_error:
7067 	return ret;
7068 }
7069 
7070 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7071 {
7072 	struct stmmac_priv *priv = netdev_priv(dev);
7073 	struct stmmac_rx_queue *rx_q;
7074 	struct stmmac_tx_queue *tx_q;
7075 	struct stmmac_channel *ch;
7076 
7077 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7078 	    !netif_carrier_ok(priv->dev))
7079 		return -ENETDOWN;
7080 
7081 	if (!stmmac_xdp_is_enabled(priv))
7082 		return -EINVAL;
7083 
7084 	if (queue >= priv->plat->rx_queues_to_use ||
7085 	    queue >= priv->plat->tx_queues_to_use)
7086 		return -EINVAL;
7087 
7088 	rx_q = &priv->dma_conf.rx_queue[queue];
7089 	tx_q = &priv->dma_conf.tx_queue[queue];
7090 	ch = &priv->channel[queue];
7091 
7092 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7093 		return -EINVAL;
7094 
7095 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7096 		/* EQoS does not have per-DMA channel SW interrupt,
7097 		 * so we schedule RX Napi straight-away.
7098 		 */
7099 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7100 			__napi_schedule(&ch->rxtx_napi);
7101 	}
7102 
7103 	return 0;
7104 }
7105 
7106 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7107 {
7108 	struct stmmac_priv *priv = netdev_priv(dev);
7109 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7110 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7111 	unsigned int start;
7112 	int q;
7113 
7114 	for (q = 0; q < tx_cnt; q++) {
7115 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7116 		u64 tx_packets;
7117 		u64 tx_bytes;
7118 
7119 		do {
7120 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7121 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7122 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7123 		do {
7124 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7125 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7126 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7127 
7128 		stats->tx_packets += tx_packets;
7129 		stats->tx_bytes += tx_bytes;
7130 	}
7131 
7132 	for (q = 0; q < rx_cnt; q++) {
7133 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7134 		u64 rx_packets;
7135 		u64 rx_bytes;
7136 
7137 		do {
7138 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7139 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7140 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7141 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7142 
7143 		stats->rx_packets += rx_packets;
7144 		stats->rx_bytes += rx_bytes;
7145 	}
7146 
7147 	stats->rx_dropped = priv->xstats.rx_dropped;
7148 	stats->rx_errors = priv->xstats.rx_errors;
7149 	stats->tx_dropped = priv->xstats.tx_dropped;
7150 	stats->tx_errors = priv->xstats.tx_errors;
7151 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7152 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7153 	stats->rx_length_errors = priv->xstats.rx_length;
7154 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7155 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7156 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7157 }
7158 
7159 static const struct net_device_ops stmmac_netdev_ops = {
7160 	.ndo_open = stmmac_open,
7161 	.ndo_start_xmit = stmmac_xmit,
7162 	.ndo_stop = stmmac_release,
7163 	.ndo_change_mtu = stmmac_change_mtu,
7164 	.ndo_fix_features = stmmac_fix_features,
7165 	.ndo_set_features = stmmac_set_features,
7166 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7167 	.ndo_tx_timeout = stmmac_tx_timeout,
7168 	.ndo_eth_ioctl = stmmac_ioctl,
7169 	.ndo_get_stats64 = stmmac_get_stats64,
7170 	.ndo_setup_tc = stmmac_setup_tc,
7171 	.ndo_select_queue = stmmac_select_queue,
7172 	.ndo_set_mac_address = stmmac_set_mac_address,
7173 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7174 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7175 	.ndo_bpf = stmmac_bpf,
7176 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7177 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7178 	.ndo_hwtstamp_get = stmmac_hwtstamp_get,
7179 	.ndo_hwtstamp_set = stmmac_hwtstamp_set,
7180 };
7181 
7182 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7183 {
7184 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7185 		return;
7186 	if (test_bit(STMMAC_DOWN, &priv->state))
7187 		return;
7188 
7189 	netdev_err(priv->dev, "Reset adapter.\n");
7190 
7191 	rtnl_lock();
7192 	netif_trans_update(priv->dev);
7193 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7194 		usleep_range(1000, 2000);
7195 
7196 	set_bit(STMMAC_DOWN, &priv->state);
7197 	dev_close(priv->dev);
7198 	dev_open(priv->dev, NULL);
7199 	clear_bit(STMMAC_DOWN, &priv->state);
7200 	clear_bit(STMMAC_RESETING, &priv->state);
7201 	rtnl_unlock();
7202 }
7203 
7204 static void stmmac_service_task(struct work_struct *work)
7205 {
7206 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7207 			service_task);
7208 
7209 	stmmac_reset_subtask(priv);
7210 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7211 }
7212 
7213 /**
7214  *  stmmac_hw_init - Init the MAC device
7215  *  @priv: driver private structure
7216  *  Description: this function is to configure the MAC device according to
7217  *  some platform parameters or the HW capability register. It prepares the
7218  *  driver to use either ring or chain modes and to setup either enhanced or
7219  *  normal descriptors.
7220  */
7221 static int stmmac_hw_init(struct stmmac_priv *priv)
7222 {
7223 	int ret;
7224 
7225 	/* dwmac-sun8i only work in chain mode */
7226 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7227 		chain_mode = 1;
7228 	priv->chain_mode = chain_mode;
7229 
7230 	/* Initialize HW Interface */
7231 	ret = stmmac_hwif_init(priv);
7232 	if (ret)
7233 		return ret;
7234 
7235 	/* Get the HW capability (new GMAC newer than 3.50a) */
7236 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7237 	if (priv->hw_cap_support) {
7238 		dev_info(priv->device, "DMA HW capability register supported\n");
7239 
7240 		/* We can override some gmac/dma configuration fields: e.g.
7241 		 * enh_desc, tx_coe (e.g. that are passed through the
7242 		 * platform) with the values from the HW capability
7243 		 * register (if supported).
7244 		 */
7245 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7246 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7247 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7248 		if (priv->dma_cap.hash_tb_sz) {
7249 			priv->hw->multicast_filter_bins =
7250 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7251 			priv->hw->mcast_bits_log2 =
7252 					ilog2(priv->hw->multicast_filter_bins);
7253 		}
7254 
7255 		/* TXCOE doesn't work in thresh DMA mode */
7256 		if (priv->plat->force_thresh_dma_mode)
7257 			priv->plat->tx_coe = 0;
7258 		else
7259 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7260 
7261 		/* In case of GMAC4 rx_coe is from HW cap register. */
7262 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7263 
7264 		if (priv->dma_cap.rx_coe_type2)
7265 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7266 		else if (priv->dma_cap.rx_coe_type1)
7267 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7268 
7269 	} else {
7270 		dev_info(priv->device, "No HW DMA feature register supported\n");
7271 	}
7272 
7273 	if (priv->plat->rx_coe) {
7274 		priv->hw->rx_csum = priv->plat->rx_coe;
7275 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7276 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7277 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7278 	}
7279 	if (priv->plat->tx_coe)
7280 		dev_info(priv->device, "TX Checksum insertion supported\n");
7281 
7282 	if (priv->plat->pmt) {
7283 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7284 		device_set_wakeup_capable(priv->device, 1);
7285 		devm_pm_set_wake_irq(priv->device, priv->wol_irq);
7286 	}
7287 
7288 	if (priv->dma_cap.tsoen)
7289 		dev_info(priv->device, "TSO supported\n");
7290 
7291 	if (priv->dma_cap.number_rx_queues &&
7292 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7293 		dev_warn(priv->device,
7294 			 "Number of Rx queues (%u) exceeds dma capability\n",
7295 			 priv->plat->rx_queues_to_use);
7296 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7297 	}
7298 	if (priv->dma_cap.number_tx_queues &&
7299 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7300 		dev_warn(priv->device,
7301 			 "Number of Tx queues (%u) exceeds dma capability\n",
7302 			 priv->plat->tx_queues_to_use);
7303 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7304 	}
7305 
7306 	if (priv->dma_cap.rx_fifo_size &&
7307 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7308 		dev_warn(priv->device,
7309 			 "Rx FIFO size (%u) exceeds dma capability\n",
7310 			 priv->plat->rx_fifo_size);
7311 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7312 	}
7313 	if (priv->dma_cap.tx_fifo_size &&
7314 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7315 		dev_warn(priv->device,
7316 			 "Tx FIFO size (%u) exceeds dma capability\n",
7317 			 priv->plat->tx_fifo_size);
7318 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7319 	}
7320 
7321 	priv->hw->vlan_fail_q_en =
7322 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7323 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7324 
7325 	/* Run HW quirks, if any */
7326 	if (priv->hwif_quirks) {
7327 		ret = priv->hwif_quirks(priv);
7328 		if (ret)
7329 			return ret;
7330 	}
7331 
7332 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7333 	 * In some case, for example on bugged HW this feature
7334 	 * has to be disable and this can be done by passing the
7335 	 * riwt_off field from the platform.
7336 	 */
7337 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7338 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7339 		priv->use_riwt = 1;
7340 		dev_info(priv->device,
7341 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7342 	}
7343 
7344 	return 0;
7345 }
7346 
7347 static void stmmac_napi_add(struct net_device *dev)
7348 {
7349 	struct stmmac_priv *priv = netdev_priv(dev);
7350 	u32 queue, maxq;
7351 
7352 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7353 
7354 	for (queue = 0; queue < maxq; queue++) {
7355 		struct stmmac_channel *ch = &priv->channel[queue];
7356 
7357 		ch->priv_data = priv;
7358 		ch->index = queue;
7359 		spin_lock_init(&ch->lock);
7360 
7361 		if (queue < priv->plat->rx_queues_to_use) {
7362 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7363 		}
7364 		if (queue < priv->plat->tx_queues_to_use) {
7365 			netif_napi_add_tx(dev, &ch->tx_napi,
7366 					  stmmac_napi_poll_tx);
7367 		}
7368 		if (queue < priv->plat->rx_queues_to_use &&
7369 		    queue < priv->plat->tx_queues_to_use) {
7370 			netif_napi_add(dev, &ch->rxtx_napi,
7371 				       stmmac_napi_poll_rxtx);
7372 		}
7373 	}
7374 }
7375 
7376 static void stmmac_napi_del(struct net_device *dev)
7377 {
7378 	struct stmmac_priv *priv = netdev_priv(dev);
7379 	u32 queue, maxq;
7380 
7381 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7382 
7383 	for (queue = 0; queue < maxq; queue++) {
7384 		struct stmmac_channel *ch = &priv->channel[queue];
7385 
7386 		if (queue < priv->plat->rx_queues_to_use)
7387 			netif_napi_del(&ch->rx_napi);
7388 		if (queue < priv->plat->tx_queues_to_use)
7389 			netif_napi_del(&ch->tx_napi);
7390 		if (queue < priv->plat->rx_queues_to_use &&
7391 		    queue < priv->plat->tx_queues_to_use) {
7392 			netif_napi_del(&ch->rxtx_napi);
7393 		}
7394 	}
7395 }
7396 
7397 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7398 {
7399 	struct stmmac_priv *priv = netdev_priv(dev);
7400 	int ret = 0, i;
7401 
7402 	if (netif_running(dev))
7403 		stmmac_release(dev);
7404 
7405 	stmmac_napi_del(dev);
7406 
7407 	priv->plat->rx_queues_to_use = rx_cnt;
7408 	priv->plat->tx_queues_to_use = tx_cnt;
7409 	if (!netif_is_rxfh_configured(dev))
7410 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7411 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7412 									rx_cnt);
7413 
7414 	stmmac_napi_add(dev);
7415 
7416 	if (netif_running(dev))
7417 		ret = stmmac_open(dev);
7418 
7419 	return ret;
7420 }
7421 
7422 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7423 {
7424 	struct stmmac_priv *priv = netdev_priv(dev);
7425 	int ret = 0;
7426 
7427 	if (netif_running(dev))
7428 		stmmac_release(dev);
7429 
7430 	priv->dma_conf.dma_rx_size = rx_size;
7431 	priv->dma_conf.dma_tx_size = tx_size;
7432 
7433 	if (netif_running(dev))
7434 		ret = stmmac_open(dev);
7435 
7436 	return ret;
7437 }
7438 
7439 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7440 {
7441 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7442 	struct dma_desc *desc_contains_ts = ctx->desc;
7443 	struct stmmac_priv *priv = ctx->priv;
7444 	struct dma_desc *ndesc = ctx->ndesc;
7445 	struct dma_desc *desc = ctx->desc;
7446 	u64 ns = 0;
7447 
7448 	if (!priv->hwts_rx_en)
7449 		return -ENODATA;
7450 
7451 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7452 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7453 		desc_contains_ts = ndesc;
7454 
7455 	/* Check if timestamp is available */
7456 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7457 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7458 		ns -= priv->plat->cdc_error_adj;
7459 		*timestamp = ns_to_ktime(ns);
7460 		return 0;
7461 	}
7462 
7463 	return -ENODATA;
7464 }
7465 
7466 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7467 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7468 };
7469 
7470 /**
7471  * stmmac_dvr_probe
7472  * @device: device pointer
7473  * @plat_dat: platform data pointer
7474  * @res: stmmac resource pointer
7475  * Description: this is the main probe function used to
7476  * call the alloc_etherdev, allocate the priv structure.
7477  * Return:
7478  * returns 0 on success, otherwise errno.
7479  */
7480 int stmmac_dvr_probe(struct device *device,
7481 		     struct plat_stmmacenet_data *plat_dat,
7482 		     struct stmmac_resources *res)
7483 {
7484 	struct net_device *ndev = NULL;
7485 	struct stmmac_priv *priv;
7486 	u32 rxq;
7487 	int i, ret = 0;
7488 
7489 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7490 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7491 	if (!ndev)
7492 		return -ENOMEM;
7493 
7494 	SET_NETDEV_DEV(ndev, device);
7495 
7496 	priv = netdev_priv(ndev);
7497 	priv->device = device;
7498 	priv->dev = ndev;
7499 
7500 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7501 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7502 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7503 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7504 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7505 	}
7506 
7507 	priv->xstats.pcpu_stats =
7508 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7509 	if (!priv->xstats.pcpu_stats)
7510 		return -ENOMEM;
7511 
7512 	stmmac_set_ethtool_ops(ndev);
7513 	priv->pause_time = pause;
7514 	priv->plat = plat_dat;
7515 	priv->ioaddr = res->addr;
7516 	priv->dev->base_addr = (unsigned long)res->addr;
7517 	priv->plat->dma_cfg->multi_msi_en =
7518 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7519 
7520 	priv->dev->irq = res->irq;
7521 	priv->wol_irq = res->wol_irq;
7522 	priv->lpi_irq = res->lpi_irq;
7523 	priv->sfty_irq = res->sfty_irq;
7524 	priv->sfty_ce_irq = res->sfty_ce_irq;
7525 	priv->sfty_ue_irq = res->sfty_ue_irq;
7526 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7527 		priv->rx_irq[i] = res->rx_irq[i];
7528 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7529 		priv->tx_irq[i] = res->tx_irq[i];
7530 
7531 	if (!is_zero_ether_addr(res->mac))
7532 		eth_hw_addr_set(priv->dev, res->mac);
7533 
7534 	dev_set_drvdata(device, priv->dev);
7535 
7536 	/* Verify driver arguments */
7537 	stmmac_verify_args();
7538 
7539 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7540 	if (!priv->af_xdp_zc_qps)
7541 		return -ENOMEM;
7542 
7543 	/* Allocate workqueue */
7544 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7545 	if (!priv->wq) {
7546 		dev_err(priv->device, "failed to create workqueue\n");
7547 		ret = -ENOMEM;
7548 		goto error_wq_init;
7549 	}
7550 
7551 	INIT_WORK(&priv->service_task, stmmac_service_task);
7552 
7553 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7554 
7555 	/* Override with kernel parameters if supplied XXX CRS XXX
7556 	 * this needs to have multiple instances
7557 	 */
7558 	if ((phyaddr >= 0) && (phyaddr <= 31))
7559 		priv->plat->phy_addr = phyaddr;
7560 
7561 	if (priv->plat->stmmac_rst) {
7562 		ret = reset_control_assert(priv->plat->stmmac_rst);
7563 		reset_control_deassert(priv->plat->stmmac_rst);
7564 		/* Some reset controllers have only reset callback instead of
7565 		 * assert + deassert callbacks pair.
7566 		 */
7567 		if (ret == -ENOTSUPP)
7568 			reset_control_reset(priv->plat->stmmac_rst);
7569 	}
7570 
7571 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7572 	if (ret == -ENOTSUPP)
7573 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7574 			ERR_PTR(ret));
7575 
7576 	/* Wait a bit for the reset to take effect */
7577 	udelay(10);
7578 
7579 	/* Init MAC and get the capabilities */
7580 	ret = stmmac_hw_init(priv);
7581 	if (ret)
7582 		goto error_hw_init;
7583 
7584 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7585 	 */
7586 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7587 		priv->plat->dma_cfg->dche = false;
7588 
7589 	stmmac_check_ether_addr(priv);
7590 
7591 	ndev->netdev_ops = &stmmac_netdev_ops;
7592 
7593 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7594 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7595 
7596 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7597 			    NETIF_F_RXCSUM;
7598 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7599 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7600 
7601 	ret = stmmac_tc_init(priv, priv);
7602 	if (!ret) {
7603 		ndev->hw_features |= NETIF_F_HW_TC;
7604 	}
7605 
7606 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7607 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7608 		if (priv->plat->has_gmac4)
7609 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7610 		priv->tso = true;
7611 		dev_info(priv->device, "TSO feature enabled\n");
7612 	}
7613 
7614 	if (priv->dma_cap.sphen &&
7615 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7616 		ndev->hw_features |= NETIF_F_GRO;
7617 		priv->sph_cap = true;
7618 		priv->sph = priv->sph_cap;
7619 		dev_info(priv->device, "SPH feature enabled\n");
7620 	}
7621 
7622 	/* Ideally our host DMA address width is the same as for the
7623 	 * device. However, it may differ and then we have to use our
7624 	 * host DMA width for allocation and the device DMA width for
7625 	 * register handling.
7626 	 */
7627 	if (priv->plat->host_dma_width)
7628 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7629 	else
7630 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7631 
7632 	if (priv->dma_cap.host_dma_width) {
7633 		ret = dma_set_mask_and_coherent(device,
7634 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7635 		if (!ret) {
7636 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7637 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7638 
7639 			/*
7640 			 * If more than 32 bits can be addressed, make sure to
7641 			 * enable enhanced addressing mode.
7642 			 */
7643 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7644 				priv->plat->dma_cfg->eame = true;
7645 		} else {
7646 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7647 			if (ret) {
7648 				dev_err(priv->device, "Failed to set DMA Mask\n");
7649 				goto error_hw_init;
7650 			}
7651 
7652 			priv->dma_cap.host_dma_width = 32;
7653 		}
7654 	}
7655 
7656 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7657 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7658 #ifdef STMMAC_VLAN_TAG_USED
7659 	/* Both mac100 and gmac support receive VLAN tag detection */
7660 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7661 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
7662 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7663 		priv->hw->hw_vlan_en = true;
7664 	}
7665 	if (priv->dma_cap.vlhash) {
7666 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7667 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7668 	}
7669 	if (priv->dma_cap.vlins) {
7670 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7671 		if (priv->dma_cap.dvlan)
7672 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7673 	}
7674 #endif
7675 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7676 
7677 	priv->xstats.threshold = tc;
7678 
7679 	/* Initialize RSS */
7680 	rxq = priv->plat->rx_queues_to_use;
7681 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7682 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7683 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7684 
7685 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7686 		ndev->features |= NETIF_F_RXHASH;
7687 
7688 	ndev->vlan_features |= ndev->features;
7689 
7690 	/* MTU range: 46 - hw-specific max */
7691 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7692 	if (priv->plat->has_xgmac)
7693 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7694 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7695 		ndev->max_mtu = JUMBO_LEN;
7696 	else
7697 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7698 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7699 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7700 	 */
7701 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7702 	    (priv->plat->maxmtu >= ndev->min_mtu))
7703 		ndev->max_mtu = priv->plat->maxmtu;
7704 	else if (priv->plat->maxmtu < ndev->min_mtu)
7705 		dev_warn(priv->device,
7706 			 "%s: warning: maxmtu having invalid value (%d)\n",
7707 			 __func__, priv->plat->maxmtu);
7708 
7709 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7710 
7711 	/* Setup channels NAPI */
7712 	stmmac_napi_add(ndev);
7713 
7714 	mutex_init(&priv->lock);
7715 
7716 	stmmac_fpe_init(priv);
7717 
7718 	/* If a specific clk_csr value is passed from the platform
7719 	 * this means that the CSR Clock Range selection cannot be
7720 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7721 	 * set the MDC clock dynamically according to the csr actual
7722 	 * clock input.
7723 	 */
7724 	if (priv->plat->clk_csr >= 0)
7725 		priv->clk_csr = priv->plat->clk_csr;
7726 	else
7727 		stmmac_clk_csr_set(priv);
7728 
7729 	stmmac_check_pcs_mode(priv);
7730 
7731 	pm_runtime_get_noresume(device);
7732 	pm_runtime_set_active(device);
7733 	if (!pm_runtime_enabled(device))
7734 		pm_runtime_enable(device);
7735 
7736 	ret = stmmac_mdio_register(ndev);
7737 	if (ret < 0) {
7738 		dev_err_probe(priv->device, ret,
7739 			      "MDIO bus (id: %d) registration failed\n",
7740 			      priv->plat->bus_id);
7741 		goto error_mdio_register;
7742 	}
7743 
7744 	ret = stmmac_pcs_setup(ndev);
7745 	if (ret)
7746 		goto error_pcs_setup;
7747 
7748 	ret = stmmac_phy_setup(priv);
7749 	if (ret) {
7750 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7751 		goto error_phy_setup;
7752 	}
7753 
7754 	ret = register_netdev(ndev);
7755 	if (ret) {
7756 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7757 			__func__, ret);
7758 		goto error_netdev_register;
7759 	}
7760 
7761 #ifdef CONFIG_DEBUG_FS
7762 	stmmac_init_fs(ndev);
7763 #endif
7764 
7765 	if (priv->plat->dump_debug_regs)
7766 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7767 
7768 	/* Let pm_runtime_put() disable the clocks.
7769 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7770 	 */
7771 	pm_runtime_put(device);
7772 
7773 	return ret;
7774 
7775 error_netdev_register:
7776 	phylink_destroy(priv->phylink);
7777 error_phy_setup:
7778 	stmmac_pcs_clean(ndev);
7779 error_pcs_setup:
7780 	stmmac_mdio_unregister(ndev);
7781 error_mdio_register:
7782 	stmmac_napi_del(ndev);
7783 error_hw_init:
7784 	destroy_workqueue(priv->wq);
7785 error_wq_init:
7786 	bitmap_free(priv->af_xdp_zc_qps);
7787 
7788 	return ret;
7789 }
7790 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7791 
7792 /**
7793  * stmmac_dvr_remove
7794  * @dev: device pointer
7795  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7796  * changes the link status, releases the DMA descriptor rings.
7797  */
7798 void stmmac_dvr_remove(struct device *dev)
7799 {
7800 	struct net_device *ndev = dev_get_drvdata(dev);
7801 	struct stmmac_priv *priv = netdev_priv(ndev);
7802 
7803 	netdev_info(priv->dev, "%s: removing driver", __func__);
7804 
7805 	pm_runtime_get_sync(dev);
7806 
7807 	unregister_netdev(ndev);
7808 
7809 #ifdef CONFIG_DEBUG_FS
7810 	stmmac_exit_fs(ndev);
7811 #endif
7812 	phylink_destroy(priv->phylink);
7813 	if (priv->plat->stmmac_rst)
7814 		reset_control_assert(priv->plat->stmmac_rst);
7815 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7816 
7817 	stmmac_pcs_clean(ndev);
7818 	stmmac_mdio_unregister(ndev);
7819 
7820 	destroy_workqueue(priv->wq);
7821 	mutex_destroy(&priv->lock);
7822 	bitmap_free(priv->af_xdp_zc_qps);
7823 
7824 	pm_runtime_disable(dev);
7825 	pm_runtime_put_noidle(dev);
7826 }
7827 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7828 
7829 /**
7830  * stmmac_suspend - suspend callback
7831  * @dev: device pointer
7832  * Description: this is the function to suspend the device and it is called
7833  * by the platform driver to stop the network queue, release the resources,
7834  * program the PMT register (for WoL), clean and release driver resources.
7835  */
7836 int stmmac_suspend(struct device *dev)
7837 {
7838 	struct net_device *ndev = dev_get_drvdata(dev);
7839 	struct stmmac_priv *priv = netdev_priv(ndev);
7840 	u32 chan;
7841 
7842 	if (!ndev || !netif_running(ndev))
7843 		return 0;
7844 
7845 	mutex_lock(&priv->lock);
7846 
7847 	netif_device_detach(ndev);
7848 
7849 	stmmac_disable_all_queues(priv);
7850 
7851 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7852 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7853 
7854 	if (priv->eee_sw_timer_en) {
7855 		priv->tx_path_in_lpi_mode = false;
7856 		timer_delete_sync(&priv->eee_ctrl_timer);
7857 	}
7858 
7859 	/* Stop TX/RX DMA */
7860 	stmmac_stop_all_dma(priv);
7861 
7862 	if (priv->plat->serdes_powerdown)
7863 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7864 
7865 	/* Enable Power down mode by programming the PMT regs */
7866 	if (stmmac_wol_enabled_mac(priv)) {
7867 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7868 		priv->irq_wake = 1;
7869 	} else {
7870 		stmmac_mac_set(priv, priv->ioaddr, false);
7871 		pinctrl_pm_select_sleep_state(priv->device);
7872 	}
7873 
7874 	mutex_unlock(&priv->lock);
7875 
7876 	rtnl_lock();
7877 	if (stmmac_wol_enabled_phy(priv))
7878 		phylink_speed_down(priv->phylink, false);
7879 
7880 	phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv));
7881 	rtnl_unlock();
7882 
7883 	if (stmmac_fpe_supported(priv))
7884 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
7885 
7886 	if (priv->plat->suspend)
7887 		return priv->plat->suspend(dev, priv->plat->bsp_priv);
7888 
7889 	return 0;
7890 }
7891 EXPORT_SYMBOL_GPL(stmmac_suspend);
7892 
7893 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7894 {
7895 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7896 
7897 	rx_q->cur_rx = 0;
7898 	rx_q->dirty_rx = 0;
7899 }
7900 
7901 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7902 {
7903 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7904 
7905 	tx_q->cur_tx = 0;
7906 	tx_q->dirty_tx = 0;
7907 	tx_q->mss = 0;
7908 
7909 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7910 }
7911 
7912 /**
7913  * stmmac_reset_queues_param - reset queue parameters
7914  * @priv: device pointer
7915  */
7916 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7917 {
7918 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7919 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7920 	u32 queue;
7921 
7922 	for (queue = 0; queue < rx_cnt; queue++)
7923 		stmmac_reset_rx_queue(priv, queue);
7924 
7925 	for (queue = 0; queue < tx_cnt; queue++)
7926 		stmmac_reset_tx_queue(priv, queue);
7927 }
7928 
7929 /**
7930  * stmmac_resume - resume callback
7931  * @dev: device pointer
7932  * Description: when resume this function is invoked to setup the DMA and CORE
7933  * in a usable state.
7934  */
7935 int stmmac_resume(struct device *dev)
7936 {
7937 	struct net_device *ndev = dev_get_drvdata(dev);
7938 	struct stmmac_priv *priv = netdev_priv(ndev);
7939 	int ret;
7940 
7941 	if (priv->plat->resume) {
7942 		ret = priv->plat->resume(dev, priv->plat->bsp_priv);
7943 		if (ret)
7944 			return ret;
7945 	}
7946 
7947 	if (!netif_running(ndev))
7948 		return 0;
7949 
7950 	/* Power Down bit, into the PM register, is cleared
7951 	 * automatically as soon as a magic packet or a Wake-up frame
7952 	 * is received. Anyway, it's better to manually clear
7953 	 * this bit because it can generate problems while resuming
7954 	 * from another devices (e.g. serial console).
7955 	 */
7956 	if (stmmac_wol_enabled_mac(priv)) {
7957 		mutex_lock(&priv->lock);
7958 		stmmac_pmt(priv, priv->hw, 0);
7959 		mutex_unlock(&priv->lock);
7960 		priv->irq_wake = 0;
7961 	} else {
7962 		pinctrl_pm_select_default_state(priv->device);
7963 		/* reset the phy so that it's ready */
7964 		if (priv->mii)
7965 			stmmac_mdio_reset(priv->mii);
7966 	}
7967 
7968 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7969 	    priv->plat->serdes_powerup) {
7970 		ret = priv->plat->serdes_powerup(ndev,
7971 						 priv->plat->bsp_priv);
7972 
7973 		if (ret < 0)
7974 			return ret;
7975 	}
7976 
7977 	rtnl_lock();
7978 
7979 	/* Prepare the PHY to resume, ensuring that its clocks which are
7980 	 * necessary for the MAC DMA reset to complete are running
7981 	 */
7982 	phylink_prepare_resume(priv->phylink);
7983 
7984 	mutex_lock(&priv->lock);
7985 
7986 	stmmac_reset_queues_param(priv);
7987 
7988 	stmmac_free_tx_skbufs(priv);
7989 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7990 
7991 	ret = stmmac_hw_setup(ndev, false);
7992 	if (ret < 0) {
7993 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
7994 		mutex_unlock(&priv->lock);
7995 		rtnl_unlock();
7996 		return ret;
7997 	}
7998 
7999 	stmmac_init_coalesce(priv);
8000 	phylink_rx_clk_stop_block(priv->phylink);
8001 	stmmac_set_rx_mode(ndev);
8002 
8003 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8004 	phylink_rx_clk_stop_unblock(priv->phylink);
8005 
8006 	stmmac_enable_all_queues(priv);
8007 	stmmac_enable_all_dma_irq(priv);
8008 
8009 	mutex_unlock(&priv->lock);
8010 
8011 	/* phylink_resume() must be called after the hardware has been
8012 	 * initialised because it may bring the link up immediately in a
8013 	 * workqueue thread, which will race with initialisation.
8014 	 */
8015 	phylink_resume(priv->phylink);
8016 	if (stmmac_wol_enabled_phy(priv))
8017 		phylink_speed_up(priv->phylink);
8018 
8019 	rtnl_unlock();
8020 
8021 	netif_device_attach(ndev);
8022 
8023 	return 0;
8024 }
8025 EXPORT_SYMBOL_GPL(stmmac_resume);
8026 
8027 /* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
8028 DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
8029 EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
8030 
8031 #ifndef MODULE
8032 static int __init stmmac_cmdline_opt(char *str)
8033 {
8034 	char *opt;
8035 
8036 	if (!str || !*str)
8037 		return 1;
8038 	while ((opt = strsep(&str, ",")) != NULL) {
8039 		if (!strncmp(opt, "debug:", 6)) {
8040 			if (kstrtoint(opt + 6, 0, &debug))
8041 				goto err;
8042 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8043 			if (kstrtoint(opt + 8, 0, &phyaddr))
8044 				goto err;
8045 		} else if (!strncmp(opt, "tc:", 3)) {
8046 			if (kstrtoint(opt + 3, 0, &tc))
8047 				goto err;
8048 		} else if (!strncmp(opt, "watchdog:", 9)) {
8049 			if (kstrtoint(opt + 9, 0, &watchdog))
8050 				goto err;
8051 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8052 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8053 				goto err;
8054 		} else if (!strncmp(opt, "pause:", 6)) {
8055 			if (kstrtoint(opt + 6, 0, &pause))
8056 				goto err;
8057 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8058 			if (kstrtoint(opt + 10, 0, &eee_timer))
8059 				goto err;
8060 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8061 			if (kstrtoint(opt + 11, 0, &chain_mode))
8062 				goto err;
8063 		}
8064 	}
8065 	return 1;
8066 
8067 err:
8068 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8069 	return 1;
8070 }
8071 
8072 __setup("stmmaceth=", stmmac_cmdline_opt);
8073 #endif /* MODULE */
8074 
8075 static int __init stmmac_init(void)
8076 {
8077 #ifdef CONFIG_DEBUG_FS
8078 	/* Create debugfs main directory if it doesn't exist yet */
8079 	if (!stmmac_fs_dir)
8080 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8081 	register_netdevice_notifier(&stmmac_notifier);
8082 #endif
8083 
8084 	return 0;
8085 }
8086 
8087 static void __exit stmmac_exit(void)
8088 {
8089 #ifdef CONFIG_DEBUG_FS
8090 	unregister_netdevice_notifier(&stmmac_notifier);
8091 	debugfs_remove_recursive(stmmac_fs_dir);
8092 #endif
8093 }
8094 
8095 module_init(stmmac_init)
8096 module_exit(stmmac_exit)
8097 
8098 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8099 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8100 MODULE_LICENSE("GPL");
8101