xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision a8a6531164e54cea6df4d82f1770451f68945972)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54 
55 /* As long as the interface is active, we keep the timestamping counter enabled
56  * with fine resolution and binary rollover. This avoid non-monotonic behavior
57  * (clock jumps) when changing timestamping settings at runtime.
58  */
59 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 				 PTP_TCR_TSCTRLSSR)
61 
62 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
64 
65 /* Module parameters */
66 #define TX_TIMEO	5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70 
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74 
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78 
79 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
110 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
111 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
112 
113 #define STMMAC_DEFAULT_LPI_TIMER	1000
114 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
115 module_param(eee_timer, int, 0644);
116 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
117 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
118 
119 /* By default the driver will use the ring mode to manage tx and rx descriptors,
120  * but allow user to force to use the chain instead of the ring
121  */
122 static unsigned int chain_mode;
123 module_param(chain_mode, int, 0444);
124 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
125 
126 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
127 /* For MSI interrupts handling */
128 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
129 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
131 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
132 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
133 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
135 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
137 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
138 					  u32 rxmode, u32 chan);
139 
140 #ifdef CONFIG_DEBUG_FS
141 static const struct net_device_ops stmmac_netdev_ops;
142 static void stmmac_init_fs(struct net_device *dev);
143 static void stmmac_exit_fs(struct net_device *dev);
144 #endif
145 
146 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
147 
148 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
149 {
150 	int ret = 0;
151 
152 	if (enabled) {
153 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
154 		if (ret)
155 			return ret;
156 		ret = clk_prepare_enable(priv->plat->pclk);
157 		if (ret) {
158 			clk_disable_unprepare(priv->plat->stmmac_clk);
159 			return ret;
160 		}
161 		if (priv->plat->clks_config) {
162 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
163 			if (ret) {
164 				clk_disable_unprepare(priv->plat->stmmac_clk);
165 				clk_disable_unprepare(priv->plat->pclk);
166 				return ret;
167 			}
168 		}
169 	} else {
170 		clk_disable_unprepare(priv->plat->stmmac_clk);
171 		clk_disable_unprepare(priv->plat->pclk);
172 		if (priv->plat->clks_config)
173 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
174 	}
175 
176 	return ret;
177 }
178 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
179 
180 /**
181  * stmmac_verify_args - verify the driver parameters.
182  * Description: it checks the driver parameters and set a default in case of
183  * errors.
184  */
185 static void stmmac_verify_args(void)
186 {
187 	if (unlikely(watchdog < 0))
188 		watchdog = TX_TIMEO;
189 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
190 		buf_sz = DEFAULT_BUFSIZE;
191 	if (unlikely(flow_ctrl > 1))
192 		flow_ctrl = FLOW_AUTO;
193 	else if (likely(flow_ctrl < 0))
194 		flow_ctrl = FLOW_OFF;
195 	if (unlikely((pause < 0) || (pause > 0xffff)))
196 		pause = PAUSE_TIME;
197 	if (eee_timer < 0)
198 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
199 }
200 
201 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
202 {
203 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
204 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
205 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
206 	u32 queue;
207 
208 	for (queue = 0; queue < maxq; queue++) {
209 		struct stmmac_channel *ch = &priv->channel[queue];
210 
211 		if (stmmac_xdp_is_enabled(priv) &&
212 		    test_bit(queue, priv->af_xdp_zc_qps)) {
213 			napi_disable(&ch->rxtx_napi);
214 			continue;
215 		}
216 
217 		if (queue < rx_queues_cnt)
218 			napi_disable(&ch->rx_napi);
219 		if (queue < tx_queues_cnt)
220 			napi_disable(&ch->tx_napi);
221 	}
222 }
223 
224 /**
225  * stmmac_disable_all_queues - Disable all queues
226  * @priv: driver private structure
227  */
228 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
229 {
230 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
231 	struct stmmac_rx_queue *rx_q;
232 	u32 queue;
233 
234 	/* synchronize_rcu() needed for pending XDP buffers to drain */
235 	for (queue = 0; queue < rx_queues_cnt; queue++) {
236 		rx_q = &priv->dma_conf.rx_queue[queue];
237 		if (rx_q->xsk_pool) {
238 			synchronize_rcu();
239 			break;
240 		}
241 	}
242 
243 	__stmmac_disable_all_queues(priv);
244 }
245 
246 /**
247  * stmmac_enable_all_queues - Enable all queues
248  * @priv: driver private structure
249  */
250 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
251 {
252 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
253 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
254 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
255 	u32 queue;
256 
257 	for (queue = 0; queue < maxq; queue++) {
258 		struct stmmac_channel *ch = &priv->channel[queue];
259 
260 		if (stmmac_xdp_is_enabled(priv) &&
261 		    test_bit(queue, priv->af_xdp_zc_qps)) {
262 			napi_enable(&ch->rxtx_napi);
263 			continue;
264 		}
265 
266 		if (queue < rx_queues_cnt)
267 			napi_enable(&ch->rx_napi);
268 		if (queue < tx_queues_cnt)
269 			napi_enable(&ch->tx_napi);
270 	}
271 }
272 
273 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
274 {
275 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
276 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
277 		queue_work(priv->wq, &priv->service_task);
278 }
279 
280 static void stmmac_global_err(struct stmmac_priv *priv)
281 {
282 	netif_carrier_off(priv->dev);
283 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
284 	stmmac_service_event_schedule(priv);
285 }
286 
287 /**
288  * stmmac_clk_csr_set - dynamically set the MDC clock
289  * @priv: driver private structure
290  * Description: this is to dynamically set the MDC clock according to the csr
291  * clock input.
292  * Note:
293  *	If a specific clk_csr value is passed from the platform
294  *	this means that the CSR Clock Range selection cannot be
295  *	changed at run-time and it is fixed (as reported in the driver
296  *	documentation). Viceversa the driver will try to set the MDC
297  *	clock dynamically according to the actual clock input.
298  */
299 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
300 {
301 	unsigned long clk_rate;
302 
303 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
304 
305 	/* Platform provided default clk_csr would be assumed valid
306 	 * for all other cases except for the below mentioned ones.
307 	 * For values higher than the IEEE 802.3 specified frequency
308 	 * we can not estimate the proper divider as it is not known
309 	 * the frequency of clk_csr_i. So we do not change the default
310 	 * divider.
311 	 */
312 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
313 		if (clk_rate < CSR_F_35M)
314 			priv->clk_csr = STMMAC_CSR_20_35M;
315 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
316 			priv->clk_csr = STMMAC_CSR_35_60M;
317 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
318 			priv->clk_csr = STMMAC_CSR_60_100M;
319 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
320 			priv->clk_csr = STMMAC_CSR_100_150M;
321 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
322 			priv->clk_csr = STMMAC_CSR_150_250M;
323 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
324 			priv->clk_csr = STMMAC_CSR_250_300M;
325 		else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
326 			priv->clk_csr = STMMAC_CSR_300_500M;
327 		else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
328 			priv->clk_csr = STMMAC_CSR_500_800M;
329 	}
330 
331 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
332 		if (clk_rate > 160000000)
333 			priv->clk_csr = 0x03;
334 		else if (clk_rate > 80000000)
335 			priv->clk_csr = 0x02;
336 		else if (clk_rate > 40000000)
337 			priv->clk_csr = 0x01;
338 		else
339 			priv->clk_csr = 0;
340 	}
341 
342 	if (priv->plat->has_xgmac) {
343 		if (clk_rate > 400000000)
344 			priv->clk_csr = 0x5;
345 		else if (clk_rate > 350000000)
346 			priv->clk_csr = 0x4;
347 		else if (clk_rate > 300000000)
348 			priv->clk_csr = 0x3;
349 		else if (clk_rate > 250000000)
350 			priv->clk_csr = 0x2;
351 		else if (clk_rate > 150000000)
352 			priv->clk_csr = 0x1;
353 		else
354 			priv->clk_csr = 0x0;
355 	}
356 }
357 
358 static void print_pkt(unsigned char *buf, int len)
359 {
360 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
361 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
362 }
363 
364 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
365 {
366 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
367 	u32 avail;
368 
369 	if (tx_q->dirty_tx > tx_q->cur_tx)
370 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
371 	else
372 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
373 
374 	return avail;
375 }
376 
377 /**
378  * stmmac_rx_dirty - Get RX queue dirty
379  * @priv: driver private structure
380  * @queue: RX queue index
381  */
382 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
383 {
384 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
385 	u32 dirty;
386 
387 	if (rx_q->dirty_rx <= rx_q->cur_rx)
388 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
389 	else
390 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
391 
392 	return dirty;
393 }
394 
395 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
396 {
397 	int tx_lpi_timer;
398 
399 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
400 	priv->eee_sw_timer_en = en ? 0 : 1;
401 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
402 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
403 }
404 
405 /**
406  * stmmac_enable_eee_mode - check and enter in LPI mode
407  * @priv: driver private structure
408  * Description: this function is to verify and enter in LPI mode in case of
409  * EEE.
410  */
411 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
412 {
413 	u32 tx_cnt = priv->plat->tx_queues_to_use;
414 	u32 queue;
415 
416 	/* check if all TX queues have the work finished */
417 	for (queue = 0; queue < tx_cnt; queue++) {
418 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
419 
420 		if (tx_q->dirty_tx != tx_q->cur_tx)
421 			return -EBUSY; /* still unfinished work */
422 	}
423 
424 	/* Check and enter in LPI mode */
425 	if (!priv->tx_path_in_lpi_mode)
426 		stmmac_set_eee_mode(priv, priv->hw,
427 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
428 	return 0;
429 }
430 
431 /**
432  * stmmac_disable_eee_mode - disable and exit from LPI mode
433  * @priv: driver private structure
434  * Description: this function is to exit and disable EEE in case of
435  * LPI state is true. This is called by the xmit.
436  */
437 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
438 {
439 	if (!priv->eee_sw_timer_en) {
440 		stmmac_lpi_entry_timer_config(priv, 0);
441 		return;
442 	}
443 
444 	stmmac_reset_eee_mode(priv, priv->hw);
445 	del_timer_sync(&priv->eee_ctrl_timer);
446 	priv->tx_path_in_lpi_mode = false;
447 }
448 
449 /**
450  * stmmac_eee_ctrl_timer - EEE TX SW timer.
451  * @t:  timer_list struct containing private info
452  * Description:
453  *  if there is no data transfer and if we are not in LPI state,
454  *  then MAC Transmitter can be moved to LPI state.
455  */
456 static void stmmac_eee_ctrl_timer(struct timer_list *t)
457 {
458 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
459 
460 	if (stmmac_enable_eee_mode(priv))
461 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
462 }
463 
464 /**
465  * stmmac_eee_init - init EEE
466  * @priv: driver private structure
467  * Description:
468  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
469  *  can also manage EEE, this function enable the LPI state and start related
470  *  timer.
471  */
472 bool stmmac_eee_init(struct stmmac_priv *priv)
473 {
474 	int eee_tw_timer = priv->eee_tw_timer;
475 
476 	/* Check if MAC core supports the EEE feature. */
477 	if (!priv->dma_cap.eee)
478 		return false;
479 
480 	mutex_lock(&priv->lock);
481 
482 	/* Check if it needs to be deactivated */
483 	if (!priv->eee_active) {
484 		if (priv->eee_enabled) {
485 			netdev_dbg(priv->dev, "disable EEE\n");
486 			stmmac_lpi_entry_timer_config(priv, 0);
487 			del_timer_sync(&priv->eee_ctrl_timer);
488 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
489 			if (priv->hw->xpcs)
490 				xpcs_config_eee(priv->hw->xpcs,
491 						priv->plat->mult_fact_100ns,
492 						false);
493 		}
494 		mutex_unlock(&priv->lock);
495 		return false;
496 	}
497 
498 	if (priv->eee_active && !priv->eee_enabled) {
499 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
500 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
501 				     eee_tw_timer);
502 		if (priv->hw->xpcs)
503 			xpcs_config_eee(priv->hw->xpcs,
504 					priv->plat->mult_fact_100ns,
505 					true);
506 	}
507 
508 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
509 		del_timer_sync(&priv->eee_ctrl_timer);
510 		priv->tx_path_in_lpi_mode = false;
511 		stmmac_lpi_entry_timer_config(priv, 1);
512 	} else {
513 		stmmac_lpi_entry_timer_config(priv, 0);
514 		mod_timer(&priv->eee_ctrl_timer,
515 			  STMMAC_LPI_T(priv->tx_lpi_timer));
516 	}
517 
518 	mutex_unlock(&priv->lock);
519 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
520 	return true;
521 }
522 
523 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
524  * @priv: driver private structure
525  * @p : descriptor pointer
526  * @skb : the socket buffer
527  * Description :
528  * This function will read timestamp from the descriptor & pass it to stack.
529  * and also perform some sanity checks.
530  */
531 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
532 				   struct dma_desc *p, struct sk_buff *skb)
533 {
534 	struct skb_shared_hwtstamps shhwtstamp;
535 	bool found = false;
536 	u64 ns = 0;
537 
538 	if (!priv->hwts_tx_en)
539 		return;
540 
541 	/* exit if skb doesn't support hw tstamp */
542 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
543 		return;
544 
545 	/* check tx tstamp status */
546 	if (stmmac_get_tx_timestamp_status(priv, p)) {
547 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
548 		found = true;
549 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
550 		found = true;
551 	}
552 
553 	if (found) {
554 		ns -= priv->plat->cdc_error_adj;
555 
556 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
557 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
558 
559 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
560 		/* pass tstamp to stack */
561 		skb_tstamp_tx(skb, &shhwtstamp);
562 	}
563 }
564 
565 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
566  * @priv: driver private structure
567  * @p : descriptor pointer
568  * @np : next descriptor pointer
569  * @skb : the socket buffer
570  * Description :
571  * This function will read received packet's timestamp from the descriptor
572  * and pass it to stack. It also perform some sanity checks.
573  */
574 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
575 				   struct dma_desc *np, struct sk_buff *skb)
576 {
577 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
578 	struct dma_desc *desc = p;
579 	u64 ns = 0;
580 
581 	if (!priv->hwts_rx_en)
582 		return;
583 	/* For GMAC4, the valid timestamp is from CTX next desc. */
584 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
585 		desc = np;
586 
587 	/* Check if timestamp is available */
588 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
589 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
590 
591 		ns -= priv->plat->cdc_error_adj;
592 
593 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
594 		shhwtstamp = skb_hwtstamps(skb);
595 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
596 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
597 	} else  {
598 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
599 	}
600 }
601 
602 /**
603  *  stmmac_hwtstamp_set - control hardware timestamping.
604  *  @dev: device pointer.
605  *  @ifr: An IOCTL specific structure, that can contain a pointer to
606  *  a proprietary structure used to pass information to the driver.
607  *  Description:
608  *  This function configures the MAC to enable/disable both outgoing(TX)
609  *  and incoming(RX) packets time stamping based on user input.
610  *  Return Value:
611  *  0 on success and an appropriate -ve integer on failure.
612  */
613 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
614 {
615 	struct stmmac_priv *priv = netdev_priv(dev);
616 	struct hwtstamp_config config;
617 	u32 ptp_v2 = 0;
618 	u32 tstamp_all = 0;
619 	u32 ptp_over_ipv4_udp = 0;
620 	u32 ptp_over_ipv6_udp = 0;
621 	u32 ptp_over_ethernet = 0;
622 	u32 snap_type_sel = 0;
623 	u32 ts_master_en = 0;
624 	u32 ts_event_en = 0;
625 
626 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
627 		netdev_alert(priv->dev, "No support for HW time stamping\n");
628 		priv->hwts_tx_en = 0;
629 		priv->hwts_rx_en = 0;
630 
631 		return -EOPNOTSUPP;
632 	}
633 
634 	if (copy_from_user(&config, ifr->ifr_data,
635 			   sizeof(config)))
636 		return -EFAULT;
637 
638 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
639 		   __func__, config.flags, config.tx_type, config.rx_filter);
640 
641 	if (config.tx_type != HWTSTAMP_TX_OFF &&
642 	    config.tx_type != HWTSTAMP_TX_ON)
643 		return -ERANGE;
644 
645 	if (priv->adv_ts) {
646 		switch (config.rx_filter) {
647 		case HWTSTAMP_FILTER_NONE:
648 			/* time stamp no incoming packet at all */
649 			config.rx_filter = HWTSTAMP_FILTER_NONE;
650 			break;
651 
652 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
653 			/* PTP v1, UDP, any kind of event packet */
654 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
655 			/* 'xmac' hardware can support Sync, Pdelay_Req and
656 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
657 			 * This leaves Delay_Req timestamps out.
658 			 * Enable all events *and* general purpose message
659 			 * timestamping
660 			 */
661 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
662 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
663 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
664 			break;
665 
666 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
667 			/* PTP v1, UDP, Sync packet */
668 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
669 			/* take time stamp for SYNC messages only */
670 			ts_event_en = PTP_TCR_TSEVNTENA;
671 
672 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
673 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
674 			break;
675 
676 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
677 			/* PTP v1, UDP, Delay_req packet */
678 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
679 			/* take time stamp for Delay_Req messages only */
680 			ts_master_en = PTP_TCR_TSMSTRENA;
681 			ts_event_en = PTP_TCR_TSEVNTENA;
682 
683 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
684 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
685 			break;
686 
687 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
688 			/* PTP v2, UDP, any kind of event packet */
689 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
690 			ptp_v2 = PTP_TCR_TSVER2ENA;
691 			/* take time stamp for all event messages */
692 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
693 
694 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
695 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
696 			break;
697 
698 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
699 			/* PTP v2, UDP, Sync packet */
700 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
701 			ptp_v2 = PTP_TCR_TSVER2ENA;
702 			/* take time stamp for SYNC messages only */
703 			ts_event_en = PTP_TCR_TSEVNTENA;
704 
705 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
706 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
707 			break;
708 
709 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
710 			/* PTP v2, UDP, Delay_req packet */
711 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
712 			ptp_v2 = PTP_TCR_TSVER2ENA;
713 			/* take time stamp for Delay_Req messages only */
714 			ts_master_en = PTP_TCR_TSMSTRENA;
715 			ts_event_en = PTP_TCR_TSEVNTENA;
716 
717 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
718 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
719 			break;
720 
721 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
722 			/* PTP v2/802.AS1 any layer, any kind of event packet */
723 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
724 			ptp_v2 = PTP_TCR_TSVER2ENA;
725 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
726 			if (priv->synopsys_id < DWMAC_CORE_4_10)
727 				ts_event_en = PTP_TCR_TSEVNTENA;
728 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
729 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
730 			ptp_over_ethernet = PTP_TCR_TSIPENA;
731 			break;
732 
733 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
734 			/* PTP v2/802.AS1, any layer, Sync packet */
735 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
736 			ptp_v2 = PTP_TCR_TSVER2ENA;
737 			/* take time stamp for SYNC messages only */
738 			ts_event_en = PTP_TCR_TSEVNTENA;
739 
740 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
741 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
742 			ptp_over_ethernet = PTP_TCR_TSIPENA;
743 			break;
744 
745 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
746 			/* PTP v2/802.AS1, any layer, Delay_req packet */
747 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
748 			ptp_v2 = PTP_TCR_TSVER2ENA;
749 			/* take time stamp for Delay_Req messages only */
750 			ts_master_en = PTP_TCR_TSMSTRENA;
751 			ts_event_en = PTP_TCR_TSEVNTENA;
752 
753 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
754 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
755 			ptp_over_ethernet = PTP_TCR_TSIPENA;
756 			break;
757 
758 		case HWTSTAMP_FILTER_NTP_ALL:
759 		case HWTSTAMP_FILTER_ALL:
760 			/* time stamp any incoming packet */
761 			config.rx_filter = HWTSTAMP_FILTER_ALL;
762 			tstamp_all = PTP_TCR_TSENALL;
763 			break;
764 
765 		default:
766 			return -ERANGE;
767 		}
768 	} else {
769 		switch (config.rx_filter) {
770 		case HWTSTAMP_FILTER_NONE:
771 			config.rx_filter = HWTSTAMP_FILTER_NONE;
772 			break;
773 		default:
774 			/* PTP v1, UDP, any kind of event packet */
775 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
776 			break;
777 		}
778 	}
779 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
780 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
781 
782 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
783 
784 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
785 		priv->systime_flags |= tstamp_all | ptp_v2 |
786 				       ptp_over_ethernet | ptp_over_ipv6_udp |
787 				       ptp_over_ipv4_udp | ts_event_en |
788 				       ts_master_en | snap_type_sel;
789 	}
790 
791 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
792 
793 	memcpy(&priv->tstamp_config, &config, sizeof(config));
794 
795 	return copy_to_user(ifr->ifr_data, &config,
796 			    sizeof(config)) ? -EFAULT : 0;
797 }
798 
799 /**
800  *  stmmac_hwtstamp_get - read hardware timestamping.
801  *  @dev: device pointer.
802  *  @ifr: An IOCTL specific structure, that can contain a pointer to
803  *  a proprietary structure used to pass information to the driver.
804  *  Description:
805  *  This function obtain the current hardware timestamping settings
806  *  as requested.
807  */
808 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
809 {
810 	struct stmmac_priv *priv = netdev_priv(dev);
811 	struct hwtstamp_config *config = &priv->tstamp_config;
812 
813 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
814 		return -EOPNOTSUPP;
815 
816 	return copy_to_user(ifr->ifr_data, config,
817 			    sizeof(*config)) ? -EFAULT : 0;
818 }
819 
820 /**
821  * stmmac_init_tstamp_counter - init hardware timestamping counter
822  * @priv: driver private structure
823  * @systime_flags: timestamping flags
824  * Description:
825  * Initialize hardware counter for packet timestamping.
826  * This is valid as long as the interface is open and not suspended.
827  * Will be rerun after resuming from suspend, case in which the timestamping
828  * flags updated by stmmac_hwtstamp_set() also need to be restored.
829  */
830 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
831 {
832 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
833 	struct timespec64 now;
834 	u32 sec_inc = 0;
835 	u64 temp = 0;
836 
837 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
838 		return -EOPNOTSUPP;
839 
840 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
841 	priv->systime_flags = systime_flags;
842 
843 	/* program Sub Second Increment reg */
844 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
845 					   priv->plat->clk_ptp_rate,
846 					   xmac, &sec_inc);
847 	temp = div_u64(1000000000ULL, sec_inc);
848 
849 	/* Store sub second increment for later use */
850 	priv->sub_second_inc = sec_inc;
851 
852 	/* calculate default added value:
853 	 * formula is :
854 	 * addend = (2^32)/freq_div_ratio;
855 	 * where, freq_div_ratio = 1e9ns/sec_inc
856 	 */
857 	temp = (u64)(temp << 32);
858 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
859 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
860 
861 	/* initialize system time */
862 	ktime_get_real_ts64(&now);
863 
864 	/* lower 32 bits of tv_sec are safe until y2106 */
865 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
866 
867 	return 0;
868 }
869 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
870 
871 /**
872  * stmmac_init_ptp - init PTP
873  * @priv: driver private structure
874  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
875  * This is done by looking at the HW cap. register.
876  * This function also registers the ptp driver.
877  */
878 static int stmmac_init_ptp(struct stmmac_priv *priv)
879 {
880 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
881 	int ret;
882 
883 	if (priv->plat->ptp_clk_freq_config)
884 		priv->plat->ptp_clk_freq_config(priv);
885 
886 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
887 	if (ret)
888 		return ret;
889 
890 	priv->adv_ts = 0;
891 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
892 	if (xmac && priv->dma_cap.atime_stamp)
893 		priv->adv_ts = 1;
894 	/* Dwmac 3.x core with extend_desc can support adv_ts */
895 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
896 		priv->adv_ts = 1;
897 
898 	if (priv->dma_cap.time_stamp)
899 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
900 
901 	if (priv->adv_ts)
902 		netdev_info(priv->dev,
903 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
904 
905 	priv->hwts_tx_en = 0;
906 	priv->hwts_rx_en = 0;
907 
908 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
909 		stmmac_hwtstamp_correct_latency(priv, priv);
910 
911 	return 0;
912 }
913 
914 static void stmmac_release_ptp(struct stmmac_priv *priv)
915 {
916 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
917 	stmmac_ptp_unregister(priv);
918 }
919 
920 /**
921  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
922  *  @priv: driver private structure
923  *  @duplex: duplex passed to the next function
924  *  Description: It is used for configuring the flow control in all queues
925  */
926 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
927 {
928 	u32 tx_cnt = priv->plat->tx_queues_to_use;
929 
930 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
931 			priv->pause, tx_cnt);
932 }
933 
934 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
935 					 phy_interface_t interface)
936 {
937 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
938 
939 	/* Refresh the MAC-specific capabilities */
940 	stmmac_mac_update_caps(priv);
941 
942 	config->mac_capabilities = priv->hw->link.caps;
943 
944 	if (priv->plat->max_speed)
945 		phylink_limit_mac_speed(config, priv->plat->max_speed);
946 
947 	return config->mac_capabilities;
948 }
949 
950 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
951 						 phy_interface_t interface)
952 {
953 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
954 	struct phylink_pcs *pcs;
955 
956 	if (priv->plat->select_pcs) {
957 		pcs = priv->plat->select_pcs(priv, interface);
958 		if (!IS_ERR(pcs))
959 			return pcs;
960 	}
961 
962 	return NULL;
963 }
964 
965 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
966 			      const struct phylink_link_state *state)
967 {
968 	/* Nothing to do, xpcs_config() handles everything */
969 }
970 
971 static void stmmac_mac_link_down(struct phylink_config *config,
972 				 unsigned int mode, phy_interface_t interface)
973 {
974 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
975 
976 	stmmac_mac_set(priv, priv->ioaddr, false);
977 	priv->eee_active = false;
978 	priv->tx_lpi_enabled = false;
979 	priv->eee_enabled = stmmac_eee_init(priv);
980 	stmmac_set_eee_pls(priv, priv->hw, false);
981 
982 	if (stmmac_fpe_supported(priv))
983 		stmmac_fpe_link_state_handle(priv, false);
984 }
985 
986 static void stmmac_mac_link_up(struct phylink_config *config,
987 			       struct phy_device *phy,
988 			       unsigned int mode, phy_interface_t interface,
989 			       int speed, int duplex,
990 			       bool tx_pause, bool rx_pause)
991 {
992 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
993 	u32 old_ctrl, ctrl;
994 
995 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
996 	    priv->plat->serdes_powerup)
997 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
998 
999 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1000 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1001 
1002 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1003 		switch (speed) {
1004 		case SPEED_10000:
1005 			ctrl |= priv->hw->link.xgmii.speed10000;
1006 			break;
1007 		case SPEED_5000:
1008 			ctrl |= priv->hw->link.xgmii.speed5000;
1009 			break;
1010 		case SPEED_2500:
1011 			ctrl |= priv->hw->link.xgmii.speed2500;
1012 			break;
1013 		default:
1014 			return;
1015 		}
1016 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1017 		switch (speed) {
1018 		case SPEED_100000:
1019 			ctrl |= priv->hw->link.xlgmii.speed100000;
1020 			break;
1021 		case SPEED_50000:
1022 			ctrl |= priv->hw->link.xlgmii.speed50000;
1023 			break;
1024 		case SPEED_40000:
1025 			ctrl |= priv->hw->link.xlgmii.speed40000;
1026 			break;
1027 		case SPEED_25000:
1028 			ctrl |= priv->hw->link.xlgmii.speed25000;
1029 			break;
1030 		case SPEED_10000:
1031 			ctrl |= priv->hw->link.xgmii.speed10000;
1032 			break;
1033 		case SPEED_2500:
1034 			ctrl |= priv->hw->link.speed2500;
1035 			break;
1036 		case SPEED_1000:
1037 			ctrl |= priv->hw->link.speed1000;
1038 			break;
1039 		default:
1040 			return;
1041 		}
1042 	} else {
1043 		switch (speed) {
1044 		case SPEED_2500:
1045 			ctrl |= priv->hw->link.speed2500;
1046 			break;
1047 		case SPEED_1000:
1048 			ctrl |= priv->hw->link.speed1000;
1049 			break;
1050 		case SPEED_100:
1051 			ctrl |= priv->hw->link.speed100;
1052 			break;
1053 		case SPEED_10:
1054 			ctrl |= priv->hw->link.speed10;
1055 			break;
1056 		default:
1057 			return;
1058 		}
1059 	}
1060 
1061 	priv->speed = speed;
1062 
1063 	if (priv->plat->fix_mac_speed)
1064 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1065 
1066 	if (!duplex)
1067 		ctrl &= ~priv->hw->link.duplex;
1068 	else
1069 		ctrl |= priv->hw->link.duplex;
1070 
1071 	/* Flow Control operation */
1072 	if (rx_pause && tx_pause)
1073 		priv->flow_ctrl = FLOW_AUTO;
1074 	else if (rx_pause && !tx_pause)
1075 		priv->flow_ctrl = FLOW_RX;
1076 	else if (!rx_pause && tx_pause)
1077 		priv->flow_ctrl = FLOW_TX;
1078 	else
1079 		priv->flow_ctrl = FLOW_OFF;
1080 
1081 	stmmac_mac_flow_ctrl(priv, duplex);
1082 
1083 	if (ctrl != old_ctrl)
1084 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1085 
1086 	stmmac_mac_set(priv, priv->ioaddr, true);
1087 	if (phy && priv->dma_cap.eee) {
1088 		priv->eee_active =
1089 			phy_init_eee(phy, !(priv->plat->flags &
1090 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1091 		priv->eee_enabled = stmmac_eee_init(priv);
1092 		priv->tx_lpi_enabled = priv->eee_enabled;
1093 		stmmac_set_eee_pls(priv, priv->hw, true);
1094 	}
1095 
1096 	if (stmmac_fpe_supported(priv))
1097 		stmmac_fpe_link_state_handle(priv, true);
1098 
1099 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1100 		stmmac_hwtstamp_correct_latency(priv, priv);
1101 }
1102 
1103 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1104 	.mac_get_caps = stmmac_mac_get_caps,
1105 	.mac_select_pcs = stmmac_mac_select_pcs,
1106 	.mac_config = stmmac_mac_config,
1107 	.mac_link_down = stmmac_mac_link_down,
1108 	.mac_link_up = stmmac_mac_link_up,
1109 };
1110 
1111 /**
1112  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1113  * @priv: driver private structure
1114  * Description: this is to verify if the HW supports the PCS.
1115  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1116  * configured for the TBI, RTBI, or SGMII PHY interface.
1117  */
1118 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1119 {
1120 	int interface = priv->plat->mac_interface;
1121 
1122 	if (priv->dma_cap.pcs) {
1123 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1124 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1125 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1127 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1128 			priv->hw->pcs = STMMAC_PCS_RGMII;
1129 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1130 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_SGMII;
1132 		}
1133 	}
1134 }
1135 
1136 /**
1137  * stmmac_init_phy - PHY initialization
1138  * @dev: net device structure
1139  * Description: it initializes the driver's PHY state, and attaches the PHY
1140  * to the mac driver.
1141  *  Return value:
1142  *  0 on success
1143  */
1144 static int stmmac_init_phy(struct net_device *dev)
1145 {
1146 	struct stmmac_priv *priv = netdev_priv(dev);
1147 	struct fwnode_handle *phy_fwnode;
1148 	struct fwnode_handle *fwnode;
1149 	int ret;
1150 
1151 	if (!phylink_expects_phy(priv->phylink))
1152 		return 0;
1153 
1154 	fwnode = priv->plat->port_node;
1155 	if (!fwnode)
1156 		fwnode = dev_fwnode(priv->device);
1157 
1158 	if (fwnode)
1159 		phy_fwnode = fwnode_get_phy_node(fwnode);
1160 	else
1161 		phy_fwnode = NULL;
1162 
1163 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1164 	 * manually parse it
1165 	 */
1166 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1167 		int addr = priv->plat->phy_addr;
1168 		struct phy_device *phydev;
1169 
1170 		if (addr < 0) {
1171 			netdev_err(priv->dev, "no phy found\n");
1172 			return -ENODEV;
1173 		}
1174 
1175 		phydev = mdiobus_get_phy(priv->mii, addr);
1176 		if (!phydev) {
1177 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1178 			return -ENODEV;
1179 		}
1180 
1181 		if (priv->dma_cap.eee)
1182 			phy_support_eee(phydev);
1183 
1184 		ret = phylink_connect_phy(priv->phylink, phydev);
1185 	} else {
1186 		fwnode_handle_put(phy_fwnode);
1187 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 	}
1189 
1190 	if (!priv->plat->pmt) {
1191 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192 
1193 		phylink_ethtool_get_wol(priv->phylink, &wol);
1194 		device_set_wakeup_capable(priv->device, !!wol.supported);
1195 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 	}
1197 
1198 	return ret;
1199 }
1200 
1201 static int stmmac_phy_setup(struct stmmac_priv *priv)
1202 {
1203 	struct stmmac_mdio_bus_data *mdio_bus_data;
1204 	int mode = priv->plat->phy_interface;
1205 	struct fwnode_handle *fwnode;
1206 	struct phylink_pcs *pcs;
1207 	struct phylink *phylink;
1208 
1209 	priv->phylink_config.dev = &priv->dev->dev;
1210 	priv->phylink_config.type = PHYLINK_NETDEV;
1211 	priv->phylink_config.mac_managed_pm = true;
1212 
1213 	/* Stmmac always requires an RX clock for hardware initialization */
1214 	priv->phylink_config.mac_requires_rxc = true;
1215 
1216 	mdio_bus_data = priv->plat->mdio_bus_data;
1217 	if (mdio_bus_data)
1218 		priv->phylink_config.default_an_inband =
1219 			mdio_bus_data->default_an_inband;
1220 
1221 	/* Set the platform/firmware specified interface mode. Note, phylink
1222 	 * deals with the PHY interface mode, not the MAC interface mode.
1223 	 */
1224 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1225 
1226 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1227 	if (priv->hw->xpcs)
1228 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1229 	else
1230 		pcs = priv->hw->phylink_pcs;
1231 
1232 	if (pcs)
1233 		phy_interface_or(priv->phylink_config.supported_interfaces,
1234 				 priv->phylink_config.supported_interfaces,
1235 				 pcs->supported_interfaces);
1236 
1237 	fwnode = priv->plat->port_node;
1238 	if (!fwnode)
1239 		fwnode = dev_fwnode(priv->device);
1240 
1241 	phylink = phylink_create(&priv->phylink_config, fwnode,
1242 				 mode, &stmmac_phylink_mac_ops);
1243 	if (IS_ERR(phylink))
1244 		return PTR_ERR(phylink);
1245 
1246 	priv->phylink = phylink;
1247 	return 0;
1248 }
1249 
1250 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1251 				    struct stmmac_dma_conf *dma_conf)
1252 {
1253 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1254 	unsigned int desc_size;
1255 	void *head_rx;
1256 	u32 queue;
1257 
1258 	/* Display RX rings */
1259 	for (queue = 0; queue < rx_cnt; queue++) {
1260 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1261 
1262 		pr_info("\tRX Queue %u rings\n", queue);
1263 
1264 		if (priv->extend_desc) {
1265 			head_rx = (void *)rx_q->dma_erx;
1266 			desc_size = sizeof(struct dma_extended_desc);
1267 		} else {
1268 			head_rx = (void *)rx_q->dma_rx;
1269 			desc_size = sizeof(struct dma_desc);
1270 		}
1271 
1272 		/* Display RX ring */
1273 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1274 				    rx_q->dma_rx_phy, desc_size);
1275 	}
1276 }
1277 
1278 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1279 				    struct stmmac_dma_conf *dma_conf)
1280 {
1281 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1282 	unsigned int desc_size;
1283 	void *head_tx;
1284 	u32 queue;
1285 
1286 	/* Display TX rings */
1287 	for (queue = 0; queue < tx_cnt; queue++) {
1288 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1289 
1290 		pr_info("\tTX Queue %d rings\n", queue);
1291 
1292 		if (priv->extend_desc) {
1293 			head_tx = (void *)tx_q->dma_etx;
1294 			desc_size = sizeof(struct dma_extended_desc);
1295 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1296 			head_tx = (void *)tx_q->dma_entx;
1297 			desc_size = sizeof(struct dma_edesc);
1298 		} else {
1299 			head_tx = (void *)tx_q->dma_tx;
1300 			desc_size = sizeof(struct dma_desc);
1301 		}
1302 
1303 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1304 				    tx_q->dma_tx_phy, desc_size);
1305 	}
1306 }
1307 
1308 static void stmmac_display_rings(struct stmmac_priv *priv,
1309 				 struct stmmac_dma_conf *dma_conf)
1310 {
1311 	/* Display RX ring */
1312 	stmmac_display_rx_rings(priv, dma_conf);
1313 
1314 	/* Display TX ring */
1315 	stmmac_display_tx_rings(priv, dma_conf);
1316 }
1317 
1318 static int stmmac_set_bfsize(int mtu, int bufsize)
1319 {
1320 	int ret = bufsize;
1321 
1322 	if (mtu >= BUF_SIZE_8KiB)
1323 		ret = BUF_SIZE_16KiB;
1324 	else if (mtu >= BUF_SIZE_4KiB)
1325 		ret = BUF_SIZE_8KiB;
1326 	else if (mtu >= BUF_SIZE_2KiB)
1327 		ret = BUF_SIZE_4KiB;
1328 	else if (mtu > DEFAULT_BUFSIZE)
1329 		ret = BUF_SIZE_2KiB;
1330 	else
1331 		ret = DEFAULT_BUFSIZE;
1332 
1333 	return ret;
1334 }
1335 
1336 /**
1337  * stmmac_clear_rx_descriptors - clear RX descriptors
1338  * @priv: driver private structure
1339  * @dma_conf: structure to take the dma data
1340  * @queue: RX queue index
1341  * Description: this function is called to clear the RX descriptors
1342  * in case of both basic and extended descriptors are used.
1343  */
1344 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1345 					struct stmmac_dma_conf *dma_conf,
1346 					u32 queue)
1347 {
1348 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1349 	int i;
1350 
1351 	/* Clear the RX descriptors */
1352 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1353 		if (priv->extend_desc)
1354 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1355 					priv->use_riwt, priv->mode,
1356 					(i == dma_conf->dma_rx_size - 1),
1357 					dma_conf->dma_buf_sz);
1358 		else
1359 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1360 					priv->use_riwt, priv->mode,
1361 					(i == dma_conf->dma_rx_size - 1),
1362 					dma_conf->dma_buf_sz);
1363 }
1364 
1365 /**
1366  * stmmac_clear_tx_descriptors - clear tx descriptors
1367  * @priv: driver private structure
1368  * @dma_conf: structure to take the dma data
1369  * @queue: TX queue index.
1370  * Description: this function is called to clear the TX descriptors
1371  * in case of both basic and extended descriptors are used.
1372  */
1373 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1374 					struct stmmac_dma_conf *dma_conf,
1375 					u32 queue)
1376 {
1377 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1378 	int i;
1379 
1380 	/* Clear the TX descriptors */
1381 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1382 		int last = (i == (dma_conf->dma_tx_size - 1));
1383 		struct dma_desc *p;
1384 
1385 		if (priv->extend_desc)
1386 			p = &tx_q->dma_etx[i].basic;
1387 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1388 			p = &tx_q->dma_entx[i].basic;
1389 		else
1390 			p = &tx_q->dma_tx[i];
1391 
1392 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1393 	}
1394 }
1395 
1396 /**
1397  * stmmac_clear_descriptors - clear descriptors
1398  * @priv: driver private structure
1399  * @dma_conf: structure to take the dma data
1400  * Description: this function is called to clear the TX and RX descriptors
1401  * in case of both basic and extended descriptors are used.
1402  */
1403 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1404 				     struct stmmac_dma_conf *dma_conf)
1405 {
1406 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1407 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1408 	u32 queue;
1409 
1410 	/* Clear the RX descriptors */
1411 	for (queue = 0; queue < rx_queue_cnt; queue++)
1412 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1413 
1414 	/* Clear the TX descriptors */
1415 	for (queue = 0; queue < tx_queue_cnt; queue++)
1416 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1417 }
1418 
1419 /**
1420  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1421  * @priv: driver private structure
1422  * @dma_conf: structure to take the dma data
1423  * @p: descriptor pointer
1424  * @i: descriptor index
1425  * @flags: gfp flag
1426  * @queue: RX queue index
1427  * Description: this function is called to allocate a receive buffer, perform
1428  * the DMA mapping and init the descriptor.
1429  */
1430 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1431 				  struct stmmac_dma_conf *dma_conf,
1432 				  struct dma_desc *p,
1433 				  int i, gfp_t flags, u32 queue)
1434 {
1435 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1436 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1437 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1438 
1439 	if (priv->dma_cap.host_dma_width <= 32)
1440 		gfp |= GFP_DMA32;
1441 
1442 	if (!buf->page) {
1443 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1444 		if (!buf->page)
1445 			return -ENOMEM;
1446 		buf->page_offset = stmmac_rx_offset(priv);
1447 	}
1448 
1449 	if (priv->sph && !buf->sec_page) {
1450 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1451 		if (!buf->sec_page)
1452 			return -ENOMEM;
1453 
1454 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1455 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1456 	} else {
1457 		buf->sec_page = NULL;
1458 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1459 	}
1460 
1461 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1462 
1463 	stmmac_set_desc_addr(priv, p, buf->addr);
1464 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1465 		stmmac_init_desc3(priv, p);
1466 
1467 	return 0;
1468 }
1469 
1470 /**
1471  * stmmac_free_rx_buffer - free RX dma buffers
1472  * @priv: private structure
1473  * @rx_q: RX queue
1474  * @i: buffer index.
1475  */
1476 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1477 				  struct stmmac_rx_queue *rx_q,
1478 				  int i)
1479 {
1480 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1481 
1482 	if (buf->page)
1483 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1484 	buf->page = NULL;
1485 
1486 	if (buf->sec_page)
1487 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1488 	buf->sec_page = NULL;
1489 }
1490 
1491 /**
1492  * stmmac_free_tx_buffer - free RX dma buffers
1493  * @priv: private structure
1494  * @dma_conf: structure to take the dma data
1495  * @queue: RX queue index
1496  * @i: buffer index.
1497  */
1498 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1499 				  struct stmmac_dma_conf *dma_conf,
1500 				  u32 queue, int i)
1501 {
1502 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1503 
1504 	if (tx_q->tx_skbuff_dma[i].buf &&
1505 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1506 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1507 			dma_unmap_page(priv->device,
1508 				       tx_q->tx_skbuff_dma[i].buf,
1509 				       tx_q->tx_skbuff_dma[i].len,
1510 				       DMA_TO_DEVICE);
1511 		else
1512 			dma_unmap_single(priv->device,
1513 					 tx_q->tx_skbuff_dma[i].buf,
1514 					 tx_q->tx_skbuff_dma[i].len,
1515 					 DMA_TO_DEVICE);
1516 	}
1517 
1518 	if (tx_q->xdpf[i] &&
1519 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1520 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1521 		xdp_return_frame(tx_q->xdpf[i]);
1522 		tx_q->xdpf[i] = NULL;
1523 	}
1524 
1525 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1526 		tx_q->xsk_frames_done++;
1527 
1528 	if (tx_q->tx_skbuff[i] &&
1529 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1530 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1531 		tx_q->tx_skbuff[i] = NULL;
1532 	}
1533 
1534 	tx_q->tx_skbuff_dma[i].buf = 0;
1535 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1536 }
1537 
1538 /**
1539  * dma_free_rx_skbufs - free RX dma buffers
1540  * @priv: private structure
1541  * @dma_conf: structure to take the dma data
1542  * @queue: RX queue index
1543  */
1544 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1545 			       struct stmmac_dma_conf *dma_conf,
1546 			       u32 queue)
1547 {
1548 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1549 	int i;
1550 
1551 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1552 		stmmac_free_rx_buffer(priv, rx_q, i);
1553 }
1554 
1555 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1556 				   struct stmmac_dma_conf *dma_conf,
1557 				   u32 queue, gfp_t flags)
1558 {
1559 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1560 	int i;
1561 
1562 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1563 		struct dma_desc *p;
1564 		int ret;
1565 
1566 		if (priv->extend_desc)
1567 			p = &((rx_q->dma_erx + i)->basic);
1568 		else
1569 			p = rx_q->dma_rx + i;
1570 
1571 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1572 					     queue);
1573 		if (ret)
1574 			return ret;
1575 
1576 		rx_q->buf_alloc_num++;
1577 	}
1578 
1579 	return 0;
1580 }
1581 
1582 /**
1583  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1584  * @priv: private structure
1585  * @dma_conf: structure to take the dma data
1586  * @queue: RX queue index
1587  */
1588 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1589 				struct stmmac_dma_conf *dma_conf,
1590 				u32 queue)
1591 {
1592 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1593 	int i;
1594 
1595 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1596 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1597 
1598 		if (!buf->xdp)
1599 			continue;
1600 
1601 		xsk_buff_free(buf->xdp);
1602 		buf->xdp = NULL;
1603 	}
1604 }
1605 
1606 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1607 				      struct stmmac_dma_conf *dma_conf,
1608 				      u32 queue)
1609 {
1610 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1611 	int i;
1612 
1613 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1614 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1615 	 * use this macro to make sure no size violations.
1616 	 */
1617 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1618 
1619 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1620 		struct stmmac_rx_buffer *buf;
1621 		dma_addr_t dma_addr;
1622 		struct dma_desc *p;
1623 
1624 		if (priv->extend_desc)
1625 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1626 		else
1627 			p = rx_q->dma_rx + i;
1628 
1629 		buf = &rx_q->buf_pool[i];
1630 
1631 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1632 		if (!buf->xdp)
1633 			return -ENOMEM;
1634 
1635 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1636 		stmmac_set_desc_addr(priv, p, dma_addr);
1637 		rx_q->buf_alloc_num++;
1638 	}
1639 
1640 	return 0;
1641 }
1642 
1643 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1644 {
1645 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1646 		return NULL;
1647 
1648 	return xsk_get_pool_from_qid(priv->dev, queue);
1649 }
1650 
1651 /**
1652  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1653  * @priv: driver private structure
1654  * @dma_conf: structure to take the dma data
1655  * @queue: RX queue index
1656  * @flags: gfp flag.
1657  * Description: this function initializes the DMA RX descriptors
1658  * and allocates the socket buffers. It supports the chained and ring
1659  * modes.
1660  */
1661 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1662 				    struct stmmac_dma_conf *dma_conf,
1663 				    u32 queue, gfp_t flags)
1664 {
1665 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1666 	int ret;
1667 
1668 	netif_dbg(priv, probe, priv->dev,
1669 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1670 		  (u32)rx_q->dma_rx_phy);
1671 
1672 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1673 
1674 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1675 
1676 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1677 
1678 	if (rx_q->xsk_pool) {
1679 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1680 						   MEM_TYPE_XSK_BUFF_POOL,
1681 						   NULL));
1682 		netdev_info(priv->dev,
1683 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1684 			    rx_q->queue_index);
1685 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1686 	} else {
1687 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1688 						   MEM_TYPE_PAGE_POOL,
1689 						   rx_q->page_pool));
1690 		netdev_info(priv->dev,
1691 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1692 			    rx_q->queue_index);
1693 	}
1694 
1695 	if (rx_q->xsk_pool) {
1696 		/* RX XDP ZC buffer pool may not be populated, e.g.
1697 		 * xdpsock TX-only.
1698 		 */
1699 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1700 	} else {
1701 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1702 		if (ret < 0)
1703 			return -ENOMEM;
1704 	}
1705 
1706 	/* Setup the chained descriptor addresses */
1707 	if (priv->mode == STMMAC_CHAIN_MODE) {
1708 		if (priv->extend_desc)
1709 			stmmac_mode_init(priv, rx_q->dma_erx,
1710 					 rx_q->dma_rx_phy,
1711 					 dma_conf->dma_rx_size, 1);
1712 		else
1713 			stmmac_mode_init(priv, rx_q->dma_rx,
1714 					 rx_q->dma_rx_phy,
1715 					 dma_conf->dma_rx_size, 0);
1716 	}
1717 
1718 	return 0;
1719 }
1720 
1721 static int init_dma_rx_desc_rings(struct net_device *dev,
1722 				  struct stmmac_dma_conf *dma_conf,
1723 				  gfp_t flags)
1724 {
1725 	struct stmmac_priv *priv = netdev_priv(dev);
1726 	u32 rx_count = priv->plat->rx_queues_to_use;
1727 	int queue;
1728 	int ret;
1729 
1730 	/* RX INITIALIZATION */
1731 	netif_dbg(priv, probe, priv->dev,
1732 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1733 
1734 	for (queue = 0; queue < rx_count; queue++) {
1735 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1736 		if (ret)
1737 			goto err_init_rx_buffers;
1738 	}
1739 
1740 	return 0;
1741 
1742 err_init_rx_buffers:
1743 	while (queue >= 0) {
1744 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1745 
1746 		if (rx_q->xsk_pool)
1747 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1748 		else
1749 			dma_free_rx_skbufs(priv, dma_conf, queue);
1750 
1751 		rx_q->buf_alloc_num = 0;
1752 		rx_q->xsk_pool = NULL;
1753 
1754 		queue--;
1755 	}
1756 
1757 	return ret;
1758 }
1759 
1760 /**
1761  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1762  * @priv: driver private structure
1763  * @dma_conf: structure to take the dma data
1764  * @queue: TX queue index
1765  * Description: this function initializes the DMA TX descriptors
1766  * and allocates the socket buffers. It supports the chained and ring
1767  * modes.
1768  */
1769 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1770 				    struct stmmac_dma_conf *dma_conf,
1771 				    u32 queue)
1772 {
1773 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1774 	int i;
1775 
1776 	netif_dbg(priv, probe, priv->dev,
1777 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1778 		  (u32)tx_q->dma_tx_phy);
1779 
1780 	/* Setup the chained descriptor addresses */
1781 	if (priv->mode == STMMAC_CHAIN_MODE) {
1782 		if (priv->extend_desc)
1783 			stmmac_mode_init(priv, tx_q->dma_etx,
1784 					 tx_q->dma_tx_phy,
1785 					 dma_conf->dma_tx_size, 1);
1786 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1787 			stmmac_mode_init(priv, tx_q->dma_tx,
1788 					 tx_q->dma_tx_phy,
1789 					 dma_conf->dma_tx_size, 0);
1790 	}
1791 
1792 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1793 
1794 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1795 		struct dma_desc *p;
1796 
1797 		if (priv->extend_desc)
1798 			p = &((tx_q->dma_etx + i)->basic);
1799 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1800 			p = &((tx_q->dma_entx + i)->basic);
1801 		else
1802 			p = tx_q->dma_tx + i;
1803 
1804 		stmmac_clear_desc(priv, p);
1805 
1806 		tx_q->tx_skbuff_dma[i].buf = 0;
1807 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1808 		tx_q->tx_skbuff_dma[i].len = 0;
1809 		tx_q->tx_skbuff_dma[i].last_segment = false;
1810 		tx_q->tx_skbuff[i] = NULL;
1811 	}
1812 
1813 	return 0;
1814 }
1815 
1816 static int init_dma_tx_desc_rings(struct net_device *dev,
1817 				  struct stmmac_dma_conf *dma_conf)
1818 {
1819 	struct stmmac_priv *priv = netdev_priv(dev);
1820 	u32 tx_queue_cnt;
1821 	u32 queue;
1822 
1823 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1824 
1825 	for (queue = 0; queue < tx_queue_cnt; queue++)
1826 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1827 
1828 	return 0;
1829 }
1830 
1831 /**
1832  * init_dma_desc_rings - init the RX/TX descriptor rings
1833  * @dev: net device structure
1834  * @dma_conf: structure to take the dma data
1835  * @flags: gfp flag.
1836  * Description: this function initializes the DMA RX/TX descriptors
1837  * and allocates the socket buffers. It supports the chained and ring
1838  * modes.
1839  */
1840 static int init_dma_desc_rings(struct net_device *dev,
1841 			       struct stmmac_dma_conf *dma_conf,
1842 			       gfp_t flags)
1843 {
1844 	struct stmmac_priv *priv = netdev_priv(dev);
1845 	int ret;
1846 
1847 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1848 	if (ret)
1849 		return ret;
1850 
1851 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1852 
1853 	stmmac_clear_descriptors(priv, dma_conf);
1854 
1855 	if (netif_msg_hw(priv))
1856 		stmmac_display_rings(priv, dma_conf);
1857 
1858 	return ret;
1859 }
1860 
1861 /**
1862  * dma_free_tx_skbufs - free TX dma buffers
1863  * @priv: private structure
1864  * @dma_conf: structure to take the dma data
1865  * @queue: TX queue index
1866  */
1867 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1868 			       struct stmmac_dma_conf *dma_conf,
1869 			       u32 queue)
1870 {
1871 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1872 	int i;
1873 
1874 	tx_q->xsk_frames_done = 0;
1875 
1876 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1877 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1878 
1879 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1880 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1881 		tx_q->xsk_frames_done = 0;
1882 		tx_q->xsk_pool = NULL;
1883 	}
1884 }
1885 
1886 /**
1887  * stmmac_free_tx_skbufs - free TX skb buffers
1888  * @priv: private structure
1889  */
1890 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1891 {
1892 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1893 	u32 queue;
1894 
1895 	for (queue = 0; queue < tx_queue_cnt; queue++)
1896 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1897 }
1898 
1899 /**
1900  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1901  * @priv: private structure
1902  * @dma_conf: structure to take the dma data
1903  * @queue: RX queue index
1904  */
1905 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1906 					 struct stmmac_dma_conf *dma_conf,
1907 					 u32 queue)
1908 {
1909 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1910 
1911 	/* Release the DMA RX socket buffers */
1912 	if (rx_q->xsk_pool)
1913 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1914 	else
1915 		dma_free_rx_skbufs(priv, dma_conf, queue);
1916 
1917 	rx_q->buf_alloc_num = 0;
1918 	rx_q->xsk_pool = NULL;
1919 
1920 	/* Free DMA regions of consistent memory previously allocated */
1921 	if (!priv->extend_desc)
1922 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1923 				  sizeof(struct dma_desc),
1924 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1925 	else
1926 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1927 				  sizeof(struct dma_extended_desc),
1928 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1929 
1930 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1931 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1932 
1933 	kfree(rx_q->buf_pool);
1934 	if (rx_q->page_pool)
1935 		page_pool_destroy(rx_q->page_pool);
1936 }
1937 
1938 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1939 				       struct stmmac_dma_conf *dma_conf)
1940 {
1941 	u32 rx_count = priv->plat->rx_queues_to_use;
1942 	u32 queue;
1943 
1944 	/* Free RX queue resources */
1945 	for (queue = 0; queue < rx_count; queue++)
1946 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1947 }
1948 
1949 /**
1950  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1951  * @priv: private structure
1952  * @dma_conf: structure to take the dma data
1953  * @queue: TX queue index
1954  */
1955 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1956 					 struct stmmac_dma_conf *dma_conf,
1957 					 u32 queue)
1958 {
1959 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1960 	size_t size;
1961 	void *addr;
1962 
1963 	/* Release the DMA TX socket buffers */
1964 	dma_free_tx_skbufs(priv, dma_conf, queue);
1965 
1966 	if (priv->extend_desc) {
1967 		size = sizeof(struct dma_extended_desc);
1968 		addr = tx_q->dma_etx;
1969 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1970 		size = sizeof(struct dma_edesc);
1971 		addr = tx_q->dma_entx;
1972 	} else {
1973 		size = sizeof(struct dma_desc);
1974 		addr = tx_q->dma_tx;
1975 	}
1976 
1977 	size *= dma_conf->dma_tx_size;
1978 
1979 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1980 
1981 	kfree(tx_q->tx_skbuff_dma);
1982 	kfree(tx_q->tx_skbuff);
1983 }
1984 
1985 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1986 				       struct stmmac_dma_conf *dma_conf)
1987 {
1988 	u32 tx_count = priv->plat->tx_queues_to_use;
1989 	u32 queue;
1990 
1991 	/* Free TX queue resources */
1992 	for (queue = 0; queue < tx_count; queue++)
1993 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1994 }
1995 
1996 /**
1997  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1998  * @priv: private structure
1999  * @dma_conf: structure to take the dma data
2000  * @queue: RX queue index
2001  * Description: according to which descriptor can be used (extend or basic)
2002  * this function allocates the resources for TX and RX paths. In case of
2003  * reception, for example, it pre-allocated the RX socket buffer in order to
2004  * allow zero-copy mechanism.
2005  */
2006 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2007 					 struct stmmac_dma_conf *dma_conf,
2008 					 u32 queue)
2009 {
2010 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2011 	struct stmmac_channel *ch = &priv->channel[queue];
2012 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2013 	struct page_pool_params pp_params = { 0 };
2014 	unsigned int num_pages;
2015 	unsigned int napi_id;
2016 	int ret;
2017 
2018 	rx_q->queue_index = queue;
2019 	rx_q->priv_data = priv;
2020 
2021 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2022 	pp_params.pool_size = dma_conf->dma_rx_size;
2023 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2024 	pp_params.order = ilog2(num_pages);
2025 	pp_params.nid = dev_to_node(priv->device);
2026 	pp_params.dev = priv->device;
2027 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2028 	pp_params.offset = stmmac_rx_offset(priv);
2029 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2030 
2031 	rx_q->page_pool = page_pool_create(&pp_params);
2032 	if (IS_ERR(rx_q->page_pool)) {
2033 		ret = PTR_ERR(rx_q->page_pool);
2034 		rx_q->page_pool = NULL;
2035 		return ret;
2036 	}
2037 
2038 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2039 				 sizeof(*rx_q->buf_pool),
2040 				 GFP_KERNEL);
2041 	if (!rx_q->buf_pool)
2042 		return -ENOMEM;
2043 
2044 	if (priv->extend_desc) {
2045 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2046 						   dma_conf->dma_rx_size *
2047 						   sizeof(struct dma_extended_desc),
2048 						   &rx_q->dma_rx_phy,
2049 						   GFP_KERNEL);
2050 		if (!rx_q->dma_erx)
2051 			return -ENOMEM;
2052 
2053 	} else {
2054 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2055 						  dma_conf->dma_rx_size *
2056 						  sizeof(struct dma_desc),
2057 						  &rx_q->dma_rx_phy,
2058 						  GFP_KERNEL);
2059 		if (!rx_q->dma_rx)
2060 			return -ENOMEM;
2061 	}
2062 
2063 	if (stmmac_xdp_is_enabled(priv) &&
2064 	    test_bit(queue, priv->af_xdp_zc_qps))
2065 		napi_id = ch->rxtx_napi.napi_id;
2066 	else
2067 		napi_id = ch->rx_napi.napi_id;
2068 
2069 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2070 			       rx_q->queue_index,
2071 			       napi_id);
2072 	if (ret) {
2073 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2074 		return -EINVAL;
2075 	}
2076 
2077 	return 0;
2078 }
2079 
2080 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2081 				       struct stmmac_dma_conf *dma_conf)
2082 {
2083 	u32 rx_count = priv->plat->rx_queues_to_use;
2084 	u32 queue;
2085 	int ret;
2086 
2087 	/* RX queues buffers and DMA */
2088 	for (queue = 0; queue < rx_count; queue++) {
2089 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2090 		if (ret)
2091 			goto err_dma;
2092 	}
2093 
2094 	return 0;
2095 
2096 err_dma:
2097 	free_dma_rx_desc_resources(priv, dma_conf);
2098 
2099 	return ret;
2100 }
2101 
2102 /**
2103  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2104  * @priv: private structure
2105  * @dma_conf: structure to take the dma data
2106  * @queue: TX queue index
2107  * Description: according to which descriptor can be used (extend or basic)
2108  * this function allocates the resources for TX and RX paths. In case of
2109  * reception, for example, it pre-allocated the RX socket buffer in order to
2110  * allow zero-copy mechanism.
2111  */
2112 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2113 					 struct stmmac_dma_conf *dma_conf,
2114 					 u32 queue)
2115 {
2116 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2117 	size_t size;
2118 	void *addr;
2119 
2120 	tx_q->queue_index = queue;
2121 	tx_q->priv_data = priv;
2122 
2123 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2124 				      sizeof(*tx_q->tx_skbuff_dma),
2125 				      GFP_KERNEL);
2126 	if (!tx_q->tx_skbuff_dma)
2127 		return -ENOMEM;
2128 
2129 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2130 				  sizeof(struct sk_buff *),
2131 				  GFP_KERNEL);
2132 	if (!tx_q->tx_skbuff)
2133 		return -ENOMEM;
2134 
2135 	if (priv->extend_desc)
2136 		size = sizeof(struct dma_extended_desc);
2137 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2138 		size = sizeof(struct dma_edesc);
2139 	else
2140 		size = sizeof(struct dma_desc);
2141 
2142 	size *= dma_conf->dma_tx_size;
2143 
2144 	addr = dma_alloc_coherent(priv->device, size,
2145 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2146 	if (!addr)
2147 		return -ENOMEM;
2148 
2149 	if (priv->extend_desc)
2150 		tx_q->dma_etx = addr;
2151 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2152 		tx_q->dma_entx = addr;
2153 	else
2154 		tx_q->dma_tx = addr;
2155 
2156 	return 0;
2157 }
2158 
2159 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2160 				       struct stmmac_dma_conf *dma_conf)
2161 {
2162 	u32 tx_count = priv->plat->tx_queues_to_use;
2163 	u32 queue;
2164 	int ret;
2165 
2166 	/* TX queues buffers and DMA */
2167 	for (queue = 0; queue < tx_count; queue++) {
2168 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2169 		if (ret)
2170 			goto err_dma;
2171 	}
2172 
2173 	return 0;
2174 
2175 err_dma:
2176 	free_dma_tx_desc_resources(priv, dma_conf);
2177 	return ret;
2178 }
2179 
2180 /**
2181  * alloc_dma_desc_resources - alloc TX/RX resources.
2182  * @priv: private structure
2183  * @dma_conf: structure to take the dma data
2184  * Description: according to which descriptor can be used (extend or basic)
2185  * this function allocates the resources for TX and RX paths. In case of
2186  * reception, for example, it pre-allocated the RX socket buffer in order to
2187  * allow zero-copy mechanism.
2188  */
2189 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2190 				    struct stmmac_dma_conf *dma_conf)
2191 {
2192 	/* RX Allocation */
2193 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2194 
2195 	if (ret)
2196 		return ret;
2197 
2198 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2199 
2200 	return ret;
2201 }
2202 
2203 /**
2204  * free_dma_desc_resources - free dma desc resources
2205  * @priv: private structure
2206  * @dma_conf: structure to take the dma data
2207  */
2208 static void free_dma_desc_resources(struct stmmac_priv *priv,
2209 				    struct stmmac_dma_conf *dma_conf)
2210 {
2211 	/* Release the DMA TX socket buffers */
2212 	free_dma_tx_desc_resources(priv, dma_conf);
2213 
2214 	/* Release the DMA RX socket buffers later
2215 	 * to ensure all pending XDP_TX buffers are returned.
2216 	 */
2217 	free_dma_rx_desc_resources(priv, dma_conf);
2218 }
2219 
2220 /**
2221  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2222  *  @priv: driver private structure
2223  *  Description: It is used for enabling the rx queues in the MAC
2224  */
2225 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2226 {
2227 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2228 	int queue;
2229 	u8 mode;
2230 
2231 	for (queue = 0; queue < rx_queues_count; queue++) {
2232 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2233 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2234 	}
2235 }
2236 
2237 /**
2238  * stmmac_start_rx_dma - start RX DMA channel
2239  * @priv: driver private structure
2240  * @chan: RX channel index
2241  * Description:
2242  * This starts a RX DMA channel
2243  */
2244 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2245 {
2246 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2247 	stmmac_start_rx(priv, priv->ioaddr, chan);
2248 }
2249 
2250 /**
2251  * stmmac_start_tx_dma - start TX DMA channel
2252  * @priv: driver private structure
2253  * @chan: TX channel index
2254  * Description:
2255  * This starts a TX DMA channel
2256  */
2257 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2258 {
2259 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2260 	stmmac_start_tx(priv, priv->ioaddr, chan);
2261 }
2262 
2263 /**
2264  * stmmac_stop_rx_dma - stop RX DMA channel
2265  * @priv: driver private structure
2266  * @chan: RX channel index
2267  * Description:
2268  * This stops a RX DMA channel
2269  */
2270 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2271 {
2272 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2273 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2274 }
2275 
2276 /**
2277  * stmmac_stop_tx_dma - stop TX DMA channel
2278  * @priv: driver private structure
2279  * @chan: TX channel index
2280  * Description:
2281  * This stops a TX DMA channel
2282  */
2283 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2284 {
2285 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2286 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2287 }
2288 
2289 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2290 {
2291 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2292 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2293 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2294 	u32 chan;
2295 
2296 	for (chan = 0; chan < dma_csr_ch; chan++) {
2297 		struct stmmac_channel *ch = &priv->channel[chan];
2298 		unsigned long flags;
2299 
2300 		spin_lock_irqsave(&ch->lock, flags);
2301 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2302 		spin_unlock_irqrestore(&ch->lock, flags);
2303 	}
2304 }
2305 
2306 /**
2307  * stmmac_start_all_dma - start all RX and TX DMA channels
2308  * @priv: driver private structure
2309  * Description:
2310  * This starts all the RX and TX DMA channels
2311  */
2312 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2313 {
2314 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2315 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2316 	u32 chan = 0;
2317 
2318 	for (chan = 0; chan < rx_channels_count; chan++)
2319 		stmmac_start_rx_dma(priv, chan);
2320 
2321 	for (chan = 0; chan < tx_channels_count; chan++)
2322 		stmmac_start_tx_dma(priv, chan);
2323 }
2324 
2325 /**
2326  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2327  * @priv: driver private structure
2328  * Description:
2329  * This stops the RX and TX DMA channels
2330  */
2331 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2332 {
2333 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2334 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2335 	u32 chan = 0;
2336 
2337 	for (chan = 0; chan < rx_channels_count; chan++)
2338 		stmmac_stop_rx_dma(priv, chan);
2339 
2340 	for (chan = 0; chan < tx_channels_count; chan++)
2341 		stmmac_stop_tx_dma(priv, chan);
2342 }
2343 
2344 /**
2345  *  stmmac_dma_operation_mode - HW DMA operation mode
2346  *  @priv: driver private structure
2347  *  Description: it is used for configuring the DMA operation mode register in
2348  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2349  */
2350 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2351 {
2352 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2353 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2354 	int rxfifosz = priv->plat->rx_fifo_size;
2355 	int txfifosz = priv->plat->tx_fifo_size;
2356 	u32 txmode = 0;
2357 	u32 rxmode = 0;
2358 	u32 chan = 0;
2359 	u8 qmode = 0;
2360 
2361 	if (rxfifosz == 0)
2362 		rxfifosz = priv->dma_cap.rx_fifo_size;
2363 	if (txfifosz == 0)
2364 		txfifosz = priv->dma_cap.tx_fifo_size;
2365 
2366 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2367 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2368 		rxfifosz /= rx_channels_count;
2369 		txfifosz /= tx_channels_count;
2370 	}
2371 
2372 	if (priv->plat->force_thresh_dma_mode) {
2373 		txmode = tc;
2374 		rxmode = tc;
2375 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2376 		/*
2377 		 * In case of GMAC, SF mode can be enabled
2378 		 * to perform the TX COE in HW. This depends on:
2379 		 * 1) TX COE if actually supported
2380 		 * 2) There is no bugged Jumbo frame support
2381 		 *    that needs to not insert csum in the TDES.
2382 		 */
2383 		txmode = SF_DMA_MODE;
2384 		rxmode = SF_DMA_MODE;
2385 		priv->xstats.threshold = SF_DMA_MODE;
2386 	} else {
2387 		txmode = tc;
2388 		rxmode = SF_DMA_MODE;
2389 	}
2390 
2391 	/* configure all channels */
2392 	for (chan = 0; chan < rx_channels_count; chan++) {
2393 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2394 		u32 buf_size;
2395 
2396 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2397 
2398 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2399 				rxfifosz, qmode);
2400 
2401 		if (rx_q->xsk_pool) {
2402 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2403 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2404 					      buf_size,
2405 					      chan);
2406 		} else {
2407 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2408 					      priv->dma_conf.dma_buf_sz,
2409 					      chan);
2410 		}
2411 	}
2412 
2413 	for (chan = 0; chan < tx_channels_count; chan++) {
2414 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2415 
2416 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2417 				txfifosz, qmode);
2418 	}
2419 }
2420 
2421 static void stmmac_xsk_request_timestamp(void *_priv)
2422 {
2423 	struct stmmac_metadata_request *meta_req = _priv;
2424 
2425 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2426 	*meta_req->set_ic = true;
2427 }
2428 
2429 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2430 {
2431 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2432 	struct stmmac_priv *priv = tx_compl->priv;
2433 	struct dma_desc *desc = tx_compl->desc;
2434 	bool found = false;
2435 	u64 ns = 0;
2436 
2437 	if (!priv->hwts_tx_en)
2438 		return 0;
2439 
2440 	/* check tx tstamp status */
2441 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2442 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2443 		found = true;
2444 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2445 		found = true;
2446 	}
2447 
2448 	if (found) {
2449 		ns -= priv->plat->cdc_error_adj;
2450 		return ns_to_ktime(ns);
2451 	}
2452 
2453 	return 0;
2454 }
2455 
2456 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2457 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2458 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2459 };
2460 
2461 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2462 {
2463 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2464 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2465 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2466 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2467 	unsigned int entry = tx_q->cur_tx;
2468 	struct dma_desc *tx_desc = NULL;
2469 	struct xdp_desc xdp_desc;
2470 	bool work_done = true;
2471 	u32 tx_set_ic_bit = 0;
2472 
2473 	/* Avoids TX time-out as we are sharing with slow path */
2474 	txq_trans_cond_update(nq);
2475 
2476 	budget = min(budget, stmmac_tx_avail(priv, queue));
2477 
2478 	while (budget-- > 0) {
2479 		struct stmmac_metadata_request meta_req;
2480 		struct xsk_tx_metadata *meta = NULL;
2481 		dma_addr_t dma_addr;
2482 		bool set_ic;
2483 
2484 		/* We are sharing with slow path and stop XSK TX desc submission when
2485 		 * available TX ring is less than threshold.
2486 		 */
2487 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2488 		    !netif_carrier_ok(priv->dev)) {
2489 			work_done = false;
2490 			break;
2491 		}
2492 
2493 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2494 			break;
2495 
2496 		if (priv->est && priv->est->enable &&
2497 		    priv->est->max_sdu[queue] &&
2498 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2499 			priv->xstats.max_sdu_txq_drop[queue]++;
2500 			continue;
2501 		}
2502 
2503 		if (likely(priv->extend_desc))
2504 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2505 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2506 			tx_desc = &tx_q->dma_entx[entry].basic;
2507 		else
2508 			tx_desc = tx_q->dma_tx + entry;
2509 
2510 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2511 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2512 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2513 
2514 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2515 
2516 		/* To return XDP buffer to XSK pool, we simple call
2517 		 * xsk_tx_completed(), so we don't need to fill up
2518 		 * 'buf' and 'xdpf'.
2519 		 */
2520 		tx_q->tx_skbuff_dma[entry].buf = 0;
2521 		tx_q->xdpf[entry] = NULL;
2522 
2523 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2524 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2525 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2526 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2527 
2528 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2529 
2530 		tx_q->tx_count_frames++;
2531 
2532 		if (!priv->tx_coal_frames[queue])
2533 			set_ic = false;
2534 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2535 			set_ic = true;
2536 		else
2537 			set_ic = false;
2538 
2539 		meta_req.priv = priv;
2540 		meta_req.tx_desc = tx_desc;
2541 		meta_req.set_ic = &set_ic;
2542 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2543 					&meta_req);
2544 		if (set_ic) {
2545 			tx_q->tx_count_frames = 0;
2546 			stmmac_set_tx_ic(priv, tx_desc);
2547 			tx_set_ic_bit++;
2548 		}
2549 
2550 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2551 				       true, priv->mode, true, true,
2552 				       xdp_desc.len);
2553 
2554 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2555 
2556 		xsk_tx_metadata_to_compl(meta,
2557 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2558 
2559 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2560 		entry = tx_q->cur_tx;
2561 	}
2562 	u64_stats_update_begin(&txq_stats->napi_syncp);
2563 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2564 	u64_stats_update_end(&txq_stats->napi_syncp);
2565 
2566 	if (tx_desc) {
2567 		stmmac_flush_tx_descriptors(priv, queue);
2568 		xsk_tx_release(pool);
2569 	}
2570 
2571 	/* Return true if all of the 3 conditions are met
2572 	 *  a) TX Budget is still available
2573 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2574 	 *     pending XSK TX for transmission)
2575 	 */
2576 	return !!budget && work_done;
2577 }
2578 
2579 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2580 {
2581 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2582 		tc += 64;
2583 
2584 		if (priv->plat->force_thresh_dma_mode)
2585 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2586 		else
2587 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2588 						      chan);
2589 
2590 		priv->xstats.threshold = tc;
2591 	}
2592 }
2593 
2594 /**
2595  * stmmac_tx_clean - to manage the transmission completion
2596  * @priv: driver private structure
2597  * @budget: napi budget limiting this functions packet handling
2598  * @queue: TX queue index
2599  * @pending_packets: signal to arm the TX coal timer
2600  * Description: it reclaims the transmit resources after transmission completes.
2601  * If some packets still needs to be handled, due to TX coalesce, set
2602  * pending_packets to true to make NAPI arm the TX coal timer.
2603  */
2604 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2605 			   bool *pending_packets)
2606 {
2607 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2608 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2609 	unsigned int bytes_compl = 0, pkts_compl = 0;
2610 	unsigned int entry, xmits = 0, count = 0;
2611 	u32 tx_packets = 0, tx_errors = 0;
2612 
2613 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2614 
2615 	tx_q->xsk_frames_done = 0;
2616 
2617 	entry = tx_q->dirty_tx;
2618 
2619 	/* Try to clean all TX complete frame in 1 shot */
2620 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2621 		struct xdp_frame *xdpf;
2622 		struct sk_buff *skb;
2623 		struct dma_desc *p;
2624 		int status;
2625 
2626 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2627 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2628 			xdpf = tx_q->xdpf[entry];
2629 			skb = NULL;
2630 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2631 			xdpf = NULL;
2632 			skb = tx_q->tx_skbuff[entry];
2633 		} else {
2634 			xdpf = NULL;
2635 			skb = NULL;
2636 		}
2637 
2638 		if (priv->extend_desc)
2639 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2640 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2641 			p = &tx_q->dma_entx[entry].basic;
2642 		else
2643 			p = tx_q->dma_tx + entry;
2644 
2645 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2646 		/* Check if the descriptor is owned by the DMA */
2647 		if (unlikely(status & tx_dma_own))
2648 			break;
2649 
2650 		count++;
2651 
2652 		/* Make sure descriptor fields are read after reading
2653 		 * the own bit.
2654 		 */
2655 		dma_rmb();
2656 
2657 		/* Just consider the last segment and ...*/
2658 		if (likely(!(status & tx_not_ls))) {
2659 			/* ... verify the status error condition */
2660 			if (unlikely(status & tx_err)) {
2661 				tx_errors++;
2662 				if (unlikely(status & tx_err_bump_tc))
2663 					stmmac_bump_dma_threshold(priv, queue);
2664 			} else {
2665 				tx_packets++;
2666 			}
2667 			if (skb) {
2668 				stmmac_get_tx_hwtstamp(priv, p, skb);
2669 			} else if (tx_q->xsk_pool &&
2670 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2671 				struct stmmac_xsk_tx_complete tx_compl = {
2672 					.priv = priv,
2673 					.desc = p,
2674 				};
2675 
2676 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2677 							 &stmmac_xsk_tx_metadata_ops,
2678 							 &tx_compl);
2679 			}
2680 		}
2681 
2682 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2683 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2684 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2685 				dma_unmap_page(priv->device,
2686 					       tx_q->tx_skbuff_dma[entry].buf,
2687 					       tx_q->tx_skbuff_dma[entry].len,
2688 					       DMA_TO_DEVICE);
2689 			else
2690 				dma_unmap_single(priv->device,
2691 						 tx_q->tx_skbuff_dma[entry].buf,
2692 						 tx_q->tx_skbuff_dma[entry].len,
2693 						 DMA_TO_DEVICE);
2694 			tx_q->tx_skbuff_dma[entry].buf = 0;
2695 			tx_q->tx_skbuff_dma[entry].len = 0;
2696 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2697 		}
2698 
2699 		stmmac_clean_desc3(priv, tx_q, p);
2700 
2701 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2702 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2703 
2704 		if (xdpf &&
2705 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2706 			xdp_return_frame_rx_napi(xdpf);
2707 			tx_q->xdpf[entry] = NULL;
2708 		}
2709 
2710 		if (xdpf &&
2711 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2712 			xdp_return_frame(xdpf);
2713 			tx_q->xdpf[entry] = NULL;
2714 		}
2715 
2716 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2717 			tx_q->xsk_frames_done++;
2718 
2719 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2720 			if (likely(skb)) {
2721 				pkts_compl++;
2722 				bytes_compl += skb->len;
2723 				dev_consume_skb_any(skb);
2724 				tx_q->tx_skbuff[entry] = NULL;
2725 			}
2726 		}
2727 
2728 		stmmac_release_tx_desc(priv, p, priv->mode);
2729 
2730 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2731 	}
2732 	tx_q->dirty_tx = entry;
2733 
2734 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2735 				  pkts_compl, bytes_compl);
2736 
2737 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2738 								queue))) &&
2739 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2740 
2741 		netif_dbg(priv, tx_done, priv->dev,
2742 			  "%s: restart transmit\n", __func__);
2743 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2744 	}
2745 
2746 	if (tx_q->xsk_pool) {
2747 		bool work_done;
2748 
2749 		if (tx_q->xsk_frames_done)
2750 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2751 
2752 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2753 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2754 
2755 		/* For XSK TX, we try to send as many as possible.
2756 		 * If XSK work done (XSK TX desc empty and budget still
2757 		 * available), return "budget - 1" to reenable TX IRQ.
2758 		 * Else, return "budget" to make NAPI continue polling.
2759 		 */
2760 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2761 					       STMMAC_XSK_TX_BUDGET_MAX);
2762 		if (work_done)
2763 			xmits = budget - 1;
2764 		else
2765 			xmits = budget;
2766 	}
2767 
2768 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2769 	    priv->eee_sw_timer_en) {
2770 		if (stmmac_enable_eee_mode(priv))
2771 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2772 	}
2773 
2774 	/* We still have pending packets, let's call for a new scheduling */
2775 	if (tx_q->dirty_tx != tx_q->cur_tx)
2776 		*pending_packets = true;
2777 
2778 	u64_stats_update_begin(&txq_stats->napi_syncp);
2779 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2780 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2781 	u64_stats_inc(&txq_stats->napi.tx_clean);
2782 	u64_stats_update_end(&txq_stats->napi_syncp);
2783 
2784 	priv->xstats.tx_errors += tx_errors;
2785 
2786 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2787 
2788 	/* Combine decisions from TX clean and XSK TX */
2789 	return max(count, xmits);
2790 }
2791 
2792 /**
2793  * stmmac_tx_err - to manage the tx error
2794  * @priv: driver private structure
2795  * @chan: channel index
2796  * Description: it cleans the descriptors and restarts the transmission
2797  * in case of transmission errors.
2798  */
2799 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2800 {
2801 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2802 
2803 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2804 
2805 	stmmac_stop_tx_dma(priv, chan);
2806 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2807 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2808 	stmmac_reset_tx_queue(priv, chan);
2809 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2810 			    tx_q->dma_tx_phy, chan);
2811 	stmmac_start_tx_dma(priv, chan);
2812 
2813 	priv->xstats.tx_errors++;
2814 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2815 }
2816 
2817 /**
2818  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2819  *  @priv: driver private structure
2820  *  @txmode: TX operating mode
2821  *  @rxmode: RX operating mode
2822  *  @chan: channel index
2823  *  Description: it is used for configuring of the DMA operation mode in
2824  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2825  *  mode.
2826  */
2827 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2828 					  u32 rxmode, u32 chan)
2829 {
2830 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2831 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2832 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2833 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2834 	int rxfifosz = priv->plat->rx_fifo_size;
2835 	int txfifosz = priv->plat->tx_fifo_size;
2836 
2837 	if (rxfifosz == 0)
2838 		rxfifosz = priv->dma_cap.rx_fifo_size;
2839 	if (txfifosz == 0)
2840 		txfifosz = priv->dma_cap.tx_fifo_size;
2841 
2842 	/* Adjust for real per queue fifo size */
2843 	rxfifosz /= rx_channels_count;
2844 	txfifosz /= tx_channels_count;
2845 
2846 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2847 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2848 }
2849 
2850 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2851 {
2852 	int ret;
2853 
2854 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2855 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2856 	if (ret && (ret != -EINVAL)) {
2857 		stmmac_global_err(priv);
2858 		return true;
2859 	}
2860 
2861 	return false;
2862 }
2863 
2864 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2865 {
2866 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2867 						 &priv->xstats, chan, dir);
2868 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2869 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2870 	struct stmmac_channel *ch = &priv->channel[chan];
2871 	struct napi_struct *rx_napi;
2872 	struct napi_struct *tx_napi;
2873 	unsigned long flags;
2874 
2875 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2876 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2877 
2878 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2879 		if (napi_schedule_prep(rx_napi)) {
2880 			spin_lock_irqsave(&ch->lock, flags);
2881 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2882 			spin_unlock_irqrestore(&ch->lock, flags);
2883 			__napi_schedule(rx_napi);
2884 		}
2885 	}
2886 
2887 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2888 		if (napi_schedule_prep(tx_napi)) {
2889 			spin_lock_irqsave(&ch->lock, flags);
2890 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2891 			spin_unlock_irqrestore(&ch->lock, flags);
2892 			__napi_schedule(tx_napi);
2893 		}
2894 	}
2895 
2896 	return status;
2897 }
2898 
2899 /**
2900  * stmmac_dma_interrupt - DMA ISR
2901  * @priv: driver private structure
2902  * Description: this is the DMA ISR. It is called by the main ISR.
2903  * It calls the dwmac dma routine and schedule poll method in case of some
2904  * work can be done.
2905  */
2906 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2907 {
2908 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2909 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2910 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2911 				tx_channel_count : rx_channel_count;
2912 	u32 chan;
2913 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2914 
2915 	/* Make sure we never check beyond our status buffer. */
2916 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2917 		channels_to_check = ARRAY_SIZE(status);
2918 
2919 	for (chan = 0; chan < channels_to_check; chan++)
2920 		status[chan] = stmmac_napi_check(priv, chan,
2921 						 DMA_DIR_RXTX);
2922 
2923 	for (chan = 0; chan < tx_channel_count; chan++) {
2924 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2925 			/* Try to bump up the dma threshold on this failure */
2926 			stmmac_bump_dma_threshold(priv, chan);
2927 		} else if (unlikely(status[chan] == tx_hard_error)) {
2928 			stmmac_tx_err(priv, chan);
2929 		}
2930 	}
2931 }
2932 
2933 /**
2934  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2935  * @priv: driver private structure
2936  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2937  */
2938 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2939 {
2940 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2941 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2942 
2943 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2944 
2945 	if (priv->dma_cap.rmon) {
2946 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2947 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2948 	} else
2949 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2950 }
2951 
2952 /**
2953  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2954  * @priv: driver private structure
2955  * Description:
2956  *  new GMAC chip generations have a new register to indicate the
2957  *  presence of the optional feature/functions.
2958  *  This can be also used to override the value passed through the
2959  *  platform and necessary for old MAC10/100 and GMAC chips.
2960  */
2961 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2962 {
2963 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2964 }
2965 
2966 /**
2967  * stmmac_check_ether_addr - check if the MAC addr is valid
2968  * @priv: driver private structure
2969  * Description:
2970  * it is to verify if the MAC address is valid, in case of failures it
2971  * generates a random MAC address
2972  */
2973 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2974 {
2975 	u8 addr[ETH_ALEN];
2976 
2977 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2978 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2979 		if (is_valid_ether_addr(addr))
2980 			eth_hw_addr_set(priv->dev, addr);
2981 		else
2982 			eth_hw_addr_random(priv->dev);
2983 		dev_info(priv->device, "device MAC address %pM\n",
2984 			 priv->dev->dev_addr);
2985 	}
2986 }
2987 
2988 /**
2989  * stmmac_init_dma_engine - DMA init.
2990  * @priv: driver private structure
2991  * Description:
2992  * It inits the DMA invoking the specific MAC/GMAC callback.
2993  * Some DMA parameters can be passed from the platform;
2994  * in case of these are not passed a default is kept for the MAC or GMAC.
2995  */
2996 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2997 {
2998 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2999 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3000 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3001 	struct stmmac_rx_queue *rx_q;
3002 	struct stmmac_tx_queue *tx_q;
3003 	u32 chan = 0;
3004 	int ret = 0;
3005 
3006 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3007 		dev_err(priv->device, "Invalid DMA configuration\n");
3008 		return -EINVAL;
3009 	}
3010 
3011 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3012 		priv->plat->dma_cfg->atds = 1;
3013 
3014 	ret = stmmac_reset(priv, priv->ioaddr);
3015 	if (ret) {
3016 		dev_err(priv->device, "Failed to reset the dma\n");
3017 		return ret;
3018 	}
3019 
3020 	/* DMA Configuration */
3021 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3022 
3023 	if (priv->plat->axi)
3024 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3025 
3026 	/* DMA CSR Channel configuration */
3027 	for (chan = 0; chan < dma_csr_ch; chan++) {
3028 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3029 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3030 	}
3031 
3032 	/* DMA RX Channel Configuration */
3033 	for (chan = 0; chan < rx_channels_count; chan++) {
3034 		rx_q = &priv->dma_conf.rx_queue[chan];
3035 
3036 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3037 				    rx_q->dma_rx_phy, chan);
3038 
3039 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3040 				     (rx_q->buf_alloc_num *
3041 				      sizeof(struct dma_desc));
3042 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3043 				       rx_q->rx_tail_addr, chan);
3044 	}
3045 
3046 	/* DMA TX Channel Configuration */
3047 	for (chan = 0; chan < tx_channels_count; chan++) {
3048 		tx_q = &priv->dma_conf.tx_queue[chan];
3049 
3050 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3051 				    tx_q->dma_tx_phy, chan);
3052 
3053 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3054 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3055 				       tx_q->tx_tail_addr, chan);
3056 	}
3057 
3058 	return ret;
3059 }
3060 
3061 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3062 {
3063 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3064 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3065 	struct stmmac_channel *ch;
3066 	struct napi_struct *napi;
3067 
3068 	if (!tx_coal_timer)
3069 		return;
3070 
3071 	ch = &priv->channel[tx_q->queue_index];
3072 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3073 
3074 	/* Arm timer only if napi is not already scheduled.
3075 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3076 	 * again in the next scheduled napi.
3077 	 */
3078 	if (unlikely(!napi_is_scheduled(napi)))
3079 		hrtimer_start(&tx_q->txtimer,
3080 			      STMMAC_COAL_TIMER(tx_coal_timer),
3081 			      HRTIMER_MODE_REL);
3082 	else
3083 		hrtimer_try_to_cancel(&tx_q->txtimer);
3084 }
3085 
3086 /**
3087  * stmmac_tx_timer - mitigation sw timer for tx.
3088  * @t: data pointer
3089  * Description:
3090  * This is the timer handler to directly invoke the stmmac_tx_clean.
3091  */
3092 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3093 {
3094 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3095 	struct stmmac_priv *priv = tx_q->priv_data;
3096 	struct stmmac_channel *ch;
3097 	struct napi_struct *napi;
3098 
3099 	ch = &priv->channel[tx_q->queue_index];
3100 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3101 
3102 	if (likely(napi_schedule_prep(napi))) {
3103 		unsigned long flags;
3104 
3105 		spin_lock_irqsave(&ch->lock, flags);
3106 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3107 		spin_unlock_irqrestore(&ch->lock, flags);
3108 		__napi_schedule(napi);
3109 	}
3110 
3111 	return HRTIMER_NORESTART;
3112 }
3113 
3114 /**
3115  * stmmac_init_coalesce - init mitigation options.
3116  * @priv: driver private structure
3117  * Description:
3118  * This inits the coalesce parameters: i.e. timer rate,
3119  * timer handler and default threshold used for enabling the
3120  * interrupt on completion bit.
3121  */
3122 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3123 {
3124 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3125 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3126 	u32 chan;
3127 
3128 	for (chan = 0; chan < tx_channel_count; chan++) {
3129 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3130 
3131 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3132 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3133 
3134 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3135 		tx_q->txtimer.function = stmmac_tx_timer;
3136 	}
3137 
3138 	for (chan = 0; chan < rx_channel_count; chan++)
3139 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3140 }
3141 
3142 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3143 {
3144 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3145 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3146 	u32 chan;
3147 
3148 	/* set TX ring length */
3149 	for (chan = 0; chan < tx_channels_count; chan++)
3150 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3151 				       (priv->dma_conf.dma_tx_size - 1), chan);
3152 
3153 	/* set RX ring length */
3154 	for (chan = 0; chan < rx_channels_count; chan++)
3155 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3156 				       (priv->dma_conf.dma_rx_size - 1), chan);
3157 }
3158 
3159 /**
3160  *  stmmac_set_tx_queue_weight - Set TX queue weight
3161  *  @priv: driver private structure
3162  *  Description: It is used for setting TX queues weight
3163  */
3164 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3165 {
3166 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3167 	u32 weight;
3168 	u32 queue;
3169 
3170 	for (queue = 0; queue < tx_queues_count; queue++) {
3171 		weight = priv->plat->tx_queues_cfg[queue].weight;
3172 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3173 	}
3174 }
3175 
3176 /**
3177  *  stmmac_configure_cbs - Configure CBS in TX queue
3178  *  @priv: driver private structure
3179  *  Description: It is used for configuring CBS in AVB TX queues
3180  */
3181 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3182 {
3183 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3184 	u32 mode_to_use;
3185 	u32 queue;
3186 
3187 	/* queue 0 is reserved for legacy traffic */
3188 	for (queue = 1; queue < tx_queues_count; queue++) {
3189 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3190 		if (mode_to_use == MTL_QUEUE_DCB)
3191 			continue;
3192 
3193 		stmmac_config_cbs(priv, priv->hw,
3194 				priv->plat->tx_queues_cfg[queue].send_slope,
3195 				priv->plat->tx_queues_cfg[queue].idle_slope,
3196 				priv->plat->tx_queues_cfg[queue].high_credit,
3197 				priv->plat->tx_queues_cfg[queue].low_credit,
3198 				queue);
3199 	}
3200 }
3201 
3202 /**
3203  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3204  *  @priv: driver private structure
3205  *  Description: It is used for mapping RX queues to RX dma channels
3206  */
3207 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3208 {
3209 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3210 	u32 queue;
3211 	u32 chan;
3212 
3213 	for (queue = 0; queue < rx_queues_count; queue++) {
3214 		chan = priv->plat->rx_queues_cfg[queue].chan;
3215 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3216 	}
3217 }
3218 
3219 /**
3220  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3221  *  @priv: driver private structure
3222  *  Description: It is used for configuring the RX Queue Priority
3223  */
3224 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3225 {
3226 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3227 	u32 queue;
3228 	u32 prio;
3229 
3230 	for (queue = 0; queue < rx_queues_count; queue++) {
3231 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3232 			continue;
3233 
3234 		prio = priv->plat->rx_queues_cfg[queue].prio;
3235 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3236 	}
3237 }
3238 
3239 /**
3240  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3241  *  @priv: driver private structure
3242  *  Description: It is used for configuring the TX Queue Priority
3243  */
3244 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3245 {
3246 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3247 	u32 queue;
3248 	u32 prio;
3249 
3250 	for (queue = 0; queue < tx_queues_count; queue++) {
3251 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3252 			continue;
3253 
3254 		prio = priv->plat->tx_queues_cfg[queue].prio;
3255 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3256 	}
3257 }
3258 
3259 /**
3260  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3261  *  @priv: driver private structure
3262  *  Description: It is used for configuring the RX queue routing
3263  */
3264 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3265 {
3266 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3267 	u32 queue;
3268 	u8 packet;
3269 
3270 	for (queue = 0; queue < rx_queues_count; queue++) {
3271 		/* no specific packet type routing specified for the queue */
3272 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3273 			continue;
3274 
3275 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3276 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3277 	}
3278 }
3279 
3280 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3281 {
3282 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3283 		priv->rss.enable = false;
3284 		return;
3285 	}
3286 
3287 	if (priv->dev->features & NETIF_F_RXHASH)
3288 		priv->rss.enable = true;
3289 	else
3290 		priv->rss.enable = false;
3291 
3292 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3293 			     priv->plat->rx_queues_to_use);
3294 }
3295 
3296 /**
3297  *  stmmac_mtl_configuration - Configure MTL
3298  *  @priv: driver private structure
3299  *  Description: It is used for configurring MTL
3300  */
3301 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3302 {
3303 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3304 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3305 
3306 	if (tx_queues_count > 1)
3307 		stmmac_set_tx_queue_weight(priv);
3308 
3309 	/* Configure MTL RX algorithms */
3310 	if (rx_queues_count > 1)
3311 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3312 				priv->plat->rx_sched_algorithm);
3313 
3314 	/* Configure MTL TX algorithms */
3315 	if (tx_queues_count > 1)
3316 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3317 				priv->plat->tx_sched_algorithm);
3318 
3319 	/* Configure CBS in AVB TX queues */
3320 	if (tx_queues_count > 1)
3321 		stmmac_configure_cbs(priv);
3322 
3323 	/* Map RX MTL to DMA channels */
3324 	stmmac_rx_queue_dma_chan_map(priv);
3325 
3326 	/* Enable MAC RX Queues */
3327 	stmmac_mac_enable_rx_queues(priv);
3328 
3329 	/* Set RX priorities */
3330 	if (rx_queues_count > 1)
3331 		stmmac_mac_config_rx_queues_prio(priv);
3332 
3333 	/* Set TX priorities */
3334 	if (tx_queues_count > 1)
3335 		stmmac_mac_config_tx_queues_prio(priv);
3336 
3337 	/* Set RX routing */
3338 	if (rx_queues_count > 1)
3339 		stmmac_mac_config_rx_queues_routing(priv);
3340 
3341 	/* Receive Side Scaling */
3342 	if (rx_queues_count > 1)
3343 		stmmac_mac_config_rss(priv);
3344 }
3345 
3346 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3347 {
3348 	if (priv->dma_cap.asp) {
3349 		netdev_info(priv->dev, "Enabling Safety Features\n");
3350 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3351 					  priv->plat->safety_feat_cfg);
3352 	} else {
3353 		netdev_info(priv->dev, "No Safety Features support found\n");
3354 	}
3355 }
3356 
3357 /**
3358  * stmmac_hw_setup - setup mac in a usable state.
3359  *  @dev : pointer to the device structure.
3360  *  @ptp_register: register PTP if set
3361  *  Description:
3362  *  this is the main function to setup the HW in a usable state because the
3363  *  dma engine is reset, the core registers are configured (e.g. AXI,
3364  *  Checksum features, timers). The DMA is ready to start receiving and
3365  *  transmitting.
3366  *  Return value:
3367  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3368  *  file on failure.
3369  */
3370 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3371 {
3372 	struct stmmac_priv *priv = netdev_priv(dev);
3373 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3374 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3375 	bool sph_en;
3376 	u32 chan;
3377 	int ret;
3378 
3379 	/* Make sure RX clock is enabled */
3380 	if (priv->hw->phylink_pcs)
3381 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3382 
3383 	/* DMA initialization and SW reset */
3384 	ret = stmmac_init_dma_engine(priv);
3385 	if (ret < 0) {
3386 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3387 			   __func__);
3388 		return ret;
3389 	}
3390 
3391 	/* Copy the MAC addr into the HW  */
3392 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3393 
3394 	/* PS and related bits will be programmed according to the speed */
3395 	if (priv->hw->pcs) {
3396 		int speed = priv->plat->mac_port_sel_speed;
3397 
3398 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3399 		    (speed == SPEED_1000)) {
3400 			priv->hw->ps = speed;
3401 		} else {
3402 			dev_warn(priv->device, "invalid port speed\n");
3403 			priv->hw->ps = 0;
3404 		}
3405 	}
3406 
3407 	/* Initialize the MAC Core */
3408 	stmmac_core_init(priv, priv->hw, dev);
3409 
3410 	/* Initialize MTL*/
3411 	stmmac_mtl_configuration(priv);
3412 
3413 	/* Initialize Safety Features */
3414 	stmmac_safety_feat_configuration(priv);
3415 
3416 	ret = stmmac_rx_ipc(priv, priv->hw);
3417 	if (!ret) {
3418 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3419 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3420 		priv->hw->rx_csum = 0;
3421 	}
3422 
3423 	/* Enable the MAC Rx/Tx */
3424 	stmmac_mac_set(priv, priv->ioaddr, true);
3425 
3426 	/* Set the HW DMA mode and the COE */
3427 	stmmac_dma_operation_mode(priv);
3428 
3429 	stmmac_mmc_setup(priv);
3430 
3431 	if (ptp_register) {
3432 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3433 		if (ret < 0)
3434 			netdev_warn(priv->dev,
3435 				    "failed to enable PTP reference clock: %pe\n",
3436 				    ERR_PTR(ret));
3437 	}
3438 
3439 	ret = stmmac_init_ptp(priv);
3440 	if (ret == -EOPNOTSUPP)
3441 		netdev_info(priv->dev, "PTP not supported by HW\n");
3442 	else if (ret)
3443 		netdev_warn(priv->dev, "PTP init failed\n");
3444 	else if (ptp_register)
3445 		stmmac_ptp_register(priv);
3446 
3447 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3448 
3449 	/* Convert the timer from msec to usec */
3450 	if (!priv->tx_lpi_timer)
3451 		priv->tx_lpi_timer = eee_timer * 1000;
3452 
3453 	if (priv->use_riwt) {
3454 		u32 queue;
3455 
3456 		for (queue = 0; queue < rx_cnt; queue++) {
3457 			if (!priv->rx_riwt[queue])
3458 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3459 
3460 			stmmac_rx_watchdog(priv, priv->ioaddr,
3461 					   priv->rx_riwt[queue], queue);
3462 		}
3463 	}
3464 
3465 	if (priv->hw->pcs)
3466 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3467 
3468 	/* set TX and RX rings length */
3469 	stmmac_set_rings_length(priv);
3470 
3471 	/* Enable TSO */
3472 	if (priv->tso) {
3473 		for (chan = 0; chan < tx_cnt; chan++) {
3474 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3475 
3476 			/* TSO and TBS cannot co-exist */
3477 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3478 				continue;
3479 
3480 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3481 		}
3482 	}
3483 
3484 	/* Enable Split Header */
3485 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3486 	for (chan = 0; chan < rx_cnt; chan++)
3487 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3488 
3489 
3490 	/* VLAN Tag Insertion */
3491 	if (priv->dma_cap.vlins)
3492 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3493 
3494 	/* TBS */
3495 	for (chan = 0; chan < tx_cnt; chan++) {
3496 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3497 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3498 
3499 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3500 	}
3501 
3502 	/* Configure real RX and TX queues */
3503 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3504 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3505 
3506 	/* Start the ball rolling... */
3507 	stmmac_start_all_dma(priv);
3508 
3509 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3510 
3511 	return 0;
3512 }
3513 
3514 static void stmmac_hw_teardown(struct net_device *dev)
3515 {
3516 	struct stmmac_priv *priv = netdev_priv(dev);
3517 
3518 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3519 }
3520 
3521 static void stmmac_free_irq(struct net_device *dev,
3522 			    enum request_irq_err irq_err, int irq_idx)
3523 {
3524 	struct stmmac_priv *priv = netdev_priv(dev);
3525 	int j;
3526 
3527 	switch (irq_err) {
3528 	case REQ_IRQ_ERR_ALL:
3529 		irq_idx = priv->plat->tx_queues_to_use;
3530 		fallthrough;
3531 	case REQ_IRQ_ERR_TX:
3532 		for (j = irq_idx - 1; j >= 0; j--) {
3533 			if (priv->tx_irq[j] > 0) {
3534 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3535 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3536 			}
3537 		}
3538 		irq_idx = priv->plat->rx_queues_to_use;
3539 		fallthrough;
3540 	case REQ_IRQ_ERR_RX:
3541 		for (j = irq_idx - 1; j >= 0; j--) {
3542 			if (priv->rx_irq[j] > 0) {
3543 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3544 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3545 			}
3546 		}
3547 
3548 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3549 			free_irq(priv->sfty_ue_irq, dev);
3550 		fallthrough;
3551 	case REQ_IRQ_ERR_SFTY_UE:
3552 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3553 			free_irq(priv->sfty_ce_irq, dev);
3554 		fallthrough;
3555 	case REQ_IRQ_ERR_SFTY_CE:
3556 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3557 			free_irq(priv->lpi_irq, dev);
3558 		fallthrough;
3559 	case REQ_IRQ_ERR_LPI:
3560 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3561 			free_irq(priv->wol_irq, dev);
3562 		fallthrough;
3563 	case REQ_IRQ_ERR_SFTY:
3564 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3565 			free_irq(priv->sfty_irq, dev);
3566 		fallthrough;
3567 	case REQ_IRQ_ERR_WOL:
3568 		free_irq(dev->irq, dev);
3569 		fallthrough;
3570 	case REQ_IRQ_ERR_MAC:
3571 	case REQ_IRQ_ERR_NO:
3572 		/* If MAC IRQ request error, no more IRQ to free */
3573 		break;
3574 	}
3575 }
3576 
3577 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3578 {
3579 	struct stmmac_priv *priv = netdev_priv(dev);
3580 	enum request_irq_err irq_err;
3581 	cpumask_t cpu_mask;
3582 	int irq_idx = 0;
3583 	char *int_name;
3584 	int ret;
3585 	int i;
3586 
3587 	/* For common interrupt */
3588 	int_name = priv->int_name_mac;
3589 	sprintf(int_name, "%s:%s", dev->name, "mac");
3590 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3591 			  0, int_name, dev);
3592 	if (unlikely(ret < 0)) {
3593 		netdev_err(priv->dev,
3594 			   "%s: alloc mac MSI %d (error: %d)\n",
3595 			   __func__, dev->irq, ret);
3596 		irq_err = REQ_IRQ_ERR_MAC;
3597 		goto irq_error;
3598 	}
3599 
3600 	/* Request the Wake IRQ in case of another line
3601 	 * is used for WoL
3602 	 */
3603 	priv->wol_irq_disabled = true;
3604 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3605 		int_name = priv->int_name_wol;
3606 		sprintf(int_name, "%s:%s", dev->name, "wol");
3607 		ret = request_irq(priv->wol_irq,
3608 				  stmmac_mac_interrupt,
3609 				  0, int_name, dev);
3610 		if (unlikely(ret < 0)) {
3611 			netdev_err(priv->dev,
3612 				   "%s: alloc wol MSI %d (error: %d)\n",
3613 				   __func__, priv->wol_irq, ret);
3614 			irq_err = REQ_IRQ_ERR_WOL;
3615 			goto irq_error;
3616 		}
3617 	}
3618 
3619 	/* Request the LPI IRQ in case of another line
3620 	 * is used for LPI
3621 	 */
3622 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3623 		int_name = priv->int_name_lpi;
3624 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3625 		ret = request_irq(priv->lpi_irq,
3626 				  stmmac_mac_interrupt,
3627 				  0, int_name, dev);
3628 		if (unlikely(ret < 0)) {
3629 			netdev_err(priv->dev,
3630 				   "%s: alloc lpi MSI %d (error: %d)\n",
3631 				   __func__, priv->lpi_irq, ret);
3632 			irq_err = REQ_IRQ_ERR_LPI;
3633 			goto irq_error;
3634 		}
3635 	}
3636 
3637 	/* Request the common Safety Feature Correctible/Uncorrectible
3638 	 * Error line in case of another line is used
3639 	 */
3640 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3641 		int_name = priv->int_name_sfty;
3642 		sprintf(int_name, "%s:%s", dev->name, "safety");
3643 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3644 				  0, int_name, dev);
3645 		if (unlikely(ret < 0)) {
3646 			netdev_err(priv->dev,
3647 				   "%s: alloc sfty MSI %d (error: %d)\n",
3648 				   __func__, priv->sfty_irq, ret);
3649 			irq_err = REQ_IRQ_ERR_SFTY;
3650 			goto irq_error;
3651 		}
3652 	}
3653 
3654 	/* Request the Safety Feature Correctible Error line in
3655 	 * case of another line is used
3656 	 */
3657 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3658 		int_name = priv->int_name_sfty_ce;
3659 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3660 		ret = request_irq(priv->sfty_ce_irq,
3661 				  stmmac_safety_interrupt,
3662 				  0, int_name, dev);
3663 		if (unlikely(ret < 0)) {
3664 			netdev_err(priv->dev,
3665 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3666 				   __func__, priv->sfty_ce_irq, ret);
3667 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3668 			goto irq_error;
3669 		}
3670 	}
3671 
3672 	/* Request the Safety Feature Uncorrectible Error line in
3673 	 * case of another line is used
3674 	 */
3675 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3676 		int_name = priv->int_name_sfty_ue;
3677 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3678 		ret = request_irq(priv->sfty_ue_irq,
3679 				  stmmac_safety_interrupt,
3680 				  0, int_name, dev);
3681 		if (unlikely(ret < 0)) {
3682 			netdev_err(priv->dev,
3683 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3684 				   __func__, priv->sfty_ue_irq, ret);
3685 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3686 			goto irq_error;
3687 		}
3688 	}
3689 
3690 	/* Request Rx MSI irq */
3691 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3692 		if (i >= MTL_MAX_RX_QUEUES)
3693 			break;
3694 		if (priv->rx_irq[i] == 0)
3695 			continue;
3696 
3697 		int_name = priv->int_name_rx_irq[i];
3698 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3699 		ret = request_irq(priv->rx_irq[i],
3700 				  stmmac_msi_intr_rx,
3701 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3702 		if (unlikely(ret < 0)) {
3703 			netdev_err(priv->dev,
3704 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3705 				   __func__, i, priv->rx_irq[i], ret);
3706 			irq_err = REQ_IRQ_ERR_RX;
3707 			irq_idx = i;
3708 			goto irq_error;
3709 		}
3710 		cpumask_clear(&cpu_mask);
3711 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3712 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3713 	}
3714 
3715 	/* Request Tx MSI irq */
3716 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3717 		if (i >= MTL_MAX_TX_QUEUES)
3718 			break;
3719 		if (priv->tx_irq[i] == 0)
3720 			continue;
3721 
3722 		int_name = priv->int_name_tx_irq[i];
3723 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3724 		ret = request_irq(priv->tx_irq[i],
3725 				  stmmac_msi_intr_tx,
3726 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3727 		if (unlikely(ret < 0)) {
3728 			netdev_err(priv->dev,
3729 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3730 				   __func__, i, priv->tx_irq[i], ret);
3731 			irq_err = REQ_IRQ_ERR_TX;
3732 			irq_idx = i;
3733 			goto irq_error;
3734 		}
3735 		cpumask_clear(&cpu_mask);
3736 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3737 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3738 	}
3739 
3740 	return 0;
3741 
3742 irq_error:
3743 	stmmac_free_irq(dev, irq_err, irq_idx);
3744 	return ret;
3745 }
3746 
3747 static int stmmac_request_irq_single(struct net_device *dev)
3748 {
3749 	struct stmmac_priv *priv = netdev_priv(dev);
3750 	enum request_irq_err irq_err;
3751 	int ret;
3752 
3753 	ret = request_irq(dev->irq, stmmac_interrupt,
3754 			  IRQF_SHARED, dev->name, dev);
3755 	if (unlikely(ret < 0)) {
3756 		netdev_err(priv->dev,
3757 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3758 			   __func__, dev->irq, ret);
3759 		irq_err = REQ_IRQ_ERR_MAC;
3760 		goto irq_error;
3761 	}
3762 
3763 	/* Request the Wake IRQ in case of another line
3764 	 * is used for WoL
3765 	 */
3766 	priv->wol_irq_disabled = true;
3767 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3768 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3769 				  IRQF_SHARED, dev->name, dev);
3770 		if (unlikely(ret < 0)) {
3771 			netdev_err(priv->dev,
3772 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3773 				   __func__, priv->wol_irq, ret);
3774 			irq_err = REQ_IRQ_ERR_WOL;
3775 			goto irq_error;
3776 		}
3777 	}
3778 
3779 	/* Request the IRQ lines */
3780 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3781 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3782 				  IRQF_SHARED, dev->name, dev);
3783 		if (unlikely(ret < 0)) {
3784 			netdev_err(priv->dev,
3785 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3786 				   __func__, priv->lpi_irq, ret);
3787 			irq_err = REQ_IRQ_ERR_LPI;
3788 			goto irq_error;
3789 		}
3790 	}
3791 
3792 	/* Request the common Safety Feature Correctible/Uncorrectible
3793 	 * Error line in case of another line is used
3794 	 */
3795 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3796 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3797 				  IRQF_SHARED, dev->name, dev);
3798 		if (unlikely(ret < 0)) {
3799 			netdev_err(priv->dev,
3800 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3801 				   __func__, priv->sfty_irq, ret);
3802 			irq_err = REQ_IRQ_ERR_SFTY;
3803 			goto irq_error;
3804 		}
3805 	}
3806 
3807 	return 0;
3808 
3809 irq_error:
3810 	stmmac_free_irq(dev, irq_err, 0);
3811 	return ret;
3812 }
3813 
3814 static int stmmac_request_irq(struct net_device *dev)
3815 {
3816 	struct stmmac_priv *priv = netdev_priv(dev);
3817 	int ret;
3818 
3819 	/* Request the IRQ lines */
3820 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3821 		ret = stmmac_request_irq_multi_msi(dev);
3822 	else
3823 		ret = stmmac_request_irq_single(dev);
3824 
3825 	return ret;
3826 }
3827 
3828 /**
3829  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3830  *  @priv: driver private structure
3831  *  @mtu: MTU to setup the dma queue and buf with
3832  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3833  *  Allocate the Tx/Rx DMA queue and init them.
3834  *  Return value:
3835  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3836  */
3837 static struct stmmac_dma_conf *
3838 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3839 {
3840 	struct stmmac_dma_conf *dma_conf;
3841 	int chan, bfsize, ret;
3842 
3843 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3844 	if (!dma_conf) {
3845 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3846 			   __func__);
3847 		return ERR_PTR(-ENOMEM);
3848 	}
3849 
3850 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3851 	if (bfsize < 0)
3852 		bfsize = 0;
3853 
3854 	if (bfsize < BUF_SIZE_16KiB)
3855 		bfsize = stmmac_set_bfsize(mtu, 0);
3856 
3857 	dma_conf->dma_buf_sz = bfsize;
3858 	/* Chose the tx/rx size from the already defined one in the
3859 	 * priv struct. (if defined)
3860 	 */
3861 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3862 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3863 
3864 	if (!dma_conf->dma_tx_size)
3865 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3866 	if (!dma_conf->dma_rx_size)
3867 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3868 
3869 	/* Earlier check for TBS */
3870 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3871 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3872 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3873 
3874 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3875 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3876 	}
3877 
3878 	ret = alloc_dma_desc_resources(priv, dma_conf);
3879 	if (ret < 0) {
3880 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3881 			   __func__);
3882 		goto alloc_error;
3883 	}
3884 
3885 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3886 	if (ret < 0) {
3887 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3888 			   __func__);
3889 		goto init_error;
3890 	}
3891 
3892 	return dma_conf;
3893 
3894 init_error:
3895 	free_dma_desc_resources(priv, dma_conf);
3896 alloc_error:
3897 	kfree(dma_conf);
3898 	return ERR_PTR(ret);
3899 }
3900 
3901 /**
3902  *  __stmmac_open - open entry point of the driver
3903  *  @dev : pointer to the device structure.
3904  *  @dma_conf :  structure to take the dma data
3905  *  Description:
3906  *  This function is the open entry point of the driver.
3907  *  Return value:
3908  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3909  *  file on failure.
3910  */
3911 static int __stmmac_open(struct net_device *dev,
3912 			 struct stmmac_dma_conf *dma_conf)
3913 {
3914 	struct stmmac_priv *priv = netdev_priv(dev);
3915 	int mode = priv->plat->phy_interface;
3916 	u32 chan;
3917 	int ret;
3918 
3919 	ret = pm_runtime_resume_and_get(priv->device);
3920 	if (ret < 0)
3921 		return ret;
3922 
3923 	if ((!priv->hw->xpcs ||
3924 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3925 		ret = stmmac_init_phy(dev);
3926 		if (ret) {
3927 			netdev_err(priv->dev,
3928 				   "%s: Cannot attach to PHY (error: %d)\n",
3929 				   __func__, ret);
3930 			goto init_phy_error;
3931 		}
3932 	}
3933 
3934 	buf_sz = dma_conf->dma_buf_sz;
3935 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3936 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3937 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3938 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3939 
3940 	stmmac_reset_queues_param(priv);
3941 
3942 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3943 	    priv->plat->serdes_powerup) {
3944 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3945 		if (ret < 0) {
3946 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3947 				   __func__);
3948 			goto init_error;
3949 		}
3950 	}
3951 
3952 	ret = stmmac_hw_setup(dev, true);
3953 	if (ret < 0) {
3954 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3955 		goto init_error;
3956 	}
3957 
3958 	stmmac_init_coalesce(priv);
3959 
3960 	phylink_start(priv->phylink);
3961 	/* We may have called phylink_speed_down before */
3962 	phylink_speed_up(priv->phylink);
3963 
3964 	ret = stmmac_request_irq(dev);
3965 	if (ret)
3966 		goto irq_error;
3967 
3968 	stmmac_enable_all_queues(priv);
3969 	netif_tx_start_all_queues(priv->dev);
3970 	stmmac_enable_all_dma_irq(priv);
3971 
3972 	return 0;
3973 
3974 irq_error:
3975 	phylink_stop(priv->phylink);
3976 
3977 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3978 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3979 
3980 	stmmac_hw_teardown(dev);
3981 init_error:
3982 	phylink_disconnect_phy(priv->phylink);
3983 init_phy_error:
3984 	pm_runtime_put(priv->device);
3985 	return ret;
3986 }
3987 
3988 static int stmmac_open(struct net_device *dev)
3989 {
3990 	struct stmmac_priv *priv = netdev_priv(dev);
3991 	struct stmmac_dma_conf *dma_conf;
3992 	int ret;
3993 
3994 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3995 	if (IS_ERR(dma_conf))
3996 		return PTR_ERR(dma_conf);
3997 
3998 	ret = __stmmac_open(dev, dma_conf);
3999 	if (ret)
4000 		free_dma_desc_resources(priv, dma_conf);
4001 
4002 	kfree(dma_conf);
4003 	return ret;
4004 }
4005 
4006 /**
4007  *  stmmac_release - close entry point of the driver
4008  *  @dev : device pointer.
4009  *  Description:
4010  *  This is the stop entry point of the driver.
4011  */
4012 static int stmmac_release(struct net_device *dev)
4013 {
4014 	struct stmmac_priv *priv = netdev_priv(dev);
4015 	u32 chan;
4016 
4017 	if (device_may_wakeup(priv->device))
4018 		phylink_speed_down(priv->phylink, false);
4019 	/* Stop and disconnect the PHY */
4020 	phylink_stop(priv->phylink);
4021 	phylink_disconnect_phy(priv->phylink);
4022 
4023 	stmmac_disable_all_queues(priv);
4024 
4025 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4026 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4027 
4028 	netif_tx_disable(dev);
4029 
4030 	/* Free the IRQ lines */
4031 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4032 
4033 	if (priv->eee_enabled) {
4034 		priv->tx_path_in_lpi_mode = false;
4035 		del_timer_sync(&priv->eee_ctrl_timer);
4036 	}
4037 
4038 	/* Stop TX/RX DMA and clear the descriptors */
4039 	stmmac_stop_all_dma(priv);
4040 
4041 	/* Release and free the Rx/Tx resources */
4042 	free_dma_desc_resources(priv, &priv->dma_conf);
4043 
4044 	/* Disable the MAC Rx/Tx */
4045 	stmmac_mac_set(priv, priv->ioaddr, false);
4046 
4047 	/* Powerdown Serdes if there is */
4048 	if (priv->plat->serdes_powerdown)
4049 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4050 
4051 	stmmac_release_ptp(priv);
4052 
4053 	if (stmmac_fpe_supported(priv))
4054 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4055 
4056 	pm_runtime_put(priv->device);
4057 
4058 	return 0;
4059 }
4060 
4061 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4062 			       struct stmmac_tx_queue *tx_q)
4063 {
4064 	u16 tag = 0x0, inner_tag = 0x0;
4065 	u32 inner_type = 0x0;
4066 	struct dma_desc *p;
4067 
4068 	if (!priv->dma_cap.vlins)
4069 		return false;
4070 	if (!skb_vlan_tag_present(skb))
4071 		return false;
4072 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4073 		inner_tag = skb_vlan_tag_get(skb);
4074 		inner_type = STMMAC_VLAN_INSERT;
4075 	}
4076 
4077 	tag = skb_vlan_tag_get(skb);
4078 
4079 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4080 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4081 	else
4082 		p = &tx_q->dma_tx[tx_q->cur_tx];
4083 
4084 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4085 		return false;
4086 
4087 	stmmac_set_tx_owner(priv, p);
4088 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4089 	return true;
4090 }
4091 
4092 /**
4093  *  stmmac_tso_allocator - close entry point of the driver
4094  *  @priv: driver private structure
4095  *  @des: buffer start address
4096  *  @total_len: total length to fill in descriptors
4097  *  @last_segment: condition for the last descriptor
4098  *  @queue: TX queue index
4099  *  Description:
4100  *  This function fills descriptor and request new descriptors according to
4101  *  buffer length to fill
4102  */
4103 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4104 				 int total_len, bool last_segment, u32 queue)
4105 {
4106 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4107 	struct dma_desc *desc;
4108 	u32 buff_size;
4109 	int tmp_len;
4110 
4111 	tmp_len = total_len;
4112 
4113 	while (tmp_len > 0) {
4114 		dma_addr_t curr_addr;
4115 
4116 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4117 						priv->dma_conf.dma_tx_size);
4118 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4119 
4120 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4121 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4122 		else
4123 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4124 
4125 		curr_addr = des + (total_len - tmp_len);
4126 		stmmac_set_desc_addr(priv, desc, curr_addr);
4127 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4128 			    TSO_MAX_BUFF_SIZE : tmp_len;
4129 
4130 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4131 				0, 1,
4132 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4133 				0, 0);
4134 
4135 		tmp_len -= TSO_MAX_BUFF_SIZE;
4136 	}
4137 }
4138 
4139 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4140 {
4141 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4142 	int desc_size;
4143 
4144 	if (likely(priv->extend_desc))
4145 		desc_size = sizeof(struct dma_extended_desc);
4146 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4147 		desc_size = sizeof(struct dma_edesc);
4148 	else
4149 		desc_size = sizeof(struct dma_desc);
4150 
4151 	/* The own bit must be the latest setting done when prepare the
4152 	 * descriptor and then barrier is needed to make sure that
4153 	 * all is coherent before granting the DMA engine.
4154 	 */
4155 	wmb();
4156 
4157 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4158 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4159 }
4160 
4161 /**
4162  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4163  *  @skb : the socket buffer
4164  *  @dev : device pointer
4165  *  Description: this is the transmit function that is called on TSO frames
4166  *  (support available on GMAC4 and newer chips).
4167  *  Diagram below show the ring programming in case of TSO frames:
4168  *
4169  *  First Descriptor
4170  *   --------
4171  *   | DES0 |---> buffer1 = L2/L3/L4 header
4172  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4173  *   |      |     width is 32-bit, but we never use it.
4174  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4175  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4176  *   |      |     or 48-bit, and we always use it.
4177  *   | DES2 |---> buffer1 len
4178  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4179  *   --------
4180  *   --------
4181  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4182  *   | DES1 |---> same as the First Descriptor
4183  *   | DES2 |---> buffer1 len
4184  *   | DES3 |
4185  *   --------
4186  *	|
4187  *     ...
4188  *	|
4189  *   --------
4190  *   | DES0 |---> buffer1 = Split TCP Payload
4191  *   | DES1 |---> same as the First Descriptor
4192  *   | DES2 |---> buffer1 len
4193  *   | DES3 |
4194  *   --------
4195  *
4196  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4197  */
4198 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4199 {
4200 	struct dma_desc *desc, *first, *mss_desc = NULL;
4201 	struct stmmac_priv *priv = netdev_priv(dev);
4202 	unsigned int first_entry, tx_packets;
4203 	struct stmmac_txq_stats *txq_stats;
4204 	struct stmmac_tx_queue *tx_q;
4205 	u32 pay_len, mss, queue;
4206 	int i, first_tx, nfrags;
4207 	u8 proto_hdr_len, hdr;
4208 	dma_addr_t des;
4209 	bool set_ic;
4210 
4211 	/* Always insert VLAN tag to SKB payload for TSO frames.
4212 	 *
4213 	 * Never insert VLAN tag by HW, since segments splited by
4214 	 * TSO engine will be un-tagged by mistake.
4215 	 */
4216 	if (skb_vlan_tag_present(skb)) {
4217 		skb = __vlan_hwaccel_push_inside(skb);
4218 		if (unlikely(!skb)) {
4219 			priv->xstats.tx_dropped++;
4220 			return NETDEV_TX_OK;
4221 		}
4222 	}
4223 
4224 	nfrags = skb_shinfo(skb)->nr_frags;
4225 	queue = skb_get_queue_mapping(skb);
4226 
4227 	tx_q = &priv->dma_conf.tx_queue[queue];
4228 	txq_stats = &priv->xstats.txq_stats[queue];
4229 	first_tx = tx_q->cur_tx;
4230 
4231 	/* Compute header lengths */
4232 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4233 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4234 		hdr = sizeof(struct udphdr);
4235 	} else {
4236 		proto_hdr_len = skb_tcp_all_headers(skb);
4237 		hdr = tcp_hdrlen(skb);
4238 	}
4239 
4240 	/* Desc availability based on threshold should be enough safe */
4241 	if (unlikely(stmmac_tx_avail(priv, queue) <
4242 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4243 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4244 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4245 								queue));
4246 			/* This is a hard error, log it. */
4247 			netdev_err(priv->dev,
4248 				   "%s: Tx Ring full when queue awake\n",
4249 				   __func__);
4250 		}
4251 		return NETDEV_TX_BUSY;
4252 	}
4253 
4254 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4255 
4256 	mss = skb_shinfo(skb)->gso_size;
4257 
4258 	/* set new MSS value if needed */
4259 	if (mss != tx_q->mss) {
4260 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4261 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4262 		else
4263 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4264 
4265 		stmmac_set_mss(priv, mss_desc, mss);
4266 		tx_q->mss = mss;
4267 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4268 						priv->dma_conf.dma_tx_size);
4269 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4270 	}
4271 
4272 	if (netif_msg_tx_queued(priv)) {
4273 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4274 			__func__, hdr, proto_hdr_len, pay_len, mss);
4275 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4276 			skb->data_len);
4277 	}
4278 
4279 	first_entry = tx_q->cur_tx;
4280 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4281 
4282 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4283 		desc = &tx_q->dma_entx[first_entry].basic;
4284 	else
4285 		desc = &tx_q->dma_tx[first_entry];
4286 	first = desc;
4287 
4288 	/* first descriptor: fill Headers on Buf1 */
4289 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4290 			     DMA_TO_DEVICE);
4291 	if (dma_mapping_error(priv->device, des))
4292 		goto dma_map_err;
4293 
4294 	stmmac_set_desc_addr(priv, first, des);
4295 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4296 			     (nfrags == 0), queue);
4297 
4298 	/* In case two or more DMA transmit descriptors are allocated for this
4299 	 * non-paged SKB data, the DMA buffer address should be saved to
4300 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4301 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4302 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4303 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4304 	 * sooner or later.
4305 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4306 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4307 	 * this DMA buffer right after the DMA engine completely finishes the
4308 	 * full buffer transmission.
4309 	 */
4310 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4311 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4312 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4313 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4314 
4315 	/* Prepare fragments */
4316 	for (i = 0; i < nfrags; i++) {
4317 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4318 
4319 		des = skb_frag_dma_map(priv->device, frag, 0,
4320 				       skb_frag_size(frag),
4321 				       DMA_TO_DEVICE);
4322 		if (dma_mapping_error(priv->device, des))
4323 			goto dma_map_err;
4324 
4325 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4326 				     (i == nfrags - 1), queue);
4327 
4328 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4329 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4330 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4331 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4332 	}
4333 
4334 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4335 
4336 	/* Only the last descriptor gets to point to the skb. */
4337 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4338 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4339 
4340 	/* Manage tx mitigation */
4341 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4342 	tx_q->tx_count_frames += tx_packets;
4343 
4344 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4345 		set_ic = true;
4346 	else if (!priv->tx_coal_frames[queue])
4347 		set_ic = false;
4348 	else if (tx_packets > priv->tx_coal_frames[queue])
4349 		set_ic = true;
4350 	else if ((tx_q->tx_count_frames %
4351 		  priv->tx_coal_frames[queue]) < tx_packets)
4352 		set_ic = true;
4353 	else
4354 		set_ic = false;
4355 
4356 	if (set_ic) {
4357 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4358 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4359 		else
4360 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4361 
4362 		tx_q->tx_count_frames = 0;
4363 		stmmac_set_tx_ic(priv, desc);
4364 	}
4365 
4366 	/* We've used all descriptors we need for this skb, however,
4367 	 * advance cur_tx so that it references a fresh descriptor.
4368 	 * ndo_start_xmit will fill this descriptor the next time it's
4369 	 * called and stmmac_tx_clean may clean up to this descriptor.
4370 	 */
4371 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4372 
4373 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4374 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4375 			  __func__);
4376 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4377 	}
4378 
4379 	u64_stats_update_begin(&txq_stats->q_syncp);
4380 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4381 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4382 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4383 	if (set_ic)
4384 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4385 	u64_stats_update_end(&txq_stats->q_syncp);
4386 
4387 	if (priv->sarc_type)
4388 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4389 
4390 	skb_tx_timestamp(skb);
4391 
4392 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4393 		     priv->hwts_tx_en)) {
4394 		/* declare that device is doing timestamping */
4395 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4396 		stmmac_enable_tx_timestamp(priv, first);
4397 	}
4398 
4399 	/* Complete the first descriptor before granting the DMA */
4400 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4401 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4402 				   hdr / 4, (skb->len - proto_hdr_len));
4403 
4404 	/* If context desc is used to change MSS */
4405 	if (mss_desc) {
4406 		/* Make sure that first descriptor has been completely
4407 		 * written, including its own bit. This is because MSS is
4408 		 * actually before first descriptor, so we need to make
4409 		 * sure that MSS's own bit is the last thing written.
4410 		 */
4411 		dma_wmb();
4412 		stmmac_set_tx_owner(priv, mss_desc);
4413 	}
4414 
4415 	if (netif_msg_pktdata(priv)) {
4416 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4417 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4418 			tx_q->cur_tx, first, nfrags);
4419 		pr_info(">>> frame to be transmitted: ");
4420 		print_pkt(skb->data, skb_headlen(skb));
4421 	}
4422 
4423 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4424 
4425 	stmmac_flush_tx_descriptors(priv, queue);
4426 	stmmac_tx_timer_arm(priv, queue);
4427 
4428 	return NETDEV_TX_OK;
4429 
4430 dma_map_err:
4431 	dev_err(priv->device, "Tx dma map failed\n");
4432 	dev_kfree_skb(skb);
4433 	priv->xstats.tx_dropped++;
4434 	return NETDEV_TX_OK;
4435 }
4436 
4437 /**
4438  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4439  * @skb: socket buffer to check
4440  *
4441  * Check if a packet has an ethertype that will trigger the IP header checks
4442  * and IP/TCP checksum engine of the stmmac core.
4443  *
4444  * Return: true if the ethertype can trigger the checksum engine, false
4445  * otherwise
4446  */
4447 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4448 {
4449 	int depth = 0;
4450 	__be16 proto;
4451 
4452 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4453 				    &depth);
4454 
4455 	return (depth <= ETH_HLEN) &&
4456 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4457 }
4458 
4459 /**
4460  *  stmmac_xmit - Tx entry point of the driver
4461  *  @skb : the socket buffer
4462  *  @dev : device pointer
4463  *  Description : this is the tx entry point of the driver.
4464  *  It programs the chain or the ring and supports oversized frames
4465  *  and SG feature.
4466  */
4467 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4468 {
4469 	unsigned int first_entry, tx_packets, enh_desc;
4470 	struct stmmac_priv *priv = netdev_priv(dev);
4471 	unsigned int nopaged_len = skb_headlen(skb);
4472 	int i, csum_insertion = 0, is_jumbo = 0;
4473 	u32 queue = skb_get_queue_mapping(skb);
4474 	int nfrags = skb_shinfo(skb)->nr_frags;
4475 	int gso = skb_shinfo(skb)->gso_type;
4476 	struct stmmac_txq_stats *txq_stats;
4477 	struct dma_edesc *tbs_desc = NULL;
4478 	struct dma_desc *desc, *first;
4479 	struct stmmac_tx_queue *tx_q;
4480 	bool has_vlan, set_ic;
4481 	int entry, first_tx;
4482 	dma_addr_t des;
4483 
4484 	tx_q = &priv->dma_conf.tx_queue[queue];
4485 	txq_stats = &priv->xstats.txq_stats[queue];
4486 	first_tx = tx_q->cur_tx;
4487 
4488 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4489 		stmmac_disable_eee_mode(priv);
4490 
4491 	/* Manage oversized TCP frames for GMAC4 device */
4492 	if (skb_is_gso(skb) && priv->tso) {
4493 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4494 			return stmmac_tso_xmit(skb, dev);
4495 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4496 			return stmmac_tso_xmit(skb, dev);
4497 	}
4498 
4499 	if (priv->est && priv->est->enable &&
4500 	    priv->est->max_sdu[queue] &&
4501 	    skb->len > priv->est->max_sdu[queue]){
4502 		priv->xstats.max_sdu_txq_drop[queue]++;
4503 		goto max_sdu_err;
4504 	}
4505 
4506 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4507 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4508 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4509 								queue));
4510 			/* This is a hard error, log it. */
4511 			netdev_err(priv->dev,
4512 				   "%s: Tx Ring full when queue awake\n",
4513 				   __func__);
4514 		}
4515 		return NETDEV_TX_BUSY;
4516 	}
4517 
4518 	/* Check if VLAN can be inserted by HW */
4519 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4520 
4521 	entry = tx_q->cur_tx;
4522 	first_entry = entry;
4523 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4524 
4525 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4526 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4527 	 * queues. In that case, checksum offloading for those queues that don't
4528 	 * support tx coe needs to fallback to software checksum calculation.
4529 	 *
4530 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4531 	 * also have to be checksummed in software.
4532 	 */
4533 	if (csum_insertion &&
4534 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4535 	     !stmmac_has_ip_ethertype(skb))) {
4536 		if (unlikely(skb_checksum_help(skb)))
4537 			goto dma_map_err;
4538 		csum_insertion = !csum_insertion;
4539 	}
4540 
4541 	if (likely(priv->extend_desc))
4542 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4543 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4544 		desc = &tx_q->dma_entx[entry].basic;
4545 	else
4546 		desc = tx_q->dma_tx + entry;
4547 
4548 	first = desc;
4549 
4550 	if (has_vlan)
4551 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4552 
4553 	enh_desc = priv->plat->enh_desc;
4554 	/* To program the descriptors according to the size of the frame */
4555 	if (enh_desc)
4556 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4557 
4558 	if (unlikely(is_jumbo)) {
4559 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4560 		if (unlikely(entry < 0) && (entry != -EINVAL))
4561 			goto dma_map_err;
4562 	}
4563 
4564 	for (i = 0; i < nfrags; i++) {
4565 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4566 		int len = skb_frag_size(frag);
4567 		bool last_segment = (i == (nfrags - 1));
4568 
4569 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4570 		WARN_ON(tx_q->tx_skbuff[entry]);
4571 
4572 		if (likely(priv->extend_desc))
4573 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4574 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4575 			desc = &tx_q->dma_entx[entry].basic;
4576 		else
4577 			desc = tx_q->dma_tx + entry;
4578 
4579 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4580 				       DMA_TO_DEVICE);
4581 		if (dma_mapping_error(priv->device, des))
4582 			goto dma_map_err; /* should reuse desc w/o issues */
4583 
4584 		tx_q->tx_skbuff_dma[entry].buf = des;
4585 
4586 		stmmac_set_desc_addr(priv, desc, des);
4587 
4588 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4589 		tx_q->tx_skbuff_dma[entry].len = len;
4590 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4591 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4592 
4593 		/* Prepare the descriptor and set the own bit too */
4594 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4595 				priv->mode, 1, last_segment, skb->len);
4596 	}
4597 
4598 	/* Only the last descriptor gets to point to the skb. */
4599 	tx_q->tx_skbuff[entry] = skb;
4600 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4601 
4602 	/* According to the coalesce parameter the IC bit for the latest
4603 	 * segment is reset and the timer re-started to clean the tx status.
4604 	 * This approach takes care about the fragments: desc is the first
4605 	 * element in case of no SG.
4606 	 */
4607 	tx_packets = (entry + 1) - first_tx;
4608 	tx_q->tx_count_frames += tx_packets;
4609 
4610 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4611 		set_ic = true;
4612 	else if (!priv->tx_coal_frames[queue])
4613 		set_ic = false;
4614 	else if (tx_packets > priv->tx_coal_frames[queue])
4615 		set_ic = true;
4616 	else if ((tx_q->tx_count_frames %
4617 		  priv->tx_coal_frames[queue]) < tx_packets)
4618 		set_ic = true;
4619 	else
4620 		set_ic = false;
4621 
4622 	if (set_ic) {
4623 		if (likely(priv->extend_desc))
4624 			desc = &tx_q->dma_etx[entry].basic;
4625 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4626 			desc = &tx_q->dma_entx[entry].basic;
4627 		else
4628 			desc = &tx_q->dma_tx[entry];
4629 
4630 		tx_q->tx_count_frames = 0;
4631 		stmmac_set_tx_ic(priv, desc);
4632 	}
4633 
4634 	/* We've used all descriptors we need for this skb, however,
4635 	 * advance cur_tx so that it references a fresh descriptor.
4636 	 * ndo_start_xmit will fill this descriptor the next time it's
4637 	 * called and stmmac_tx_clean may clean up to this descriptor.
4638 	 */
4639 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4640 	tx_q->cur_tx = entry;
4641 
4642 	if (netif_msg_pktdata(priv)) {
4643 		netdev_dbg(priv->dev,
4644 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4645 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4646 			   entry, first, nfrags);
4647 
4648 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4649 		print_pkt(skb->data, skb->len);
4650 	}
4651 
4652 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4653 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4654 			  __func__);
4655 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4656 	}
4657 
4658 	u64_stats_update_begin(&txq_stats->q_syncp);
4659 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4660 	if (set_ic)
4661 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4662 	u64_stats_update_end(&txq_stats->q_syncp);
4663 
4664 	if (priv->sarc_type)
4665 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4666 
4667 	skb_tx_timestamp(skb);
4668 
4669 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4670 	 * problems because all the descriptors are actually ready to be
4671 	 * passed to the DMA engine.
4672 	 */
4673 	if (likely(!is_jumbo)) {
4674 		bool last_segment = (nfrags == 0);
4675 
4676 		des = dma_map_single(priv->device, skb->data,
4677 				     nopaged_len, DMA_TO_DEVICE);
4678 		if (dma_mapping_error(priv->device, des))
4679 			goto dma_map_err;
4680 
4681 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4682 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4683 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4684 
4685 		stmmac_set_desc_addr(priv, first, des);
4686 
4687 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4688 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4689 
4690 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4691 			     priv->hwts_tx_en)) {
4692 			/* declare that device is doing timestamping */
4693 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4694 			stmmac_enable_tx_timestamp(priv, first);
4695 		}
4696 
4697 		/* Prepare the first descriptor setting the OWN bit too */
4698 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4699 				csum_insertion, priv->mode, 0, last_segment,
4700 				skb->len);
4701 	}
4702 
4703 	if (tx_q->tbs & STMMAC_TBS_EN) {
4704 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4705 
4706 		tbs_desc = &tx_q->dma_entx[first_entry];
4707 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4708 	}
4709 
4710 	stmmac_set_tx_owner(priv, first);
4711 
4712 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4713 
4714 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4715 
4716 	stmmac_flush_tx_descriptors(priv, queue);
4717 	stmmac_tx_timer_arm(priv, queue);
4718 
4719 	return NETDEV_TX_OK;
4720 
4721 dma_map_err:
4722 	netdev_err(priv->dev, "Tx DMA map failed\n");
4723 max_sdu_err:
4724 	dev_kfree_skb(skb);
4725 	priv->xstats.tx_dropped++;
4726 	return NETDEV_TX_OK;
4727 }
4728 
4729 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4730 {
4731 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4732 	__be16 vlan_proto = veth->h_vlan_proto;
4733 	u16 vlanid;
4734 
4735 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4736 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4737 	    (vlan_proto == htons(ETH_P_8021AD) &&
4738 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4739 		/* pop the vlan tag */
4740 		vlanid = ntohs(veth->h_vlan_TCI);
4741 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4742 		skb_pull(skb, VLAN_HLEN);
4743 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4744 	}
4745 }
4746 
4747 /**
4748  * stmmac_rx_refill - refill used skb preallocated buffers
4749  * @priv: driver private structure
4750  * @queue: RX queue index
4751  * Description : this is to reallocate the skb for the reception process
4752  * that is based on zero-copy.
4753  */
4754 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4755 {
4756 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4757 	int dirty = stmmac_rx_dirty(priv, queue);
4758 	unsigned int entry = rx_q->dirty_rx;
4759 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4760 
4761 	if (priv->dma_cap.host_dma_width <= 32)
4762 		gfp |= GFP_DMA32;
4763 
4764 	while (dirty-- > 0) {
4765 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4766 		struct dma_desc *p;
4767 		bool use_rx_wd;
4768 
4769 		if (priv->extend_desc)
4770 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4771 		else
4772 			p = rx_q->dma_rx + entry;
4773 
4774 		if (!buf->page) {
4775 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4776 			if (!buf->page)
4777 				break;
4778 		}
4779 
4780 		if (priv->sph && !buf->sec_page) {
4781 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4782 			if (!buf->sec_page)
4783 				break;
4784 
4785 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4786 		}
4787 
4788 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4789 
4790 		stmmac_set_desc_addr(priv, p, buf->addr);
4791 		if (priv->sph)
4792 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4793 		else
4794 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4795 		stmmac_refill_desc3(priv, rx_q, p);
4796 
4797 		rx_q->rx_count_frames++;
4798 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4799 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4800 			rx_q->rx_count_frames = 0;
4801 
4802 		use_rx_wd = !priv->rx_coal_frames[queue];
4803 		use_rx_wd |= rx_q->rx_count_frames > 0;
4804 		if (!priv->use_riwt)
4805 			use_rx_wd = false;
4806 
4807 		dma_wmb();
4808 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4809 
4810 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4811 	}
4812 	rx_q->dirty_rx = entry;
4813 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4814 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4815 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4816 }
4817 
4818 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4819 				       struct dma_desc *p,
4820 				       int status, unsigned int len)
4821 {
4822 	unsigned int plen = 0, hlen = 0;
4823 	int coe = priv->hw->rx_csum;
4824 
4825 	/* Not first descriptor, buffer is always zero */
4826 	if (priv->sph && len)
4827 		return 0;
4828 
4829 	/* First descriptor, get split header length */
4830 	stmmac_get_rx_header_len(priv, p, &hlen);
4831 	if (priv->sph && hlen) {
4832 		priv->xstats.rx_split_hdr_pkt_n++;
4833 		return hlen;
4834 	}
4835 
4836 	/* First descriptor, not last descriptor and not split header */
4837 	if (status & rx_not_ls)
4838 		return priv->dma_conf.dma_buf_sz;
4839 
4840 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4841 
4842 	/* First descriptor and last descriptor and not split header */
4843 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4844 }
4845 
4846 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4847 				       struct dma_desc *p,
4848 				       int status, unsigned int len)
4849 {
4850 	int coe = priv->hw->rx_csum;
4851 	unsigned int plen = 0;
4852 
4853 	/* Not split header, buffer is not available */
4854 	if (!priv->sph)
4855 		return 0;
4856 
4857 	/* Not last descriptor */
4858 	if (status & rx_not_ls)
4859 		return priv->dma_conf.dma_buf_sz;
4860 
4861 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4862 
4863 	/* Last descriptor */
4864 	return plen - len;
4865 }
4866 
4867 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4868 				struct xdp_frame *xdpf, bool dma_map)
4869 {
4870 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4871 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4872 	unsigned int entry = tx_q->cur_tx;
4873 	struct dma_desc *tx_desc;
4874 	dma_addr_t dma_addr;
4875 	bool set_ic;
4876 
4877 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4878 		return STMMAC_XDP_CONSUMED;
4879 
4880 	if (priv->est && priv->est->enable &&
4881 	    priv->est->max_sdu[queue] &&
4882 	    xdpf->len > priv->est->max_sdu[queue]) {
4883 		priv->xstats.max_sdu_txq_drop[queue]++;
4884 		return STMMAC_XDP_CONSUMED;
4885 	}
4886 
4887 	if (likely(priv->extend_desc))
4888 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4889 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4890 		tx_desc = &tx_q->dma_entx[entry].basic;
4891 	else
4892 		tx_desc = tx_q->dma_tx + entry;
4893 
4894 	if (dma_map) {
4895 		dma_addr = dma_map_single(priv->device, xdpf->data,
4896 					  xdpf->len, DMA_TO_DEVICE);
4897 		if (dma_mapping_error(priv->device, dma_addr))
4898 			return STMMAC_XDP_CONSUMED;
4899 
4900 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4901 	} else {
4902 		struct page *page = virt_to_page(xdpf->data);
4903 
4904 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4905 			   xdpf->headroom;
4906 		dma_sync_single_for_device(priv->device, dma_addr,
4907 					   xdpf->len, DMA_BIDIRECTIONAL);
4908 
4909 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4910 	}
4911 
4912 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4913 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4914 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4915 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4916 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4917 
4918 	tx_q->xdpf[entry] = xdpf;
4919 
4920 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4921 
4922 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4923 			       true, priv->mode, true, true,
4924 			       xdpf->len);
4925 
4926 	tx_q->tx_count_frames++;
4927 
4928 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4929 		set_ic = true;
4930 	else
4931 		set_ic = false;
4932 
4933 	if (set_ic) {
4934 		tx_q->tx_count_frames = 0;
4935 		stmmac_set_tx_ic(priv, tx_desc);
4936 		u64_stats_update_begin(&txq_stats->q_syncp);
4937 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4938 		u64_stats_update_end(&txq_stats->q_syncp);
4939 	}
4940 
4941 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4942 
4943 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4944 	tx_q->cur_tx = entry;
4945 
4946 	return STMMAC_XDP_TX;
4947 }
4948 
4949 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4950 				   int cpu)
4951 {
4952 	int index = cpu;
4953 
4954 	if (unlikely(index < 0))
4955 		index = 0;
4956 
4957 	while (index >= priv->plat->tx_queues_to_use)
4958 		index -= priv->plat->tx_queues_to_use;
4959 
4960 	return index;
4961 }
4962 
4963 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4964 				struct xdp_buff *xdp)
4965 {
4966 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4967 	int cpu = smp_processor_id();
4968 	struct netdev_queue *nq;
4969 	int queue;
4970 	int res;
4971 
4972 	if (unlikely(!xdpf))
4973 		return STMMAC_XDP_CONSUMED;
4974 
4975 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4976 	nq = netdev_get_tx_queue(priv->dev, queue);
4977 
4978 	__netif_tx_lock(nq, cpu);
4979 	/* Avoids TX time-out as we are sharing with slow path */
4980 	txq_trans_cond_update(nq);
4981 
4982 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4983 	if (res == STMMAC_XDP_TX)
4984 		stmmac_flush_tx_descriptors(priv, queue);
4985 
4986 	__netif_tx_unlock(nq);
4987 
4988 	return res;
4989 }
4990 
4991 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4992 				 struct bpf_prog *prog,
4993 				 struct xdp_buff *xdp)
4994 {
4995 	u32 act;
4996 	int res;
4997 
4998 	act = bpf_prog_run_xdp(prog, xdp);
4999 	switch (act) {
5000 	case XDP_PASS:
5001 		res = STMMAC_XDP_PASS;
5002 		break;
5003 	case XDP_TX:
5004 		res = stmmac_xdp_xmit_back(priv, xdp);
5005 		break;
5006 	case XDP_REDIRECT:
5007 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5008 			res = STMMAC_XDP_CONSUMED;
5009 		else
5010 			res = STMMAC_XDP_REDIRECT;
5011 		break;
5012 	default:
5013 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5014 		fallthrough;
5015 	case XDP_ABORTED:
5016 		trace_xdp_exception(priv->dev, prog, act);
5017 		fallthrough;
5018 	case XDP_DROP:
5019 		res = STMMAC_XDP_CONSUMED;
5020 		break;
5021 	}
5022 
5023 	return res;
5024 }
5025 
5026 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5027 					   struct xdp_buff *xdp)
5028 {
5029 	struct bpf_prog *prog;
5030 	int res;
5031 
5032 	prog = READ_ONCE(priv->xdp_prog);
5033 	if (!prog) {
5034 		res = STMMAC_XDP_PASS;
5035 		goto out;
5036 	}
5037 
5038 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5039 out:
5040 	return ERR_PTR(-res);
5041 }
5042 
5043 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5044 				   int xdp_status)
5045 {
5046 	int cpu = smp_processor_id();
5047 	int queue;
5048 
5049 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5050 
5051 	if (xdp_status & STMMAC_XDP_TX)
5052 		stmmac_tx_timer_arm(priv, queue);
5053 
5054 	if (xdp_status & STMMAC_XDP_REDIRECT)
5055 		xdp_do_flush();
5056 }
5057 
5058 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5059 					       struct xdp_buff *xdp)
5060 {
5061 	unsigned int metasize = xdp->data - xdp->data_meta;
5062 	unsigned int datasize = xdp->data_end - xdp->data;
5063 	struct sk_buff *skb;
5064 
5065 	skb = napi_alloc_skb(&ch->rxtx_napi,
5066 			     xdp->data_end - xdp->data_hard_start);
5067 	if (unlikely(!skb))
5068 		return NULL;
5069 
5070 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5071 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5072 	if (metasize)
5073 		skb_metadata_set(skb, metasize);
5074 
5075 	return skb;
5076 }
5077 
5078 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5079 				   struct dma_desc *p, struct dma_desc *np,
5080 				   struct xdp_buff *xdp)
5081 {
5082 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5083 	struct stmmac_channel *ch = &priv->channel[queue];
5084 	unsigned int len = xdp->data_end - xdp->data;
5085 	enum pkt_hash_types hash_type;
5086 	int coe = priv->hw->rx_csum;
5087 	struct sk_buff *skb;
5088 	u32 hash;
5089 
5090 	skb = stmmac_construct_skb_zc(ch, xdp);
5091 	if (!skb) {
5092 		priv->xstats.rx_dropped++;
5093 		return;
5094 	}
5095 
5096 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5097 	if (priv->hw->hw_vlan_en)
5098 		/* MAC level stripping. */
5099 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5100 	else
5101 		/* Driver level stripping. */
5102 		stmmac_rx_vlan(priv->dev, skb);
5103 	skb->protocol = eth_type_trans(skb, priv->dev);
5104 
5105 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5106 		skb_checksum_none_assert(skb);
5107 	else
5108 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5109 
5110 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5111 		skb_set_hash(skb, hash, hash_type);
5112 
5113 	skb_record_rx_queue(skb, queue);
5114 	napi_gro_receive(&ch->rxtx_napi, skb);
5115 
5116 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5117 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5118 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5119 	u64_stats_update_end(&rxq_stats->napi_syncp);
5120 }
5121 
5122 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5123 {
5124 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5125 	unsigned int entry = rx_q->dirty_rx;
5126 	struct dma_desc *rx_desc = NULL;
5127 	bool ret = true;
5128 
5129 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5130 
5131 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5132 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5133 		dma_addr_t dma_addr;
5134 		bool use_rx_wd;
5135 
5136 		if (!buf->xdp) {
5137 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5138 			if (!buf->xdp) {
5139 				ret = false;
5140 				break;
5141 			}
5142 		}
5143 
5144 		if (priv->extend_desc)
5145 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5146 		else
5147 			rx_desc = rx_q->dma_rx + entry;
5148 
5149 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5150 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5151 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5152 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5153 
5154 		rx_q->rx_count_frames++;
5155 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5156 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5157 			rx_q->rx_count_frames = 0;
5158 
5159 		use_rx_wd = !priv->rx_coal_frames[queue];
5160 		use_rx_wd |= rx_q->rx_count_frames > 0;
5161 		if (!priv->use_riwt)
5162 			use_rx_wd = false;
5163 
5164 		dma_wmb();
5165 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5166 
5167 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5168 	}
5169 
5170 	if (rx_desc) {
5171 		rx_q->dirty_rx = entry;
5172 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5173 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5174 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5175 	}
5176 
5177 	return ret;
5178 }
5179 
5180 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5181 {
5182 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5183 	 * to represent incoming packet, whereas cb field in the same structure
5184 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5185 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5186 	 */
5187 	return (struct stmmac_xdp_buff *)xdp;
5188 }
5189 
5190 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5191 {
5192 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5193 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5194 	unsigned int count = 0, error = 0, len = 0;
5195 	int dirty = stmmac_rx_dirty(priv, queue);
5196 	unsigned int next_entry = rx_q->cur_rx;
5197 	u32 rx_errors = 0, rx_dropped = 0;
5198 	unsigned int desc_size;
5199 	struct bpf_prog *prog;
5200 	bool failure = false;
5201 	int xdp_status = 0;
5202 	int status = 0;
5203 
5204 	if (netif_msg_rx_status(priv)) {
5205 		void *rx_head;
5206 
5207 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5208 		if (priv->extend_desc) {
5209 			rx_head = (void *)rx_q->dma_erx;
5210 			desc_size = sizeof(struct dma_extended_desc);
5211 		} else {
5212 			rx_head = (void *)rx_q->dma_rx;
5213 			desc_size = sizeof(struct dma_desc);
5214 		}
5215 
5216 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5217 				    rx_q->dma_rx_phy, desc_size);
5218 	}
5219 	while (count < limit) {
5220 		struct stmmac_rx_buffer *buf;
5221 		struct stmmac_xdp_buff *ctx;
5222 		unsigned int buf1_len = 0;
5223 		struct dma_desc *np, *p;
5224 		int entry;
5225 		int res;
5226 
5227 		if (!count && rx_q->state_saved) {
5228 			error = rx_q->state.error;
5229 			len = rx_q->state.len;
5230 		} else {
5231 			rx_q->state_saved = false;
5232 			error = 0;
5233 			len = 0;
5234 		}
5235 
5236 		if (count >= limit)
5237 			break;
5238 
5239 read_again:
5240 		buf1_len = 0;
5241 		entry = next_entry;
5242 		buf = &rx_q->buf_pool[entry];
5243 
5244 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5245 			failure = failure ||
5246 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5247 			dirty = 0;
5248 		}
5249 
5250 		if (priv->extend_desc)
5251 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5252 		else
5253 			p = rx_q->dma_rx + entry;
5254 
5255 		/* read the status of the incoming frame */
5256 		status = stmmac_rx_status(priv, &priv->xstats, p);
5257 		/* check if managed by the DMA otherwise go ahead */
5258 		if (unlikely(status & dma_own))
5259 			break;
5260 
5261 		/* Prefetch the next RX descriptor */
5262 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5263 						priv->dma_conf.dma_rx_size);
5264 		next_entry = rx_q->cur_rx;
5265 
5266 		if (priv->extend_desc)
5267 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5268 		else
5269 			np = rx_q->dma_rx + next_entry;
5270 
5271 		prefetch(np);
5272 
5273 		/* Ensure a valid XSK buffer before proceed */
5274 		if (!buf->xdp)
5275 			break;
5276 
5277 		if (priv->extend_desc)
5278 			stmmac_rx_extended_status(priv, &priv->xstats,
5279 						  rx_q->dma_erx + entry);
5280 		if (unlikely(status == discard_frame)) {
5281 			xsk_buff_free(buf->xdp);
5282 			buf->xdp = NULL;
5283 			dirty++;
5284 			error = 1;
5285 			if (!priv->hwts_rx_en)
5286 				rx_errors++;
5287 		}
5288 
5289 		if (unlikely(error && (status & rx_not_ls)))
5290 			goto read_again;
5291 		if (unlikely(error)) {
5292 			count++;
5293 			continue;
5294 		}
5295 
5296 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5297 		if (likely(status & rx_not_ls)) {
5298 			xsk_buff_free(buf->xdp);
5299 			buf->xdp = NULL;
5300 			dirty++;
5301 			count++;
5302 			goto read_again;
5303 		}
5304 
5305 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5306 		ctx->priv = priv;
5307 		ctx->desc = p;
5308 		ctx->ndesc = np;
5309 
5310 		/* XDP ZC Frame only support primary buffers for now */
5311 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5312 		len += buf1_len;
5313 
5314 		/* ACS is disabled; strip manually. */
5315 		if (likely(!(status & rx_not_ls))) {
5316 			buf1_len -= ETH_FCS_LEN;
5317 			len -= ETH_FCS_LEN;
5318 		}
5319 
5320 		/* RX buffer is good and fit into a XSK pool buffer */
5321 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5322 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5323 
5324 		prog = READ_ONCE(priv->xdp_prog);
5325 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5326 
5327 		switch (res) {
5328 		case STMMAC_XDP_PASS:
5329 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5330 			xsk_buff_free(buf->xdp);
5331 			break;
5332 		case STMMAC_XDP_CONSUMED:
5333 			xsk_buff_free(buf->xdp);
5334 			rx_dropped++;
5335 			break;
5336 		case STMMAC_XDP_TX:
5337 		case STMMAC_XDP_REDIRECT:
5338 			xdp_status |= res;
5339 			break;
5340 		}
5341 
5342 		buf->xdp = NULL;
5343 		dirty++;
5344 		count++;
5345 	}
5346 
5347 	if (status & rx_not_ls) {
5348 		rx_q->state_saved = true;
5349 		rx_q->state.error = error;
5350 		rx_q->state.len = len;
5351 	}
5352 
5353 	stmmac_finalize_xdp_rx(priv, xdp_status);
5354 
5355 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5356 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5357 	u64_stats_update_end(&rxq_stats->napi_syncp);
5358 
5359 	priv->xstats.rx_dropped += rx_dropped;
5360 	priv->xstats.rx_errors += rx_errors;
5361 
5362 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5363 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5364 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5365 		else
5366 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5367 
5368 		return (int)count;
5369 	}
5370 
5371 	return failure ? limit : (int)count;
5372 }
5373 
5374 /**
5375  * stmmac_rx - manage the receive process
5376  * @priv: driver private structure
5377  * @limit: napi bugget
5378  * @queue: RX queue index.
5379  * Description :  this the function called by the napi poll method.
5380  * It gets all the frames inside the ring.
5381  */
5382 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5383 {
5384 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5385 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5386 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5387 	struct stmmac_channel *ch = &priv->channel[queue];
5388 	unsigned int count = 0, error = 0, len = 0;
5389 	int status = 0, coe = priv->hw->rx_csum;
5390 	unsigned int next_entry = rx_q->cur_rx;
5391 	enum dma_data_direction dma_dir;
5392 	unsigned int desc_size;
5393 	struct sk_buff *skb = NULL;
5394 	struct stmmac_xdp_buff ctx;
5395 	int xdp_status = 0;
5396 	int buf_sz;
5397 
5398 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5399 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5400 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5401 
5402 	if (netif_msg_rx_status(priv)) {
5403 		void *rx_head;
5404 
5405 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5406 		if (priv->extend_desc) {
5407 			rx_head = (void *)rx_q->dma_erx;
5408 			desc_size = sizeof(struct dma_extended_desc);
5409 		} else {
5410 			rx_head = (void *)rx_q->dma_rx;
5411 			desc_size = sizeof(struct dma_desc);
5412 		}
5413 
5414 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5415 				    rx_q->dma_rx_phy, desc_size);
5416 	}
5417 	while (count < limit) {
5418 		unsigned int buf1_len = 0, buf2_len = 0;
5419 		enum pkt_hash_types hash_type;
5420 		struct stmmac_rx_buffer *buf;
5421 		struct dma_desc *np, *p;
5422 		int entry;
5423 		u32 hash;
5424 
5425 		if (!count && rx_q->state_saved) {
5426 			skb = rx_q->state.skb;
5427 			error = rx_q->state.error;
5428 			len = rx_q->state.len;
5429 		} else {
5430 			rx_q->state_saved = false;
5431 			skb = NULL;
5432 			error = 0;
5433 			len = 0;
5434 		}
5435 
5436 read_again:
5437 		if (count >= limit)
5438 			break;
5439 
5440 		buf1_len = 0;
5441 		buf2_len = 0;
5442 		entry = next_entry;
5443 		buf = &rx_q->buf_pool[entry];
5444 
5445 		if (priv->extend_desc)
5446 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5447 		else
5448 			p = rx_q->dma_rx + entry;
5449 
5450 		/* read the status of the incoming frame */
5451 		status = stmmac_rx_status(priv, &priv->xstats, p);
5452 		/* check if managed by the DMA otherwise go ahead */
5453 		if (unlikely(status & dma_own))
5454 			break;
5455 
5456 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5457 						priv->dma_conf.dma_rx_size);
5458 		next_entry = rx_q->cur_rx;
5459 
5460 		if (priv->extend_desc)
5461 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5462 		else
5463 			np = rx_q->dma_rx + next_entry;
5464 
5465 		prefetch(np);
5466 
5467 		if (priv->extend_desc)
5468 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5469 		if (unlikely(status == discard_frame)) {
5470 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5471 			buf->page = NULL;
5472 			error = 1;
5473 			if (!priv->hwts_rx_en)
5474 				rx_errors++;
5475 		}
5476 
5477 		if (unlikely(error && (status & rx_not_ls)))
5478 			goto read_again;
5479 		if (unlikely(error)) {
5480 			dev_kfree_skb(skb);
5481 			skb = NULL;
5482 			count++;
5483 			continue;
5484 		}
5485 
5486 		/* Buffer is good. Go on. */
5487 
5488 		prefetch(page_address(buf->page) + buf->page_offset);
5489 		if (buf->sec_page)
5490 			prefetch(page_address(buf->sec_page));
5491 
5492 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5493 		len += buf1_len;
5494 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5495 		len += buf2_len;
5496 
5497 		/* ACS is disabled; strip manually. */
5498 		if (likely(!(status & rx_not_ls))) {
5499 			if (buf2_len) {
5500 				buf2_len -= ETH_FCS_LEN;
5501 				len -= ETH_FCS_LEN;
5502 			} else if (buf1_len) {
5503 				buf1_len -= ETH_FCS_LEN;
5504 				len -= ETH_FCS_LEN;
5505 			}
5506 		}
5507 
5508 		if (!skb) {
5509 			unsigned int pre_len, sync_len;
5510 
5511 			dma_sync_single_for_cpu(priv->device, buf->addr,
5512 						buf1_len, dma_dir);
5513 
5514 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5515 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5516 					 buf->page_offset, buf1_len, true);
5517 
5518 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5519 				  buf->page_offset;
5520 
5521 			ctx.priv = priv;
5522 			ctx.desc = p;
5523 			ctx.ndesc = np;
5524 
5525 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5526 			/* Due xdp_adjust_tail: DMA sync for_device
5527 			 * cover max len CPU touch
5528 			 */
5529 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5530 				   buf->page_offset;
5531 			sync_len = max(sync_len, pre_len);
5532 
5533 			/* For Not XDP_PASS verdict */
5534 			if (IS_ERR(skb)) {
5535 				unsigned int xdp_res = -PTR_ERR(skb);
5536 
5537 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5538 					page_pool_put_page(rx_q->page_pool,
5539 							   virt_to_head_page(ctx.xdp.data),
5540 							   sync_len, true);
5541 					buf->page = NULL;
5542 					rx_dropped++;
5543 
5544 					/* Clear skb as it was set as
5545 					 * status by XDP program.
5546 					 */
5547 					skb = NULL;
5548 
5549 					if (unlikely((status & rx_not_ls)))
5550 						goto read_again;
5551 
5552 					count++;
5553 					continue;
5554 				} else if (xdp_res & (STMMAC_XDP_TX |
5555 						      STMMAC_XDP_REDIRECT)) {
5556 					xdp_status |= xdp_res;
5557 					buf->page = NULL;
5558 					skb = NULL;
5559 					count++;
5560 					continue;
5561 				}
5562 			}
5563 		}
5564 
5565 		if (!skb) {
5566 			/* XDP program may expand or reduce tail */
5567 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5568 
5569 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5570 			if (!skb) {
5571 				rx_dropped++;
5572 				count++;
5573 				goto drain_data;
5574 			}
5575 
5576 			/* XDP program may adjust header */
5577 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5578 			skb_put(skb, buf1_len);
5579 
5580 			/* Data payload copied into SKB, page ready for recycle */
5581 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5582 			buf->page = NULL;
5583 		} else if (buf1_len) {
5584 			dma_sync_single_for_cpu(priv->device, buf->addr,
5585 						buf1_len, dma_dir);
5586 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5587 					buf->page, buf->page_offset, buf1_len,
5588 					priv->dma_conf.dma_buf_sz);
5589 
5590 			/* Data payload appended into SKB */
5591 			skb_mark_for_recycle(skb);
5592 			buf->page = NULL;
5593 		}
5594 
5595 		if (buf2_len) {
5596 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5597 						buf2_len, dma_dir);
5598 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5599 					buf->sec_page, 0, buf2_len,
5600 					priv->dma_conf.dma_buf_sz);
5601 
5602 			/* Data payload appended into SKB */
5603 			skb_mark_for_recycle(skb);
5604 			buf->sec_page = NULL;
5605 		}
5606 
5607 drain_data:
5608 		if (likely(status & rx_not_ls))
5609 			goto read_again;
5610 		if (!skb)
5611 			continue;
5612 
5613 		/* Got entire packet into SKB. Finish it. */
5614 
5615 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5616 
5617 		if (priv->hw->hw_vlan_en)
5618 			/* MAC level stripping. */
5619 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5620 		else
5621 			/* Driver level stripping. */
5622 			stmmac_rx_vlan(priv->dev, skb);
5623 
5624 		skb->protocol = eth_type_trans(skb, priv->dev);
5625 
5626 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5627 			skb_checksum_none_assert(skb);
5628 		else
5629 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5630 
5631 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5632 			skb_set_hash(skb, hash, hash_type);
5633 
5634 		skb_record_rx_queue(skb, queue);
5635 		napi_gro_receive(&ch->rx_napi, skb);
5636 		skb = NULL;
5637 
5638 		rx_packets++;
5639 		rx_bytes += len;
5640 		count++;
5641 	}
5642 
5643 	if (status & rx_not_ls || skb) {
5644 		rx_q->state_saved = true;
5645 		rx_q->state.skb = skb;
5646 		rx_q->state.error = error;
5647 		rx_q->state.len = len;
5648 	}
5649 
5650 	stmmac_finalize_xdp_rx(priv, xdp_status);
5651 
5652 	stmmac_rx_refill(priv, queue);
5653 
5654 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5655 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5656 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5657 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5658 	u64_stats_update_end(&rxq_stats->napi_syncp);
5659 
5660 	priv->xstats.rx_dropped += rx_dropped;
5661 	priv->xstats.rx_errors += rx_errors;
5662 
5663 	return count;
5664 }
5665 
5666 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5667 {
5668 	struct stmmac_channel *ch =
5669 		container_of(napi, struct stmmac_channel, rx_napi);
5670 	struct stmmac_priv *priv = ch->priv_data;
5671 	struct stmmac_rxq_stats *rxq_stats;
5672 	u32 chan = ch->index;
5673 	int work_done;
5674 
5675 	rxq_stats = &priv->xstats.rxq_stats[chan];
5676 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5677 	u64_stats_inc(&rxq_stats->napi.poll);
5678 	u64_stats_update_end(&rxq_stats->napi_syncp);
5679 
5680 	work_done = stmmac_rx(priv, budget, chan);
5681 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5682 		unsigned long flags;
5683 
5684 		spin_lock_irqsave(&ch->lock, flags);
5685 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5686 		spin_unlock_irqrestore(&ch->lock, flags);
5687 	}
5688 
5689 	return work_done;
5690 }
5691 
5692 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5693 {
5694 	struct stmmac_channel *ch =
5695 		container_of(napi, struct stmmac_channel, tx_napi);
5696 	struct stmmac_priv *priv = ch->priv_data;
5697 	struct stmmac_txq_stats *txq_stats;
5698 	bool pending_packets = false;
5699 	u32 chan = ch->index;
5700 	int work_done;
5701 
5702 	txq_stats = &priv->xstats.txq_stats[chan];
5703 	u64_stats_update_begin(&txq_stats->napi_syncp);
5704 	u64_stats_inc(&txq_stats->napi.poll);
5705 	u64_stats_update_end(&txq_stats->napi_syncp);
5706 
5707 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5708 	work_done = min(work_done, budget);
5709 
5710 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5711 		unsigned long flags;
5712 
5713 		spin_lock_irqsave(&ch->lock, flags);
5714 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5715 		spin_unlock_irqrestore(&ch->lock, flags);
5716 	}
5717 
5718 	/* TX still have packet to handle, check if we need to arm tx timer */
5719 	if (pending_packets)
5720 		stmmac_tx_timer_arm(priv, chan);
5721 
5722 	return work_done;
5723 }
5724 
5725 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5726 {
5727 	struct stmmac_channel *ch =
5728 		container_of(napi, struct stmmac_channel, rxtx_napi);
5729 	struct stmmac_priv *priv = ch->priv_data;
5730 	bool tx_pending_packets = false;
5731 	int rx_done, tx_done, rxtx_done;
5732 	struct stmmac_rxq_stats *rxq_stats;
5733 	struct stmmac_txq_stats *txq_stats;
5734 	u32 chan = ch->index;
5735 
5736 	rxq_stats = &priv->xstats.rxq_stats[chan];
5737 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5738 	u64_stats_inc(&rxq_stats->napi.poll);
5739 	u64_stats_update_end(&rxq_stats->napi_syncp);
5740 
5741 	txq_stats = &priv->xstats.txq_stats[chan];
5742 	u64_stats_update_begin(&txq_stats->napi_syncp);
5743 	u64_stats_inc(&txq_stats->napi.poll);
5744 	u64_stats_update_end(&txq_stats->napi_syncp);
5745 
5746 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5747 	tx_done = min(tx_done, budget);
5748 
5749 	rx_done = stmmac_rx_zc(priv, budget, chan);
5750 
5751 	rxtx_done = max(tx_done, rx_done);
5752 
5753 	/* If either TX or RX work is not complete, return budget
5754 	 * and keep pooling
5755 	 */
5756 	if (rxtx_done >= budget)
5757 		return budget;
5758 
5759 	/* all work done, exit the polling mode */
5760 	if (napi_complete_done(napi, rxtx_done)) {
5761 		unsigned long flags;
5762 
5763 		spin_lock_irqsave(&ch->lock, flags);
5764 		/* Both RX and TX work done are compelte,
5765 		 * so enable both RX & TX IRQs.
5766 		 */
5767 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5768 		spin_unlock_irqrestore(&ch->lock, flags);
5769 	}
5770 
5771 	/* TX still have packet to handle, check if we need to arm tx timer */
5772 	if (tx_pending_packets)
5773 		stmmac_tx_timer_arm(priv, chan);
5774 
5775 	return min(rxtx_done, budget - 1);
5776 }
5777 
5778 /**
5779  *  stmmac_tx_timeout
5780  *  @dev : Pointer to net device structure
5781  *  @txqueue: the index of the hanging transmit queue
5782  *  Description: this function is called when a packet transmission fails to
5783  *   complete within a reasonable time. The driver will mark the error in the
5784  *   netdev structure and arrange for the device to be reset to a sane state
5785  *   in order to transmit a new packet.
5786  */
5787 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5788 {
5789 	struct stmmac_priv *priv = netdev_priv(dev);
5790 
5791 	stmmac_global_err(priv);
5792 }
5793 
5794 /**
5795  *  stmmac_set_rx_mode - entry point for multicast addressing
5796  *  @dev : pointer to the device structure
5797  *  Description:
5798  *  This function is a driver entry point which gets called by the kernel
5799  *  whenever multicast addresses must be enabled/disabled.
5800  *  Return value:
5801  *  void.
5802  */
5803 static void stmmac_set_rx_mode(struct net_device *dev)
5804 {
5805 	struct stmmac_priv *priv = netdev_priv(dev);
5806 
5807 	stmmac_set_filter(priv, priv->hw, dev);
5808 }
5809 
5810 /**
5811  *  stmmac_change_mtu - entry point to change MTU size for the device.
5812  *  @dev : device pointer.
5813  *  @new_mtu : the new MTU size for the device.
5814  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5815  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5816  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5817  *  Return value:
5818  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5819  *  file on failure.
5820  */
5821 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5822 {
5823 	struct stmmac_priv *priv = netdev_priv(dev);
5824 	int txfifosz = priv->plat->tx_fifo_size;
5825 	struct stmmac_dma_conf *dma_conf;
5826 	const int mtu = new_mtu;
5827 	int ret;
5828 
5829 	if (txfifosz == 0)
5830 		txfifosz = priv->dma_cap.tx_fifo_size;
5831 
5832 	txfifosz /= priv->plat->tx_queues_to_use;
5833 
5834 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5835 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5836 		return -EINVAL;
5837 	}
5838 
5839 	new_mtu = STMMAC_ALIGN(new_mtu);
5840 
5841 	/* If condition true, FIFO is too small or MTU too large */
5842 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5843 		return -EINVAL;
5844 
5845 	if (netif_running(dev)) {
5846 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5847 		/* Try to allocate the new DMA conf with the new mtu */
5848 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5849 		if (IS_ERR(dma_conf)) {
5850 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5851 				   mtu);
5852 			return PTR_ERR(dma_conf);
5853 		}
5854 
5855 		stmmac_release(dev);
5856 
5857 		ret = __stmmac_open(dev, dma_conf);
5858 		if (ret) {
5859 			free_dma_desc_resources(priv, dma_conf);
5860 			kfree(dma_conf);
5861 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5862 			return ret;
5863 		}
5864 
5865 		kfree(dma_conf);
5866 
5867 		stmmac_set_rx_mode(dev);
5868 	}
5869 
5870 	WRITE_ONCE(dev->mtu, mtu);
5871 	netdev_update_features(dev);
5872 
5873 	return 0;
5874 }
5875 
5876 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5877 					     netdev_features_t features)
5878 {
5879 	struct stmmac_priv *priv = netdev_priv(dev);
5880 
5881 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5882 		features &= ~NETIF_F_RXCSUM;
5883 
5884 	if (!priv->plat->tx_coe)
5885 		features &= ~NETIF_F_CSUM_MASK;
5886 
5887 	/* Some GMAC devices have a bugged Jumbo frame support that
5888 	 * needs to have the Tx COE disabled for oversized frames
5889 	 * (due to limited buffer sizes). In this case we disable
5890 	 * the TX csum insertion in the TDES and not use SF.
5891 	 */
5892 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5893 		features &= ~NETIF_F_CSUM_MASK;
5894 
5895 	/* Disable tso if asked by ethtool */
5896 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5897 		if (features & NETIF_F_TSO)
5898 			priv->tso = true;
5899 		else
5900 			priv->tso = false;
5901 	}
5902 
5903 	return features;
5904 }
5905 
5906 static int stmmac_set_features(struct net_device *netdev,
5907 			       netdev_features_t features)
5908 {
5909 	struct stmmac_priv *priv = netdev_priv(netdev);
5910 
5911 	/* Keep the COE Type in case of csum is supporting */
5912 	if (features & NETIF_F_RXCSUM)
5913 		priv->hw->rx_csum = priv->plat->rx_coe;
5914 	else
5915 		priv->hw->rx_csum = 0;
5916 	/* No check needed because rx_coe has been set before and it will be
5917 	 * fixed in case of issue.
5918 	 */
5919 	stmmac_rx_ipc(priv, priv->hw);
5920 
5921 	if (priv->sph_cap) {
5922 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5923 		u32 chan;
5924 
5925 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5926 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5927 	}
5928 
5929 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5930 		priv->hw->hw_vlan_en = true;
5931 	else
5932 		priv->hw->hw_vlan_en = false;
5933 
5934 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5935 
5936 	return 0;
5937 }
5938 
5939 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5940 {
5941 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5942 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5943 	u32 queues_count;
5944 	u32 queue;
5945 	bool xmac;
5946 
5947 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5948 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5949 
5950 	if (priv->irq_wake)
5951 		pm_wakeup_event(priv->device, 0);
5952 
5953 	if (priv->dma_cap.estsel)
5954 		stmmac_est_irq_status(priv, priv, priv->dev,
5955 				      &priv->xstats, tx_cnt);
5956 
5957 	if (stmmac_fpe_supported(priv))
5958 		stmmac_fpe_irq_status(priv);
5959 
5960 	/* To handle GMAC own interrupts */
5961 	if ((priv->plat->has_gmac) || xmac) {
5962 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5963 
5964 		if (unlikely(status)) {
5965 			/* For LPI we need to save the tx status */
5966 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5967 				priv->tx_path_in_lpi_mode = true;
5968 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5969 				priv->tx_path_in_lpi_mode = false;
5970 		}
5971 
5972 		for (queue = 0; queue < queues_count; queue++)
5973 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
5974 
5975 		/* PCS link status */
5976 		if (priv->hw->pcs &&
5977 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5978 			if (priv->xstats.pcs_link)
5979 				netif_carrier_on(priv->dev);
5980 			else
5981 				netif_carrier_off(priv->dev);
5982 		}
5983 
5984 		stmmac_timestamp_interrupt(priv, priv);
5985 	}
5986 }
5987 
5988 /**
5989  *  stmmac_interrupt - main ISR
5990  *  @irq: interrupt number.
5991  *  @dev_id: to pass the net device pointer.
5992  *  Description: this is the main driver interrupt service routine.
5993  *  It can call:
5994  *  o DMA service routine (to manage incoming frame reception and transmission
5995  *    status)
5996  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5997  *    interrupts.
5998  */
5999 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6000 {
6001 	struct net_device *dev = (struct net_device *)dev_id;
6002 	struct stmmac_priv *priv = netdev_priv(dev);
6003 
6004 	/* Check if adapter is up */
6005 	if (test_bit(STMMAC_DOWN, &priv->state))
6006 		return IRQ_HANDLED;
6007 
6008 	/* Check ASP error if it isn't delivered via an individual IRQ */
6009 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6010 		return IRQ_HANDLED;
6011 
6012 	/* To handle Common interrupts */
6013 	stmmac_common_interrupt(priv);
6014 
6015 	/* To handle DMA interrupts */
6016 	stmmac_dma_interrupt(priv);
6017 
6018 	return IRQ_HANDLED;
6019 }
6020 
6021 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6022 {
6023 	struct net_device *dev = (struct net_device *)dev_id;
6024 	struct stmmac_priv *priv = netdev_priv(dev);
6025 
6026 	/* Check if adapter is up */
6027 	if (test_bit(STMMAC_DOWN, &priv->state))
6028 		return IRQ_HANDLED;
6029 
6030 	/* To handle Common interrupts */
6031 	stmmac_common_interrupt(priv);
6032 
6033 	return IRQ_HANDLED;
6034 }
6035 
6036 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6037 {
6038 	struct net_device *dev = (struct net_device *)dev_id;
6039 	struct stmmac_priv *priv = netdev_priv(dev);
6040 
6041 	/* Check if adapter is up */
6042 	if (test_bit(STMMAC_DOWN, &priv->state))
6043 		return IRQ_HANDLED;
6044 
6045 	/* Check if a fatal error happened */
6046 	stmmac_safety_feat_interrupt(priv);
6047 
6048 	return IRQ_HANDLED;
6049 }
6050 
6051 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6052 {
6053 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6054 	struct stmmac_dma_conf *dma_conf;
6055 	int chan = tx_q->queue_index;
6056 	struct stmmac_priv *priv;
6057 	int status;
6058 
6059 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6060 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6061 
6062 	/* Check if adapter is up */
6063 	if (test_bit(STMMAC_DOWN, &priv->state))
6064 		return IRQ_HANDLED;
6065 
6066 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6067 
6068 	if (unlikely(status & tx_hard_error_bump_tc)) {
6069 		/* Try to bump up the dma threshold on this failure */
6070 		stmmac_bump_dma_threshold(priv, chan);
6071 	} else if (unlikely(status == tx_hard_error)) {
6072 		stmmac_tx_err(priv, chan);
6073 	}
6074 
6075 	return IRQ_HANDLED;
6076 }
6077 
6078 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6079 {
6080 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6081 	struct stmmac_dma_conf *dma_conf;
6082 	int chan = rx_q->queue_index;
6083 	struct stmmac_priv *priv;
6084 
6085 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6086 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6087 
6088 	/* Check if adapter is up */
6089 	if (test_bit(STMMAC_DOWN, &priv->state))
6090 		return IRQ_HANDLED;
6091 
6092 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6093 
6094 	return IRQ_HANDLED;
6095 }
6096 
6097 /**
6098  *  stmmac_ioctl - Entry point for the Ioctl
6099  *  @dev: Device pointer.
6100  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6101  *  a proprietary structure used to pass information to the driver.
6102  *  @cmd: IOCTL command
6103  *  Description:
6104  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6105  */
6106 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6107 {
6108 	struct stmmac_priv *priv = netdev_priv (dev);
6109 	int ret = -EOPNOTSUPP;
6110 
6111 	if (!netif_running(dev))
6112 		return -EINVAL;
6113 
6114 	switch (cmd) {
6115 	case SIOCGMIIPHY:
6116 	case SIOCGMIIREG:
6117 	case SIOCSMIIREG:
6118 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6119 		break;
6120 	case SIOCSHWTSTAMP:
6121 		ret = stmmac_hwtstamp_set(dev, rq);
6122 		break;
6123 	case SIOCGHWTSTAMP:
6124 		ret = stmmac_hwtstamp_get(dev, rq);
6125 		break;
6126 	default:
6127 		break;
6128 	}
6129 
6130 	return ret;
6131 }
6132 
6133 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6134 				    void *cb_priv)
6135 {
6136 	struct stmmac_priv *priv = cb_priv;
6137 	int ret = -EOPNOTSUPP;
6138 
6139 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6140 		return ret;
6141 
6142 	__stmmac_disable_all_queues(priv);
6143 
6144 	switch (type) {
6145 	case TC_SETUP_CLSU32:
6146 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6147 		break;
6148 	case TC_SETUP_CLSFLOWER:
6149 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6150 		break;
6151 	default:
6152 		break;
6153 	}
6154 
6155 	stmmac_enable_all_queues(priv);
6156 	return ret;
6157 }
6158 
6159 static LIST_HEAD(stmmac_block_cb_list);
6160 
6161 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6162 			   void *type_data)
6163 {
6164 	struct stmmac_priv *priv = netdev_priv(ndev);
6165 
6166 	switch (type) {
6167 	case TC_QUERY_CAPS:
6168 		return stmmac_tc_query_caps(priv, priv, type_data);
6169 	case TC_SETUP_QDISC_MQPRIO:
6170 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6171 	case TC_SETUP_BLOCK:
6172 		return flow_block_cb_setup_simple(type_data,
6173 						  &stmmac_block_cb_list,
6174 						  stmmac_setup_tc_block_cb,
6175 						  priv, priv, true);
6176 	case TC_SETUP_QDISC_CBS:
6177 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6178 	case TC_SETUP_QDISC_TAPRIO:
6179 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6180 	case TC_SETUP_QDISC_ETF:
6181 		return stmmac_tc_setup_etf(priv, priv, type_data);
6182 	default:
6183 		return -EOPNOTSUPP;
6184 	}
6185 }
6186 
6187 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6188 			       struct net_device *sb_dev)
6189 {
6190 	int gso = skb_shinfo(skb)->gso_type;
6191 
6192 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6193 		/*
6194 		 * There is no way to determine the number of TSO/USO
6195 		 * capable Queues. Let's use always the Queue 0
6196 		 * because if TSO/USO is supported then at least this
6197 		 * one will be capable.
6198 		 */
6199 		return 0;
6200 	}
6201 
6202 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6203 }
6204 
6205 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6206 {
6207 	struct stmmac_priv *priv = netdev_priv(ndev);
6208 	int ret = 0;
6209 
6210 	ret = pm_runtime_resume_and_get(priv->device);
6211 	if (ret < 0)
6212 		return ret;
6213 
6214 	ret = eth_mac_addr(ndev, addr);
6215 	if (ret)
6216 		goto set_mac_error;
6217 
6218 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6219 
6220 set_mac_error:
6221 	pm_runtime_put(priv->device);
6222 
6223 	return ret;
6224 }
6225 
6226 #ifdef CONFIG_DEBUG_FS
6227 static struct dentry *stmmac_fs_dir;
6228 
6229 static void sysfs_display_ring(void *head, int size, int extend_desc,
6230 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6231 {
6232 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6233 	struct dma_desc *p = (struct dma_desc *)head;
6234 	unsigned int desc_size;
6235 	dma_addr_t dma_addr;
6236 	int i;
6237 
6238 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6239 	for (i = 0; i < size; i++) {
6240 		dma_addr = dma_phy_addr + i * desc_size;
6241 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6242 				i, &dma_addr,
6243 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6244 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6245 		if (extend_desc)
6246 			p = &(++ep)->basic;
6247 		else
6248 			p++;
6249 	}
6250 }
6251 
6252 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6253 {
6254 	struct net_device *dev = seq->private;
6255 	struct stmmac_priv *priv = netdev_priv(dev);
6256 	u32 rx_count = priv->plat->rx_queues_to_use;
6257 	u32 tx_count = priv->plat->tx_queues_to_use;
6258 	u32 queue;
6259 
6260 	if ((dev->flags & IFF_UP) == 0)
6261 		return 0;
6262 
6263 	for (queue = 0; queue < rx_count; queue++) {
6264 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6265 
6266 		seq_printf(seq, "RX Queue %d:\n", queue);
6267 
6268 		if (priv->extend_desc) {
6269 			seq_printf(seq, "Extended descriptor ring:\n");
6270 			sysfs_display_ring((void *)rx_q->dma_erx,
6271 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6272 		} else {
6273 			seq_printf(seq, "Descriptor ring:\n");
6274 			sysfs_display_ring((void *)rx_q->dma_rx,
6275 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6276 		}
6277 	}
6278 
6279 	for (queue = 0; queue < tx_count; queue++) {
6280 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6281 
6282 		seq_printf(seq, "TX Queue %d:\n", queue);
6283 
6284 		if (priv->extend_desc) {
6285 			seq_printf(seq, "Extended descriptor ring:\n");
6286 			sysfs_display_ring((void *)tx_q->dma_etx,
6287 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6288 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6289 			seq_printf(seq, "Descriptor ring:\n");
6290 			sysfs_display_ring((void *)tx_q->dma_tx,
6291 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6292 		}
6293 	}
6294 
6295 	return 0;
6296 }
6297 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6298 
6299 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6300 {
6301 	static const char * const dwxgmac_timestamp_source[] = {
6302 		"None",
6303 		"Internal",
6304 		"External",
6305 		"Both",
6306 	};
6307 	static const char * const dwxgmac_safety_feature_desc[] = {
6308 		"No",
6309 		"All Safety Features with ECC and Parity",
6310 		"All Safety Features without ECC or Parity",
6311 		"All Safety Features with Parity Only",
6312 		"ECC Only",
6313 		"UNDEFINED",
6314 		"UNDEFINED",
6315 		"UNDEFINED",
6316 	};
6317 	struct net_device *dev = seq->private;
6318 	struct stmmac_priv *priv = netdev_priv(dev);
6319 
6320 	if (!priv->hw_cap_support) {
6321 		seq_printf(seq, "DMA HW features not supported\n");
6322 		return 0;
6323 	}
6324 
6325 	seq_printf(seq, "==============================\n");
6326 	seq_printf(seq, "\tDMA HW features\n");
6327 	seq_printf(seq, "==============================\n");
6328 
6329 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6330 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6331 	seq_printf(seq, "\t1000 Mbps: %s\n",
6332 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6333 	seq_printf(seq, "\tHalf duplex: %s\n",
6334 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6335 	if (priv->plat->has_xgmac) {
6336 		seq_printf(seq,
6337 			   "\tNumber of Additional MAC address registers: %d\n",
6338 			   priv->dma_cap.multi_addr);
6339 	} else {
6340 		seq_printf(seq, "\tHash Filter: %s\n",
6341 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6342 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6343 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6344 	}
6345 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6346 		   (priv->dma_cap.pcs) ? "Y" : "N");
6347 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6348 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6349 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6350 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6351 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6352 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6353 	seq_printf(seq, "\tRMON module: %s\n",
6354 		   (priv->dma_cap.rmon) ? "Y" : "N");
6355 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6356 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6357 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6358 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6359 	if (priv->plat->has_xgmac)
6360 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6361 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6362 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6363 		   (priv->dma_cap.eee) ? "Y" : "N");
6364 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6365 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6366 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6367 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6368 	    priv->plat->has_xgmac) {
6369 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6370 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6371 	} else {
6372 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6373 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6374 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6375 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6376 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6377 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6378 	}
6379 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6380 		   priv->dma_cap.number_rx_channel);
6381 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6382 		   priv->dma_cap.number_tx_channel);
6383 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6384 		   priv->dma_cap.number_rx_queues);
6385 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6386 		   priv->dma_cap.number_tx_queues);
6387 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6388 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6389 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6390 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6391 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6392 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6393 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6394 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6395 		   priv->dma_cap.pps_out_num);
6396 	seq_printf(seq, "\tSafety Features: %s\n",
6397 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6398 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6399 		   priv->dma_cap.frpsel ? "Y" : "N");
6400 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6401 		   priv->dma_cap.host_dma_width);
6402 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6403 		   priv->dma_cap.rssen ? "Y" : "N");
6404 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6405 		   priv->dma_cap.vlhash ? "Y" : "N");
6406 	seq_printf(seq, "\tSplit Header: %s\n",
6407 		   priv->dma_cap.sphen ? "Y" : "N");
6408 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6409 		   priv->dma_cap.vlins ? "Y" : "N");
6410 	seq_printf(seq, "\tDouble VLAN: %s\n",
6411 		   priv->dma_cap.dvlan ? "Y" : "N");
6412 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6413 		   priv->dma_cap.l3l4fnum);
6414 	seq_printf(seq, "\tARP Offloading: %s\n",
6415 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6416 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6417 		   priv->dma_cap.estsel ? "Y" : "N");
6418 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6419 		   priv->dma_cap.fpesel ? "Y" : "N");
6420 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6421 		   priv->dma_cap.tbssel ? "Y" : "N");
6422 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6423 		   priv->dma_cap.tbs_ch_num);
6424 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6425 		   priv->dma_cap.sgfsel ? "Y" : "N");
6426 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6427 		   BIT(priv->dma_cap.ttsfd) >> 1);
6428 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6429 		   priv->dma_cap.numtc);
6430 	seq_printf(seq, "\tDCB Feature: %s\n",
6431 		   priv->dma_cap.dcben ? "Y" : "N");
6432 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6433 		   priv->dma_cap.advthword ? "Y" : "N");
6434 	seq_printf(seq, "\tPTP Offload: %s\n",
6435 		   priv->dma_cap.ptoen ? "Y" : "N");
6436 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6437 		   priv->dma_cap.osten ? "Y" : "N");
6438 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6439 		   priv->dma_cap.pfcen ? "Y" : "N");
6440 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6441 		   BIT(priv->dma_cap.frpes) << 6);
6442 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6443 		   BIT(priv->dma_cap.frpbs) << 6);
6444 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6445 		   priv->dma_cap.frppipe_num);
6446 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6447 		   priv->dma_cap.nrvf_num ?
6448 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6449 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6450 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6451 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6452 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6453 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6454 		   priv->dma_cap.cbtisel ? "Y" : "N");
6455 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6456 		   priv->dma_cap.aux_snapshot_n);
6457 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6458 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6459 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6460 		   priv->dma_cap.edma ? "Y" : "N");
6461 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6462 		   priv->dma_cap.ediffc ? "Y" : "N");
6463 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6464 		   priv->dma_cap.vxn ? "Y" : "N");
6465 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6466 		   priv->dma_cap.dbgmem ? "Y" : "N");
6467 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6468 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6469 	return 0;
6470 }
6471 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6472 
6473 /* Use network device events to rename debugfs file entries.
6474  */
6475 static int stmmac_device_event(struct notifier_block *unused,
6476 			       unsigned long event, void *ptr)
6477 {
6478 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6479 	struct stmmac_priv *priv = netdev_priv(dev);
6480 
6481 	if (dev->netdev_ops != &stmmac_netdev_ops)
6482 		goto done;
6483 
6484 	switch (event) {
6485 	case NETDEV_CHANGENAME:
6486 		if (priv->dbgfs_dir)
6487 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6488 							 priv->dbgfs_dir,
6489 							 stmmac_fs_dir,
6490 							 dev->name);
6491 		break;
6492 	}
6493 done:
6494 	return NOTIFY_DONE;
6495 }
6496 
6497 static struct notifier_block stmmac_notifier = {
6498 	.notifier_call = stmmac_device_event,
6499 };
6500 
6501 static void stmmac_init_fs(struct net_device *dev)
6502 {
6503 	struct stmmac_priv *priv = netdev_priv(dev);
6504 
6505 	rtnl_lock();
6506 
6507 	/* Create per netdev entries */
6508 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6509 
6510 	/* Entry to report DMA RX/TX rings */
6511 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6512 			    &stmmac_rings_status_fops);
6513 
6514 	/* Entry to report the DMA HW features */
6515 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6516 			    &stmmac_dma_cap_fops);
6517 
6518 	rtnl_unlock();
6519 }
6520 
6521 static void stmmac_exit_fs(struct net_device *dev)
6522 {
6523 	struct stmmac_priv *priv = netdev_priv(dev);
6524 
6525 	debugfs_remove_recursive(priv->dbgfs_dir);
6526 }
6527 #endif /* CONFIG_DEBUG_FS */
6528 
6529 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6530 {
6531 	unsigned char *data = (unsigned char *)&vid_le;
6532 	unsigned char data_byte = 0;
6533 	u32 crc = ~0x0;
6534 	u32 temp = 0;
6535 	int i, bits;
6536 
6537 	bits = get_bitmask_order(VLAN_VID_MASK);
6538 	for (i = 0; i < bits; i++) {
6539 		if ((i % 8) == 0)
6540 			data_byte = data[i / 8];
6541 
6542 		temp = ((crc & 1) ^ data_byte) & 1;
6543 		crc >>= 1;
6544 		data_byte >>= 1;
6545 
6546 		if (temp)
6547 			crc ^= 0xedb88320;
6548 	}
6549 
6550 	return crc;
6551 }
6552 
6553 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6554 {
6555 	u32 crc, hash = 0;
6556 	u16 pmatch = 0;
6557 	int count = 0;
6558 	u16 vid = 0;
6559 
6560 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6561 		__le16 vid_le = cpu_to_le16(vid);
6562 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6563 		hash |= (1 << crc);
6564 		count++;
6565 	}
6566 
6567 	if (!priv->dma_cap.vlhash) {
6568 		if (count > 2) /* VID = 0 always passes filter */
6569 			return -EOPNOTSUPP;
6570 
6571 		pmatch = vid;
6572 		hash = 0;
6573 	}
6574 
6575 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6576 }
6577 
6578 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6579 {
6580 	struct stmmac_priv *priv = netdev_priv(ndev);
6581 	bool is_double = false;
6582 	int ret;
6583 
6584 	ret = pm_runtime_resume_and_get(priv->device);
6585 	if (ret < 0)
6586 		return ret;
6587 
6588 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6589 		is_double = true;
6590 
6591 	set_bit(vid, priv->active_vlans);
6592 	ret = stmmac_vlan_update(priv, is_double);
6593 	if (ret) {
6594 		clear_bit(vid, priv->active_vlans);
6595 		goto err_pm_put;
6596 	}
6597 
6598 	if (priv->hw->num_vlan) {
6599 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6600 		if (ret)
6601 			goto err_pm_put;
6602 	}
6603 err_pm_put:
6604 	pm_runtime_put(priv->device);
6605 
6606 	return ret;
6607 }
6608 
6609 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6610 {
6611 	struct stmmac_priv *priv = netdev_priv(ndev);
6612 	bool is_double = false;
6613 	int ret;
6614 
6615 	ret = pm_runtime_resume_and_get(priv->device);
6616 	if (ret < 0)
6617 		return ret;
6618 
6619 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6620 		is_double = true;
6621 
6622 	clear_bit(vid, priv->active_vlans);
6623 
6624 	if (priv->hw->num_vlan) {
6625 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6626 		if (ret)
6627 			goto del_vlan_error;
6628 	}
6629 
6630 	ret = stmmac_vlan_update(priv, is_double);
6631 
6632 del_vlan_error:
6633 	pm_runtime_put(priv->device);
6634 
6635 	return ret;
6636 }
6637 
6638 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6639 {
6640 	struct stmmac_priv *priv = netdev_priv(dev);
6641 
6642 	switch (bpf->command) {
6643 	case XDP_SETUP_PROG:
6644 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6645 	case XDP_SETUP_XSK_POOL:
6646 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6647 					     bpf->xsk.queue_id);
6648 	default:
6649 		return -EOPNOTSUPP;
6650 	}
6651 }
6652 
6653 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6654 			   struct xdp_frame **frames, u32 flags)
6655 {
6656 	struct stmmac_priv *priv = netdev_priv(dev);
6657 	int cpu = smp_processor_id();
6658 	struct netdev_queue *nq;
6659 	int i, nxmit = 0;
6660 	int queue;
6661 
6662 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6663 		return -ENETDOWN;
6664 
6665 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6666 		return -EINVAL;
6667 
6668 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6669 	nq = netdev_get_tx_queue(priv->dev, queue);
6670 
6671 	__netif_tx_lock(nq, cpu);
6672 	/* Avoids TX time-out as we are sharing with slow path */
6673 	txq_trans_cond_update(nq);
6674 
6675 	for (i = 0; i < num_frames; i++) {
6676 		int res;
6677 
6678 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6679 		if (res == STMMAC_XDP_CONSUMED)
6680 			break;
6681 
6682 		nxmit++;
6683 	}
6684 
6685 	if (flags & XDP_XMIT_FLUSH) {
6686 		stmmac_flush_tx_descriptors(priv, queue);
6687 		stmmac_tx_timer_arm(priv, queue);
6688 	}
6689 
6690 	__netif_tx_unlock(nq);
6691 
6692 	return nxmit;
6693 }
6694 
6695 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6696 {
6697 	struct stmmac_channel *ch = &priv->channel[queue];
6698 	unsigned long flags;
6699 
6700 	spin_lock_irqsave(&ch->lock, flags);
6701 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6702 	spin_unlock_irqrestore(&ch->lock, flags);
6703 
6704 	stmmac_stop_rx_dma(priv, queue);
6705 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6706 }
6707 
6708 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6709 {
6710 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6711 	struct stmmac_channel *ch = &priv->channel[queue];
6712 	unsigned long flags;
6713 	u32 buf_size;
6714 	int ret;
6715 
6716 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6717 	if (ret) {
6718 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6719 		return;
6720 	}
6721 
6722 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6723 	if (ret) {
6724 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6725 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6726 		return;
6727 	}
6728 
6729 	stmmac_reset_rx_queue(priv, queue);
6730 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6731 
6732 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6733 			    rx_q->dma_rx_phy, rx_q->queue_index);
6734 
6735 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6736 			     sizeof(struct dma_desc));
6737 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6738 			       rx_q->rx_tail_addr, rx_q->queue_index);
6739 
6740 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6741 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6742 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6743 				      buf_size,
6744 				      rx_q->queue_index);
6745 	} else {
6746 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6747 				      priv->dma_conf.dma_buf_sz,
6748 				      rx_q->queue_index);
6749 	}
6750 
6751 	stmmac_start_rx_dma(priv, queue);
6752 
6753 	spin_lock_irqsave(&ch->lock, flags);
6754 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6755 	spin_unlock_irqrestore(&ch->lock, flags);
6756 }
6757 
6758 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6759 {
6760 	struct stmmac_channel *ch = &priv->channel[queue];
6761 	unsigned long flags;
6762 
6763 	spin_lock_irqsave(&ch->lock, flags);
6764 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6765 	spin_unlock_irqrestore(&ch->lock, flags);
6766 
6767 	stmmac_stop_tx_dma(priv, queue);
6768 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6769 }
6770 
6771 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6772 {
6773 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6774 	struct stmmac_channel *ch = &priv->channel[queue];
6775 	unsigned long flags;
6776 	int ret;
6777 
6778 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6779 	if (ret) {
6780 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6781 		return;
6782 	}
6783 
6784 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6785 	if (ret) {
6786 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6787 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6788 		return;
6789 	}
6790 
6791 	stmmac_reset_tx_queue(priv, queue);
6792 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6793 
6794 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6795 			    tx_q->dma_tx_phy, tx_q->queue_index);
6796 
6797 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6798 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6799 
6800 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6801 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6802 			       tx_q->tx_tail_addr, tx_q->queue_index);
6803 
6804 	stmmac_start_tx_dma(priv, queue);
6805 
6806 	spin_lock_irqsave(&ch->lock, flags);
6807 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6808 	spin_unlock_irqrestore(&ch->lock, flags);
6809 }
6810 
6811 void stmmac_xdp_release(struct net_device *dev)
6812 {
6813 	struct stmmac_priv *priv = netdev_priv(dev);
6814 	u32 chan;
6815 
6816 	/* Ensure tx function is not running */
6817 	netif_tx_disable(dev);
6818 
6819 	/* Disable NAPI process */
6820 	stmmac_disable_all_queues(priv);
6821 
6822 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6823 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6824 
6825 	/* Free the IRQ lines */
6826 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6827 
6828 	/* Stop TX/RX DMA channels */
6829 	stmmac_stop_all_dma(priv);
6830 
6831 	/* Release and free the Rx/Tx resources */
6832 	free_dma_desc_resources(priv, &priv->dma_conf);
6833 
6834 	/* Disable the MAC Rx/Tx */
6835 	stmmac_mac_set(priv, priv->ioaddr, false);
6836 
6837 	/* set trans_start so we don't get spurious
6838 	 * watchdogs during reset
6839 	 */
6840 	netif_trans_update(dev);
6841 	netif_carrier_off(dev);
6842 }
6843 
6844 int stmmac_xdp_open(struct net_device *dev)
6845 {
6846 	struct stmmac_priv *priv = netdev_priv(dev);
6847 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6848 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6849 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6850 	struct stmmac_rx_queue *rx_q;
6851 	struct stmmac_tx_queue *tx_q;
6852 	u32 buf_size;
6853 	bool sph_en;
6854 	u32 chan;
6855 	int ret;
6856 
6857 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6858 	if (ret < 0) {
6859 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6860 			   __func__);
6861 		goto dma_desc_error;
6862 	}
6863 
6864 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6865 	if (ret < 0) {
6866 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6867 			   __func__);
6868 		goto init_error;
6869 	}
6870 
6871 	stmmac_reset_queues_param(priv);
6872 
6873 	/* DMA CSR Channel configuration */
6874 	for (chan = 0; chan < dma_csr_ch; chan++) {
6875 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6876 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6877 	}
6878 
6879 	/* Adjust Split header */
6880 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6881 
6882 	/* DMA RX Channel Configuration */
6883 	for (chan = 0; chan < rx_cnt; chan++) {
6884 		rx_q = &priv->dma_conf.rx_queue[chan];
6885 
6886 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6887 				    rx_q->dma_rx_phy, chan);
6888 
6889 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6890 				     (rx_q->buf_alloc_num *
6891 				      sizeof(struct dma_desc));
6892 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6893 				       rx_q->rx_tail_addr, chan);
6894 
6895 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6896 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6897 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6898 					      buf_size,
6899 					      rx_q->queue_index);
6900 		} else {
6901 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6902 					      priv->dma_conf.dma_buf_sz,
6903 					      rx_q->queue_index);
6904 		}
6905 
6906 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6907 	}
6908 
6909 	/* DMA TX Channel Configuration */
6910 	for (chan = 0; chan < tx_cnt; chan++) {
6911 		tx_q = &priv->dma_conf.tx_queue[chan];
6912 
6913 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6914 				    tx_q->dma_tx_phy, chan);
6915 
6916 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6917 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6918 				       tx_q->tx_tail_addr, chan);
6919 
6920 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6921 		tx_q->txtimer.function = stmmac_tx_timer;
6922 	}
6923 
6924 	/* Enable the MAC Rx/Tx */
6925 	stmmac_mac_set(priv, priv->ioaddr, true);
6926 
6927 	/* Start Rx & Tx DMA Channels */
6928 	stmmac_start_all_dma(priv);
6929 
6930 	ret = stmmac_request_irq(dev);
6931 	if (ret)
6932 		goto irq_error;
6933 
6934 	/* Enable NAPI process*/
6935 	stmmac_enable_all_queues(priv);
6936 	netif_carrier_on(dev);
6937 	netif_tx_start_all_queues(dev);
6938 	stmmac_enable_all_dma_irq(priv);
6939 
6940 	return 0;
6941 
6942 irq_error:
6943 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6944 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6945 
6946 	stmmac_hw_teardown(dev);
6947 init_error:
6948 	free_dma_desc_resources(priv, &priv->dma_conf);
6949 dma_desc_error:
6950 	return ret;
6951 }
6952 
6953 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6954 {
6955 	struct stmmac_priv *priv = netdev_priv(dev);
6956 	struct stmmac_rx_queue *rx_q;
6957 	struct stmmac_tx_queue *tx_q;
6958 	struct stmmac_channel *ch;
6959 
6960 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6961 	    !netif_carrier_ok(priv->dev))
6962 		return -ENETDOWN;
6963 
6964 	if (!stmmac_xdp_is_enabled(priv))
6965 		return -EINVAL;
6966 
6967 	if (queue >= priv->plat->rx_queues_to_use ||
6968 	    queue >= priv->plat->tx_queues_to_use)
6969 		return -EINVAL;
6970 
6971 	rx_q = &priv->dma_conf.rx_queue[queue];
6972 	tx_q = &priv->dma_conf.tx_queue[queue];
6973 	ch = &priv->channel[queue];
6974 
6975 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6976 		return -EINVAL;
6977 
6978 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6979 		/* EQoS does not have per-DMA channel SW interrupt,
6980 		 * so we schedule RX Napi straight-away.
6981 		 */
6982 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6983 			__napi_schedule(&ch->rxtx_napi);
6984 	}
6985 
6986 	return 0;
6987 }
6988 
6989 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6990 {
6991 	struct stmmac_priv *priv = netdev_priv(dev);
6992 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6993 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6994 	unsigned int start;
6995 	int q;
6996 
6997 	for (q = 0; q < tx_cnt; q++) {
6998 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6999 		u64 tx_packets;
7000 		u64 tx_bytes;
7001 
7002 		do {
7003 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7004 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7005 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7006 		do {
7007 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7008 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7009 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7010 
7011 		stats->tx_packets += tx_packets;
7012 		stats->tx_bytes += tx_bytes;
7013 	}
7014 
7015 	for (q = 0; q < rx_cnt; q++) {
7016 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7017 		u64 rx_packets;
7018 		u64 rx_bytes;
7019 
7020 		do {
7021 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7022 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7023 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7024 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7025 
7026 		stats->rx_packets += rx_packets;
7027 		stats->rx_bytes += rx_bytes;
7028 	}
7029 
7030 	stats->rx_dropped = priv->xstats.rx_dropped;
7031 	stats->rx_errors = priv->xstats.rx_errors;
7032 	stats->tx_dropped = priv->xstats.tx_dropped;
7033 	stats->tx_errors = priv->xstats.tx_errors;
7034 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7035 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7036 	stats->rx_length_errors = priv->xstats.rx_length;
7037 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7038 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7039 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7040 }
7041 
7042 static const struct net_device_ops stmmac_netdev_ops = {
7043 	.ndo_open = stmmac_open,
7044 	.ndo_start_xmit = stmmac_xmit,
7045 	.ndo_stop = stmmac_release,
7046 	.ndo_change_mtu = stmmac_change_mtu,
7047 	.ndo_fix_features = stmmac_fix_features,
7048 	.ndo_set_features = stmmac_set_features,
7049 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7050 	.ndo_tx_timeout = stmmac_tx_timeout,
7051 	.ndo_eth_ioctl = stmmac_ioctl,
7052 	.ndo_get_stats64 = stmmac_get_stats64,
7053 	.ndo_setup_tc = stmmac_setup_tc,
7054 	.ndo_select_queue = stmmac_select_queue,
7055 	.ndo_set_mac_address = stmmac_set_mac_address,
7056 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7057 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7058 	.ndo_bpf = stmmac_bpf,
7059 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7060 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7061 };
7062 
7063 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7064 {
7065 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7066 		return;
7067 	if (test_bit(STMMAC_DOWN, &priv->state))
7068 		return;
7069 
7070 	netdev_err(priv->dev, "Reset adapter.\n");
7071 
7072 	rtnl_lock();
7073 	netif_trans_update(priv->dev);
7074 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7075 		usleep_range(1000, 2000);
7076 
7077 	set_bit(STMMAC_DOWN, &priv->state);
7078 	dev_close(priv->dev);
7079 	dev_open(priv->dev, NULL);
7080 	clear_bit(STMMAC_DOWN, &priv->state);
7081 	clear_bit(STMMAC_RESETING, &priv->state);
7082 	rtnl_unlock();
7083 }
7084 
7085 static void stmmac_service_task(struct work_struct *work)
7086 {
7087 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7088 			service_task);
7089 
7090 	stmmac_reset_subtask(priv);
7091 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7092 }
7093 
7094 /**
7095  *  stmmac_hw_init - Init the MAC device
7096  *  @priv: driver private structure
7097  *  Description: this function is to configure the MAC device according to
7098  *  some platform parameters or the HW capability register. It prepares the
7099  *  driver to use either ring or chain modes and to setup either enhanced or
7100  *  normal descriptors.
7101  */
7102 static int stmmac_hw_init(struct stmmac_priv *priv)
7103 {
7104 	int ret;
7105 
7106 	/* dwmac-sun8i only work in chain mode */
7107 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7108 		chain_mode = 1;
7109 	priv->chain_mode = chain_mode;
7110 
7111 	/* Initialize HW Interface */
7112 	ret = stmmac_hwif_init(priv);
7113 	if (ret)
7114 		return ret;
7115 
7116 	/* Get the HW capability (new GMAC newer than 3.50a) */
7117 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7118 	if (priv->hw_cap_support) {
7119 		dev_info(priv->device, "DMA HW capability register supported\n");
7120 
7121 		/* We can override some gmac/dma configuration fields: e.g.
7122 		 * enh_desc, tx_coe (e.g. that are passed through the
7123 		 * platform) with the values from the HW capability
7124 		 * register (if supported).
7125 		 */
7126 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7127 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7128 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7129 		priv->hw->pmt = priv->plat->pmt;
7130 		if (priv->dma_cap.hash_tb_sz) {
7131 			priv->hw->multicast_filter_bins =
7132 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7133 			priv->hw->mcast_bits_log2 =
7134 					ilog2(priv->hw->multicast_filter_bins);
7135 		}
7136 
7137 		/* TXCOE doesn't work in thresh DMA mode */
7138 		if (priv->plat->force_thresh_dma_mode)
7139 			priv->plat->tx_coe = 0;
7140 		else
7141 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7142 
7143 		/* In case of GMAC4 rx_coe is from HW cap register. */
7144 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7145 
7146 		if (priv->dma_cap.rx_coe_type2)
7147 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7148 		else if (priv->dma_cap.rx_coe_type1)
7149 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7150 
7151 	} else {
7152 		dev_info(priv->device, "No HW DMA feature register supported\n");
7153 	}
7154 
7155 	if (priv->plat->rx_coe) {
7156 		priv->hw->rx_csum = priv->plat->rx_coe;
7157 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7158 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7159 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7160 	}
7161 	if (priv->plat->tx_coe)
7162 		dev_info(priv->device, "TX Checksum insertion supported\n");
7163 
7164 	if (priv->plat->pmt) {
7165 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7166 		device_set_wakeup_capable(priv->device, 1);
7167 	}
7168 
7169 	if (priv->dma_cap.tsoen)
7170 		dev_info(priv->device, "TSO supported\n");
7171 
7172 	priv->hw->vlan_fail_q_en =
7173 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7174 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7175 
7176 	/* Run HW quirks, if any */
7177 	if (priv->hwif_quirks) {
7178 		ret = priv->hwif_quirks(priv);
7179 		if (ret)
7180 			return ret;
7181 	}
7182 
7183 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7184 	 * In some case, for example on bugged HW this feature
7185 	 * has to be disable and this can be done by passing the
7186 	 * riwt_off field from the platform.
7187 	 */
7188 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7189 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7190 		priv->use_riwt = 1;
7191 		dev_info(priv->device,
7192 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7193 	}
7194 
7195 	return 0;
7196 }
7197 
7198 static void stmmac_napi_add(struct net_device *dev)
7199 {
7200 	struct stmmac_priv *priv = netdev_priv(dev);
7201 	u32 queue, maxq;
7202 
7203 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7204 
7205 	for (queue = 0; queue < maxq; queue++) {
7206 		struct stmmac_channel *ch = &priv->channel[queue];
7207 
7208 		ch->priv_data = priv;
7209 		ch->index = queue;
7210 		spin_lock_init(&ch->lock);
7211 
7212 		if (queue < priv->plat->rx_queues_to_use) {
7213 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7214 		}
7215 		if (queue < priv->plat->tx_queues_to_use) {
7216 			netif_napi_add_tx(dev, &ch->tx_napi,
7217 					  stmmac_napi_poll_tx);
7218 		}
7219 		if (queue < priv->plat->rx_queues_to_use &&
7220 		    queue < priv->plat->tx_queues_to_use) {
7221 			netif_napi_add(dev, &ch->rxtx_napi,
7222 				       stmmac_napi_poll_rxtx);
7223 		}
7224 	}
7225 }
7226 
7227 static void stmmac_napi_del(struct net_device *dev)
7228 {
7229 	struct stmmac_priv *priv = netdev_priv(dev);
7230 	u32 queue, maxq;
7231 
7232 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7233 
7234 	for (queue = 0; queue < maxq; queue++) {
7235 		struct stmmac_channel *ch = &priv->channel[queue];
7236 
7237 		if (queue < priv->plat->rx_queues_to_use)
7238 			netif_napi_del(&ch->rx_napi);
7239 		if (queue < priv->plat->tx_queues_to_use)
7240 			netif_napi_del(&ch->tx_napi);
7241 		if (queue < priv->plat->rx_queues_to_use &&
7242 		    queue < priv->plat->tx_queues_to_use) {
7243 			netif_napi_del(&ch->rxtx_napi);
7244 		}
7245 	}
7246 }
7247 
7248 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7249 {
7250 	struct stmmac_priv *priv = netdev_priv(dev);
7251 	int ret = 0, i;
7252 
7253 	if (netif_running(dev))
7254 		stmmac_release(dev);
7255 
7256 	stmmac_napi_del(dev);
7257 
7258 	priv->plat->rx_queues_to_use = rx_cnt;
7259 	priv->plat->tx_queues_to_use = tx_cnt;
7260 	if (!netif_is_rxfh_configured(dev))
7261 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7262 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7263 									rx_cnt);
7264 
7265 	stmmac_napi_add(dev);
7266 
7267 	if (netif_running(dev))
7268 		ret = stmmac_open(dev);
7269 
7270 	return ret;
7271 }
7272 
7273 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7274 {
7275 	struct stmmac_priv *priv = netdev_priv(dev);
7276 	int ret = 0;
7277 
7278 	if (netif_running(dev))
7279 		stmmac_release(dev);
7280 
7281 	priv->dma_conf.dma_rx_size = rx_size;
7282 	priv->dma_conf.dma_tx_size = tx_size;
7283 
7284 	if (netif_running(dev))
7285 		ret = stmmac_open(dev);
7286 
7287 	return ret;
7288 }
7289 
7290 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7291 {
7292 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7293 	struct dma_desc *desc_contains_ts = ctx->desc;
7294 	struct stmmac_priv *priv = ctx->priv;
7295 	struct dma_desc *ndesc = ctx->ndesc;
7296 	struct dma_desc *desc = ctx->desc;
7297 	u64 ns = 0;
7298 
7299 	if (!priv->hwts_rx_en)
7300 		return -ENODATA;
7301 
7302 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7303 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7304 		desc_contains_ts = ndesc;
7305 
7306 	/* Check if timestamp is available */
7307 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7308 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7309 		ns -= priv->plat->cdc_error_adj;
7310 		*timestamp = ns_to_ktime(ns);
7311 		return 0;
7312 	}
7313 
7314 	return -ENODATA;
7315 }
7316 
7317 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7318 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7319 };
7320 
7321 /**
7322  * stmmac_dvr_probe
7323  * @device: device pointer
7324  * @plat_dat: platform data pointer
7325  * @res: stmmac resource pointer
7326  * Description: this is the main probe function used to
7327  * call the alloc_etherdev, allocate the priv structure.
7328  * Return:
7329  * returns 0 on success, otherwise errno.
7330  */
7331 int stmmac_dvr_probe(struct device *device,
7332 		     struct plat_stmmacenet_data *plat_dat,
7333 		     struct stmmac_resources *res)
7334 {
7335 	struct net_device *ndev = NULL;
7336 	struct stmmac_priv *priv;
7337 	u32 rxq;
7338 	int i, ret = 0;
7339 
7340 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7341 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7342 	if (!ndev)
7343 		return -ENOMEM;
7344 
7345 	SET_NETDEV_DEV(ndev, device);
7346 
7347 	priv = netdev_priv(ndev);
7348 	priv->device = device;
7349 	priv->dev = ndev;
7350 
7351 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7352 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7353 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7354 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7355 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7356 	}
7357 
7358 	priv->xstats.pcpu_stats =
7359 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7360 	if (!priv->xstats.pcpu_stats)
7361 		return -ENOMEM;
7362 
7363 	stmmac_set_ethtool_ops(ndev);
7364 	priv->pause = pause;
7365 	priv->plat = plat_dat;
7366 	priv->ioaddr = res->addr;
7367 	priv->dev->base_addr = (unsigned long)res->addr;
7368 	priv->plat->dma_cfg->multi_msi_en =
7369 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7370 
7371 	priv->dev->irq = res->irq;
7372 	priv->wol_irq = res->wol_irq;
7373 	priv->lpi_irq = res->lpi_irq;
7374 	priv->sfty_irq = res->sfty_irq;
7375 	priv->sfty_ce_irq = res->sfty_ce_irq;
7376 	priv->sfty_ue_irq = res->sfty_ue_irq;
7377 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7378 		priv->rx_irq[i] = res->rx_irq[i];
7379 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7380 		priv->tx_irq[i] = res->tx_irq[i];
7381 
7382 	if (!is_zero_ether_addr(res->mac))
7383 		eth_hw_addr_set(priv->dev, res->mac);
7384 
7385 	dev_set_drvdata(device, priv->dev);
7386 
7387 	/* Verify driver arguments */
7388 	stmmac_verify_args();
7389 
7390 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7391 	if (!priv->af_xdp_zc_qps)
7392 		return -ENOMEM;
7393 
7394 	/* Allocate workqueue */
7395 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7396 	if (!priv->wq) {
7397 		dev_err(priv->device, "failed to create workqueue\n");
7398 		ret = -ENOMEM;
7399 		goto error_wq_init;
7400 	}
7401 
7402 	INIT_WORK(&priv->service_task, stmmac_service_task);
7403 
7404 	/* Override with kernel parameters if supplied XXX CRS XXX
7405 	 * this needs to have multiple instances
7406 	 */
7407 	if ((phyaddr >= 0) && (phyaddr <= 31))
7408 		priv->plat->phy_addr = phyaddr;
7409 
7410 	if (priv->plat->stmmac_rst) {
7411 		ret = reset_control_assert(priv->plat->stmmac_rst);
7412 		reset_control_deassert(priv->plat->stmmac_rst);
7413 		/* Some reset controllers have only reset callback instead of
7414 		 * assert + deassert callbacks pair.
7415 		 */
7416 		if (ret == -ENOTSUPP)
7417 			reset_control_reset(priv->plat->stmmac_rst);
7418 	}
7419 
7420 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7421 	if (ret == -ENOTSUPP)
7422 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7423 			ERR_PTR(ret));
7424 
7425 	/* Wait a bit for the reset to take effect */
7426 	udelay(10);
7427 
7428 	/* Init MAC and get the capabilities */
7429 	ret = stmmac_hw_init(priv);
7430 	if (ret)
7431 		goto error_hw_init;
7432 
7433 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7434 	 */
7435 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7436 		priv->plat->dma_cfg->dche = false;
7437 
7438 	stmmac_check_ether_addr(priv);
7439 
7440 	ndev->netdev_ops = &stmmac_netdev_ops;
7441 
7442 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7443 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7444 
7445 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7446 			    NETIF_F_RXCSUM;
7447 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7448 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7449 
7450 	ret = stmmac_tc_init(priv, priv);
7451 	if (!ret) {
7452 		ndev->hw_features |= NETIF_F_HW_TC;
7453 	}
7454 
7455 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7456 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7457 		if (priv->plat->has_gmac4)
7458 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7459 		priv->tso = true;
7460 		dev_info(priv->device, "TSO feature enabled\n");
7461 	}
7462 
7463 	if (priv->dma_cap.sphen &&
7464 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7465 		ndev->hw_features |= NETIF_F_GRO;
7466 		priv->sph_cap = true;
7467 		priv->sph = priv->sph_cap;
7468 		dev_info(priv->device, "SPH feature enabled\n");
7469 	}
7470 
7471 	/* Ideally our host DMA address width is the same as for the
7472 	 * device. However, it may differ and then we have to use our
7473 	 * host DMA width for allocation and the device DMA width for
7474 	 * register handling.
7475 	 */
7476 	if (priv->plat->host_dma_width)
7477 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7478 	else
7479 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7480 
7481 	if (priv->dma_cap.host_dma_width) {
7482 		ret = dma_set_mask_and_coherent(device,
7483 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7484 		if (!ret) {
7485 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7486 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7487 
7488 			/*
7489 			 * If more than 32 bits can be addressed, make sure to
7490 			 * enable enhanced addressing mode.
7491 			 */
7492 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7493 				priv->plat->dma_cfg->eame = true;
7494 		} else {
7495 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7496 			if (ret) {
7497 				dev_err(priv->device, "Failed to set DMA Mask\n");
7498 				goto error_hw_init;
7499 			}
7500 
7501 			priv->dma_cap.host_dma_width = 32;
7502 		}
7503 	}
7504 
7505 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7506 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7507 #ifdef STMMAC_VLAN_TAG_USED
7508 	/* Both mac100 and gmac support receive VLAN tag detection */
7509 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7510 	if (priv->plat->has_gmac4) {
7511 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7512 		priv->hw->hw_vlan_en = true;
7513 	}
7514 	if (priv->dma_cap.vlhash) {
7515 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7516 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7517 	}
7518 	if (priv->dma_cap.vlins) {
7519 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7520 		if (priv->dma_cap.dvlan)
7521 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7522 	}
7523 #endif
7524 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7525 
7526 	priv->xstats.threshold = tc;
7527 
7528 	/* Initialize RSS */
7529 	rxq = priv->plat->rx_queues_to_use;
7530 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7531 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7532 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7533 
7534 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7535 		ndev->features |= NETIF_F_RXHASH;
7536 
7537 	ndev->vlan_features |= ndev->features;
7538 
7539 	/* MTU range: 46 - hw-specific max */
7540 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7541 	if (priv->plat->has_xgmac)
7542 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7543 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7544 		ndev->max_mtu = JUMBO_LEN;
7545 	else
7546 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7547 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7548 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7549 	 */
7550 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7551 	    (priv->plat->maxmtu >= ndev->min_mtu))
7552 		ndev->max_mtu = priv->plat->maxmtu;
7553 	else if (priv->plat->maxmtu < ndev->min_mtu)
7554 		dev_warn(priv->device,
7555 			 "%s: warning: maxmtu having invalid value (%d)\n",
7556 			 __func__, priv->plat->maxmtu);
7557 
7558 	if (flow_ctrl)
7559 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7560 
7561 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7562 
7563 	/* Setup channels NAPI */
7564 	stmmac_napi_add(ndev);
7565 
7566 	mutex_init(&priv->lock);
7567 
7568 	stmmac_fpe_init(priv);
7569 
7570 	/* If a specific clk_csr value is passed from the platform
7571 	 * this means that the CSR Clock Range selection cannot be
7572 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7573 	 * set the MDC clock dynamically according to the csr actual
7574 	 * clock input.
7575 	 */
7576 	if (priv->plat->clk_csr >= 0)
7577 		priv->clk_csr = priv->plat->clk_csr;
7578 	else
7579 		stmmac_clk_csr_set(priv);
7580 
7581 	stmmac_check_pcs_mode(priv);
7582 
7583 	pm_runtime_get_noresume(device);
7584 	pm_runtime_set_active(device);
7585 	if (!pm_runtime_enabled(device))
7586 		pm_runtime_enable(device);
7587 
7588 	ret = stmmac_mdio_register(ndev);
7589 	if (ret < 0) {
7590 		dev_err_probe(priv->device, ret,
7591 			      "MDIO bus (id: %d) registration failed\n",
7592 			      priv->plat->bus_id);
7593 		goto error_mdio_register;
7594 	}
7595 
7596 	if (priv->plat->speed_mode_2500)
7597 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7598 
7599 	ret = stmmac_pcs_setup(ndev);
7600 	if (ret)
7601 		goto error_pcs_setup;
7602 
7603 	ret = stmmac_phy_setup(priv);
7604 	if (ret) {
7605 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7606 		goto error_phy_setup;
7607 	}
7608 
7609 	ret = register_netdev(ndev);
7610 	if (ret) {
7611 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7612 			__func__, ret);
7613 		goto error_netdev_register;
7614 	}
7615 
7616 #ifdef CONFIG_DEBUG_FS
7617 	stmmac_init_fs(ndev);
7618 #endif
7619 
7620 	if (priv->plat->dump_debug_regs)
7621 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7622 
7623 	/* Let pm_runtime_put() disable the clocks.
7624 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7625 	 */
7626 	pm_runtime_put(device);
7627 
7628 	return ret;
7629 
7630 error_netdev_register:
7631 	phylink_destroy(priv->phylink);
7632 error_phy_setup:
7633 	stmmac_pcs_clean(ndev);
7634 error_pcs_setup:
7635 	stmmac_mdio_unregister(ndev);
7636 error_mdio_register:
7637 	stmmac_napi_del(ndev);
7638 error_hw_init:
7639 	destroy_workqueue(priv->wq);
7640 error_wq_init:
7641 	bitmap_free(priv->af_xdp_zc_qps);
7642 
7643 	return ret;
7644 }
7645 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7646 
7647 /**
7648  * stmmac_dvr_remove
7649  * @dev: device pointer
7650  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7651  * changes the link status, releases the DMA descriptor rings.
7652  */
7653 void stmmac_dvr_remove(struct device *dev)
7654 {
7655 	struct net_device *ndev = dev_get_drvdata(dev);
7656 	struct stmmac_priv *priv = netdev_priv(ndev);
7657 
7658 	netdev_info(priv->dev, "%s: removing driver", __func__);
7659 
7660 	pm_runtime_get_sync(dev);
7661 
7662 	stmmac_stop_all_dma(priv);
7663 	stmmac_mac_set(priv, priv->ioaddr, false);
7664 	unregister_netdev(ndev);
7665 
7666 #ifdef CONFIG_DEBUG_FS
7667 	stmmac_exit_fs(ndev);
7668 #endif
7669 	phylink_destroy(priv->phylink);
7670 	if (priv->plat->stmmac_rst)
7671 		reset_control_assert(priv->plat->stmmac_rst);
7672 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7673 
7674 	stmmac_pcs_clean(ndev);
7675 	stmmac_mdio_unregister(ndev);
7676 
7677 	destroy_workqueue(priv->wq);
7678 	mutex_destroy(&priv->lock);
7679 	bitmap_free(priv->af_xdp_zc_qps);
7680 
7681 	pm_runtime_disable(dev);
7682 	pm_runtime_put_noidle(dev);
7683 }
7684 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7685 
7686 /**
7687  * stmmac_suspend - suspend callback
7688  * @dev: device pointer
7689  * Description: this is the function to suspend the device and it is called
7690  * by the platform driver to stop the network queue, release the resources,
7691  * program the PMT register (for WoL), clean and release driver resources.
7692  */
7693 int stmmac_suspend(struct device *dev)
7694 {
7695 	struct net_device *ndev = dev_get_drvdata(dev);
7696 	struct stmmac_priv *priv = netdev_priv(ndev);
7697 	u32 chan;
7698 
7699 	if (!ndev || !netif_running(ndev))
7700 		return 0;
7701 
7702 	mutex_lock(&priv->lock);
7703 
7704 	netif_device_detach(ndev);
7705 
7706 	stmmac_disable_all_queues(priv);
7707 
7708 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7709 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7710 
7711 	if (priv->eee_enabled) {
7712 		priv->tx_path_in_lpi_mode = false;
7713 		del_timer_sync(&priv->eee_ctrl_timer);
7714 	}
7715 
7716 	/* Stop TX/RX DMA */
7717 	stmmac_stop_all_dma(priv);
7718 
7719 	if (priv->plat->serdes_powerdown)
7720 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7721 
7722 	/* Enable Power down mode by programming the PMT regs */
7723 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7724 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7725 		priv->irq_wake = 1;
7726 	} else {
7727 		stmmac_mac_set(priv, priv->ioaddr, false);
7728 		pinctrl_pm_select_sleep_state(priv->device);
7729 	}
7730 
7731 	mutex_unlock(&priv->lock);
7732 
7733 	rtnl_lock();
7734 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7735 		phylink_suspend(priv->phylink, true);
7736 	} else {
7737 		if (device_may_wakeup(priv->device))
7738 			phylink_speed_down(priv->phylink, false);
7739 		phylink_suspend(priv->phylink, false);
7740 	}
7741 	rtnl_unlock();
7742 
7743 	if (stmmac_fpe_supported(priv))
7744 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7745 
7746 	priv->speed = SPEED_UNKNOWN;
7747 	return 0;
7748 }
7749 EXPORT_SYMBOL_GPL(stmmac_suspend);
7750 
7751 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7752 {
7753 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7754 
7755 	rx_q->cur_rx = 0;
7756 	rx_q->dirty_rx = 0;
7757 }
7758 
7759 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7760 {
7761 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7762 
7763 	tx_q->cur_tx = 0;
7764 	tx_q->dirty_tx = 0;
7765 	tx_q->mss = 0;
7766 
7767 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7768 }
7769 
7770 /**
7771  * stmmac_reset_queues_param - reset queue parameters
7772  * @priv: device pointer
7773  */
7774 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7775 {
7776 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7777 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7778 	u32 queue;
7779 
7780 	for (queue = 0; queue < rx_cnt; queue++)
7781 		stmmac_reset_rx_queue(priv, queue);
7782 
7783 	for (queue = 0; queue < tx_cnt; queue++)
7784 		stmmac_reset_tx_queue(priv, queue);
7785 }
7786 
7787 /**
7788  * stmmac_resume - resume callback
7789  * @dev: device pointer
7790  * Description: when resume this function is invoked to setup the DMA and CORE
7791  * in a usable state.
7792  */
7793 int stmmac_resume(struct device *dev)
7794 {
7795 	struct net_device *ndev = dev_get_drvdata(dev);
7796 	struct stmmac_priv *priv = netdev_priv(ndev);
7797 	int ret;
7798 
7799 	if (!netif_running(ndev))
7800 		return 0;
7801 
7802 	/* Power Down bit, into the PM register, is cleared
7803 	 * automatically as soon as a magic packet or a Wake-up frame
7804 	 * is received. Anyway, it's better to manually clear
7805 	 * this bit because it can generate problems while resuming
7806 	 * from another devices (e.g. serial console).
7807 	 */
7808 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7809 		mutex_lock(&priv->lock);
7810 		stmmac_pmt(priv, priv->hw, 0);
7811 		mutex_unlock(&priv->lock);
7812 		priv->irq_wake = 0;
7813 	} else {
7814 		pinctrl_pm_select_default_state(priv->device);
7815 		/* reset the phy so that it's ready */
7816 		if (priv->mii)
7817 			stmmac_mdio_reset(priv->mii);
7818 	}
7819 
7820 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7821 	    priv->plat->serdes_powerup) {
7822 		ret = priv->plat->serdes_powerup(ndev,
7823 						 priv->plat->bsp_priv);
7824 
7825 		if (ret < 0)
7826 			return ret;
7827 	}
7828 
7829 	rtnl_lock();
7830 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7831 		phylink_resume(priv->phylink);
7832 	} else {
7833 		phylink_resume(priv->phylink);
7834 		if (device_may_wakeup(priv->device))
7835 			phylink_speed_up(priv->phylink);
7836 	}
7837 	rtnl_unlock();
7838 
7839 	rtnl_lock();
7840 	mutex_lock(&priv->lock);
7841 
7842 	stmmac_reset_queues_param(priv);
7843 
7844 	stmmac_free_tx_skbufs(priv);
7845 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7846 
7847 	stmmac_hw_setup(ndev, false);
7848 	stmmac_init_coalesce(priv);
7849 	stmmac_set_rx_mode(ndev);
7850 
7851 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7852 
7853 	stmmac_enable_all_queues(priv);
7854 	stmmac_enable_all_dma_irq(priv);
7855 
7856 	mutex_unlock(&priv->lock);
7857 	rtnl_unlock();
7858 
7859 	netif_device_attach(ndev);
7860 
7861 	return 0;
7862 }
7863 EXPORT_SYMBOL_GPL(stmmac_resume);
7864 
7865 #ifndef MODULE
7866 static int __init stmmac_cmdline_opt(char *str)
7867 {
7868 	char *opt;
7869 
7870 	if (!str || !*str)
7871 		return 1;
7872 	while ((opt = strsep(&str, ",")) != NULL) {
7873 		if (!strncmp(opt, "debug:", 6)) {
7874 			if (kstrtoint(opt + 6, 0, &debug))
7875 				goto err;
7876 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7877 			if (kstrtoint(opt + 8, 0, &phyaddr))
7878 				goto err;
7879 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7880 			if (kstrtoint(opt + 7, 0, &buf_sz))
7881 				goto err;
7882 		} else if (!strncmp(opt, "tc:", 3)) {
7883 			if (kstrtoint(opt + 3, 0, &tc))
7884 				goto err;
7885 		} else if (!strncmp(opt, "watchdog:", 9)) {
7886 			if (kstrtoint(opt + 9, 0, &watchdog))
7887 				goto err;
7888 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7889 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7890 				goto err;
7891 		} else if (!strncmp(opt, "pause:", 6)) {
7892 			if (kstrtoint(opt + 6, 0, &pause))
7893 				goto err;
7894 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7895 			if (kstrtoint(opt + 10, 0, &eee_timer))
7896 				goto err;
7897 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7898 			if (kstrtoint(opt + 11, 0, &chain_mode))
7899 				goto err;
7900 		}
7901 	}
7902 	return 1;
7903 
7904 err:
7905 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7906 	return 1;
7907 }
7908 
7909 __setup("stmmaceth=", stmmac_cmdline_opt);
7910 #endif /* MODULE */
7911 
7912 static int __init stmmac_init(void)
7913 {
7914 #ifdef CONFIG_DEBUG_FS
7915 	/* Create debugfs main directory if it doesn't exist yet */
7916 	if (!stmmac_fs_dir)
7917 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7918 	register_netdevice_notifier(&stmmac_notifier);
7919 #endif
7920 
7921 	return 0;
7922 }
7923 
7924 static void __exit stmmac_exit(void)
7925 {
7926 #ifdef CONFIG_DEBUG_FS
7927 	unregister_netdevice_notifier(&stmmac_notifier);
7928 	debugfs_remove_recursive(stmmac_fs_dir);
7929 #endif
7930 }
7931 
7932 module_init(stmmac_init)
7933 module_exit(stmmac_exit)
7934 
7935 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7936 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7937 MODULE_LICENSE("GPL");
7938