xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision fcc79e1714e8c2b8e216dc3149812edd37884eef)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54 
55 /* As long as the interface is active, we keep the timestamping counter enabled
56  * with fine resolution and binary rollover. This avoid non-monotonic behavior
57  * (clock jumps) when changing timestamping settings at runtime.
58  */
59 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 				 PTP_TCR_TSCTRLSSR)
61 
62 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
64 
65 /* Module parameters */
66 #define TX_TIMEO	5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70 
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74 
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78 
79 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
80 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
81 
82 /* Limit to make sure XDP TX and slow path can coexist */
83 #define STMMAC_XSK_TX_BUDGET_MAX	256
84 #define STMMAC_TX_XSK_AVAIL		16
85 #define STMMAC_RX_FILL_BATCH		16
86 
87 #define STMMAC_XDP_PASS		0
88 #define STMMAC_XDP_CONSUMED	BIT(0)
89 #define STMMAC_XDP_TX		BIT(1)
90 #define STMMAC_XDP_REDIRECT	BIT(2)
91 
92 static int flow_ctrl = FLOW_AUTO;
93 module_param(flow_ctrl, int, 0644);
94 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
95 
96 static int pause = PAUSE_TIME;
97 module_param(pause, int, 0644);
98 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
99 
100 #define TC_DEFAULT 64
101 static int tc = TC_DEFAULT;
102 module_param(tc, int, 0644);
103 MODULE_PARM_DESC(tc, "DMA threshold control value");
104 
105 #define	DEFAULT_BUFSIZE	1536
106 static int buf_sz = DEFAULT_BUFSIZE;
107 module_param(buf_sz, int, 0644);
108 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
109 
110 #define	STMMAC_RX_COPYBREAK	256
111 
112 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
113 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
114 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
115 
116 #define STMMAC_DEFAULT_LPI_TIMER	1000
117 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
118 module_param(eee_timer, int, 0644);
119 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
120 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
121 
122 /* By default the driver will use the ring mode to manage tx and rx descriptors,
123  * but allow user to force to use the chain instead of the ring
124  */
125 static unsigned int chain_mode;
126 module_param(chain_mode, int, 0444);
127 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
128 
129 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
130 /* For MSI interrupts handling */
131 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
133 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
134 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
135 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
138 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
139 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
140 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
141 					  u32 rxmode, u32 chan);
142 
143 #ifdef CONFIG_DEBUG_FS
144 static const struct net_device_ops stmmac_netdev_ops;
145 static void stmmac_init_fs(struct net_device *dev);
146 static void stmmac_exit_fs(struct net_device *dev);
147 #endif
148 
149 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
150 
151 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
152 {
153 	int ret = 0;
154 
155 	if (enabled) {
156 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
157 		if (ret)
158 			return ret;
159 		ret = clk_prepare_enable(priv->plat->pclk);
160 		if (ret) {
161 			clk_disable_unprepare(priv->plat->stmmac_clk);
162 			return ret;
163 		}
164 		if (priv->plat->clks_config) {
165 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
166 			if (ret) {
167 				clk_disable_unprepare(priv->plat->stmmac_clk);
168 				clk_disable_unprepare(priv->plat->pclk);
169 				return ret;
170 			}
171 		}
172 	} else {
173 		clk_disable_unprepare(priv->plat->stmmac_clk);
174 		clk_disable_unprepare(priv->plat->pclk);
175 		if (priv->plat->clks_config)
176 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
177 	}
178 
179 	return ret;
180 }
181 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
182 
183 /**
184  * stmmac_verify_args - verify the driver parameters.
185  * Description: it checks the driver parameters and set a default in case of
186  * errors.
187  */
188 static void stmmac_verify_args(void)
189 {
190 	if (unlikely(watchdog < 0))
191 		watchdog = TX_TIMEO;
192 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
193 		buf_sz = DEFAULT_BUFSIZE;
194 	if (unlikely(flow_ctrl > 1))
195 		flow_ctrl = FLOW_AUTO;
196 	else if (likely(flow_ctrl < 0))
197 		flow_ctrl = FLOW_OFF;
198 	if (unlikely((pause < 0) || (pause > 0xffff)))
199 		pause = PAUSE_TIME;
200 	if (eee_timer < 0)
201 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
202 }
203 
204 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
205 {
206 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
207 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
208 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
209 	u32 queue;
210 
211 	for (queue = 0; queue < maxq; queue++) {
212 		struct stmmac_channel *ch = &priv->channel[queue];
213 
214 		if (stmmac_xdp_is_enabled(priv) &&
215 		    test_bit(queue, priv->af_xdp_zc_qps)) {
216 			napi_disable(&ch->rxtx_napi);
217 			continue;
218 		}
219 
220 		if (queue < rx_queues_cnt)
221 			napi_disable(&ch->rx_napi);
222 		if (queue < tx_queues_cnt)
223 			napi_disable(&ch->tx_napi);
224 	}
225 }
226 
227 /**
228  * stmmac_disable_all_queues - Disable all queues
229  * @priv: driver private structure
230  */
231 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
232 {
233 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
234 	struct stmmac_rx_queue *rx_q;
235 	u32 queue;
236 
237 	/* synchronize_rcu() needed for pending XDP buffers to drain */
238 	for (queue = 0; queue < rx_queues_cnt; queue++) {
239 		rx_q = &priv->dma_conf.rx_queue[queue];
240 		if (rx_q->xsk_pool) {
241 			synchronize_rcu();
242 			break;
243 		}
244 	}
245 
246 	__stmmac_disable_all_queues(priv);
247 }
248 
249 /**
250  * stmmac_enable_all_queues - Enable all queues
251  * @priv: driver private structure
252  */
253 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
254 {
255 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
256 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
257 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
258 	u32 queue;
259 
260 	for (queue = 0; queue < maxq; queue++) {
261 		struct stmmac_channel *ch = &priv->channel[queue];
262 
263 		if (stmmac_xdp_is_enabled(priv) &&
264 		    test_bit(queue, priv->af_xdp_zc_qps)) {
265 			napi_enable(&ch->rxtx_napi);
266 			continue;
267 		}
268 
269 		if (queue < rx_queues_cnt)
270 			napi_enable(&ch->rx_napi);
271 		if (queue < tx_queues_cnt)
272 			napi_enable(&ch->tx_napi);
273 	}
274 }
275 
276 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
277 {
278 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
279 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
280 		queue_work(priv->wq, &priv->service_task);
281 }
282 
283 static void stmmac_global_err(struct stmmac_priv *priv)
284 {
285 	netif_carrier_off(priv->dev);
286 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
287 	stmmac_service_event_schedule(priv);
288 }
289 
290 /**
291  * stmmac_clk_csr_set - dynamically set the MDC clock
292  * @priv: driver private structure
293  * Description: this is to dynamically set the MDC clock according to the csr
294  * clock input.
295  * Note:
296  *	If a specific clk_csr value is passed from the platform
297  *	this means that the CSR Clock Range selection cannot be
298  *	changed at run-time and it is fixed (as reported in the driver
299  *	documentation). Viceversa the driver will try to set the MDC
300  *	clock dynamically according to the actual clock input.
301  */
302 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
303 {
304 	u32 clk_rate;
305 
306 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
307 
308 	/* Platform provided default clk_csr would be assumed valid
309 	 * for all other cases except for the below mentioned ones.
310 	 * For values higher than the IEEE 802.3 specified frequency
311 	 * we can not estimate the proper divider as it is not known
312 	 * the frequency of clk_csr_i. So we do not change the default
313 	 * divider.
314 	 */
315 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
316 		if (clk_rate < CSR_F_35M)
317 			priv->clk_csr = STMMAC_CSR_20_35M;
318 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
319 			priv->clk_csr = STMMAC_CSR_35_60M;
320 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
321 			priv->clk_csr = STMMAC_CSR_60_100M;
322 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
323 			priv->clk_csr = STMMAC_CSR_100_150M;
324 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
325 			priv->clk_csr = STMMAC_CSR_150_250M;
326 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
327 			priv->clk_csr = STMMAC_CSR_250_300M;
328 	}
329 
330 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
331 		if (clk_rate > 160000000)
332 			priv->clk_csr = 0x03;
333 		else if (clk_rate > 80000000)
334 			priv->clk_csr = 0x02;
335 		else if (clk_rate > 40000000)
336 			priv->clk_csr = 0x01;
337 		else
338 			priv->clk_csr = 0;
339 	}
340 
341 	if (priv->plat->has_xgmac) {
342 		if (clk_rate > 400000000)
343 			priv->clk_csr = 0x5;
344 		else if (clk_rate > 350000000)
345 			priv->clk_csr = 0x4;
346 		else if (clk_rate > 300000000)
347 			priv->clk_csr = 0x3;
348 		else if (clk_rate > 250000000)
349 			priv->clk_csr = 0x2;
350 		else if (clk_rate > 150000000)
351 			priv->clk_csr = 0x1;
352 		else
353 			priv->clk_csr = 0x0;
354 	}
355 }
356 
357 static void print_pkt(unsigned char *buf, int len)
358 {
359 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
360 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
361 }
362 
363 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
364 {
365 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
366 	u32 avail;
367 
368 	if (tx_q->dirty_tx > tx_q->cur_tx)
369 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
370 	else
371 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
372 
373 	return avail;
374 }
375 
376 /**
377  * stmmac_rx_dirty - Get RX queue dirty
378  * @priv: driver private structure
379  * @queue: RX queue index
380  */
381 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
382 {
383 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
384 	u32 dirty;
385 
386 	if (rx_q->dirty_rx <= rx_q->cur_rx)
387 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
388 	else
389 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
390 
391 	return dirty;
392 }
393 
394 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
395 {
396 	int tx_lpi_timer;
397 
398 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
399 	priv->eee_sw_timer_en = en ? 0 : 1;
400 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
401 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
402 }
403 
404 /**
405  * stmmac_enable_eee_mode - check and enter in LPI mode
406  * @priv: driver private structure
407  * Description: this function is to verify and enter in LPI mode in case of
408  * EEE.
409  */
410 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
411 {
412 	u32 tx_cnt = priv->plat->tx_queues_to_use;
413 	u32 queue;
414 
415 	/* check if all TX queues have the work finished */
416 	for (queue = 0; queue < tx_cnt; queue++) {
417 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
418 
419 		if (tx_q->dirty_tx != tx_q->cur_tx)
420 			return -EBUSY; /* still unfinished work */
421 	}
422 
423 	/* Check and enter in LPI mode */
424 	if (!priv->tx_path_in_lpi_mode)
425 		stmmac_set_eee_mode(priv, priv->hw,
426 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
427 	return 0;
428 }
429 
430 /**
431  * stmmac_disable_eee_mode - disable and exit from LPI mode
432  * @priv: driver private structure
433  * Description: this function is to exit and disable EEE in case of
434  * LPI state is true. This is called by the xmit.
435  */
436 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
437 {
438 	if (!priv->eee_sw_timer_en) {
439 		stmmac_lpi_entry_timer_config(priv, 0);
440 		return;
441 	}
442 
443 	stmmac_reset_eee_mode(priv, priv->hw);
444 	del_timer_sync(&priv->eee_ctrl_timer);
445 	priv->tx_path_in_lpi_mode = false;
446 }
447 
448 /**
449  * stmmac_eee_ctrl_timer - EEE TX SW timer.
450  * @t:  timer_list struct containing private info
451  * Description:
452  *  if there is no data transfer and if we are not in LPI state,
453  *  then MAC Transmitter can be moved to LPI state.
454  */
455 static void stmmac_eee_ctrl_timer(struct timer_list *t)
456 {
457 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
458 
459 	if (stmmac_enable_eee_mode(priv))
460 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
461 }
462 
463 /**
464  * stmmac_eee_init - init EEE
465  * @priv: driver private structure
466  * Description:
467  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
468  *  can also manage EEE, this function enable the LPI state and start related
469  *  timer.
470  */
471 bool stmmac_eee_init(struct stmmac_priv *priv)
472 {
473 	int eee_tw_timer = priv->eee_tw_timer;
474 
475 	/* Check if MAC core supports the EEE feature. */
476 	if (!priv->dma_cap.eee)
477 		return false;
478 
479 	mutex_lock(&priv->lock);
480 
481 	/* Check if it needs to be deactivated */
482 	if (!priv->eee_active) {
483 		if (priv->eee_enabled) {
484 			netdev_dbg(priv->dev, "disable EEE\n");
485 			stmmac_lpi_entry_timer_config(priv, 0);
486 			del_timer_sync(&priv->eee_ctrl_timer);
487 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
488 			if (priv->hw->xpcs)
489 				xpcs_config_eee(priv->hw->xpcs,
490 						priv->plat->mult_fact_100ns,
491 						false);
492 		}
493 		mutex_unlock(&priv->lock);
494 		return false;
495 	}
496 
497 	if (priv->eee_active && !priv->eee_enabled) {
498 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
499 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
500 				     eee_tw_timer);
501 		if (priv->hw->xpcs)
502 			xpcs_config_eee(priv->hw->xpcs,
503 					priv->plat->mult_fact_100ns,
504 					true);
505 	}
506 
507 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
508 		del_timer_sync(&priv->eee_ctrl_timer);
509 		priv->tx_path_in_lpi_mode = false;
510 		stmmac_lpi_entry_timer_config(priv, 1);
511 	} else {
512 		stmmac_lpi_entry_timer_config(priv, 0);
513 		mod_timer(&priv->eee_ctrl_timer,
514 			  STMMAC_LPI_T(priv->tx_lpi_timer));
515 	}
516 
517 	mutex_unlock(&priv->lock);
518 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
519 	return true;
520 }
521 
522 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
523  * @priv: driver private structure
524  * @p : descriptor pointer
525  * @skb : the socket buffer
526  * Description :
527  * This function will read timestamp from the descriptor & pass it to stack.
528  * and also perform some sanity checks.
529  */
530 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
531 				   struct dma_desc *p, struct sk_buff *skb)
532 {
533 	struct skb_shared_hwtstamps shhwtstamp;
534 	bool found = false;
535 	u64 ns = 0;
536 
537 	if (!priv->hwts_tx_en)
538 		return;
539 
540 	/* exit if skb doesn't support hw tstamp */
541 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
542 		return;
543 
544 	/* check tx tstamp status */
545 	if (stmmac_get_tx_timestamp_status(priv, p)) {
546 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
547 		found = true;
548 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
549 		found = true;
550 	}
551 
552 	if (found) {
553 		ns -= priv->plat->cdc_error_adj;
554 
555 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
556 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
557 
558 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
559 		/* pass tstamp to stack */
560 		skb_tstamp_tx(skb, &shhwtstamp);
561 	}
562 }
563 
564 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
565  * @priv: driver private structure
566  * @p : descriptor pointer
567  * @np : next descriptor pointer
568  * @skb : the socket buffer
569  * Description :
570  * This function will read received packet's timestamp from the descriptor
571  * and pass it to stack. It also perform some sanity checks.
572  */
573 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
574 				   struct dma_desc *np, struct sk_buff *skb)
575 {
576 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
577 	struct dma_desc *desc = p;
578 	u64 ns = 0;
579 
580 	if (!priv->hwts_rx_en)
581 		return;
582 	/* For GMAC4, the valid timestamp is from CTX next desc. */
583 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
584 		desc = np;
585 
586 	/* Check if timestamp is available */
587 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
588 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
589 
590 		ns -= priv->plat->cdc_error_adj;
591 
592 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
593 		shhwtstamp = skb_hwtstamps(skb);
594 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
595 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
596 	} else  {
597 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
598 	}
599 }
600 
601 /**
602  *  stmmac_hwtstamp_set - control hardware timestamping.
603  *  @dev: device pointer.
604  *  @ifr: An IOCTL specific structure, that can contain a pointer to
605  *  a proprietary structure used to pass information to the driver.
606  *  Description:
607  *  This function configures the MAC to enable/disable both outgoing(TX)
608  *  and incoming(RX) packets time stamping based on user input.
609  *  Return Value:
610  *  0 on success and an appropriate -ve integer on failure.
611  */
612 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
613 {
614 	struct stmmac_priv *priv = netdev_priv(dev);
615 	struct hwtstamp_config config;
616 	u32 ptp_v2 = 0;
617 	u32 tstamp_all = 0;
618 	u32 ptp_over_ipv4_udp = 0;
619 	u32 ptp_over_ipv6_udp = 0;
620 	u32 ptp_over_ethernet = 0;
621 	u32 snap_type_sel = 0;
622 	u32 ts_master_en = 0;
623 	u32 ts_event_en = 0;
624 
625 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
626 		netdev_alert(priv->dev, "No support for HW time stamping\n");
627 		priv->hwts_tx_en = 0;
628 		priv->hwts_rx_en = 0;
629 
630 		return -EOPNOTSUPP;
631 	}
632 
633 	if (copy_from_user(&config, ifr->ifr_data,
634 			   sizeof(config)))
635 		return -EFAULT;
636 
637 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
638 		   __func__, config.flags, config.tx_type, config.rx_filter);
639 
640 	if (config.tx_type != HWTSTAMP_TX_OFF &&
641 	    config.tx_type != HWTSTAMP_TX_ON)
642 		return -ERANGE;
643 
644 	if (priv->adv_ts) {
645 		switch (config.rx_filter) {
646 		case HWTSTAMP_FILTER_NONE:
647 			/* time stamp no incoming packet at all */
648 			config.rx_filter = HWTSTAMP_FILTER_NONE;
649 			break;
650 
651 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
652 			/* PTP v1, UDP, any kind of event packet */
653 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
654 			/* 'xmac' hardware can support Sync, Pdelay_Req and
655 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
656 			 * This leaves Delay_Req timestamps out.
657 			 * Enable all events *and* general purpose message
658 			 * timestamping
659 			 */
660 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
661 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
662 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
663 			break;
664 
665 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
666 			/* PTP v1, UDP, Sync packet */
667 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
668 			/* take time stamp for SYNC messages only */
669 			ts_event_en = PTP_TCR_TSEVNTENA;
670 
671 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
672 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
673 			break;
674 
675 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
676 			/* PTP v1, UDP, Delay_req packet */
677 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
678 			/* take time stamp for Delay_Req messages only */
679 			ts_master_en = PTP_TCR_TSMSTRENA;
680 			ts_event_en = PTP_TCR_TSEVNTENA;
681 
682 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
683 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
684 			break;
685 
686 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
687 			/* PTP v2, UDP, any kind of event packet */
688 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
689 			ptp_v2 = PTP_TCR_TSVER2ENA;
690 			/* take time stamp for all event messages */
691 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
692 
693 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
694 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
695 			break;
696 
697 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
698 			/* PTP v2, UDP, Sync packet */
699 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
700 			ptp_v2 = PTP_TCR_TSVER2ENA;
701 			/* take time stamp for SYNC messages only */
702 			ts_event_en = PTP_TCR_TSEVNTENA;
703 
704 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
705 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
706 			break;
707 
708 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
709 			/* PTP v2, UDP, Delay_req packet */
710 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
711 			ptp_v2 = PTP_TCR_TSVER2ENA;
712 			/* take time stamp for Delay_Req messages only */
713 			ts_master_en = PTP_TCR_TSMSTRENA;
714 			ts_event_en = PTP_TCR_TSEVNTENA;
715 
716 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
717 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
718 			break;
719 
720 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
721 			/* PTP v2/802.AS1 any layer, any kind of event packet */
722 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
723 			ptp_v2 = PTP_TCR_TSVER2ENA;
724 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
725 			if (priv->synopsys_id < DWMAC_CORE_4_10)
726 				ts_event_en = PTP_TCR_TSEVNTENA;
727 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
728 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
729 			ptp_over_ethernet = PTP_TCR_TSIPENA;
730 			break;
731 
732 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
733 			/* PTP v2/802.AS1, any layer, Sync packet */
734 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
735 			ptp_v2 = PTP_TCR_TSVER2ENA;
736 			/* take time stamp for SYNC messages only */
737 			ts_event_en = PTP_TCR_TSEVNTENA;
738 
739 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
740 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
741 			ptp_over_ethernet = PTP_TCR_TSIPENA;
742 			break;
743 
744 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
745 			/* PTP v2/802.AS1, any layer, Delay_req packet */
746 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
747 			ptp_v2 = PTP_TCR_TSVER2ENA;
748 			/* take time stamp for Delay_Req messages only */
749 			ts_master_en = PTP_TCR_TSMSTRENA;
750 			ts_event_en = PTP_TCR_TSEVNTENA;
751 
752 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
753 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
754 			ptp_over_ethernet = PTP_TCR_TSIPENA;
755 			break;
756 
757 		case HWTSTAMP_FILTER_NTP_ALL:
758 		case HWTSTAMP_FILTER_ALL:
759 			/* time stamp any incoming packet */
760 			config.rx_filter = HWTSTAMP_FILTER_ALL;
761 			tstamp_all = PTP_TCR_TSENALL;
762 			break;
763 
764 		default:
765 			return -ERANGE;
766 		}
767 	} else {
768 		switch (config.rx_filter) {
769 		case HWTSTAMP_FILTER_NONE:
770 			config.rx_filter = HWTSTAMP_FILTER_NONE;
771 			break;
772 		default:
773 			/* PTP v1, UDP, any kind of event packet */
774 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
775 			break;
776 		}
777 	}
778 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
779 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
780 
781 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
782 
783 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
784 		priv->systime_flags |= tstamp_all | ptp_v2 |
785 				       ptp_over_ethernet | ptp_over_ipv6_udp |
786 				       ptp_over_ipv4_udp | ts_event_en |
787 				       ts_master_en | snap_type_sel;
788 	}
789 
790 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
791 
792 	memcpy(&priv->tstamp_config, &config, sizeof(config));
793 
794 	return copy_to_user(ifr->ifr_data, &config,
795 			    sizeof(config)) ? -EFAULT : 0;
796 }
797 
798 /**
799  *  stmmac_hwtstamp_get - read hardware timestamping.
800  *  @dev: device pointer.
801  *  @ifr: An IOCTL specific structure, that can contain a pointer to
802  *  a proprietary structure used to pass information to the driver.
803  *  Description:
804  *  This function obtain the current hardware timestamping settings
805  *  as requested.
806  */
807 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
808 {
809 	struct stmmac_priv *priv = netdev_priv(dev);
810 	struct hwtstamp_config *config = &priv->tstamp_config;
811 
812 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
813 		return -EOPNOTSUPP;
814 
815 	return copy_to_user(ifr->ifr_data, config,
816 			    sizeof(*config)) ? -EFAULT : 0;
817 }
818 
819 /**
820  * stmmac_init_tstamp_counter - init hardware timestamping counter
821  * @priv: driver private structure
822  * @systime_flags: timestamping flags
823  * Description:
824  * Initialize hardware counter for packet timestamping.
825  * This is valid as long as the interface is open and not suspended.
826  * Will be rerun after resuming from suspend, case in which the timestamping
827  * flags updated by stmmac_hwtstamp_set() also need to be restored.
828  */
829 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
830 {
831 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
832 	struct timespec64 now;
833 	u32 sec_inc = 0;
834 	u64 temp = 0;
835 
836 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
837 		return -EOPNOTSUPP;
838 
839 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
840 	priv->systime_flags = systime_flags;
841 
842 	/* program Sub Second Increment reg */
843 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
844 					   priv->plat->clk_ptp_rate,
845 					   xmac, &sec_inc);
846 	temp = div_u64(1000000000ULL, sec_inc);
847 
848 	/* Store sub second increment for later use */
849 	priv->sub_second_inc = sec_inc;
850 
851 	/* calculate default added value:
852 	 * formula is :
853 	 * addend = (2^32)/freq_div_ratio;
854 	 * where, freq_div_ratio = 1e9ns/sec_inc
855 	 */
856 	temp = (u64)(temp << 32);
857 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
858 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
859 
860 	/* initialize system time */
861 	ktime_get_real_ts64(&now);
862 
863 	/* lower 32 bits of tv_sec are safe until y2106 */
864 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
865 
866 	return 0;
867 }
868 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
869 
870 /**
871  * stmmac_init_ptp - init PTP
872  * @priv: driver private structure
873  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
874  * This is done by looking at the HW cap. register.
875  * This function also registers the ptp driver.
876  */
877 static int stmmac_init_ptp(struct stmmac_priv *priv)
878 {
879 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
880 	int ret;
881 
882 	if (priv->plat->ptp_clk_freq_config)
883 		priv->plat->ptp_clk_freq_config(priv);
884 
885 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
886 	if (ret)
887 		return ret;
888 
889 	priv->adv_ts = 0;
890 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
891 	if (xmac && priv->dma_cap.atime_stamp)
892 		priv->adv_ts = 1;
893 	/* Dwmac 3.x core with extend_desc can support adv_ts */
894 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
895 		priv->adv_ts = 1;
896 
897 	if (priv->dma_cap.time_stamp)
898 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
899 
900 	if (priv->adv_ts)
901 		netdev_info(priv->dev,
902 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
903 
904 	priv->hwts_tx_en = 0;
905 	priv->hwts_rx_en = 0;
906 
907 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
908 		stmmac_hwtstamp_correct_latency(priv, priv);
909 
910 	return 0;
911 }
912 
913 static void stmmac_release_ptp(struct stmmac_priv *priv)
914 {
915 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
916 	stmmac_ptp_unregister(priv);
917 }
918 
919 /**
920  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
921  *  @priv: driver private structure
922  *  @duplex: duplex passed to the next function
923  *  Description: It is used for configuring the flow control in all queues
924  */
925 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
926 {
927 	u32 tx_cnt = priv->plat->tx_queues_to_use;
928 
929 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
930 			priv->pause, tx_cnt);
931 }
932 
933 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
934 					 phy_interface_t interface)
935 {
936 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
937 
938 	/* Refresh the MAC-specific capabilities */
939 	stmmac_mac_update_caps(priv);
940 
941 	config->mac_capabilities = priv->hw->link.caps;
942 
943 	if (priv->plat->max_speed)
944 		phylink_limit_mac_speed(config, priv->plat->max_speed);
945 
946 	return config->mac_capabilities;
947 }
948 
949 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
950 						 phy_interface_t interface)
951 {
952 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
953 	struct phylink_pcs *pcs;
954 
955 	if (priv->plat->select_pcs) {
956 		pcs = priv->plat->select_pcs(priv, interface);
957 		if (!IS_ERR(pcs))
958 			return pcs;
959 	}
960 
961 	return NULL;
962 }
963 
964 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
965 			      const struct phylink_link_state *state)
966 {
967 	/* Nothing to do, xpcs_config() handles everything */
968 }
969 
970 static void stmmac_mac_link_down(struct phylink_config *config,
971 				 unsigned int mode, phy_interface_t interface)
972 {
973 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
974 
975 	stmmac_mac_set(priv, priv->ioaddr, false);
976 	priv->eee_active = false;
977 	priv->tx_lpi_enabled = false;
978 	priv->eee_enabled = stmmac_eee_init(priv);
979 	stmmac_set_eee_pls(priv, priv->hw, false);
980 
981 	if (stmmac_fpe_supported(priv))
982 		stmmac_fpe_link_state_handle(priv, false);
983 }
984 
985 static void stmmac_mac_link_up(struct phylink_config *config,
986 			       struct phy_device *phy,
987 			       unsigned int mode, phy_interface_t interface,
988 			       int speed, int duplex,
989 			       bool tx_pause, bool rx_pause)
990 {
991 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
992 	u32 old_ctrl, ctrl;
993 
994 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
995 	    priv->plat->serdes_powerup)
996 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
997 
998 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
999 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1000 
1001 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1002 		switch (speed) {
1003 		case SPEED_10000:
1004 			ctrl |= priv->hw->link.xgmii.speed10000;
1005 			break;
1006 		case SPEED_5000:
1007 			ctrl |= priv->hw->link.xgmii.speed5000;
1008 			break;
1009 		case SPEED_2500:
1010 			ctrl |= priv->hw->link.xgmii.speed2500;
1011 			break;
1012 		default:
1013 			return;
1014 		}
1015 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1016 		switch (speed) {
1017 		case SPEED_100000:
1018 			ctrl |= priv->hw->link.xlgmii.speed100000;
1019 			break;
1020 		case SPEED_50000:
1021 			ctrl |= priv->hw->link.xlgmii.speed50000;
1022 			break;
1023 		case SPEED_40000:
1024 			ctrl |= priv->hw->link.xlgmii.speed40000;
1025 			break;
1026 		case SPEED_25000:
1027 			ctrl |= priv->hw->link.xlgmii.speed25000;
1028 			break;
1029 		case SPEED_10000:
1030 			ctrl |= priv->hw->link.xgmii.speed10000;
1031 			break;
1032 		case SPEED_2500:
1033 			ctrl |= priv->hw->link.speed2500;
1034 			break;
1035 		case SPEED_1000:
1036 			ctrl |= priv->hw->link.speed1000;
1037 			break;
1038 		default:
1039 			return;
1040 		}
1041 	} else {
1042 		switch (speed) {
1043 		case SPEED_2500:
1044 			ctrl |= priv->hw->link.speed2500;
1045 			break;
1046 		case SPEED_1000:
1047 			ctrl |= priv->hw->link.speed1000;
1048 			break;
1049 		case SPEED_100:
1050 			ctrl |= priv->hw->link.speed100;
1051 			break;
1052 		case SPEED_10:
1053 			ctrl |= priv->hw->link.speed10;
1054 			break;
1055 		default:
1056 			return;
1057 		}
1058 	}
1059 
1060 	priv->speed = speed;
1061 
1062 	if (priv->plat->fix_mac_speed)
1063 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1064 
1065 	if (!duplex)
1066 		ctrl &= ~priv->hw->link.duplex;
1067 	else
1068 		ctrl |= priv->hw->link.duplex;
1069 
1070 	/* Flow Control operation */
1071 	if (rx_pause && tx_pause)
1072 		priv->flow_ctrl = FLOW_AUTO;
1073 	else if (rx_pause && !tx_pause)
1074 		priv->flow_ctrl = FLOW_RX;
1075 	else if (!rx_pause && tx_pause)
1076 		priv->flow_ctrl = FLOW_TX;
1077 	else
1078 		priv->flow_ctrl = FLOW_OFF;
1079 
1080 	stmmac_mac_flow_ctrl(priv, duplex);
1081 
1082 	if (ctrl != old_ctrl)
1083 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1084 
1085 	stmmac_mac_set(priv, priv->ioaddr, true);
1086 	if (phy && priv->dma_cap.eee) {
1087 		priv->eee_active =
1088 			phy_init_eee(phy, !(priv->plat->flags &
1089 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1090 		priv->eee_enabled = stmmac_eee_init(priv);
1091 		priv->tx_lpi_enabled = priv->eee_enabled;
1092 		stmmac_set_eee_pls(priv, priv->hw, true);
1093 	}
1094 
1095 	if (stmmac_fpe_supported(priv))
1096 		stmmac_fpe_link_state_handle(priv, true);
1097 
1098 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1099 		stmmac_hwtstamp_correct_latency(priv, priv);
1100 }
1101 
1102 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1103 	.mac_get_caps = stmmac_mac_get_caps,
1104 	.mac_select_pcs = stmmac_mac_select_pcs,
1105 	.mac_config = stmmac_mac_config,
1106 	.mac_link_down = stmmac_mac_link_down,
1107 	.mac_link_up = stmmac_mac_link_up,
1108 };
1109 
1110 /**
1111  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1112  * @priv: driver private structure
1113  * Description: this is to verify if the HW supports the PCS.
1114  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1115  * configured for the TBI, RTBI, or SGMII PHY interface.
1116  */
1117 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1118 {
1119 	int interface = priv->plat->mac_interface;
1120 
1121 	if (priv->dma_cap.pcs) {
1122 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1123 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1124 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1125 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1126 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1127 			priv->hw->pcs = STMMAC_PCS_RGMII;
1128 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1129 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1130 			priv->hw->pcs = STMMAC_PCS_SGMII;
1131 		}
1132 	}
1133 }
1134 
1135 /**
1136  * stmmac_init_phy - PHY initialization
1137  * @dev: net device structure
1138  * Description: it initializes the driver's PHY state, and attaches the PHY
1139  * to the mac driver.
1140  *  Return value:
1141  *  0 on success
1142  */
1143 static int stmmac_init_phy(struct net_device *dev)
1144 {
1145 	struct stmmac_priv *priv = netdev_priv(dev);
1146 	struct fwnode_handle *phy_fwnode;
1147 	struct fwnode_handle *fwnode;
1148 	int ret;
1149 
1150 	if (!phylink_expects_phy(priv->phylink))
1151 		return 0;
1152 
1153 	fwnode = priv->plat->port_node;
1154 	if (!fwnode)
1155 		fwnode = dev_fwnode(priv->device);
1156 
1157 	if (fwnode)
1158 		phy_fwnode = fwnode_get_phy_node(fwnode);
1159 	else
1160 		phy_fwnode = NULL;
1161 
1162 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1163 	 * manually parse it
1164 	 */
1165 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1166 		int addr = priv->plat->phy_addr;
1167 		struct phy_device *phydev;
1168 
1169 		if (addr < 0) {
1170 			netdev_err(priv->dev, "no phy found\n");
1171 			return -ENODEV;
1172 		}
1173 
1174 		phydev = mdiobus_get_phy(priv->mii, addr);
1175 		if (!phydev) {
1176 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1177 			return -ENODEV;
1178 		}
1179 
1180 		ret = phylink_connect_phy(priv->phylink, phydev);
1181 	} else {
1182 		fwnode_handle_put(phy_fwnode);
1183 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1184 	}
1185 
1186 	if (!priv->plat->pmt) {
1187 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1188 
1189 		phylink_ethtool_get_wol(priv->phylink, &wol);
1190 		device_set_wakeup_capable(priv->device, !!wol.supported);
1191 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1192 	}
1193 
1194 	return ret;
1195 }
1196 
1197 static int stmmac_phy_setup(struct stmmac_priv *priv)
1198 {
1199 	struct stmmac_mdio_bus_data *mdio_bus_data;
1200 	int mode = priv->plat->phy_interface;
1201 	struct fwnode_handle *fwnode;
1202 	struct phylink *phylink;
1203 
1204 	priv->phylink_config.dev = &priv->dev->dev;
1205 	priv->phylink_config.type = PHYLINK_NETDEV;
1206 	priv->phylink_config.mac_managed_pm = true;
1207 
1208 	/* Stmmac always requires an RX clock for hardware initialization */
1209 	priv->phylink_config.mac_requires_rxc = true;
1210 
1211 	mdio_bus_data = priv->plat->mdio_bus_data;
1212 	if (mdio_bus_data)
1213 		priv->phylink_config.default_an_inband =
1214 			mdio_bus_data->default_an_inband;
1215 
1216 	/* Set the platform/firmware specified interface mode. Note, phylink
1217 	 * deals with the PHY interface mode, not the MAC interface mode.
1218 	 */
1219 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1220 
1221 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1222 	if (priv->hw->xpcs)
1223 		xpcs_get_interfaces(priv->hw->xpcs,
1224 				    priv->phylink_config.supported_interfaces);
1225 
1226 	fwnode = priv->plat->port_node;
1227 	if (!fwnode)
1228 		fwnode = dev_fwnode(priv->device);
1229 
1230 	phylink = phylink_create(&priv->phylink_config, fwnode,
1231 				 mode, &stmmac_phylink_mac_ops);
1232 	if (IS_ERR(phylink))
1233 		return PTR_ERR(phylink);
1234 
1235 	priv->phylink = phylink;
1236 	return 0;
1237 }
1238 
1239 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1240 				    struct stmmac_dma_conf *dma_conf)
1241 {
1242 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1243 	unsigned int desc_size;
1244 	void *head_rx;
1245 	u32 queue;
1246 
1247 	/* Display RX rings */
1248 	for (queue = 0; queue < rx_cnt; queue++) {
1249 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1250 
1251 		pr_info("\tRX Queue %u rings\n", queue);
1252 
1253 		if (priv->extend_desc) {
1254 			head_rx = (void *)rx_q->dma_erx;
1255 			desc_size = sizeof(struct dma_extended_desc);
1256 		} else {
1257 			head_rx = (void *)rx_q->dma_rx;
1258 			desc_size = sizeof(struct dma_desc);
1259 		}
1260 
1261 		/* Display RX ring */
1262 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1263 				    rx_q->dma_rx_phy, desc_size);
1264 	}
1265 }
1266 
1267 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1268 				    struct stmmac_dma_conf *dma_conf)
1269 {
1270 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1271 	unsigned int desc_size;
1272 	void *head_tx;
1273 	u32 queue;
1274 
1275 	/* Display TX rings */
1276 	for (queue = 0; queue < tx_cnt; queue++) {
1277 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1278 
1279 		pr_info("\tTX Queue %d rings\n", queue);
1280 
1281 		if (priv->extend_desc) {
1282 			head_tx = (void *)tx_q->dma_etx;
1283 			desc_size = sizeof(struct dma_extended_desc);
1284 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1285 			head_tx = (void *)tx_q->dma_entx;
1286 			desc_size = sizeof(struct dma_edesc);
1287 		} else {
1288 			head_tx = (void *)tx_q->dma_tx;
1289 			desc_size = sizeof(struct dma_desc);
1290 		}
1291 
1292 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1293 				    tx_q->dma_tx_phy, desc_size);
1294 	}
1295 }
1296 
1297 static void stmmac_display_rings(struct stmmac_priv *priv,
1298 				 struct stmmac_dma_conf *dma_conf)
1299 {
1300 	/* Display RX ring */
1301 	stmmac_display_rx_rings(priv, dma_conf);
1302 
1303 	/* Display TX ring */
1304 	stmmac_display_tx_rings(priv, dma_conf);
1305 }
1306 
1307 static int stmmac_set_bfsize(int mtu, int bufsize)
1308 {
1309 	int ret = bufsize;
1310 
1311 	if (mtu >= BUF_SIZE_8KiB)
1312 		ret = BUF_SIZE_16KiB;
1313 	else if (mtu >= BUF_SIZE_4KiB)
1314 		ret = BUF_SIZE_8KiB;
1315 	else if (mtu >= BUF_SIZE_2KiB)
1316 		ret = BUF_SIZE_4KiB;
1317 	else if (mtu > DEFAULT_BUFSIZE)
1318 		ret = BUF_SIZE_2KiB;
1319 	else
1320 		ret = DEFAULT_BUFSIZE;
1321 
1322 	return ret;
1323 }
1324 
1325 /**
1326  * stmmac_clear_rx_descriptors - clear RX descriptors
1327  * @priv: driver private structure
1328  * @dma_conf: structure to take the dma data
1329  * @queue: RX queue index
1330  * Description: this function is called to clear the RX descriptors
1331  * in case of both basic and extended descriptors are used.
1332  */
1333 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1334 					struct stmmac_dma_conf *dma_conf,
1335 					u32 queue)
1336 {
1337 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1338 	int i;
1339 
1340 	/* Clear the RX descriptors */
1341 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1342 		if (priv->extend_desc)
1343 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1344 					priv->use_riwt, priv->mode,
1345 					(i == dma_conf->dma_rx_size - 1),
1346 					dma_conf->dma_buf_sz);
1347 		else
1348 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1349 					priv->use_riwt, priv->mode,
1350 					(i == dma_conf->dma_rx_size - 1),
1351 					dma_conf->dma_buf_sz);
1352 }
1353 
1354 /**
1355  * stmmac_clear_tx_descriptors - clear tx descriptors
1356  * @priv: driver private structure
1357  * @dma_conf: structure to take the dma data
1358  * @queue: TX queue index.
1359  * Description: this function is called to clear the TX descriptors
1360  * in case of both basic and extended descriptors are used.
1361  */
1362 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1363 					struct stmmac_dma_conf *dma_conf,
1364 					u32 queue)
1365 {
1366 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1367 	int i;
1368 
1369 	/* Clear the TX descriptors */
1370 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1371 		int last = (i == (dma_conf->dma_tx_size - 1));
1372 		struct dma_desc *p;
1373 
1374 		if (priv->extend_desc)
1375 			p = &tx_q->dma_etx[i].basic;
1376 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1377 			p = &tx_q->dma_entx[i].basic;
1378 		else
1379 			p = &tx_q->dma_tx[i];
1380 
1381 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1382 	}
1383 }
1384 
1385 /**
1386  * stmmac_clear_descriptors - clear descriptors
1387  * @priv: driver private structure
1388  * @dma_conf: structure to take the dma data
1389  * Description: this function is called to clear the TX and RX descriptors
1390  * in case of both basic and extended descriptors are used.
1391  */
1392 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1393 				     struct stmmac_dma_conf *dma_conf)
1394 {
1395 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1396 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1397 	u32 queue;
1398 
1399 	/* Clear the RX descriptors */
1400 	for (queue = 0; queue < rx_queue_cnt; queue++)
1401 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1402 
1403 	/* Clear the TX descriptors */
1404 	for (queue = 0; queue < tx_queue_cnt; queue++)
1405 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1406 }
1407 
1408 /**
1409  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1410  * @priv: driver private structure
1411  * @dma_conf: structure to take the dma data
1412  * @p: descriptor pointer
1413  * @i: descriptor index
1414  * @flags: gfp flag
1415  * @queue: RX queue index
1416  * Description: this function is called to allocate a receive buffer, perform
1417  * the DMA mapping and init the descriptor.
1418  */
1419 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1420 				  struct stmmac_dma_conf *dma_conf,
1421 				  struct dma_desc *p,
1422 				  int i, gfp_t flags, u32 queue)
1423 {
1424 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1425 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1426 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1427 
1428 	if (priv->dma_cap.host_dma_width <= 32)
1429 		gfp |= GFP_DMA32;
1430 
1431 	if (!buf->page) {
1432 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1433 		if (!buf->page)
1434 			return -ENOMEM;
1435 		buf->page_offset = stmmac_rx_offset(priv);
1436 	}
1437 
1438 	if (priv->sph && !buf->sec_page) {
1439 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1440 		if (!buf->sec_page)
1441 			return -ENOMEM;
1442 
1443 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1444 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1445 	} else {
1446 		buf->sec_page = NULL;
1447 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1448 	}
1449 
1450 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1451 
1452 	stmmac_set_desc_addr(priv, p, buf->addr);
1453 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1454 		stmmac_init_desc3(priv, p);
1455 
1456 	return 0;
1457 }
1458 
1459 /**
1460  * stmmac_free_rx_buffer - free RX dma buffers
1461  * @priv: private structure
1462  * @rx_q: RX queue
1463  * @i: buffer index.
1464  */
1465 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1466 				  struct stmmac_rx_queue *rx_q,
1467 				  int i)
1468 {
1469 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1470 
1471 	if (buf->page)
1472 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1473 	buf->page = NULL;
1474 
1475 	if (buf->sec_page)
1476 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1477 	buf->sec_page = NULL;
1478 }
1479 
1480 /**
1481  * stmmac_free_tx_buffer - free RX dma buffers
1482  * @priv: private structure
1483  * @dma_conf: structure to take the dma data
1484  * @queue: RX queue index
1485  * @i: buffer index.
1486  */
1487 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1488 				  struct stmmac_dma_conf *dma_conf,
1489 				  u32 queue, int i)
1490 {
1491 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1492 
1493 	if (tx_q->tx_skbuff_dma[i].buf &&
1494 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1495 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1496 			dma_unmap_page(priv->device,
1497 				       tx_q->tx_skbuff_dma[i].buf,
1498 				       tx_q->tx_skbuff_dma[i].len,
1499 				       DMA_TO_DEVICE);
1500 		else
1501 			dma_unmap_single(priv->device,
1502 					 tx_q->tx_skbuff_dma[i].buf,
1503 					 tx_q->tx_skbuff_dma[i].len,
1504 					 DMA_TO_DEVICE);
1505 	}
1506 
1507 	if (tx_q->xdpf[i] &&
1508 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1509 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1510 		xdp_return_frame(tx_q->xdpf[i]);
1511 		tx_q->xdpf[i] = NULL;
1512 	}
1513 
1514 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1515 		tx_q->xsk_frames_done++;
1516 
1517 	if (tx_q->tx_skbuff[i] &&
1518 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1519 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1520 		tx_q->tx_skbuff[i] = NULL;
1521 	}
1522 
1523 	tx_q->tx_skbuff_dma[i].buf = 0;
1524 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1525 }
1526 
1527 /**
1528  * dma_free_rx_skbufs - free RX dma buffers
1529  * @priv: private structure
1530  * @dma_conf: structure to take the dma data
1531  * @queue: RX queue index
1532  */
1533 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1534 			       struct stmmac_dma_conf *dma_conf,
1535 			       u32 queue)
1536 {
1537 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1538 	int i;
1539 
1540 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1541 		stmmac_free_rx_buffer(priv, rx_q, i);
1542 }
1543 
1544 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1545 				   struct stmmac_dma_conf *dma_conf,
1546 				   u32 queue, gfp_t flags)
1547 {
1548 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1549 	int i;
1550 
1551 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1552 		struct dma_desc *p;
1553 		int ret;
1554 
1555 		if (priv->extend_desc)
1556 			p = &((rx_q->dma_erx + i)->basic);
1557 		else
1558 			p = rx_q->dma_rx + i;
1559 
1560 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1561 					     queue);
1562 		if (ret)
1563 			return ret;
1564 
1565 		rx_q->buf_alloc_num++;
1566 	}
1567 
1568 	return 0;
1569 }
1570 
1571 /**
1572  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1573  * @priv: private structure
1574  * @dma_conf: structure to take the dma data
1575  * @queue: RX queue index
1576  */
1577 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1578 				struct stmmac_dma_conf *dma_conf,
1579 				u32 queue)
1580 {
1581 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1582 	int i;
1583 
1584 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1585 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1586 
1587 		if (!buf->xdp)
1588 			continue;
1589 
1590 		xsk_buff_free(buf->xdp);
1591 		buf->xdp = NULL;
1592 	}
1593 }
1594 
1595 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1596 				      struct stmmac_dma_conf *dma_conf,
1597 				      u32 queue)
1598 {
1599 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1600 	int i;
1601 
1602 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1603 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1604 	 * use this macro to make sure no size violations.
1605 	 */
1606 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1607 
1608 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1609 		struct stmmac_rx_buffer *buf;
1610 		dma_addr_t dma_addr;
1611 		struct dma_desc *p;
1612 
1613 		if (priv->extend_desc)
1614 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1615 		else
1616 			p = rx_q->dma_rx + i;
1617 
1618 		buf = &rx_q->buf_pool[i];
1619 
1620 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1621 		if (!buf->xdp)
1622 			return -ENOMEM;
1623 
1624 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1625 		stmmac_set_desc_addr(priv, p, dma_addr);
1626 		rx_q->buf_alloc_num++;
1627 	}
1628 
1629 	return 0;
1630 }
1631 
1632 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1633 {
1634 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1635 		return NULL;
1636 
1637 	return xsk_get_pool_from_qid(priv->dev, queue);
1638 }
1639 
1640 /**
1641  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1642  * @priv: driver private structure
1643  * @dma_conf: structure to take the dma data
1644  * @queue: RX queue index
1645  * @flags: gfp flag.
1646  * Description: this function initializes the DMA RX descriptors
1647  * and allocates the socket buffers. It supports the chained and ring
1648  * modes.
1649  */
1650 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1651 				    struct stmmac_dma_conf *dma_conf,
1652 				    u32 queue, gfp_t flags)
1653 {
1654 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1655 	int ret;
1656 
1657 	netif_dbg(priv, probe, priv->dev,
1658 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1659 		  (u32)rx_q->dma_rx_phy);
1660 
1661 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1662 
1663 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1664 
1665 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1666 
1667 	if (rx_q->xsk_pool) {
1668 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1669 						   MEM_TYPE_XSK_BUFF_POOL,
1670 						   NULL));
1671 		netdev_info(priv->dev,
1672 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1673 			    rx_q->queue_index);
1674 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1675 	} else {
1676 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1677 						   MEM_TYPE_PAGE_POOL,
1678 						   rx_q->page_pool));
1679 		netdev_info(priv->dev,
1680 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1681 			    rx_q->queue_index);
1682 	}
1683 
1684 	if (rx_q->xsk_pool) {
1685 		/* RX XDP ZC buffer pool may not be populated, e.g.
1686 		 * xdpsock TX-only.
1687 		 */
1688 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1689 	} else {
1690 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1691 		if (ret < 0)
1692 			return -ENOMEM;
1693 	}
1694 
1695 	/* Setup the chained descriptor addresses */
1696 	if (priv->mode == STMMAC_CHAIN_MODE) {
1697 		if (priv->extend_desc)
1698 			stmmac_mode_init(priv, rx_q->dma_erx,
1699 					 rx_q->dma_rx_phy,
1700 					 dma_conf->dma_rx_size, 1);
1701 		else
1702 			stmmac_mode_init(priv, rx_q->dma_rx,
1703 					 rx_q->dma_rx_phy,
1704 					 dma_conf->dma_rx_size, 0);
1705 	}
1706 
1707 	return 0;
1708 }
1709 
1710 static int init_dma_rx_desc_rings(struct net_device *dev,
1711 				  struct stmmac_dma_conf *dma_conf,
1712 				  gfp_t flags)
1713 {
1714 	struct stmmac_priv *priv = netdev_priv(dev);
1715 	u32 rx_count = priv->plat->rx_queues_to_use;
1716 	int queue;
1717 	int ret;
1718 
1719 	/* RX INITIALIZATION */
1720 	netif_dbg(priv, probe, priv->dev,
1721 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1722 
1723 	for (queue = 0; queue < rx_count; queue++) {
1724 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1725 		if (ret)
1726 			goto err_init_rx_buffers;
1727 	}
1728 
1729 	return 0;
1730 
1731 err_init_rx_buffers:
1732 	while (queue >= 0) {
1733 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1734 
1735 		if (rx_q->xsk_pool)
1736 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1737 		else
1738 			dma_free_rx_skbufs(priv, dma_conf, queue);
1739 
1740 		rx_q->buf_alloc_num = 0;
1741 		rx_q->xsk_pool = NULL;
1742 
1743 		queue--;
1744 	}
1745 
1746 	return ret;
1747 }
1748 
1749 /**
1750  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1751  * @priv: driver private structure
1752  * @dma_conf: structure to take the dma data
1753  * @queue: TX queue index
1754  * Description: this function initializes the DMA TX descriptors
1755  * and allocates the socket buffers. It supports the chained and ring
1756  * modes.
1757  */
1758 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1759 				    struct stmmac_dma_conf *dma_conf,
1760 				    u32 queue)
1761 {
1762 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1763 	int i;
1764 
1765 	netif_dbg(priv, probe, priv->dev,
1766 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1767 		  (u32)tx_q->dma_tx_phy);
1768 
1769 	/* Setup the chained descriptor addresses */
1770 	if (priv->mode == STMMAC_CHAIN_MODE) {
1771 		if (priv->extend_desc)
1772 			stmmac_mode_init(priv, tx_q->dma_etx,
1773 					 tx_q->dma_tx_phy,
1774 					 dma_conf->dma_tx_size, 1);
1775 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1776 			stmmac_mode_init(priv, tx_q->dma_tx,
1777 					 tx_q->dma_tx_phy,
1778 					 dma_conf->dma_tx_size, 0);
1779 	}
1780 
1781 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1782 
1783 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1784 		struct dma_desc *p;
1785 
1786 		if (priv->extend_desc)
1787 			p = &((tx_q->dma_etx + i)->basic);
1788 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1789 			p = &((tx_q->dma_entx + i)->basic);
1790 		else
1791 			p = tx_q->dma_tx + i;
1792 
1793 		stmmac_clear_desc(priv, p);
1794 
1795 		tx_q->tx_skbuff_dma[i].buf = 0;
1796 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1797 		tx_q->tx_skbuff_dma[i].len = 0;
1798 		tx_q->tx_skbuff_dma[i].last_segment = false;
1799 		tx_q->tx_skbuff[i] = NULL;
1800 	}
1801 
1802 	return 0;
1803 }
1804 
1805 static int init_dma_tx_desc_rings(struct net_device *dev,
1806 				  struct stmmac_dma_conf *dma_conf)
1807 {
1808 	struct stmmac_priv *priv = netdev_priv(dev);
1809 	u32 tx_queue_cnt;
1810 	u32 queue;
1811 
1812 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1813 
1814 	for (queue = 0; queue < tx_queue_cnt; queue++)
1815 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1816 
1817 	return 0;
1818 }
1819 
1820 /**
1821  * init_dma_desc_rings - init the RX/TX descriptor rings
1822  * @dev: net device structure
1823  * @dma_conf: structure to take the dma data
1824  * @flags: gfp flag.
1825  * Description: this function initializes the DMA RX/TX descriptors
1826  * and allocates the socket buffers. It supports the chained and ring
1827  * modes.
1828  */
1829 static int init_dma_desc_rings(struct net_device *dev,
1830 			       struct stmmac_dma_conf *dma_conf,
1831 			       gfp_t flags)
1832 {
1833 	struct stmmac_priv *priv = netdev_priv(dev);
1834 	int ret;
1835 
1836 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1837 	if (ret)
1838 		return ret;
1839 
1840 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1841 
1842 	stmmac_clear_descriptors(priv, dma_conf);
1843 
1844 	if (netif_msg_hw(priv))
1845 		stmmac_display_rings(priv, dma_conf);
1846 
1847 	return ret;
1848 }
1849 
1850 /**
1851  * dma_free_tx_skbufs - free TX dma buffers
1852  * @priv: private structure
1853  * @dma_conf: structure to take the dma data
1854  * @queue: TX queue index
1855  */
1856 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1857 			       struct stmmac_dma_conf *dma_conf,
1858 			       u32 queue)
1859 {
1860 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1861 	int i;
1862 
1863 	tx_q->xsk_frames_done = 0;
1864 
1865 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1866 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1867 
1868 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1869 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1870 		tx_q->xsk_frames_done = 0;
1871 		tx_q->xsk_pool = NULL;
1872 	}
1873 }
1874 
1875 /**
1876  * stmmac_free_tx_skbufs - free TX skb buffers
1877  * @priv: private structure
1878  */
1879 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1880 {
1881 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1882 	u32 queue;
1883 
1884 	for (queue = 0; queue < tx_queue_cnt; queue++)
1885 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1886 }
1887 
1888 /**
1889  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1890  * @priv: private structure
1891  * @dma_conf: structure to take the dma data
1892  * @queue: RX queue index
1893  */
1894 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1895 					 struct stmmac_dma_conf *dma_conf,
1896 					 u32 queue)
1897 {
1898 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1899 
1900 	/* Release the DMA RX socket buffers */
1901 	if (rx_q->xsk_pool)
1902 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1903 	else
1904 		dma_free_rx_skbufs(priv, dma_conf, queue);
1905 
1906 	rx_q->buf_alloc_num = 0;
1907 	rx_q->xsk_pool = NULL;
1908 
1909 	/* Free DMA regions of consistent memory previously allocated */
1910 	if (!priv->extend_desc)
1911 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1912 				  sizeof(struct dma_desc),
1913 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1914 	else
1915 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1916 				  sizeof(struct dma_extended_desc),
1917 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1918 
1919 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1920 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1921 
1922 	kfree(rx_q->buf_pool);
1923 	if (rx_q->page_pool)
1924 		page_pool_destroy(rx_q->page_pool);
1925 }
1926 
1927 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1928 				       struct stmmac_dma_conf *dma_conf)
1929 {
1930 	u32 rx_count = priv->plat->rx_queues_to_use;
1931 	u32 queue;
1932 
1933 	/* Free RX queue resources */
1934 	for (queue = 0; queue < rx_count; queue++)
1935 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1936 }
1937 
1938 /**
1939  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1940  * @priv: private structure
1941  * @dma_conf: structure to take the dma data
1942  * @queue: TX queue index
1943  */
1944 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1945 					 struct stmmac_dma_conf *dma_conf,
1946 					 u32 queue)
1947 {
1948 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1949 	size_t size;
1950 	void *addr;
1951 
1952 	/* Release the DMA TX socket buffers */
1953 	dma_free_tx_skbufs(priv, dma_conf, queue);
1954 
1955 	if (priv->extend_desc) {
1956 		size = sizeof(struct dma_extended_desc);
1957 		addr = tx_q->dma_etx;
1958 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1959 		size = sizeof(struct dma_edesc);
1960 		addr = tx_q->dma_entx;
1961 	} else {
1962 		size = sizeof(struct dma_desc);
1963 		addr = tx_q->dma_tx;
1964 	}
1965 
1966 	size *= dma_conf->dma_tx_size;
1967 
1968 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1969 
1970 	kfree(tx_q->tx_skbuff_dma);
1971 	kfree(tx_q->tx_skbuff);
1972 }
1973 
1974 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1975 				       struct stmmac_dma_conf *dma_conf)
1976 {
1977 	u32 tx_count = priv->plat->tx_queues_to_use;
1978 	u32 queue;
1979 
1980 	/* Free TX queue resources */
1981 	for (queue = 0; queue < tx_count; queue++)
1982 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1983 }
1984 
1985 /**
1986  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1987  * @priv: private structure
1988  * @dma_conf: structure to take the dma data
1989  * @queue: RX queue index
1990  * Description: according to which descriptor can be used (extend or basic)
1991  * this function allocates the resources for TX and RX paths. In case of
1992  * reception, for example, it pre-allocated the RX socket buffer in order to
1993  * allow zero-copy mechanism.
1994  */
1995 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
1996 					 struct stmmac_dma_conf *dma_conf,
1997 					 u32 queue)
1998 {
1999 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2000 	struct stmmac_channel *ch = &priv->channel[queue];
2001 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2002 	struct page_pool_params pp_params = { 0 };
2003 	unsigned int num_pages;
2004 	unsigned int napi_id;
2005 	int ret;
2006 
2007 	rx_q->queue_index = queue;
2008 	rx_q->priv_data = priv;
2009 
2010 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2011 	pp_params.pool_size = dma_conf->dma_rx_size;
2012 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2013 	pp_params.order = ilog2(num_pages);
2014 	pp_params.nid = dev_to_node(priv->device);
2015 	pp_params.dev = priv->device;
2016 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2017 	pp_params.offset = stmmac_rx_offset(priv);
2018 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2019 
2020 	rx_q->page_pool = page_pool_create(&pp_params);
2021 	if (IS_ERR(rx_q->page_pool)) {
2022 		ret = PTR_ERR(rx_q->page_pool);
2023 		rx_q->page_pool = NULL;
2024 		return ret;
2025 	}
2026 
2027 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2028 				 sizeof(*rx_q->buf_pool),
2029 				 GFP_KERNEL);
2030 	if (!rx_q->buf_pool)
2031 		return -ENOMEM;
2032 
2033 	if (priv->extend_desc) {
2034 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2035 						   dma_conf->dma_rx_size *
2036 						   sizeof(struct dma_extended_desc),
2037 						   &rx_q->dma_rx_phy,
2038 						   GFP_KERNEL);
2039 		if (!rx_q->dma_erx)
2040 			return -ENOMEM;
2041 
2042 	} else {
2043 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2044 						  dma_conf->dma_rx_size *
2045 						  sizeof(struct dma_desc),
2046 						  &rx_q->dma_rx_phy,
2047 						  GFP_KERNEL);
2048 		if (!rx_q->dma_rx)
2049 			return -ENOMEM;
2050 	}
2051 
2052 	if (stmmac_xdp_is_enabled(priv) &&
2053 	    test_bit(queue, priv->af_xdp_zc_qps))
2054 		napi_id = ch->rxtx_napi.napi_id;
2055 	else
2056 		napi_id = ch->rx_napi.napi_id;
2057 
2058 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2059 			       rx_q->queue_index,
2060 			       napi_id);
2061 	if (ret) {
2062 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2063 		return -EINVAL;
2064 	}
2065 
2066 	return 0;
2067 }
2068 
2069 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2070 				       struct stmmac_dma_conf *dma_conf)
2071 {
2072 	u32 rx_count = priv->plat->rx_queues_to_use;
2073 	u32 queue;
2074 	int ret;
2075 
2076 	/* RX queues buffers and DMA */
2077 	for (queue = 0; queue < rx_count; queue++) {
2078 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2079 		if (ret)
2080 			goto err_dma;
2081 	}
2082 
2083 	return 0;
2084 
2085 err_dma:
2086 	free_dma_rx_desc_resources(priv, dma_conf);
2087 
2088 	return ret;
2089 }
2090 
2091 /**
2092  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2093  * @priv: private structure
2094  * @dma_conf: structure to take the dma data
2095  * @queue: TX queue index
2096  * Description: according to which descriptor can be used (extend or basic)
2097  * this function allocates the resources for TX and RX paths. In case of
2098  * reception, for example, it pre-allocated the RX socket buffer in order to
2099  * allow zero-copy mechanism.
2100  */
2101 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2102 					 struct stmmac_dma_conf *dma_conf,
2103 					 u32 queue)
2104 {
2105 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2106 	size_t size;
2107 	void *addr;
2108 
2109 	tx_q->queue_index = queue;
2110 	tx_q->priv_data = priv;
2111 
2112 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2113 				      sizeof(*tx_q->tx_skbuff_dma),
2114 				      GFP_KERNEL);
2115 	if (!tx_q->tx_skbuff_dma)
2116 		return -ENOMEM;
2117 
2118 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2119 				  sizeof(struct sk_buff *),
2120 				  GFP_KERNEL);
2121 	if (!tx_q->tx_skbuff)
2122 		return -ENOMEM;
2123 
2124 	if (priv->extend_desc)
2125 		size = sizeof(struct dma_extended_desc);
2126 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2127 		size = sizeof(struct dma_edesc);
2128 	else
2129 		size = sizeof(struct dma_desc);
2130 
2131 	size *= dma_conf->dma_tx_size;
2132 
2133 	addr = dma_alloc_coherent(priv->device, size,
2134 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2135 	if (!addr)
2136 		return -ENOMEM;
2137 
2138 	if (priv->extend_desc)
2139 		tx_q->dma_etx = addr;
2140 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2141 		tx_q->dma_entx = addr;
2142 	else
2143 		tx_q->dma_tx = addr;
2144 
2145 	return 0;
2146 }
2147 
2148 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2149 				       struct stmmac_dma_conf *dma_conf)
2150 {
2151 	u32 tx_count = priv->plat->tx_queues_to_use;
2152 	u32 queue;
2153 	int ret;
2154 
2155 	/* TX queues buffers and DMA */
2156 	for (queue = 0; queue < tx_count; queue++) {
2157 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2158 		if (ret)
2159 			goto err_dma;
2160 	}
2161 
2162 	return 0;
2163 
2164 err_dma:
2165 	free_dma_tx_desc_resources(priv, dma_conf);
2166 	return ret;
2167 }
2168 
2169 /**
2170  * alloc_dma_desc_resources - alloc TX/RX resources.
2171  * @priv: private structure
2172  * @dma_conf: structure to take the dma data
2173  * Description: according to which descriptor can be used (extend or basic)
2174  * this function allocates the resources for TX and RX paths. In case of
2175  * reception, for example, it pre-allocated the RX socket buffer in order to
2176  * allow zero-copy mechanism.
2177  */
2178 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2179 				    struct stmmac_dma_conf *dma_conf)
2180 {
2181 	/* RX Allocation */
2182 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2183 
2184 	if (ret)
2185 		return ret;
2186 
2187 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2188 
2189 	return ret;
2190 }
2191 
2192 /**
2193  * free_dma_desc_resources - free dma desc resources
2194  * @priv: private structure
2195  * @dma_conf: structure to take the dma data
2196  */
2197 static void free_dma_desc_resources(struct stmmac_priv *priv,
2198 				    struct stmmac_dma_conf *dma_conf)
2199 {
2200 	/* Release the DMA TX socket buffers */
2201 	free_dma_tx_desc_resources(priv, dma_conf);
2202 
2203 	/* Release the DMA RX socket buffers later
2204 	 * to ensure all pending XDP_TX buffers are returned.
2205 	 */
2206 	free_dma_rx_desc_resources(priv, dma_conf);
2207 }
2208 
2209 /**
2210  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2211  *  @priv: driver private structure
2212  *  Description: It is used for enabling the rx queues in the MAC
2213  */
2214 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2215 {
2216 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2217 	int queue;
2218 	u8 mode;
2219 
2220 	for (queue = 0; queue < rx_queues_count; queue++) {
2221 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2222 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2223 	}
2224 }
2225 
2226 /**
2227  * stmmac_start_rx_dma - start RX DMA channel
2228  * @priv: driver private structure
2229  * @chan: RX channel index
2230  * Description:
2231  * This starts a RX DMA channel
2232  */
2233 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2234 {
2235 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2236 	stmmac_start_rx(priv, priv->ioaddr, chan);
2237 }
2238 
2239 /**
2240  * stmmac_start_tx_dma - start TX DMA channel
2241  * @priv: driver private structure
2242  * @chan: TX channel index
2243  * Description:
2244  * This starts a TX DMA channel
2245  */
2246 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2247 {
2248 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2249 	stmmac_start_tx(priv, priv->ioaddr, chan);
2250 }
2251 
2252 /**
2253  * stmmac_stop_rx_dma - stop RX DMA channel
2254  * @priv: driver private structure
2255  * @chan: RX channel index
2256  * Description:
2257  * This stops a RX DMA channel
2258  */
2259 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2260 {
2261 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2262 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2263 }
2264 
2265 /**
2266  * stmmac_stop_tx_dma - stop TX DMA channel
2267  * @priv: driver private structure
2268  * @chan: TX channel index
2269  * Description:
2270  * This stops a TX DMA channel
2271  */
2272 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2273 {
2274 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2275 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2276 }
2277 
2278 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2279 {
2280 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2281 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2282 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2283 	u32 chan;
2284 
2285 	for (chan = 0; chan < dma_csr_ch; chan++) {
2286 		struct stmmac_channel *ch = &priv->channel[chan];
2287 		unsigned long flags;
2288 
2289 		spin_lock_irqsave(&ch->lock, flags);
2290 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2291 		spin_unlock_irqrestore(&ch->lock, flags);
2292 	}
2293 }
2294 
2295 /**
2296  * stmmac_start_all_dma - start all RX and TX DMA channels
2297  * @priv: driver private structure
2298  * Description:
2299  * This starts all the RX and TX DMA channels
2300  */
2301 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2302 {
2303 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2304 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2305 	u32 chan = 0;
2306 
2307 	for (chan = 0; chan < rx_channels_count; chan++)
2308 		stmmac_start_rx_dma(priv, chan);
2309 
2310 	for (chan = 0; chan < tx_channels_count; chan++)
2311 		stmmac_start_tx_dma(priv, chan);
2312 }
2313 
2314 /**
2315  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2316  * @priv: driver private structure
2317  * Description:
2318  * This stops the RX and TX DMA channels
2319  */
2320 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2321 {
2322 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2323 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2324 	u32 chan = 0;
2325 
2326 	for (chan = 0; chan < rx_channels_count; chan++)
2327 		stmmac_stop_rx_dma(priv, chan);
2328 
2329 	for (chan = 0; chan < tx_channels_count; chan++)
2330 		stmmac_stop_tx_dma(priv, chan);
2331 }
2332 
2333 /**
2334  *  stmmac_dma_operation_mode - HW DMA operation mode
2335  *  @priv: driver private structure
2336  *  Description: it is used for configuring the DMA operation mode register in
2337  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2338  */
2339 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2340 {
2341 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2342 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2343 	int rxfifosz = priv->plat->rx_fifo_size;
2344 	int txfifosz = priv->plat->tx_fifo_size;
2345 	u32 txmode = 0;
2346 	u32 rxmode = 0;
2347 	u32 chan = 0;
2348 	u8 qmode = 0;
2349 
2350 	if (rxfifosz == 0)
2351 		rxfifosz = priv->dma_cap.rx_fifo_size;
2352 	if (txfifosz == 0)
2353 		txfifosz = priv->dma_cap.tx_fifo_size;
2354 
2355 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2356 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2357 		rxfifosz /= rx_channels_count;
2358 		txfifosz /= tx_channels_count;
2359 	}
2360 
2361 	if (priv->plat->force_thresh_dma_mode) {
2362 		txmode = tc;
2363 		rxmode = tc;
2364 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2365 		/*
2366 		 * In case of GMAC, SF mode can be enabled
2367 		 * to perform the TX COE in HW. This depends on:
2368 		 * 1) TX COE if actually supported
2369 		 * 2) There is no bugged Jumbo frame support
2370 		 *    that needs to not insert csum in the TDES.
2371 		 */
2372 		txmode = SF_DMA_MODE;
2373 		rxmode = SF_DMA_MODE;
2374 		priv->xstats.threshold = SF_DMA_MODE;
2375 	} else {
2376 		txmode = tc;
2377 		rxmode = SF_DMA_MODE;
2378 	}
2379 
2380 	/* configure all channels */
2381 	for (chan = 0; chan < rx_channels_count; chan++) {
2382 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2383 		u32 buf_size;
2384 
2385 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2386 
2387 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2388 				rxfifosz, qmode);
2389 
2390 		if (rx_q->xsk_pool) {
2391 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2392 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2393 					      buf_size,
2394 					      chan);
2395 		} else {
2396 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2397 					      priv->dma_conf.dma_buf_sz,
2398 					      chan);
2399 		}
2400 	}
2401 
2402 	for (chan = 0; chan < tx_channels_count; chan++) {
2403 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2404 
2405 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2406 				txfifosz, qmode);
2407 	}
2408 }
2409 
2410 static void stmmac_xsk_request_timestamp(void *_priv)
2411 {
2412 	struct stmmac_metadata_request *meta_req = _priv;
2413 
2414 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2415 	*meta_req->set_ic = true;
2416 }
2417 
2418 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2419 {
2420 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2421 	struct stmmac_priv *priv = tx_compl->priv;
2422 	struct dma_desc *desc = tx_compl->desc;
2423 	bool found = false;
2424 	u64 ns = 0;
2425 
2426 	if (!priv->hwts_tx_en)
2427 		return 0;
2428 
2429 	/* check tx tstamp status */
2430 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2431 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2432 		found = true;
2433 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2434 		found = true;
2435 	}
2436 
2437 	if (found) {
2438 		ns -= priv->plat->cdc_error_adj;
2439 		return ns_to_ktime(ns);
2440 	}
2441 
2442 	return 0;
2443 }
2444 
2445 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2446 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2447 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2448 };
2449 
2450 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2451 {
2452 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2453 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2454 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2455 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2456 	unsigned int entry = tx_q->cur_tx;
2457 	struct dma_desc *tx_desc = NULL;
2458 	struct xdp_desc xdp_desc;
2459 	bool work_done = true;
2460 	u32 tx_set_ic_bit = 0;
2461 
2462 	/* Avoids TX time-out as we are sharing with slow path */
2463 	txq_trans_cond_update(nq);
2464 
2465 	budget = min(budget, stmmac_tx_avail(priv, queue));
2466 
2467 	while (budget-- > 0) {
2468 		struct stmmac_metadata_request meta_req;
2469 		struct xsk_tx_metadata *meta = NULL;
2470 		dma_addr_t dma_addr;
2471 		bool set_ic;
2472 
2473 		/* We are sharing with slow path and stop XSK TX desc submission when
2474 		 * available TX ring is less than threshold.
2475 		 */
2476 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2477 		    !netif_carrier_ok(priv->dev)) {
2478 			work_done = false;
2479 			break;
2480 		}
2481 
2482 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2483 			break;
2484 
2485 		if (priv->est && priv->est->enable &&
2486 		    priv->est->max_sdu[queue] &&
2487 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2488 			priv->xstats.max_sdu_txq_drop[queue]++;
2489 			continue;
2490 		}
2491 
2492 		if (likely(priv->extend_desc))
2493 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2494 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2495 			tx_desc = &tx_q->dma_entx[entry].basic;
2496 		else
2497 			tx_desc = tx_q->dma_tx + entry;
2498 
2499 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2500 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2501 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2502 
2503 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2504 
2505 		/* To return XDP buffer to XSK pool, we simple call
2506 		 * xsk_tx_completed(), so we don't need to fill up
2507 		 * 'buf' and 'xdpf'.
2508 		 */
2509 		tx_q->tx_skbuff_dma[entry].buf = 0;
2510 		tx_q->xdpf[entry] = NULL;
2511 
2512 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2513 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2514 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2515 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2516 
2517 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2518 
2519 		tx_q->tx_count_frames++;
2520 
2521 		if (!priv->tx_coal_frames[queue])
2522 			set_ic = false;
2523 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2524 			set_ic = true;
2525 		else
2526 			set_ic = false;
2527 
2528 		meta_req.priv = priv;
2529 		meta_req.tx_desc = tx_desc;
2530 		meta_req.set_ic = &set_ic;
2531 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2532 					&meta_req);
2533 		if (set_ic) {
2534 			tx_q->tx_count_frames = 0;
2535 			stmmac_set_tx_ic(priv, tx_desc);
2536 			tx_set_ic_bit++;
2537 		}
2538 
2539 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2540 				       true, priv->mode, true, true,
2541 				       xdp_desc.len);
2542 
2543 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2544 
2545 		xsk_tx_metadata_to_compl(meta,
2546 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2547 
2548 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2549 		entry = tx_q->cur_tx;
2550 	}
2551 	u64_stats_update_begin(&txq_stats->napi_syncp);
2552 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2553 	u64_stats_update_end(&txq_stats->napi_syncp);
2554 
2555 	if (tx_desc) {
2556 		stmmac_flush_tx_descriptors(priv, queue);
2557 		xsk_tx_release(pool);
2558 	}
2559 
2560 	/* Return true if all of the 3 conditions are met
2561 	 *  a) TX Budget is still available
2562 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2563 	 *     pending XSK TX for transmission)
2564 	 */
2565 	return !!budget && work_done;
2566 }
2567 
2568 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2569 {
2570 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2571 		tc += 64;
2572 
2573 		if (priv->plat->force_thresh_dma_mode)
2574 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2575 		else
2576 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2577 						      chan);
2578 
2579 		priv->xstats.threshold = tc;
2580 	}
2581 }
2582 
2583 /**
2584  * stmmac_tx_clean - to manage the transmission completion
2585  * @priv: driver private structure
2586  * @budget: napi budget limiting this functions packet handling
2587  * @queue: TX queue index
2588  * @pending_packets: signal to arm the TX coal timer
2589  * Description: it reclaims the transmit resources after transmission completes.
2590  * If some packets still needs to be handled, due to TX coalesce, set
2591  * pending_packets to true to make NAPI arm the TX coal timer.
2592  */
2593 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2594 			   bool *pending_packets)
2595 {
2596 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2597 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2598 	unsigned int bytes_compl = 0, pkts_compl = 0;
2599 	unsigned int entry, xmits = 0, count = 0;
2600 	u32 tx_packets = 0, tx_errors = 0;
2601 
2602 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2603 
2604 	tx_q->xsk_frames_done = 0;
2605 
2606 	entry = tx_q->dirty_tx;
2607 
2608 	/* Try to clean all TX complete frame in 1 shot */
2609 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2610 		struct xdp_frame *xdpf;
2611 		struct sk_buff *skb;
2612 		struct dma_desc *p;
2613 		int status;
2614 
2615 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2616 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2617 			xdpf = tx_q->xdpf[entry];
2618 			skb = NULL;
2619 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2620 			xdpf = NULL;
2621 			skb = tx_q->tx_skbuff[entry];
2622 		} else {
2623 			xdpf = NULL;
2624 			skb = NULL;
2625 		}
2626 
2627 		if (priv->extend_desc)
2628 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2629 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2630 			p = &tx_q->dma_entx[entry].basic;
2631 		else
2632 			p = tx_q->dma_tx + entry;
2633 
2634 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2635 		/* Check if the descriptor is owned by the DMA */
2636 		if (unlikely(status & tx_dma_own))
2637 			break;
2638 
2639 		count++;
2640 
2641 		/* Make sure descriptor fields are read after reading
2642 		 * the own bit.
2643 		 */
2644 		dma_rmb();
2645 
2646 		/* Just consider the last segment and ...*/
2647 		if (likely(!(status & tx_not_ls))) {
2648 			/* ... verify the status error condition */
2649 			if (unlikely(status & tx_err)) {
2650 				tx_errors++;
2651 				if (unlikely(status & tx_err_bump_tc))
2652 					stmmac_bump_dma_threshold(priv, queue);
2653 			} else {
2654 				tx_packets++;
2655 			}
2656 			if (skb) {
2657 				stmmac_get_tx_hwtstamp(priv, p, skb);
2658 			} else if (tx_q->xsk_pool &&
2659 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2660 				struct stmmac_xsk_tx_complete tx_compl = {
2661 					.priv = priv,
2662 					.desc = p,
2663 				};
2664 
2665 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2666 							 &stmmac_xsk_tx_metadata_ops,
2667 							 &tx_compl);
2668 			}
2669 		}
2670 
2671 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2672 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2673 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2674 				dma_unmap_page(priv->device,
2675 					       tx_q->tx_skbuff_dma[entry].buf,
2676 					       tx_q->tx_skbuff_dma[entry].len,
2677 					       DMA_TO_DEVICE);
2678 			else
2679 				dma_unmap_single(priv->device,
2680 						 tx_q->tx_skbuff_dma[entry].buf,
2681 						 tx_q->tx_skbuff_dma[entry].len,
2682 						 DMA_TO_DEVICE);
2683 			tx_q->tx_skbuff_dma[entry].buf = 0;
2684 			tx_q->tx_skbuff_dma[entry].len = 0;
2685 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2686 		}
2687 
2688 		stmmac_clean_desc3(priv, tx_q, p);
2689 
2690 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2691 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2692 
2693 		if (xdpf &&
2694 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2695 			xdp_return_frame_rx_napi(xdpf);
2696 			tx_q->xdpf[entry] = NULL;
2697 		}
2698 
2699 		if (xdpf &&
2700 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2701 			xdp_return_frame(xdpf);
2702 			tx_q->xdpf[entry] = NULL;
2703 		}
2704 
2705 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2706 			tx_q->xsk_frames_done++;
2707 
2708 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2709 			if (likely(skb)) {
2710 				pkts_compl++;
2711 				bytes_compl += skb->len;
2712 				dev_consume_skb_any(skb);
2713 				tx_q->tx_skbuff[entry] = NULL;
2714 			}
2715 		}
2716 
2717 		stmmac_release_tx_desc(priv, p, priv->mode);
2718 
2719 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2720 	}
2721 	tx_q->dirty_tx = entry;
2722 
2723 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2724 				  pkts_compl, bytes_compl);
2725 
2726 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2727 								queue))) &&
2728 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2729 
2730 		netif_dbg(priv, tx_done, priv->dev,
2731 			  "%s: restart transmit\n", __func__);
2732 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2733 	}
2734 
2735 	if (tx_q->xsk_pool) {
2736 		bool work_done;
2737 
2738 		if (tx_q->xsk_frames_done)
2739 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2740 
2741 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2742 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2743 
2744 		/* For XSK TX, we try to send as many as possible.
2745 		 * If XSK work done (XSK TX desc empty and budget still
2746 		 * available), return "budget - 1" to reenable TX IRQ.
2747 		 * Else, return "budget" to make NAPI continue polling.
2748 		 */
2749 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2750 					       STMMAC_XSK_TX_BUDGET_MAX);
2751 		if (work_done)
2752 			xmits = budget - 1;
2753 		else
2754 			xmits = budget;
2755 	}
2756 
2757 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2758 	    priv->eee_sw_timer_en) {
2759 		if (stmmac_enable_eee_mode(priv))
2760 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2761 	}
2762 
2763 	/* We still have pending packets, let's call for a new scheduling */
2764 	if (tx_q->dirty_tx != tx_q->cur_tx)
2765 		*pending_packets = true;
2766 
2767 	u64_stats_update_begin(&txq_stats->napi_syncp);
2768 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2769 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2770 	u64_stats_inc(&txq_stats->napi.tx_clean);
2771 	u64_stats_update_end(&txq_stats->napi_syncp);
2772 
2773 	priv->xstats.tx_errors += tx_errors;
2774 
2775 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2776 
2777 	/* Combine decisions from TX clean and XSK TX */
2778 	return max(count, xmits);
2779 }
2780 
2781 /**
2782  * stmmac_tx_err - to manage the tx error
2783  * @priv: driver private structure
2784  * @chan: channel index
2785  * Description: it cleans the descriptors and restarts the transmission
2786  * in case of transmission errors.
2787  */
2788 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2789 {
2790 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2791 
2792 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2793 
2794 	stmmac_stop_tx_dma(priv, chan);
2795 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2796 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2797 	stmmac_reset_tx_queue(priv, chan);
2798 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2799 			    tx_q->dma_tx_phy, chan);
2800 	stmmac_start_tx_dma(priv, chan);
2801 
2802 	priv->xstats.tx_errors++;
2803 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2804 }
2805 
2806 /**
2807  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2808  *  @priv: driver private structure
2809  *  @txmode: TX operating mode
2810  *  @rxmode: RX operating mode
2811  *  @chan: channel index
2812  *  Description: it is used for configuring of the DMA operation mode in
2813  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2814  *  mode.
2815  */
2816 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2817 					  u32 rxmode, u32 chan)
2818 {
2819 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2820 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2821 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2822 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2823 	int rxfifosz = priv->plat->rx_fifo_size;
2824 	int txfifosz = priv->plat->tx_fifo_size;
2825 
2826 	if (rxfifosz == 0)
2827 		rxfifosz = priv->dma_cap.rx_fifo_size;
2828 	if (txfifosz == 0)
2829 		txfifosz = priv->dma_cap.tx_fifo_size;
2830 
2831 	/* Adjust for real per queue fifo size */
2832 	rxfifosz /= rx_channels_count;
2833 	txfifosz /= tx_channels_count;
2834 
2835 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2836 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2837 }
2838 
2839 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2840 {
2841 	int ret;
2842 
2843 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2844 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2845 	if (ret && (ret != -EINVAL)) {
2846 		stmmac_global_err(priv);
2847 		return true;
2848 	}
2849 
2850 	return false;
2851 }
2852 
2853 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2854 {
2855 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2856 						 &priv->xstats, chan, dir);
2857 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2858 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2859 	struct stmmac_channel *ch = &priv->channel[chan];
2860 	struct napi_struct *rx_napi;
2861 	struct napi_struct *tx_napi;
2862 	unsigned long flags;
2863 
2864 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2865 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2866 
2867 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2868 		if (napi_schedule_prep(rx_napi)) {
2869 			spin_lock_irqsave(&ch->lock, flags);
2870 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2871 			spin_unlock_irqrestore(&ch->lock, flags);
2872 			__napi_schedule(rx_napi);
2873 		}
2874 	}
2875 
2876 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2877 		if (napi_schedule_prep(tx_napi)) {
2878 			spin_lock_irqsave(&ch->lock, flags);
2879 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2880 			spin_unlock_irqrestore(&ch->lock, flags);
2881 			__napi_schedule(tx_napi);
2882 		}
2883 	}
2884 
2885 	return status;
2886 }
2887 
2888 /**
2889  * stmmac_dma_interrupt - DMA ISR
2890  * @priv: driver private structure
2891  * Description: this is the DMA ISR. It is called by the main ISR.
2892  * It calls the dwmac dma routine and schedule poll method in case of some
2893  * work can be done.
2894  */
2895 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2896 {
2897 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2898 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2899 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2900 				tx_channel_count : rx_channel_count;
2901 	u32 chan;
2902 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2903 
2904 	/* Make sure we never check beyond our status buffer. */
2905 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2906 		channels_to_check = ARRAY_SIZE(status);
2907 
2908 	for (chan = 0; chan < channels_to_check; chan++)
2909 		status[chan] = stmmac_napi_check(priv, chan,
2910 						 DMA_DIR_RXTX);
2911 
2912 	for (chan = 0; chan < tx_channel_count; chan++) {
2913 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2914 			/* Try to bump up the dma threshold on this failure */
2915 			stmmac_bump_dma_threshold(priv, chan);
2916 		} else if (unlikely(status[chan] == tx_hard_error)) {
2917 			stmmac_tx_err(priv, chan);
2918 		}
2919 	}
2920 }
2921 
2922 /**
2923  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2924  * @priv: driver private structure
2925  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2926  */
2927 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2928 {
2929 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2930 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2931 
2932 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2933 
2934 	if (priv->dma_cap.rmon) {
2935 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2936 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2937 	} else
2938 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2939 }
2940 
2941 /**
2942  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2943  * @priv: driver private structure
2944  * Description:
2945  *  new GMAC chip generations have a new register to indicate the
2946  *  presence of the optional feature/functions.
2947  *  This can be also used to override the value passed through the
2948  *  platform and necessary for old MAC10/100 and GMAC chips.
2949  */
2950 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2951 {
2952 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2953 }
2954 
2955 /**
2956  * stmmac_check_ether_addr - check if the MAC addr is valid
2957  * @priv: driver private structure
2958  * Description:
2959  * it is to verify if the MAC address is valid, in case of failures it
2960  * generates a random MAC address
2961  */
2962 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2963 {
2964 	u8 addr[ETH_ALEN];
2965 
2966 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2967 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2968 		if (is_valid_ether_addr(addr))
2969 			eth_hw_addr_set(priv->dev, addr);
2970 		else
2971 			eth_hw_addr_random(priv->dev);
2972 		dev_info(priv->device, "device MAC address %pM\n",
2973 			 priv->dev->dev_addr);
2974 	}
2975 }
2976 
2977 /**
2978  * stmmac_init_dma_engine - DMA init.
2979  * @priv: driver private structure
2980  * Description:
2981  * It inits the DMA invoking the specific MAC/GMAC callback.
2982  * Some DMA parameters can be passed from the platform;
2983  * in case of these are not passed a default is kept for the MAC or GMAC.
2984  */
2985 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2986 {
2987 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2988 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2989 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2990 	struct stmmac_rx_queue *rx_q;
2991 	struct stmmac_tx_queue *tx_q;
2992 	u32 chan = 0;
2993 	int ret = 0;
2994 
2995 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2996 		dev_err(priv->device, "Invalid DMA configuration\n");
2997 		return -EINVAL;
2998 	}
2999 
3000 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3001 		priv->plat->dma_cfg->atds = 1;
3002 
3003 	ret = stmmac_reset(priv, priv->ioaddr);
3004 	if (ret) {
3005 		dev_err(priv->device, "Failed to reset the dma\n");
3006 		return ret;
3007 	}
3008 
3009 	/* DMA Configuration */
3010 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3011 
3012 	if (priv->plat->axi)
3013 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3014 
3015 	/* DMA CSR Channel configuration */
3016 	for (chan = 0; chan < dma_csr_ch; chan++) {
3017 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3018 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3019 	}
3020 
3021 	/* DMA RX Channel Configuration */
3022 	for (chan = 0; chan < rx_channels_count; chan++) {
3023 		rx_q = &priv->dma_conf.rx_queue[chan];
3024 
3025 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3026 				    rx_q->dma_rx_phy, chan);
3027 
3028 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3029 				     (rx_q->buf_alloc_num *
3030 				      sizeof(struct dma_desc));
3031 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3032 				       rx_q->rx_tail_addr, chan);
3033 	}
3034 
3035 	/* DMA TX Channel Configuration */
3036 	for (chan = 0; chan < tx_channels_count; chan++) {
3037 		tx_q = &priv->dma_conf.tx_queue[chan];
3038 
3039 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3040 				    tx_q->dma_tx_phy, chan);
3041 
3042 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3043 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3044 				       tx_q->tx_tail_addr, chan);
3045 	}
3046 
3047 	return ret;
3048 }
3049 
3050 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3051 {
3052 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3053 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3054 	struct stmmac_channel *ch;
3055 	struct napi_struct *napi;
3056 
3057 	if (!tx_coal_timer)
3058 		return;
3059 
3060 	ch = &priv->channel[tx_q->queue_index];
3061 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3062 
3063 	/* Arm timer only if napi is not already scheduled.
3064 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3065 	 * again in the next scheduled napi.
3066 	 */
3067 	if (unlikely(!napi_is_scheduled(napi)))
3068 		hrtimer_start(&tx_q->txtimer,
3069 			      STMMAC_COAL_TIMER(tx_coal_timer),
3070 			      HRTIMER_MODE_REL);
3071 	else
3072 		hrtimer_try_to_cancel(&tx_q->txtimer);
3073 }
3074 
3075 /**
3076  * stmmac_tx_timer - mitigation sw timer for tx.
3077  * @t: data pointer
3078  * Description:
3079  * This is the timer handler to directly invoke the stmmac_tx_clean.
3080  */
3081 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3082 {
3083 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3084 	struct stmmac_priv *priv = tx_q->priv_data;
3085 	struct stmmac_channel *ch;
3086 	struct napi_struct *napi;
3087 
3088 	ch = &priv->channel[tx_q->queue_index];
3089 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3090 
3091 	if (likely(napi_schedule_prep(napi))) {
3092 		unsigned long flags;
3093 
3094 		spin_lock_irqsave(&ch->lock, flags);
3095 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3096 		spin_unlock_irqrestore(&ch->lock, flags);
3097 		__napi_schedule(napi);
3098 	}
3099 
3100 	return HRTIMER_NORESTART;
3101 }
3102 
3103 /**
3104  * stmmac_init_coalesce - init mitigation options.
3105  * @priv: driver private structure
3106  * Description:
3107  * This inits the coalesce parameters: i.e. timer rate,
3108  * timer handler and default threshold used for enabling the
3109  * interrupt on completion bit.
3110  */
3111 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3112 {
3113 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3114 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3115 	u32 chan;
3116 
3117 	for (chan = 0; chan < tx_channel_count; chan++) {
3118 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3119 
3120 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3121 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3122 
3123 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3124 		tx_q->txtimer.function = stmmac_tx_timer;
3125 	}
3126 
3127 	for (chan = 0; chan < rx_channel_count; chan++)
3128 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3129 }
3130 
3131 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3132 {
3133 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3134 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3135 	u32 chan;
3136 
3137 	/* set TX ring length */
3138 	for (chan = 0; chan < tx_channels_count; chan++)
3139 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3140 				       (priv->dma_conf.dma_tx_size - 1), chan);
3141 
3142 	/* set RX ring length */
3143 	for (chan = 0; chan < rx_channels_count; chan++)
3144 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3145 				       (priv->dma_conf.dma_rx_size - 1), chan);
3146 }
3147 
3148 /**
3149  *  stmmac_set_tx_queue_weight - Set TX queue weight
3150  *  @priv: driver private structure
3151  *  Description: It is used for setting TX queues weight
3152  */
3153 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3154 {
3155 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3156 	u32 weight;
3157 	u32 queue;
3158 
3159 	for (queue = 0; queue < tx_queues_count; queue++) {
3160 		weight = priv->plat->tx_queues_cfg[queue].weight;
3161 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3162 	}
3163 }
3164 
3165 /**
3166  *  stmmac_configure_cbs - Configure CBS in TX queue
3167  *  @priv: driver private structure
3168  *  Description: It is used for configuring CBS in AVB TX queues
3169  */
3170 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3171 {
3172 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3173 	u32 mode_to_use;
3174 	u32 queue;
3175 
3176 	/* queue 0 is reserved for legacy traffic */
3177 	for (queue = 1; queue < tx_queues_count; queue++) {
3178 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3179 		if (mode_to_use == MTL_QUEUE_DCB)
3180 			continue;
3181 
3182 		stmmac_config_cbs(priv, priv->hw,
3183 				priv->plat->tx_queues_cfg[queue].send_slope,
3184 				priv->plat->tx_queues_cfg[queue].idle_slope,
3185 				priv->plat->tx_queues_cfg[queue].high_credit,
3186 				priv->plat->tx_queues_cfg[queue].low_credit,
3187 				queue);
3188 	}
3189 }
3190 
3191 /**
3192  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3193  *  @priv: driver private structure
3194  *  Description: It is used for mapping RX queues to RX dma channels
3195  */
3196 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3197 {
3198 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3199 	u32 queue;
3200 	u32 chan;
3201 
3202 	for (queue = 0; queue < rx_queues_count; queue++) {
3203 		chan = priv->plat->rx_queues_cfg[queue].chan;
3204 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3205 	}
3206 }
3207 
3208 /**
3209  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3210  *  @priv: driver private structure
3211  *  Description: It is used for configuring the RX Queue Priority
3212  */
3213 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3214 {
3215 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3216 	u32 queue;
3217 	u32 prio;
3218 
3219 	for (queue = 0; queue < rx_queues_count; queue++) {
3220 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3221 			continue;
3222 
3223 		prio = priv->plat->rx_queues_cfg[queue].prio;
3224 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3225 	}
3226 }
3227 
3228 /**
3229  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3230  *  @priv: driver private structure
3231  *  Description: It is used for configuring the TX Queue Priority
3232  */
3233 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3234 {
3235 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3236 	u32 queue;
3237 	u32 prio;
3238 
3239 	for (queue = 0; queue < tx_queues_count; queue++) {
3240 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3241 			continue;
3242 
3243 		prio = priv->plat->tx_queues_cfg[queue].prio;
3244 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3245 	}
3246 }
3247 
3248 /**
3249  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3250  *  @priv: driver private structure
3251  *  Description: It is used for configuring the RX queue routing
3252  */
3253 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3254 {
3255 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3256 	u32 queue;
3257 	u8 packet;
3258 
3259 	for (queue = 0; queue < rx_queues_count; queue++) {
3260 		/* no specific packet type routing specified for the queue */
3261 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3262 			continue;
3263 
3264 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3265 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3266 	}
3267 }
3268 
3269 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3270 {
3271 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3272 		priv->rss.enable = false;
3273 		return;
3274 	}
3275 
3276 	if (priv->dev->features & NETIF_F_RXHASH)
3277 		priv->rss.enable = true;
3278 	else
3279 		priv->rss.enable = false;
3280 
3281 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3282 			     priv->plat->rx_queues_to_use);
3283 }
3284 
3285 /**
3286  *  stmmac_mtl_configuration - Configure MTL
3287  *  @priv: driver private structure
3288  *  Description: It is used for configurring MTL
3289  */
3290 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3291 {
3292 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3293 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3294 
3295 	if (tx_queues_count > 1)
3296 		stmmac_set_tx_queue_weight(priv);
3297 
3298 	/* Configure MTL RX algorithms */
3299 	if (rx_queues_count > 1)
3300 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3301 				priv->plat->rx_sched_algorithm);
3302 
3303 	/* Configure MTL TX algorithms */
3304 	if (tx_queues_count > 1)
3305 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3306 				priv->plat->tx_sched_algorithm);
3307 
3308 	/* Configure CBS in AVB TX queues */
3309 	if (tx_queues_count > 1)
3310 		stmmac_configure_cbs(priv);
3311 
3312 	/* Map RX MTL to DMA channels */
3313 	stmmac_rx_queue_dma_chan_map(priv);
3314 
3315 	/* Enable MAC RX Queues */
3316 	stmmac_mac_enable_rx_queues(priv);
3317 
3318 	/* Set RX priorities */
3319 	if (rx_queues_count > 1)
3320 		stmmac_mac_config_rx_queues_prio(priv);
3321 
3322 	/* Set TX priorities */
3323 	if (tx_queues_count > 1)
3324 		stmmac_mac_config_tx_queues_prio(priv);
3325 
3326 	/* Set RX routing */
3327 	if (rx_queues_count > 1)
3328 		stmmac_mac_config_rx_queues_routing(priv);
3329 
3330 	/* Receive Side Scaling */
3331 	if (rx_queues_count > 1)
3332 		stmmac_mac_config_rss(priv);
3333 }
3334 
3335 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3336 {
3337 	if (priv->dma_cap.asp) {
3338 		netdev_info(priv->dev, "Enabling Safety Features\n");
3339 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3340 					  priv->plat->safety_feat_cfg);
3341 	} else {
3342 		netdev_info(priv->dev, "No Safety Features support found\n");
3343 	}
3344 }
3345 
3346 /**
3347  * stmmac_hw_setup - setup mac in a usable state.
3348  *  @dev : pointer to the device structure.
3349  *  @ptp_register: register PTP if set
3350  *  Description:
3351  *  this is the main function to setup the HW in a usable state because the
3352  *  dma engine is reset, the core registers are configured (e.g. AXI,
3353  *  Checksum features, timers). The DMA is ready to start receiving and
3354  *  transmitting.
3355  *  Return value:
3356  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3357  *  file on failure.
3358  */
3359 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3360 {
3361 	struct stmmac_priv *priv = netdev_priv(dev);
3362 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3363 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3364 	bool sph_en;
3365 	u32 chan;
3366 	int ret;
3367 
3368 	/* Make sure RX clock is enabled */
3369 	if (priv->hw->phylink_pcs)
3370 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3371 
3372 	/* DMA initialization and SW reset */
3373 	ret = stmmac_init_dma_engine(priv);
3374 	if (ret < 0) {
3375 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3376 			   __func__);
3377 		return ret;
3378 	}
3379 
3380 	/* Copy the MAC addr into the HW  */
3381 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3382 
3383 	/* PS and related bits will be programmed according to the speed */
3384 	if (priv->hw->pcs) {
3385 		int speed = priv->plat->mac_port_sel_speed;
3386 
3387 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3388 		    (speed == SPEED_1000)) {
3389 			priv->hw->ps = speed;
3390 		} else {
3391 			dev_warn(priv->device, "invalid port speed\n");
3392 			priv->hw->ps = 0;
3393 		}
3394 	}
3395 
3396 	/* Initialize the MAC Core */
3397 	stmmac_core_init(priv, priv->hw, dev);
3398 
3399 	/* Initialize MTL*/
3400 	stmmac_mtl_configuration(priv);
3401 
3402 	/* Initialize Safety Features */
3403 	stmmac_safety_feat_configuration(priv);
3404 
3405 	ret = stmmac_rx_ipc(priv, priv->hw);
3406 	if (!ret) {
3407 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3408 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3409 		priv->hw->rx_csum = 0;
3410 	}
3411 
3412 	/* Enable the MAC Rx/Tx */
3413 	stmmac_mac_set(priv, priv->ioaddr, true);
3414 
3415 	/* Set the HW DMA mode and the COE */
3416 	stmmac_dma_operation_mode(priv);
3417 
3418 	stmmac_mmc_setup(priv);
3419 
3420 	if (ptp_register) {
3421 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3422 		if (ret < 0)
3423 			netdev_warn(priv->dev,
3424 				    "failed to enable PTP reference clock: %pe\n",
3425 				    ERR_PTR(ret));
3426 	}
3427 
3428 	ret = stmmac_init_ptp(priv);
3429 	if (ret == -EOPNOTSUPP)
3430 		netdev_info(priv->dev, "PTP not supported by HW\n");
3431 	else if (ret)
3432 		netdev_warn(priv->dev, "PTP init failed\n");
3433 	else if (ptp_register)
3434 		stmmac_ptp_register(priv);
3435 
3436 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3437 
3438 	/* Convert the timer from msec to usec */
3439 	if (!priv->tx_lpi_timer)
3440 		priv->tx_lpi_timer = eee_timer * 1000;
3441 
3442 	if (priv->use_riwt) {
3443 		u32 queue;
3444 
3445 		for (queue = 0; queue < rx_cnt; queue++) {
3446 			if (!priv->rx_riwt[queue])
3447 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3448 
3449 			stmmac_rx_watchdog(priv, priv->ioaddr,
3450 					   priv->rx_riwt[queue], queue);
3451 		}
3452 	}
3453 
3454 	if (priv->hw->pcs)
3455 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3456 
3457 	/* set TX and RX rings length */
3458 	stmmac_set_rings_length(priv);
3459 
3460 	/* Enable TSO */
3461 	if (priv->tso) {
3462 		for (chan = 0; chan < tx_cnt; chan++) {
3463 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3464 
3465 			/* TSO and TBS cannot co-exist */
3466 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3467 				continue;
3468 
3469 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3470 		}
3471 	}
3472 
3473 	/* Enable Split Header */
3474 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3475 	for (chan = 0; chan < rx_cnt; chan++)
3476 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3477 
3478 
3479 	/* VLAN Tag Insertion */
3480 	if (priv->dma_cap.vlins)
3481 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3482 
3483 	/* TBS */
3484 	for (chan = 0; chan < tx_cnt; chan++) {
3485 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3486 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3487 
3488 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3489 	}
3490 
3491 	/* Configure real RX and TX queues */
3492 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3493 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3494 
3495 	/* Start the ball rolling... */
3496 	stmmac_start_all_dma(priv);
3497 
3498 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3499 
3500 	return 0;
3501 }
3502 
3503 static void stmmac_hw_teardown(struct net_device *dev)
3504 {
3505 	struct stmmac_priv *priv = netdev_priv(dev);
3506 
3507 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3508 }
3509 
3510 static void stmmac_free_irq(struct net_device *dev,
3511 			    enum request_irq_err irq_err, int irq_idx)
3512 {
3513 	struct stmmac_priv *priv = netdev_priv(dev);
3514 	int j;
3515 
3516 	switch (irq_err) {
3517 	case REQ_IRQ_ERR_ALL:
3518 		irq_idx = priv->plat->tx_queues_to_use;
3519 		fallthrough;
3520 	case REQ_IRQ_ERR_TX:
3521 		for (j = irq_idx - 1; j >= 0; j--) {
3522 			if (priv->tx_irq[j] > 0) {
3523 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3524 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3525 			}
3526 		}
3527 		irq_idx = priv->plat->rx_queues_to_use;
3528 		fallthrough;
3529 	case REQ_IRQ_ERR_RX:
3530 		for (j = irq_idx - 1; j >= 0; j--) {
3531 			if (priv->rx_irq[j] > 0) {
3532 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3533 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3534 			}
3535 		}
3536 
3537 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3538 			free_irq(priv->sfty_ue_irq, dev);
3539 		fallthrough;
3540 	case REQ_IRQ_ERR_SFTY_UE:
3541 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3542 			free_irq(priv->sfty_ce_irq, dev);
3543 		fallthrough;
3544 	case REQ_IRQ_ERR_SFTY_CE:
3545 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3546 			free_irq(priv->lpi_irq, dev);
3547 		fallthrough;
3548 	case REQ_IRQ_ERR_LPI:
3549 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3550 			free_irq(priv->wol_irq, dev);
3551 		fallthrough;
3552 	case REQ_IRQ_ERR_SFTY:
3553 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3554 			free_irq(priv->sfty_irq, dev);
3555 		fallthrough;
3556 	case REQ_IRQ_ERR_WOL:
3557 		free_irq(dev->irq, dev);
3558 		fallthrough;
3559 	case REQ_IRQ_ERR_MAC:
3560 	case REQ_IRQ_ERR_NO:
3561 		/* If MAC IRQ request error, no more IRQ to free */
3562 		break;
3563 	}
3564 }
3565 
3566 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3567 {
3568 	struct stmmac_priv *priv = netdev_priv(dev);
3569 	enum request_irq_err irq_err;
3570 	cpumask_t cpu_mask;
3571 	int irq_idx = 0;
3572 	char *int_name;
3573 	int ret;
3574 	int i;
3575 
3576 	/* For common interrupt */
3577 	int_name = priv->int_name_mac;
3578 	sprintf(int_name, "%s:%s", dev->name, "mac");
3579 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3580 			  0, int_name, dev);
3581 	if (unlikely(ret < 0)) {
3582 		netdev_err(priv->dev,
3583 			   "%s: alloc mac MSI %d (error: %d)\n",
3584 			   __func__, dev->irq, ret);
3585 		irq_err = REQ_IRQ_ERR_MAC;
3586 		goto irq_error;
3587 	}
3588 
3589 	/* Request the Wake IRQ in case of another line
3590 	 * is used for WoL
3591 	 */
3592 	priv->wol_irq_disabled = true;
3593 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3594 		int_name = priv->int_name_wol;
3595 		sprintf(int_name, "%s:%s", dev->name, "wol");
3596 		ret = request_irq(priv->wol_irq,
3597 				  stmmac_mac_interrupt,
3598 				  0, int_name, dev);
3599 		if (unlikely(ret < 0)) {
3600 			netdev_err(priv->dev,
3601 				   "%s: alloc wol MSI %d (error: %d)\n",
3602 				   __func__, priv->wol_irq, ret);
3603 			irq_err = REQ_IRQ_ERR_WOL;
3604 			goto irq_error;
3605 		}
3606 	}
3607 
3608 	/* Request the LPI IRQ in case of another line
3609 	 * is used for LPI
3610 	 */
3611 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3612 		int_name = priv->int_name_lpi;
3613 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3614 		ret = request_irq(priv->lpi_irq,
3615 				  stmmac_mac_interrupt,
3616 				  0, int_name, dev);
3617 		if (unlikely(ret < 0)) {
3618 			netdev_err(priv->dev,
3619 				   "%s: alloc lpi MSI %d (error: %d)\n",
3620 				   __func__, priv->lpi_irq, ret);
3621 			irq_err = REQ_IRQ_ERR_LPI;
3622 			goto irq_error;
3623 		}
3624 	}
3625 
3626 	/* Request the common Safety Feature Correctible/Uncorrectible
3627 	 * Error line in case of another line is used
3628 	 */
3629 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3630 		int_name = priv->int_name_sfty;
3631 		sprintf(int_name, "%s:%s", dev->name, "safety");
3632 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3633 				  0, int_name, dev);
3634 		if (unlikely(ret < 0)) {
3635 			netdev_err(priv->dev,
3636 				   "%s: alloc sfty MSI %d (error: %d)\n",
3637 				   __func__, priv->sfty_irq, ret);
3638 			irq_err = REQ_IRQ_ERR_SFTY;
3639 			goto irq_error;
3640 		}
3641 	}
3642 
3643 	/* Request the Safety Feature Correctible Error line in
3644 	 * case of another line is used
3645 	 */
3646 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3647 		int_name = priv->int_name_sfty_ce;
3648 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3649 		ret = request_irq(priv->sfty_ce_irq,
3650 				  stmmac_safety_interrupt,
3651 				  0, int_name, dev);
3652 		if (unlikely(ret < 0)) {
3653 			netdev_err(priv->dev,
3654 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3655 				   __func__, priv->sfty_ce_irq, ret);
3656 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3657 			goto irq_error;
3658 		}
3659 	}
3660 
3661 	/* Request the Safety Feature Uncorrectible Error line in
3662 	 * case of another line is used
3663 	 */
3664 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3665 		int_name = priv->int_name_sfty_ue;
3666 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3667 		ret = request_irq(priv->sfty_ue_irq,
3668 				  stmmac_safety_interrupt,
3669 				  0, int_name, dev);
3670 		if (unlikely(ret < 0)) {
3671 			netdev_err(priv->dev,
3672 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3673 				   __func__, priv->sfty_ue_irq, ret);
3674 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3675 			goto irq_error;
3676 		}
3677 	}
3678 
3679 	/* Request Rx MSI irq */
3680 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3681 		if (i >= MTL_MAX_RX_QUEUES)
3682 			break;
3683 		if (priv->rx_irq[i] == 0)
3684 			continue;
3685 
3686 		int_name = priv->int_name_rx_irq[i];
3687 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3688 		ret = request_irq(priv->rx_irq[i],
3689 				  stmmac_msi_intr_rx,
3690 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3691 		if (unlikely(ret < 0)) {
3692 			netdev_err(priv->dev,
3693 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3694 				   __func__, i, priv->rx_irq[i], ret);
3695 			irq_err = REQ_IRQ_ERR_RX;
3696 			irq_idx = i;
3697 			goto irq_error;
3698 		}
3699 		cpumask_clear(&cpu_mask);
3700 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3701 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3702 	}
3703 
3704 	/* Request Tx MSI irq */
3705 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3706 		if (i >= MTL_MAX_TX_QUEUES)
3707 			break;
3708 		if (priv->tx_irq[i] == 0)
3709 			continue;
3710 
3711 		int_name = priv->int_name_tx_irq[i];
3712 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3713 		ret = request_irq(priv->tx_irq[i],
3714 				  stmmac_msi_intr_tx,
3715 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3716 		if (unlikely(ret < 0)) {
3717 			netdev_err(priv->dev,
3718 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3719 				   __func__, i, priv->tx_irq[i], ret);
3720 			irq_err = REQ_IRQ_ERR_TX;
3721 			irq_idx = i;
3722 			goto irq_error;
3723 		}
3724 		cpumask_clear(&cpu_mask);
3725 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3726 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3727 	}
3728 
3729 	return 0;
3730 
3731 irq_error:
3732 	stmmac_free_irq(dev, irq_err, irq_idx);
3733 	return ret;
3734 }
3735 
3736 static int stmmac_request_irq_single(struct net_device *dev)
3737 {
3738 	struct stmmac_priv *priv = netdev_priv(dev);
3739 	enum request_irq_err irq_err;
3740 	int ret;
3741 
3742 	ret = request_irq(dev->irq, stmmac_interrupt,
3743 			  IRQF_SHARED, dev->name, dev);
3744 	if (unlikely(ret < 0)) {
3745 		netdev_err(priv->dev,
3746 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3747 			   __func__, dev->irq, ret);
3748 		irq_err = REQ_IRQ_ERR_MAC;
3749 		goto irq_error;
3750 	}
3751 
3752 	/* Request the Wake IRQ in case of another line
3753 	 * is used for WoL
3754 	 */
3755 	priv->wol_irq_disabled = true;
3756 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3757 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3758 				  IRQF_SHARED, dev->name, dev);
3759 		if (unlikely(ret < 0)) {
3760 			netdev_err(priv->dev,
3761 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3762 				   __func__, priv->wol_irq, ret);
3763 			irq_err = REQ_IRQ_ERR_WOL;
3764 			goto irq_error;
3765 		}
3766 	}
3767 
3768 	/* Request the IRQ lines */
3769 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3770 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3771 				  IRQF_SHARED, dev->name, dev);
3772 		if (unlikely(ret < 0)) {
3773 			netdev_err(priv->dev,
3774 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3775 				   __func__, priv->lpi_irq, ret);
3776 			irq_err = REQ_IRQ_ERR_LPI;
3777 			goto irq_error;
3778 		}
3779 	}
3780 
3781 	/* Request the common Safety Feature Correctible/Uncorrectible
3782 	 * Error line in case of another line is used
3783 	 */
3784 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3785 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3786 				  IRQF_SHARED, dev->name, dev);
3787 		if (unlikely(ret < 0)) {
3788 			netdev_err(priv->dev,
3789 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3790 				   __func__, priv->sfty_irq, ret);
3791 			irq_err = REQ_IRQ_ERR_SFTY;
3792 			goto irq_error;
3793 		}
3794 	}
3795 
3796 	return 0;
3797 
3798 irq_error:
3799 	stmmac_free_irq(dev, irq_err, 0);
3800 	return ret;
3801 }
3802 
3803 static int stmmac_request_irq(struct net_device *dev)
3804 {
3805 	struct stmmac_priv *priv = netdev_priv(dev);
3806 	int ret;
3807 
3808 	/* Request the IRQ lines */
3809 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3810 		ret = stmmac_request_irq_multi_msi(dev);
3811 	else
3812 		ret = stmmac_request_irq_single(dev);
3813 
3814 	return ret;
3815 }
3816 
3817 /**
3818  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3819  *  @priv: driver private structure
3820  *  @mtu: MTU to setup the dma queue and buf with
3821  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3822  *  Allocate the Tx/Rx DMA queue and init them.
3823  *  Return value:
3824  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3825  */
3826 static struct stmmac_dma_conf *
3827 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3828 {
3829 	struct stmmac_dma_conf *dma_conf;
3830 	int chan, bfsize, ret;
3831 
3832 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3833 	if (!dma_conf) {
3834 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3835 			   __func__);
3836 		return ERR_PTR(-ENOMEM);
3837 	}
3838 
3839 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3840 	if (bfsize < 0)
3841 		bfsize = 0;
3842 
3843 	if (bfsize < BUF_SIZE_16KiB)
3844 		bfsize = stmmac_set_bfsize(mtu, 0);
3845 
3846 	dma_conf->dma_buf_sz = bfsize;
3847 	/* Chose the tx/rx size from the already defined one in the
3848 	 * priv struct. (if defined)
3849 	 */
3850 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3851 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3852 
3853 	if (!dma_conf->dma_tx_size)
3854 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3855 	if (!dma_conf->dma_rx_size)
3856 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3857 
3858 	/* Earlier check for TBS */
3859 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3860 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3861 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3862 
3863 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3864 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3865 	}
3866 
3867 	ret = alloc_dma_desc_resources(priv, dma_conf);
3868 	if (ret < 0) {
3869 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3870 			   __func__);
3871 		goto alloc_error;
3872 	}
3873 
3874 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3875 	if (ret < 0) {
3876 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3877 			   __func__);
3878 		goto init_error;
3879 	}
3880 
3881 	return dma_conf;
3882 
3883 init_error:
3884 	free_dma_desc_resources(priv, dma_conf);
3885 alloc_error:
3886 	kfree(dma_conf);
3887 	return ERR_PTR(ret);
3888 }
3889 
3890 /**
3891  *  __stmmac_open - open entry point of the driver
3892  *  @dev : pointer to the device structure.
3893  *  @dma_conf :  structure to take the dma data
3894  *  Description:
3895  *  This function is the open entry point of the driver.
3896  *  Return value:
3897  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3898  *  file on failure.
3899  */
3900 static int __stmmac_open(struct net_device *dev,
3901 			 struct stmmac_dma_conf *dma_conf)
3902 {
3903 	struct stmmac_priv *priv = netdev_priv(dev);
3904 	int mode = priv->plat->phy_interface;
3905 	u32 chan;
3906 	int ret;
3907 
3908 	ret = pm_runtime_resume_and_get(priv->device);
3909 	if (ret < 0)
3910 		return ret;
3911 
3912 	if ((!priv->hw->xpcs ||
3913 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3914 		ret = stmmac_init_phy(dev);
3915 		if (ret) {
3916 			netdev_err(priv->dev,
3917 				   "%s: Cannot attach to PHY (error: %d)\n",
3918 				   __func__, ret);
3919 			goto init_phy_error;
3920 		}
3921 	}
3922 
3923 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3924 
3925 	buf_sz = dma_conf->dma_buf_sz;
3926 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3927 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3928 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3929 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3930 
3931 	stmmac_reset_queues_param(priv);
3932 
3933 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3934 	    priv->plat->serdes_powerup) {
3935 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3936 		if (ret < 0) {
3937 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3938 				   __func__);
3939 			goto init_error;
3940 		}
3941 	}
3942 
3943 	ret = stmmac_hw_setup(dev, true);
3944 	if (ret < 0) {
3945 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3946 		goto init_error;
3947 	}
3948 
3949 	stmmac_init_coalesce(priv);
3950 
3951 	phylink_start(priv->phylink);
3952 	/* We may have called phylink_speed_down before */
3953 	phylink_speed_up(priv->phylink);
3954 
3955 	ret = stmmac_request_irq(dev);
3956 	if (ret)
3957 		goto irq_error;
3958 
3959 	stmmac_enable_all_queues(priv);
3960 	netif_tx_start_all_queues(priv->dev);
3961 	stmmac_enable_all_dma_irq(priv);
3962 
3963 	return 0;
3964 
3965 irq_error:
3966 	phylink_stop(priv->phylink);
3967 
3968 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3969 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3970 
3971 	stmmac_hw_teardown(dev);
3972 init_error:
3973 	phylink_disconnect_phy(priv->phylink);
3974 init_phy_error:
3975 	pm_runtime_put(priv->device);
3976 	return ret;
3977 }
3978 
3979 static int stmmac_open(struct net_device *dev)
3980 {
3981 	struct stmmac_priv *priv = netdev_priv(dev);
3982 	struct stmmac_dma_conf *dma_conf;
3983 	int ret;
3984 
3985 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3986 	if (IS_ERR(dma_conf))
3987 		return PTR_ERR(dma_conf);
3988 
3989 	ret = __stmmac_open(dev, dma_conf);
3990 	if (ret)
3991 		free_dma_desc_resources(priv, dma_conf);
3992 
3993 	kfree(dma_conf);
3994 	return ret;
3995 }
3996 
3997 /**
3998  *  stmmac_release - close entry point of the driver
3999  *  @dev : device pointer.
4000  *  Description:
4001  *  This is the stop entry point of the driver.
4002  */
4003 static int stmmac_release(struct net_device *dev)
4004 {
4005 	struct stmmac_priv *priv = netdev_priv(dev);
4006 	u32 chan;
4007 
4008 	if (device_may_wakeup(priv->device))
4009 		phylink_speed_down(priv->phylink, false);
4010 	/* Stop and disconnect the PHY */
4011 	phylink_stop(priv->phylink);
4012 	phylink_disconnect_phy(priv->phylink);
4013 
4014 	stmmac_disable_all_queues(priv);
4015 
4016 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4017 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4018 
4019 	netif_tx_disable(dev);
4020 
4021 	/* Free the IRQ lines */
4022 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4023 
4024 	if (priv->eee_enabled) {
4025 		priv->tx_path_in_lpi_mode = false;
4026 		del_timer_sync(&priv->eee_ctrl_timer);
4027 	}
4028 
4029 	/* Stop TX/RX DMA and clear the descriptors */
4030 	stmmac_stop_all_dma(priv);
4031 
4032 	/* Release and free the Rx/Tx resources */
4033 	free_dma_desc_resources(priv, &priv->dma_conf);
4034 
4035 	/* Disable the MAC Rx/Tx */
4036 	stmmac_mac_set(priv, priv->ioaddr, false);
4037 
4038 	/* Powerdown Serdes if there is */
4039 	if (priv->plat->serdes_powerdown)
4040 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4041 
4042 	stmmac_release_ptp(priv);
4043 
4044 	if (stmmac_fpe_supported(priv))
4045 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4046 
4047 	pm_runtime_put(priv->device);
4048 
4049 	return 0;
4050 }
4051 
4052 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4053 			       struct stmmac_tx_queue *tx_q)
4054 {
4055 	u16 tag = 0x0, inner_tag = 0x0;
4056 	u32 inner_type = 0x0;
4057 	struct dma_desc *p;
4058 
4059 	if (!priv->dma_cap.vlins)
4060 		return false;
4061 	if (!skb_vlan_tag_present(skb))
4062 		return false;
4063 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4064 		inner_tag = skb_vlan_tag_get(skb);
4065 		inner_type = STMMAC_VLAN_INSERT;
4066 	}
4067 
4068 	tag = skb_vlan_tag_get(skb);
4069 
4070 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4071 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4072 	else
4073 		p = &tx_q->dma_tx[tx_q->cur_tx];
4074 
4075 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4076 		return false;
4077 
4078 	stmmac_set_tx_owner(priv, p);
4079 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4080 	return true;
4081 }
4082 
4083 /**
4084  *  stmmac_tso_allocator - close entry point of the driver
4085  *  @priv: driver private structure
4086  *  @des: buffer start address
4087  *  @total_len: total length to fill in descriptors
4088  *  @last_segment: condition for the last descriptor
4089  *  @queue: TX queue index
4090  *  Description:
4091  *  This function fills descriptor and request new descriptors according to
4092  *  buffer length to fill
4093  */
4094 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4095 				 int total_len, bool last_segment, u32 queue)
4096 {
4097 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4098 	struct dma_desc *desc;
4099 	u32 buff_size;
4100 	int tmp_len;
4101 
4102 	tmp_len = total_len;
4103 
4104 	while (tmp_len > 0) {
4105 		dma_addr_t curr_addr;
4106 
4107 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4108 						priv->dma_conf.dma_tx_size);
4109 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4110 
4111 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4112 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4113 		else
4114 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4115 
4116 		curr_addr = des + (total_len - tmp_len);
4117 		if (priv->dma_cap.addr64 <= 32)
4118 			desc->des0 = cpu_to_le32(curr_addr);
4119 		else
4120 			stmmac_set_desc_addr(priv, desc, curr_addr);
4121 
4122 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4123 			    TSO_MAX_BUFF_SIZE : tmp_len;
4124 
4125 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4126 				0, 1,
4127 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4128 				0, 0);
4129 
4130 		tmp_len -= TSO_MAX_BUFF_SIZE;
4131 	}
4132 }
4133 
4134 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4135 {
4136 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4137 	int desc_size;
4138 
4139 	if (likely(priv->extend_desc))
4140 		desc_size = sizeof(struct dma_extended_desc);
4141 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4142 		desc_size = sizeof(struct dma_edesc);
4143 	else
4144 		desc_size = sizeof(struct dma_desc);
4145 
4146 	/* The own bit must be the latest setting done when prepare the
4147 	 * descriptor and then barrier is needed to make sure that
4148 	 * all is coherent before granting the DMA engine.
4149 	 */
4150 	wmb();
4151 
4152 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4153 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4154 }
4155 
4156 /**
4157  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4158  *  @skb : the socket buffer
4159  *  @dev : device pointer
4160  *  Description: this is the transmit function that is called on TSO frames
4161  *  (support available on GMAC4 and newer chips).
4162  *  Diagram below show the ring programming in case of TSO frames:
4163  *
4164  *  First Descriptor
4165  *   --------
4166  *   | DES0 |---> buffer1 = L2/L3/L4 header
4167  *   | DES1 |---> TCP Payload (can continue on next descr...)
4168  *   | DES2 |---> buffer 1 and 2 len
4169  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4170  *   --------
4171  *	|
4172  *     ...
4173  *	|
4174  *   --------
4175  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4176  *   | DES1 | --|
4177  *   | DES2 | --> buffer 1 and 2 len
4178  *   | DES3 |
4179  *   --------
4180  *
4181  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4182  */
4183 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4184 {
4185 	struct dma_desc *desc, *first, *mss_desc = NULL;
4186 	struct stmmac_priv *priv = netdev_priv(dev);
4187 	int tmp_pay_len = 0, first_tx, nfrags;
4188 	unsigned int first_entry, tx_packets;
4189 	struct stmmac_txq_stats *txq_stats;
4190 	struct stmmac_tx_queue *tx_q;
4191 	u32 pay_len, mss, queue;
4192 	u8 proto_hdr_len, hdr;
4193 	dma_addr_t des;
4194 	bool set_ic;
4195 	int i;
4196 
4197 	/* Always insert VLAN tag to SKB payload for TSO frames.
4198 	 *
4199 	 * Never insert VLAN tag by HW, since segments splited by
4200 	 * TSO engine will be un-tagged by mistake.
4201 	 */
4202 	if (skb_vlan_tag_present(skb)) {
4203 		skb = __vlan_hwaccel_push_inside(skb);
4204 		if (unlikely(!skb)) {
4205 			priv->xstats.tx_dropped++;
4206 			return NETDEV_TX_OK;
4207 		}
4208 	}
4209 
4210 	nfrags = skb_shinfo(skb)->nr_frags;
4211 	queue = skb_get_queue_mapping(skb);
4212 
4213 	tx_q = &priv->dma_conf.tx_queue[queue];
4214 	txq_stats = &priv->xstats.txq_stats[queue];
4215 	first_tx = tx_q->cur_tx;
4216 
4217 	/* Compute header lengths */
4218 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4219 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4220 		hdr = sizeof(struct udphdr);
4221 	} else {
4222 		proto_hdr_len = skb_tcp_all_headers(skb);
4223 		hdr = tcp_hdrlen(skb);
4224 	}
4225 
4226 	/* Desc availability based on threshold should be enough safe */
4227 	if (unlikely(stmmac_tx_avail(priv, queue) <
4228 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4229 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4230 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4231 								queue));
4232 			/* This is a hard error, log it. */
4233 			netdev_err(priv->dev,
4234 				   "%s: Tx Ring full when queue awake\n",
4235 				   __func__);
4236 		}
4237 		return NETDEV_TX_BUSY;
4238 	}
4239 
4240 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4241 
4242 	mss = skb_shinfo(skb)->gso_size;
4243 
4244 	/* set new MSS value if needed */
4245 	if (mss != tx_q->mss) {
4246 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4247 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4248 		else
4249 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4250 
4251 		stmmac_set_mss(priv, mss_desc, mss);
4252 		tx_q->mss = mss;
4253 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4254 						priv->dma_conf.dma_tx_size);
4255 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4256 	}
4257 
4258 	if (netif_msg_tx_queued(priv)) {
4259 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4260 			__func__, hdr, proto_hdr_len, pay_len, mss);
4261 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4262 			skb->data_len);
4263 	}
4264 
4265 	first_entry = tx_q->cur_tx;
4266 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4267 
4268 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4269 		desc = &tx_q->dma_entx[first_entry].basic;
4270 	else
4271 		desc = &tx_q->dma_tx[first_entry];
4272 	first = desc;
4273 
4274 	/* first descriptor: fill Headers on Buf1 */
4275 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4276 			     DMA_TO_DEVICE);
4277 	if (dma_mapping_error(priv->device, des))
4278 		goto dma_map_err;
4279 
4280 	if (priv->dma_cap.addr64 <= 32) {
4281 		first->des0 = cpu_to_le32(des);
4282 
4283 		/* Fill start of payload in buff2 of first descriptor */
4284 		if (pay_len)
4285 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4286 
4287 		/* If needed take extra descriptors to fill the remaining payload */
4288 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4289 	} else {
4290 		stmmac_set_desc_addr(priv, first, des);
4291 		tmp_pay_len = pay_len;
4292 		des += proto_hdr_len;
4293 		pay_len = 0;
4294 	}
4295 
4296 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4297 
4298 	/* In case two or more DMA transmit descriptors are allocated for this
4299 	 * non-paged SKB data, the DMA buffer address should be saved to
4300 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4301 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4302 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4303 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4304 	 * sooner or later.
4305 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4306 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4307 	 * this DMA buffer right after the DMA engine completely finishes the
4308 	 * full buffer transmission.
4309 	 */
4310 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4311 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4312 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4313 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4314 
4315 	/* Prepare fragments */
4316 	for (i = 0; i < nfrags; i++) {
4317 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4318 
4319 		des = skb_frag_dma_map(priv->device, frag, 0,
4320 				       skb_frag_size(frag),
4321 				       DMA_TO_DEVICE);
4322 		if (dma_mapping_error(priv->device, des))
4323 			goto dma_map_err;
4324 
4325 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4326 				     (i == nfrags - 1), queue);
4327 
4328 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4329 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4330 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4331 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4332 	}
4333 
4334 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4335 
4336 	/* Only the last descriptor gets to point to the skb. */
4337 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4338 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4339 
4340 	/* Manage tx mitigation */
4341 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4342 	tx_q->tx_count_frames += tx_packets;
4343 
4344 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4345 		set_ic = true;
4346 	else if (!priv->tx_coal_frames[queue])
4347 		set_ic = false;
4348 	else if (tx_packets > priv->tx_coal_frames[queue])
4349 		set_ic = true;
4350 	else if ((tx_q->tx_count_frames %
4351 		  priv->tx_coal_frames[queue]) < tx_packets)
4352 		set_ic = true;
4353 	else
4354 		set_ic = false;
4355 
4356 	if (set_ic) {
4357 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4358 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4359 		else
4360 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4361 
4362 		tx_q->tx_count_frames = 0;
4363 		stmmac_set_tx_ic(priv, desc);
4364 	}
4365 
4366 	/* We've used all descriptors we need for this skb, however,
4367 	 * advance cur_tx so that it references a fresh descriptor.
4368 	 * ndo_start_xmit will fill this descriptor the next time it's
4369 	 * called and stmmac_tx_clean may clean up to this descriptor.
4370 	 */
4371 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4372 
4373 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4374 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4375 			  __func__);
4376 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4377 	}
4378 
4379 	u64_stats_update_begin(&txq_stats->q_syncp);
4380 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4381 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4382 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4383 	if (set_ic)
4384 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4385 	u64_stats_update_end(&txq_stats->q_syncp);
4386 
4387 	if (priv->sarc_type)
4388 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4389 
4390 	skb_tx_timestamp(skb);
4391 
4392 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4393 		     priv->hwts_tx_en)) {
4394 		/* declare that device is doing timestamping */
4395 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4396 		stmmac_enable_tx_timestamp(priv, first);
4397 	}
4398 
4399 	/* Complete the first descriptor before granting the DMA */
4400 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4401 			proto_hdr_len,
4402 			pay_len,
4403 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4404 			hdr / 4, (skb->len - proto_hdr_len));
4405 
4406 	/* If context desc is used to change MSS */
4407 	if (mss_desc) {
4408 		/* Make sure that first descriptor has been completely
4409 		 * written, including its own bit. This is because MSS is
4410 		 * actually before first descriptor, so we need to make
4411 		 * sure that MSS's own bit is the last thing written.
4412 		 */
4413 		dma_wmb();
4414 		stmmac_set_tx_owner(priv, mss_desc);
4415 	}
4416 
4417 	if (netif_msg_pktdata(priv)) {
4418 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4419 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4420 			tx_q->cur_tx, first, nfrags);
4421 		pr_info(">>> frame to be transmitted: ");
4422 		print_pkt(skb->data, skb_headlen(skb));
4423 	}
4424 
4425 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4426 
4427 	stmmac_flush_tx_descriptors(priv, queue);
4428 	stmmac_tx_timer_arm(priv, queue);
4429 
4430 	return NETDEV_TX_OK;
4431 
4432 dma_map_err:
4433 	dev_err(priv->device, "Tx dma map failed\n");
4434 	dev_kfree_skb(skb);
4435 	priv->xstats.tx_dropped++;
4436 	return NETDEV_TX_OK;
4437 }
4438 
4439 /**
4440  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4441  * @skb: socket buffer to check
4442  *
4443  * Check if a packet has an ethertype that will trigger the IP header checks
4444  * and IP/TCP checksum engine of the stmmac core.
4445  *
4446  * Return: true if the ethertype can trigger the checksum engine, false
4447  * otherwise
4448  */
4449 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4450 {
4451 	int depth = 0;
4452 	__be16 proto;
4453 
4454 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4455 				    &depth);
4456 
4457 	return (depth <= ETH_HLEN) &&
4458 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4459 }
4460 
4461 /**
4462  *  stmmac_xmit - Tx entry point of the driver
4463  *  @skb : the socket buffer
4464  *  @dev : device pointer
4465  *  Description : this is the tx entry point of the driver.
4466  *  It programs the chain or the ring and supports oversized frames
4467  *  and SG feature.
4468  */
4469 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4470 {
4471 	unsigned int first_entry, tx_packets, enh_desc;
4472 	struct stmmac_priv *priv = netdev_priv(dev);
4473 	unsigned int nopaged_len = skb_headlen(skb);
4474 	int i, csum_insertion = 0, is_jumbo = 0;
4475 	u32 queue = skb_get_queue_mapping(skb);
4476 	int nfrags = skb_shinfo(skb)->nr_frags;
4477 	int gso = skb_shinfo(skb)->gso_type;
4478 	struct stmmac_txq_stats *txq_stats;
4479 	struct dma_edesc *tbs_desc = NULL;
4480 	struct dma_desc *desc, *first;
4481 	struct stmmac_tx_queue *tx_q;
4482 	bool has_vlan, set_ic;
4483 	int entry, first_tx;
4484 	dma_addr_t des;
4485 
4486 	tx_q = &priv->dma_conf.tx_queue[queue];
4487 	txq_stats = &priv->xstats.txq_stats[queue];
4488 	first_tx = tx_q->cur_tx;
4489 
4490 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4491 		stmmac_disable_eee_mode(priv);
4492 
4493 	/* Manage oversized TCP frames for GMAC4 device */
4494 	if (skb_is_gso(skb) && priv->tso) {
4495 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4496 			return stmmac_tso_xmit(skb, dev);
4497 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4498 			return stmmac_tso_xmit(skb, dev);
4499 	}
4500 
4501 	if (priv->est && priv->est->enable &&
4502 	    priv->est->max_sdu[queue] &&
4503 	    skb->len > priv->est->max_sdu[queue]){
4504 		priv->xstats.max_sdu_txq_drop[queue]++;
4505 		goto max_sdu_err;
4506 	}
4507 
4508 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4509 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4510 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4511 								queue));
4512 			/* This is a hard error, log it. */
4513 			netdev_err(priv->dev,
4514 				   "%s: Tx Ring full when queue awake\n",
4515 				   __func__);
4516 		}
4517 		return NETDEV_TX_BUSY;
4518 	}
4519 
4520 	/* Check if VLAN can be inserted by HW */
4521 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4522 
4523 	entry = tx_q->cur_tx;
4524 	first_entry = entry;
4525 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4526 
4527 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4528 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4529 	 * queues. In that case, checksum offloading for those queues that don't
4530 	 * support tx coe needs to fallback to software checksum calculation.
4531 	 *
4532 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4533 	 * also have to be checksummed in software.
4534 	 */
4535 	if (csum_insertion &&
4536 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4537 	     !stmmac_has_ip_ethertype(skb))) {
4538 		if (unlikely(skb_checksum_help(skb)))
4539 			goto dma_map_err;
4540 		csum_insertion = !csum_insertion;
4541 	}
4542 
4543 	if (likely(priv->extend_desc))
4544 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4545 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4546 		desc = &tx_q->dma_entx[entry].basic;
4547 	else
4548 		desc = tx_q->dma_tx + entry;
4549 
4550 	first = desc;
4551 
4552 	if (has_vlan)
4553 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4554 
4555 	enh_desc = priv->plat->enh_desc;
4556 	/* To program the descriptors according to the size of the frame */
4557 	if (enh_desc)
4558 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4559 
4560 	if (unlikely(is_jumbo)) {
4561 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4562 		if (unlikely(entry < 0) && (entry != -EINVAL))
4563 			goto dma_map_err;
4564 	}
4565 
4566 	for (i = 0; i < nfrags; i++) {
4567 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4568 		int len = skb_frag_size(frag);
4569 		bool last_segment = (i == (nfrags - 1));
4570 
4571 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4572 		WARN_ON(tx_q->tx_skbuff[entry]);
4573 
4574 		if (likely(priv->extend_desc))
4575 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4576 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4577 			desc = &tx_q->dma_entx[entry].basic;
4578 		else
4579 			desc = tx_q->dma_tx + entry;
4580 
4581 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4582 				       DMA_TO_DEVICE);
4583 		if (dma_mapping_error(priv->device, des))
4584 			goto dma_map_err; /* should reuse desc w/o issues */
4585 
4586 		tx_q->tx_skbuff_dma[entry].buf = des;
4587 
4588 		stmmac_set_desc_addr(priv, desc, des);
4589 
4590 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4591 		tx_q->tx_skbuff_dma[entry].len = len;
4592 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4593 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4594 
4595 		/* Prepare the descriptor and set the own bit too */
4596 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4597 				priv->mode, 1, last_segment, skb->len);
4598 	}
4599 
4600 	/* Only the last descriptor gets to point to the skb. */
4601 	tx_q->tx_skbuff[entry] = skb;
4602 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4603 
4604 	/* According to the coalesce parameter the IC bit for the latest
4605 	 * segment is reset and the timer re-started to clean the tx status.
4606 	 * This approach takes care about the fragments: desc is the first
4607 	 * element in case of no SG.
4608 	 */
4609 	tx_packets = (entry + 1) - first_tx;
4610 	tx_q->tx_count_frames += tx_packets;
4611 
4612 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4613 		set_ic = true;
4614 	else if (!priv->tx_coal_frames[queue])
4615 		set_ic = false;
4616 	else if (tx_packets > priv->tx_coal_frames[queue])
4617 		set_ic = true;
4618 	else if ((tx_q->tx_count_frames %
4619 		  priv->tx_coal_frames[queue]) < tx_packets)
4620 		set_ic = true;
4621 	else
4622 		set_ic = false;
4623 
4624 	if (set_ic) {
4625 		if (likely(priv->extend_desc))
4626 			desc = &tx_q->dma_etx[entry].basic;
4627 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4628 			desc = &tx_q->dma_entx[entry].basic;
4629 		else
4630 			desc = &tx_q->dma_tx[entry];
4631 
4632 		tx_q->tx_count_frames = 0;
4633 		stmmac_set_tx_ic(priv, desc);
4634 	}
4635 
4636 	/* We've used all descriptors we need for this skb, however,
4637 	 * advance cur_tx so that it references a fresh descriptor.
4638 	 * ndo_start_xmit will fill this descriptor the next time it's
4639 	 * called and stmmac_tx_clean may clean up to this descriptor.
4640 	 */
4641 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4642 	tx_q->cur_tx = entry;
4643 
4644 	if (netif_msg_pktdata(priv)) {
4645 		netdev_dbg(priv->dev,
4646 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4647 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4648 			   entry, first, nfrags);
4649 
4650 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4651 		print_pkt(skb->data, skb->len);
4652 	}
4653 
4654 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4655 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4656 			  __func__);
4657 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4658 	}
4659 
4660 	u64_stats_update_begin(&txq_stats->q_syncp);
4661 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4662 	if (set_ic)
4663 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4664 	u64_stats_update_end(&txq_stats->q_syncp);
4665 
4666 	if (priv->sarc_type)
4667 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4668 
4669 	skb_tx_timestamp(skb);
4670 
4671 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4672 	 * problems because all the descriptors are actually ready to be
4673 	 * passed to the DMA engine.
4674 	 */
4675 	if (likely(!is_jumbo)) {
4676 		bool last_segment = (nfrags == 0);
4677 
4678 		des = dma_map_single(priv->device, skb->data,
4679 				     nopaged_len, DMA_TO_DEVICE);
4680 		if (dma_mapping_error(priv->device, des))
4681 			goto dma_map_err;
4682 
4683 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4684 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4685 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4686 
4687 		stmmac_set_desc_addr(priv, first, des);
4688 
4689 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4690 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4691 
4692 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4693 			     priv->hwts_tx_en)) {
4694 			/* declare that device is doing timestamping */
4695 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4696 			stmmac_enable_tx_timestamp(priv, first);
4697 		}
4698 
4699 		/* Prepare the first descriptor setting the OWN bit too */
4700 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4701 				csum_insertion, priv->mode, 0, last_segment,
4702 				skb->len);
4703 	}
4704 
4705 	if (tx_q->tbs & STMMAC_TBS_EN) {
4706 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4707 
4708 		tbs_desc = &tx_q->dma_entx[first_entry];
4709 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4710 	}
4711 
4712 	stmmac_set_tx_owner(priv, first);
4713 
4714 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4715 
4716 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4717 
4718 	stmmac_flush_tx_descriptors(priv, queue);
4719 	stmmac_tx_timer_arm(priv, queue);
4720 
4721 	return NETDEV_TX_OK;
4722 
4723 dma_map_err:
4724 	netdev_err(priv->dev, "Tx DMA map failed\n");
4725 max_sdu_err:
4726 	dev_kfree_skb(skb);
4727 	priv->xstats.tx_dropped++;
4728 	return NETDEV_TX_OK;
4729 }
4730 
4731 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4732 {
4733 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4734 	__be16 vlan_proto = veth->h_vlan_proto;
4735 	u16 vlanid;
4736 
4737 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4738 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4739 	    (vlan_proto == htons(ETH_P_8021AD) &&
4740 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4741 		/* pop the vlan tag */
4742 		vlanid = ntohs(veth->h_vlan_TCI);
4743 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4744 		skb_pull(skb, VLAN_HLEN);
4745 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4746 	}
4747 }
4748 
4749 /**
4750  * stmmac_rx_refill - refill used skb preallocated buffers
4751  * @priv: driver private structure
4752  * @queue: RX queue index
4753  * Description : this is to reallocate the skb for the reception process
4754  * that is based on zero-copy.
4755  */
4756 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4757 {
4758 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4759 	int dirty = stmmac_rx_dirty(priv, queue);
4760 	unsigned int entry = rx_q->dirty_rx;
4761 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4762 
4763 	if (priv->dma_cap.host_dma_width <= 32)
4764 		gfp |= GFP_DMA32;
4765 
4766 	while (dirty-- > 0) {
4767 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4768 		struct dma_desc *p;
4769 		bool use_rx_wd;
4770 
4771 		if (priv->extend_desc)
4772 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4773 		else
4774 			p = rx_q->dma_rx + entry;
4775 
4776 		if (!buf->page) {
4777 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4778 			if (!buf->page)
4779 				break;
4780 		}
4781 
4782 		if (priv->sph && !buf->sec_page) {
4783 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4784 			if (!buf->sec_page)
4785 				break;
4786 
4787 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4788 		}
4789 
4790 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4791 
4792 		stmmac_set_desc_addr(priv, p, buf->addr);
4793 		if (priv->sph)
4794 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4795 		else
4796 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4797 		stmmac_refill_desc3(priv, rx_q, p);
4798 
4799 		rx_q->rx_count_frames++;
4800 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4801 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4802 			rx_q->rx_count_frames = 0;
4803 
4804 		use_rx_wd = !priv->rx_coal_frames[queue];
4805 		use_rx_wd |= rx_q->rx_count_frames > 0;
4806 		if (!priv->use_riwt)
4807 			use_rx_wd = false;
4808 
4809 		dma_wmb();
4810 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4811 
4812 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4813 	}
4814 	rx_q->dirty_rx = entry;
4815 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4816 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4817 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4818 }
4819 
4820 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4821 				       struct dma_desc *p,
4822 				       int status, unsigned int len)
4823 {
4824 	unsigned int plen = 0, hlen = 0;
4825 	int coe = priv->hw->rx_csum;
4826 
4827 	/* Not first descriptor, buffer is always zero */
4828 	if (priv->sph && len)
4829 		return 0;
4830 
4831 	/* First descriptor, get split header length */
4832 	stmmac_get_rx_header_len(priv, p, &hlen);
4833 	if (priv->sph && hlen) {
4834 		priv->xstats.rx_split_hdr_pkt_n++;
4835 		return hlen;
4836 	}
4837 
4838 	/* First descriptor, not last descriptor and not split header */
4839 	if (status & rx_not_ls)
4840 		return priv->dma_conf.dma_buf_sz;
4841 
4842 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4843 
4844 	/* First descriptor and last descriptor and not split header */
4845 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4846 }
4847 
4848 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4849 				       struct dma_desc *p,
4850 				       int status, unsigned int len)
4851 {
4852 	int coe = priv->hw->rx_csum;
4853 	unsigned int plen = 0;
4854 
4855 	/* Not split header, buffer is not available */
4856 	if (!priv->sph)
4857 		return 0;
4858 
4859 	/* Not last descriptor */
4860 	if (status & rx_not_ls)
4861 		return priv->dma_conf.dma_buf_sz;
4862 
4863 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4864 
4865 	/* Last descriptor */
4866 	return plen - len;
4867 }
4868 
4869 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4870 				struct xdp_frame *xdpf, bool dma_map)
4871 {
4872 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4873 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4874 	unsigned int entry = tx_q->cur_tx;
4875 	struct dma_desc *tx_desc;
4876 	dma_addr_t dma_addr;
4877 	bool set_ic;
4878 
4879 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4880 		return STMMAC_XDP_CONSUMED;
4881 
4882 	if (priv->est && priv->est->enable &&
4883 	    priv->est->max_sdu[queue] &&
4884 	    xdpf->len > priv->est->max_sdu[queue]) {
4885 		priv->xstats.max_sdu_txq_drop[queue]++;
4886 		return STMMAC_XDP_CONSUMED;
4887 	}
4888 
4889 	if (likely(priv->extend_desc))
4890 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4891 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4892 		tx_desc = &tx_q->dma_entx[entry].basic;
4893 	else
4894 		tx_desc = tx_q->dma_tx + entry;
4895 
4896 	if (dma_map) {
4897 		dma_addr = dma_map_single(priv->device, xdpf->data,
4898 					  xdpf->len, DMA_TO_DEVICE);
4899 		if (dma_mapping_error(priv->device, dma_addr))
4900 			return STMMAC_XDP_CONSUMED;
4901 
4902 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4903 	} else {
4904 		struct page *page = virt_to_page(xdpf->data);
4905 
4906 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4907 			   xdpf->headroom;
4908 		dma_sync_single_for_device(priv->device, dma_addr,
4909 					   xdpf->len, DMA_BIDIRECTIONAL);
4910 
4911 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4912 	}
4913 
4914 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4915 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4916 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4917 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4918 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4919 
4920 	tx_q->xdpf[entry] = xdpf;
4921 
4922 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4923 
4924 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4925 			       true, priv->mode, true, true,
4926 			       xdpf->len);
4927 
4928 	tx_q->tx_count_frames++;
4929 
4930 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4931 		set_ic = true;
4932 	else
4933 		set_ic = false;
4934 
4935 	if (set_ic) {
4936 		tx_q->tx_count_frames = 0;
4937 		stmmac_set_tx_ic(priv, tx_desc);
4938 		u64_stats_update_begin(&txq_stats->q_syncp);
4939 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4940 		u64_stats_update_end(&txq_stats->q_syncp);
4941 	}
4942 
4943 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4944 
4945 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4946 	tx_q->cur_tx = entry;
4947 
4948 	return STMMAC_XDP_TX;
4949 }
4950 
4951 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4952 				   int cpu)
4953 {
4954 	int index = cpu;
4955 
4956 	if (unlikely(index < 0))
4957 		index = 0;
4958 
4959 	while (index >= priv->plat->tx_queues_to_use)
4960 		index -= priv->plat->tx_queues_to_use;
4961 
4962 	return index;
4963 }
4964 
4965 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4966 				struct xdp_buff *xdp)
4967 {
4968 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4969 	int cpu = smp_processor_id();
4970 	struct netdev_queue *nq;
4971 	int queue;
4972 	int res;
4973 
4974 	if (unlikely(!xdpf))
4975 		return STMMAC_XDP_CONSUMED;
4976 
4977 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4978 	nq = netdev_get_tx_queue(priv->dev, queue);
4979 
4980 	__netif_tx_lock(nq, cpu);
4981 	/* Avoids TX time-out as we are sharing with slow path */
4982 	txq_trans_cond_update(nq);
4983 
4984 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4985 	if (res == STMMAC_XDP_TX)
4986 		stmmac_flush_tx_descriptors(priv, queue);
4987 
4988 	__netif_tx_unlock(nq);
4989 
4990 	return res;
4991 }
4992 
4993 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4994 				 struct bpf_prog *prog,
4995 				 struct xdp_buff *xdp)
4996 {
4997 	u32 act;
4998 	int res;
4999 
5000 	act = bpf_prog_run_xdp(prog, xdp);
5001 	switch (act) {
5002 	case XDP_PASS:
5003 		res = STMMAC_XDP_PASS;
5004 		break;
5005 	case XDP_TX:
5006 		res = stmmac_xdp_xmit_back(priv, xdp);
5007 		break;
5008 	case XDP_REDIRECT:
5009 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5010 			res = STMMAC_XDP_CONSUMED;
5011 		else
5012 			res = STMMAC_XDP_REDIRECT;
5013 		break;
5014 	default:
5015 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5016 		fallthrough;
5017 	case XDP_ABORTED:
5018 		trace_xdp_exception(priv->dev, prog, act);
5019 		fallthrough;
5020 	case XDP_DROP:
5021 		res = STMMAC_XDP_CONSUMED;
5022 		break;
5023 	}
5024 
5025 	return res;
5026 }
5027 
5028 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5029 					   struct xdp_buff *xdp)
5030 {
5031 	struct bpf_prog *prog;
5032 	int res;
5033 
5034 	prog = READ_ONCE(priv->xdp_prog);
5035 	if (!prog) {
5036 		res = STMMAC_XDP_PASS;
5037 		goto out;
5038 	}
5039 
5040 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5041 out:
5042 	return ERR_PTR(-res);
5043 }
5044 
5045 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5046 				   int xdp_status)
5047 {
5048 	int cpu = smp_processor_id();
5049 	int queue;
5050 
5051 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5052 
5053 	if (xdp_status & STMMAC_XDP_TX)
5054 		stmmac_tx_timer_arm(priv, queue);
5055 
5056 	if (xdp_status & STMMAC_XDP_REDIRECT)
5057 		xdp_do_flush();
5058 }
5059 
5060 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5061 					       struct xdp_buff *xdp)
5062 {
5063 	unsigned int metasize = xdp->data - xdp->data_meta;
5064 	unsigned int datasize = xdp->data_end - xdp->data;
5065 	struct sk_buff *skb;
5066 
5067 	skb = napi_alloc_skb(&ch->rxtx_napi,
5068 			     xdp->data_end - xdp->data_hard_start);
5069 	if (unlikely(!skb))
5070 		return NULL;
5071 
5072 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5073 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5074 	if (metasize)
5075 		skb_metadata_set(skb, metasize);
5076 
5077 	return skb;
5078 }
5079 
5080 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5081 				   struct dma_desc *p, struct dma_desc *np,
5082 				   struct xdp_buff *xdp)
5083 {
5084 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5085 	struct stmmac_channel *ch = &priv->channel[queue];
5086 	unsigned int len = xdp->data_end - xdp->data;
5087 	enum pkt_hash_types hash_type;
5088 	int coe = priv->hw->rx_csum;
5089 	struct sk_buff *skb;
5090 	u32 hash;
5091 
5092 	skb = stmmac_construct_skb_zc(ch, xdp);
5093 	if (!skb) {
5094 		priv->xstats.rx_dropped++;
5095 		return;
5096 	}
5097 
5098 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5099 	if (priv->hw->hw_vlan_en)
5100 		/* MAC level stripping. */
5101 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5102 	else
5103 		/* Driver level stripping. */
5104 		stmmac_rx_vlan(priv->dev, skb);
5105 	skb->protocol = eth_type_trans(skb, priv->dev);
5106 
5107 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5108 		skb_checksum_none_assert(skb);
5109 	else
5110 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5111 
5112 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5113 		skb_set_hash(skb, hash, hash_type);
5114 
5115 	skb_record_rx_queue(skb, queue);
5116 	napi_gro_receive(&ch->rxtx_napi, skb);
5117 
5118 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5119 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5120 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5121 	u64_stats_update_end(&rxq_stats->napi_syncp);
5122 }
5123 
5124 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5125 {
5126 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5127 	unsigned int entry = rx_q->dirty_rx;
5128 	struct dma_desc *rx_desc = NULL;
5129 	bool ret = true;
5130 
5131 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5132 
5133 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5134 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5135 		dma_addr_t dma_addr;
5136 		bool use_rx_wd;
5137 
5138 		if (!buf->xdp) {
5139 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5140 			if (!buf->xdp) {
5141 				ret = false;
5142 				break;
5143 			}
5144 		}
5145 
5146 		if (priv->extend_desc)
5147 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5148 		else
5149 			rx_desc = rx_q->dma_rx + entry;
5150 
5151 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5152 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5153 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5154 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5155 
5156 		rx_q->rx_count_frames++;
5157 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5158 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5159 			rx_q->rx_count_frames = 0;
5160 
5161 		use_rx_wd = !priv->rx_coal_frames[queue];
5162 		use_rx_wd |= rx_q->rx_count_frames > 0;
5163 		if (!priv->use_riwt)
5164 			use_rx_wd = false;
5165 
5166 		dma_wmb();
5167 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5168 
5169 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5170 	}
5171 
5172 	if (rx_desc) {
5173 		rx_q->dirty_rx = entry;
5174 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5175 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5176 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5177 	}
5178 
5179 	return ret;
5180 }
5181 
5182 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5183 {
5184 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5185 	 * to represent incoming packet, whereas cb field in the same structure
5186 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5187 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5188 	 */
5189 	return (struct stmmac_xdp_buff *)xdp;
5190 }
5191 
5192 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5193 {
5194 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5195 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5196 	unsigned int count = 0, error = 0, len = 0;
5197 	int dirty = stmmac_rx_dirty(priv, queue);
5198 	unsigned int next_entry = rx_q->cur_rx;
5199 	u32 rx_errors = 0, rx_dropped = 0;
5200 	unsigned int desc_size;
5201 	struct bpf_prog *prog;
5202 	bool failure = false;
5203 	int xdp_status = 0;
5204 	int status = 0;
5205 
5206 	if (netif_msg_rx_status(priv)) {
5207 		void *rx_head;
5208 
5209 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5210 		if (priv->extend_desc) {
5211 			rx_head = (void *)rx_q->dma_erx;
5212 			desc_size = sizeof(struct dma_extended_desc);
5213 		} else {
5214 			rx_head = (void *)rx_q->dma_rx;
5215 			desc_size = sizeof(struct dma_desc);
5216 		}
5217 
5218 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5219 				    rx_q->dma_rx_phy, desc_size);
5220 	}
5221 	while (count < limit) {
5222 		struct stmmac_rx_buffer *buf;
5223 		struct stmmac_xdp_buff *ctx;
5224 		unsigned int buf1_len = 0;
5225 		struct dma_desc *np, *p;
5226 		int entry;
5227 		int res;
5228 
5229 		if (!count && rx_q->state_saved) {
5230 			error = rx_q->state.error;
5231 			len = rx_q->state.len;
5232 		} else {
5233 			rx_q->state_saved = false;
5234 			error = 0;
5235 			len = 0;
5236 		}
5237 
5238 		if (count >= limit)
5239 			break;
5240 
5241 read_again:
5242 		buf1_len = 0;
5243 		entry = next_entry;
5244 		buf = &rx_q->buf_pool[entry];
5245 
5246 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5247 			failure = failure ||
5248 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5249 			dirty = 0;
5250 		}
5251 
5252 		if (priv->extend_desc)
5253 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5254 		else
5255 			p = rx_q->dma_rx + entry;
5256 
5257 		/* read the status of the incoming frame */
5258 		status = stmmac_rx_status(priv, &priv->xstats, p);
5259 		/* check if managed by the DMA otherwise go ahead */
5260 		if (unlikely(status & dma_own))
5261 			break;
5262 
5263 		/* Prefetch the next RX descriptor */
5264 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5265 						priv->dma_conf.dma_rx_size);
5266 		next_entry = rx_q->cur_rx;
5267 
5268 		if (priv->extend_desc)
5269 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5270 		else
5271 			np = rx_q->dma_rx + next_entry;
5272 
5273 		prefetch(np);
5274 
5275 		/* Ensure a valid XSK buffer before proceed */
5276 		if (!buf->xdp)
5277 			break;
5278 
5279 		if (priv->extend_desc)
5280 			stmmac_rx_extended_status(priv, &priv->xstats,
5281 						  rx_q->dma_erx + entry);
5282 		if (unlikely(status == discard_frame)) {
5283 			xsk_buff_free(buf->xdp);
5284 			buf->xdp = NULL;
5285 			dirty++;
5286 			error = 1;
5287 			if (!priv->hwts_rx_en)
5288 				rx_errors++;
5289 		}
5290 
5291 		if (unlikely(error && (status & rx_not_ls)))
5292 			goto read_again;
5293 		if (unlikely(error)) {
5294 			count++;
5295 			continue;
5296 		}
5297 
5298 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5299 		if (likely(status & rx_not_ls)) {
5300 			xsk_buff_free(buf->xdp);
5301 			buf->xdp = NULL;
5302 			dirty++;
5303 			count++;
5304 			goto read_again;
5305 		}
5306 
5307 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5308 		ctx->priv = priv;
5309 		ctx->desc = p;
5310 		ctx->ndesc = np;
5311 
5312 		/* XDP ZC Frame only support primary buffers for now */
5313 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5314 		len += buf1_len;
5315 
5316 		/* ACS is disabled; strip manually. */
5317 		if (likely(!(status & rx_not_ls))) {
5318 			buf1_len -= ETH_FCS_LEN;
5319 			len -= ETH_FCS_LEN;
5320 		}
5321 
5322 		/* RX buffer is good and fit into a XSK pool buffer */
5323 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5324 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5325 
5326 		prog = READ_ONCE(priv->xdp_prog);
5327 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5328 
5329 		switch (res) {
5330 		case STMMAC_XDP_PASS:
5331 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5332 			xsk_buff_free(buf->xdp);
5333 			break;
5334 		case STMMAC_XDP_CONSUMED:
5335 			xsk_buff_free(buf->xdp);
5336 			rx_dropped++;
5337 			break;
5338 		case STMMAC_XDP_TX:
5339 		case STMMAC_XDP_REDIRECT:
5340 			xdp_status |= res;
5341 			break;
5342 		}
5343 
5344 		buf->xdp = NULL;
5345 		dirty++;
5346 		count++;
5347 	}
5348 
5349 	if (status & rx_not_ls) {
5350 		rx_q->state_saved = true;
5351 		rx_q->state.error = error;
5352 		rx_q->state.len = len;
5353 	}
5354 
5355 	stmmac_finalize_xdp_rx(priv, xdp_status);
5356 
5357 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5358 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5359 	u64_stats_update_end(&rxq_stats->napi_syncp);
5360 
5361 	priv->xstats.rx_dropped += rx_dropped;
5362 	priv->xstats.rx_errors += rx_errors;
5363 
5364 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5365 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5366 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5367 		else
5368 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5369 
5370 		return (int)count;
5371 	}
5372 
5373 	return failure ? limit : (int)count;
5374 }
5375 
5376 /**
5377  * stmmac_rx - manage the receive process
5378  * @priv: driver private structure
5379  * @limit: napi bugget
5380  * @queue: RX queue index.
5381  * Description :  this the function called by the napi poll method.
5382  * It gets all the frames inside the ring.
5383  */
5384 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5385 {
5386 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5387 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5388 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5389 	struct stmmac_channel *ch = &priv->channel[queue];
5390 	unsigned int count = 0, error = 0, len = 0;
5391 	int status = 0, coe = priv->hw->rx_csum;
5392 	unsigned int next_entry = rx_q->cur_rx;
5393 	enum dma_data_direction dma_dir;
5394 	unsigned int desc_size;
5395 	struct sk_buff *skb = NULL;
5396 	struct stmmac_xdp_buff ctx;
5397 	int xdp_status = 0;
5398 	int buf_sz;
5399 
5400 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5401 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5402 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5403 
5404 	if (netif_msg_rx_status(priv)) {
5405 		void *rx_head;
5406 
5407 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5408 		if (priv->extend_desc) {
5409 			rx_head = (void *)rx_q->dma_erx;
5410 			desc_size = sizeof(struct dma_extended_desc);
5411 		} else {
5412 			rx_head = (void *)rx_q->dma_rx;
5413 			desc_size = sizeof(struct dma_desc);
5414 		}
5415 
5416 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5417 				    rx_q->dma_rx_phy, desc_size);
5418 	}
5419 	while (count < limit) {
5420 		unsigned int buf1_len = 0, buf2_len = 0;
5421 		enum pkt_hash_types hash_type;
5422 		struct stmmac_rx_buffer *buf;
5423 		struct dma_desc *np, *p;
5424 		int entry;
5425 		u32 hash;
5426 
5427 		if (!count && rx_q->state_saved) {
5428 			skb = rx_q->state.skb;
5429 			error = rx_q->state.error;
5430 			len = rx_q->state.len;
5431 		} else {
5432 			rx_q->state_saved = false;
5433 			skb = NULL;
5434 			error = 0;
5435 			len = 0;
5436 		}
5437 
5438 read_again:
5439 		if (count >= limit)
5440 			break;
5441 
5442 		buf1_len = 0;
5443 		buf2_len = 0;
5444 		entry = next_entry;
5445 		buf = &rx_q->buf_pool[entry];
5446 
5447 		if (priv->extend_desc)
5448 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5449 		else
5450 			p = rx_q->dma_rx + entry;
5451 
5452 		/* read the status of the incoming frame */
5453 		status = stmmac_rx_status(priv, &priv->xstats, p);
5454 		/* check if managed by the DMA otherwise go ahead */
5455 		if (unlikely(status & dma_own))
5456 			break;
5457 
5458 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5459 						priv->dma_conf.dma_rx_size);
5460 		next_entry = rx_q->cur_rx;
5461 
5462 		if (priv->extend_desc)
5463 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5464 		else
5465 			np = rx_q->dma_rx + next_entry;
5466 
5467 		prefetch(np);
5468 
5469 		if (priv->extend_desc)
5470 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5471 		if (unlikely(status == discard_frame)) {
5472 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5473 			buf->page = NULL;
5474 			error = 1;
5475 			if (!priv->hwts_rx_en)
5476 				rx_errors++;
5477 		}
5478 
5479 		if (unlikely(error && (status & rx_not_ls)))
5480 			goto read_again;
5481 		if (unlikely(error)) {
5482 			dev_kfree_skb(skb);
5483 			skb = NULL;
5484 			count++;
5485 			continue;
5486 		}
5487 
5488 		/* Buffer is good. Go on. */
5489 
5490 		prefetch(page_address(buf->page) + buf->page_offset);
5491 		if (buf->sec_page)
5492 			prefetch(page_address(buf->sec_page));
5493 
5494 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5495 		len += buf1_len;
5496 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5497 		len += buf2_len;
5498 
5499 		/* ACS is disabled; strip manually. */
5500 		if (likely(!(status & rx_not_ls))) {
5501 			if (buf2_len) {
5502 				buf2_len -= ETH_FCS_LEN;
5503 				len -= ETH_FCS_LEN;
5504 			} else if (buf1_len) {
5505 				buf1_len -= ETH_FCS_LEN;
5506 				len -= ETH_FCS_LEN;
5507 			}
5508 		}
5509 
5510 		if (!skb) {
5511 			unsigned int pre_len, sync_len;
5512 
5513 			dma_sync_single_for_cpu(priv->device, buf->addr,
5514 						buf1_len, dma_dir);
5515 
5516 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5517 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5518 					 buf->page_offset, buf1_len, true);
5519 
5520 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5521 				  buf->page_offset;
5522 
5523 			ctx.priv = priv;
5524 			ctx.desc = p;
5525 			ctx.ndesc = np;
5526 
5527 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5528 			/* Due xdp_adjust_tail: DMA sync for_device
5529 			 * cover max len CPU touch
5530 			 */
5531 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5532 				   buf->page_offset;
5533 			sync_len = max(sync_len, pre_len);
5534 
5535 			/* For Not XDP_PASS verdict */
5536 			if (IS_ERR(skb)) {
5537 				unsigned int xdp_res = -PTR_ERR(skb);
5538 
5539 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5540 					page_pool_put_page(rx_q->page_pool,
5541 							   virt_to_head_page(ctx.xdp.data),
5542 							   sync_len, true);
5543 					buf->page = NULL;
5544 					rx_dropped++;
5545 
5546 					/* Clear skb as it was set as
5547 					 * status by XDP program.
5548 					 */
5549 					skb = NULL;
5550 
5551 					if (unlikely((status & rx_not_ls)))
5552 						goto read_again;
5553 
5554 					count++;
5555 					continue;
5556 				} else if (xdp_res & (STMMAC_XDP_TX |
5557 						      STMMAC_XDP_REDIRECT)) {
5558 					xdp_status |= xdp_res;
5559 					buf->page = NULL;
5560 					skb = NULL;
5561 					count++;
5562 					continue;
5563 				}
5564 			}
5565 		}
5566 
5567 		if (!skb) {
5568 			/* XDP program may expand or reduce tail */
5569 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5570 
5571 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5572 			if (!skb) {
5573 				rx_dropped++;
5574 				count++;
5575 				goto drain_data;
5576 			}
5577 
5578 			/* XDP program may adjust header */
5579 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5580 			skb_put(skb, buf1_len);
5581 
5582 			/* Data payload copied into SKB, page ready for recycle */
5583 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5584 			buf->page = NULL;
5585 		} else if (buf1_len) {
5586 			dma_sync_single_for_cpu(priv->device, buf->addr,
5587 						buf1_len, dma_dir);
5588 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5589 					buf->page, buf->page_offset, buf1_len,
5590 					priv->dma_conf.dma_buf_sz);
5591 
5592 			/* Data payload appended into SKB */
5593 			skb_mark_for_recycle(skb);
5594 			buf->page = NULL;
5595 		}
5596 
5597 		if (buf2_len) {
5598 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5599 						buf2_len, dma_dir);
5600 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5601 					buf->sec_page, 0, buf2_len,
5602 					priv->dma_conf.dma_buf_sz);
5603 
5604 			/* Data payload appended into SKB */
5605 			skb_mark_for_recycle(skb);
5606 			buf->sec_page = NULL;
5607 		}
5608 
5609 drain_data:
5610 		if (likely(status & rx_not_ls))
5611 			goto read_again;
5612 		if (!skb)
5613 			continue;
5614 
5615 		/* Got entire packet into SKB. Finish it. */
5616 
5617 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5618 
5619 		if (priv->hw->hw_vlan_en)
5620 			/* MAC level stripping. */
5621 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5622 		else
5623 			/* Driver level stripping. */
5624 			stmmac_rx_vlan(priv->dev, skb);
5625 
5626 		skb->protocol = eth_type_trans(skb, priv->dev);
5627 
5628 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5629 			skb_checksum_none_assert(skb);
5630 		else
5631 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5632 
5633 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5634 			skb_set_hash(skb, hash, hash_type);
5635 
5636 		skb_record_rx_queue(skb, queue);
5637 		napi_gro_receive(&ch->rx_napi, skb);
5638 		skb = NULL;
5639 
5640 		rx_packets++;
5641 		rx_bytes += len;
5642 		count++;
5643 	}
5644 
5645 	if (status & rx_not_ls || skb) {
5646 		rx_q->state_saved = true;
5647 		rx_q->state.skb = skb;
5648 		rx_q->state.error = error;
5649 		rx_q->state.len = len;
5650 	}
5651 
5652 	stmmac_finalize_xdp_rx(priv, xdp_status);
5653 
5654 	stmmac_rx_refill(priv, queue);
5655 
5656 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5657 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5658 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5659 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5660 	u64_stats_update_end(&rxq_stats->napi_syncp);
5661 
5662 	priv->xstats.rx_dropped += rx_dropped;
5663 	priv->xstats.rx_errors += rx_errors;
5664 
5665 	return count;
5666 }
5667 
5668 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5669 {
5670 	struct stmmac_channel *ch =
5671 		container_of(napi, struct stmmac_channel, rx_napi);
5672 	struct stmmac_priv *priv = ch->priv_data;
5673 	struct stmmac_rxq_stats *rxq_stats;
5674 	u32 chan = ch->index;
5675 	int work_done;
5676 
5677 	rxq_stats = &priv->xstats.rxq_stats[chan];
5678 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5679 	u64_stats_inc(&rxq_stats->napi.poll);
5680 	u64_stats_update_end(&rxq_stats->napi_syncp);
5681 
5682 	work_done = stmmac_rx(priv, budget, chan);
5683 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5684 		unsigned long flags;
5685 
5686 		spin_lock_irqsave(&ch->lock, flags);
5687 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5688 		spin_unlock_irqrestore(&ch->lock, flags);
5689 	}
5690 
5691 	return work_done;
5692 }
5693 
5694 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5695 {
5696 	struct stmmac_channel *ch =
5697 		container_of(napi, struct stmmac_channel, tx_napi);
5698 	struct stmmac_priv *priv = ch->priv_data;
5699 	struct stmmac_txq_stats *txq_stats;
5700 	bool pending_packets = false;
5701 	u32 chan = ch->index;
5702 	int work_done;
5703 
5704 	txq_stats = &priv->xstats.txq_stats[chan];
5705 	u64_stats_update_begin(&txq_stats->napi_syncp);
5706 	u64_stats_inc(&txq_stats->napi.poll);
5707 	u64_stats_update_end(&txq_stats->napi_syncp);
5708 
5709 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5710 	work_done = min(work_done, budget);
5711 
5712 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5713 		unsigned long flags;
5714 
5715 		spin_lock_irqsave(&ch->lock, flags);
5716 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5717 		spin_unlock_irqrestore(&ch->lock, flags);
5718 	}
5719 
5720 	/* TX still have packet to handle, check if we need to arm tx timer */
5721 	if (pending_packets)
5722 		stmmac_tx_timer_arm(priv, chan);
5723 
5724 	return work_done;
5725 }
5726 
5727 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5728 {
5729 	struct stmmac_channel *ch =
5730 		container_of(napi, struct stmmac_channel, rxtx_napi);
5731 	struct stmmac_priv *priv = ch->priv_data;
5732 	bool tx_pending_packets = false;
5733 	int rx_done, tx_done, rxtx_done;
5734 	struct stmmac_rxq_stats *rxq_stats;
5735 	struct stmmac_txq_stats *txq_stats;
5736 	u32 chan = ch->index;
5737 
5738 	rxq_stats = &priv->xstats.rxq_stats[chan];
5739 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5740 	u64_stats_inc(&rxq_stats->napi.poll);
5741 	u64_stats_update_end(&rxq_stats->napi_syncp);
5742 
5743 	txq_stats = &priv->xstats.txq_stats[chan];
5744 	u64_stats_update_begin(&txq_stats->napi_syncp);
5745 	u64_stats_inc(&txq_stats->napi.poll);
5746 	u64_stats_update_end(&txq_stats->napi_syncp);
5747 
5748 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5749 	tx_done = min(tx_done, budget);
5750 
5751 	rx_done = stmmac_rx_zc(priv, budget, chan);
5752 
5753 	rxtx_done = max(tx_done, rx_done);
5754 
5755 	/* If either TX or RX work is not complete, return budget
5756 	 * and keep pooling
5757 	 */
5758 	if (rxtx_done >= budget)
5759 		return budget;
5760 
5761 	/* all work done, exit the polling mode */
5762 	if (napi_complete_done(napi, rxtx_done)) {
5763 		unsigned long flags;
5764 
5765 		spin_lock_irqsave(&ch->lock, flags);
5766 		/* Both RX and TX work done are compelte,
5767 		 * so enable both RX & TX IRQs.
5768 		 */
5769 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5770 		spin_unlock_irqrestore(&ch->lock, flags);
5771 	}
5772 
5773 	/* TX still have packet to handle, check if we need to arm tx timer */
5774 	if (tx_pending_packets)
5775 		stmmac_tx_timer_arm(priv, chan);
5776 
5777 	return min(rxtx_done, budget - 1);
5778 }
5779 
5780 /**
5781  *  stmmac_tx_timeout
5782  *  @dev : Pointer to net device structure
5783  *  @txqueue: the index of the hanging transmit queue
5784  *  Description: this function is called when a packet transmission fails to
5785  *   complete within a reasonable time. The driver will mark the error in the
5786  *   netdev structure and arrange for the device to be reset to a sane state
5787  *   in order to transmit a new packet.
5788  */
5789 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5790 {
5791 	struct stmmac_priv *priv = netdev_priv(dev);
5792 
5793 	stmmac_global_err(priv);
5794 }
5795 
5796 /**
5797  *  stmmac_set_rx_mode - entry point for multicast addressing
5798  *  @dev : pointer to the device structure
5799  *  Description:
5800  *  This function is a driver entry point which gets called by the kernel
5801  *  whenever multicast addresses must be enabled/disabled.
5802  *  Return value:
5803  *  void.
5804  */
5805 static void stmmac_set_rx_mode(struct net_device *dev)
5806 {
5807 	struct stmmac_priv *priv = netdev_priv(dev);
5808 
5809 	stmmac_set_filter(priv, priv->hw, dev);
5810 }
5811 
5812 /**
5813  *  stmmac_change_mtu - entry point to change MTU size for the device.
5814  *  @dev : device pointer.
5815  *  @new_mtu : the new MTU size for the device.
5816  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5817  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5818  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5819  *  Return value:
5820  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5821  *  file on failure.
5822  */
5823 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5824 {
5825 	struct stmmac_priv *priv = netdev_priv(dev);
5826 	int txfifosz = priv->plat->tx_fifo_size;
5827 	struct stmmac_dma_conf *dma_conf;
5828 	const int mtu = new_mtu;
5829 	int ret;
5830 
5831 	if (txfifosz == 0)
5832 		txfifosz = priv->dma_cap.tx_fifo_size;
5833 
5834 	txfifosz /= priv->plat->tx_queues_to_use;
5835 
5836 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5837 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5838 		return -EINVAL;
5839 	}
5840 
5841 	new_mtu = STMMAC_ALIGN(new_mtu);
5842 
5843 	/* If condition true, FIFO is too small or MTU too large */
5844 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5845 		return -EINVAL;
5846 
5847 	if (netif_running(dev)) {
5848 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5849 		/* Try to allocate the new DMA conf with the new mtu */
5850 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5851 		if (IS_ERR(dma_conf)) {
5852 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5853 				   mtu);
5854 			return PTR_ERR(dma_conf);
5855 		}
5856 
5857 		stmmac_release(dev);
5858 
5859 		ret = __stmmac_open(dev, dma_conf);
5860 		if (ret) {
5861 			free_dma_desc_resources(priv, dma_conf);
5862 			kfree(dma_conf);
5863 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5864 			return ret;
5865 		}
5866 
5867 		kfree(dma_conf);
5868 
5869 		stmmac_set_rx_mode(dev);
5870 	}
5871 
5872 	WRITE_ONCE(dev->mtu, mtu);
5873 	netdev_update_features(dev);
5874 
5875 	return 0;
5876 }
5877 
5878 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5879 					     netdev_features_t features)
5880 {
5881 	struct stmmac_priv *priv = netdev_priv(dev);
5882 
5883 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5884 		features &= ~NETIF_F_RXCSUM;
5885 
5886 	if (!priv->plat->tx_coe)
5887 		features &= ~NETIF_F_CSUM_MASK;
5888 
5889 	/* Some GMAC devices have a bugged Jumbo frame support that
5890 	 * needs to have the Tx COE disabled for oversized frames
5891 	 * (due to limited buffer sizes). In this case we disable
5892 	 * the TX csum insertion in the TDES and not use SF.
5893 	 */
5894 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5895 		features &= ~NETIF_F_CSUM_MASK;
5896 
5897 	/* Disable tso if asked by ethtool */
5898 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5899 		if (features & NETIF_F_TSO)
5900 			priv->tso = true;
5901 		else
5902 			priv->tso = false;
5903 	}
5904 
5905 	return features;
5906 }
5907 
5908 static int stmmac_set_features(struct net_device *netdev,
5909 			       netdev_features_t features)
5910 {
5911 	struct stmmac_priv *priv = netdev_priv(netdev);
5912 
5913 	/* Keep the COE Type in case of csum is supporting */
5914 	if (features & NETIF_F_RXCSUM)
5915 		priv->hw->rx_csum = priv->plat->rx_coe;
5916 	else
5917 		priv->hw->rx_csum = 0;
5918 	/* No check needed because rx_coe has been set before and it will be
5919 	 * fixed in case of issue.
5920 	 */
5921 	stmmac_rx_ipc(priv, priv->hw);
5922 
5923 	if (priv->sph_cap) {
5924 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5925 		u32 chan;
5926 
5927 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5928 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5929 	}
5930 
5931 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5932 		priv->hw->hw_vlan_en = true;
5933 	else
5934 		priv->hw->hw_vlan_en = false;
5935 
5936 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5937 
5938 	return 0;
5939 }
5940 
5941 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5942 {
5943 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5944 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5945 	u32 queues_count;
5946 	u32 queue;
5947 	bool xmac;
5948 
5949 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5950 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5951 
5952 	if (priv->irq_wake)
5953 		pm_wakeup_event(priv->device, 0);
5954 
5955 	if (priv->dma_cap.estsel)
5956 		stmmac_est_irq_status(priv, priv, priv->dev,
5957 				      &priv->xstats, tx_cnt);
5958 
5959 	if (stmmac_fpe_supported(priv))
5960 		stmmac_fpe_irq_status(priv);
5961 
5962 	/* To handle GMAC own interrupts */
5963 	if ((priv->plat->has_gmac) || xmac) {
5964 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5965 
5966 		if (unlikely(status)) {
5967 			/* For LPI we need to save the tx status */
5968 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5969 				priv->tx_path_in_lpi_mode = true;
5970 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5971 				priv->tx_path_in_lpi_mode = false;
5972 		}
5973 
5974 		for (queue = 0; queue < queues_count; queue++)
5975 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
5976 
5977 		/* PCS link status */
5978 		if (priv->hw->pcs &&
5979 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5980 			if (priv->xstats.pcs_link)
5981 				netif_carrier_on(priv->dev);
5982 			else
5983 				netif_carrier_off(priv->dev);
5984 		}
5985 
5986 		stmmac_timestamp_interrupt(priv, priv);
5987 	}
5988 }
5989 
5990 /**
5991  *  stmmac_interrupt - main ISR
5992  *  @irq: interrupt number.
5993  *  @dev_id: to pass the net device pointer.
5994  *  Description: this is the main driver interrupt service routine.
5995  *  It can call:
5996  *  o DMA service routine (to manage incoming frame reception and transmission
5997  *    status)
5998  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5999  *    interrupts.
6000  */
6001 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6002 {
6003 	struct net_device *dev = (struct net_device *)dev_id;
6004 	struct stmmac_priv *priv = netdev_priv(dev);
6005 
6006 	/* Check if adapter is up */
6007 	if (test_bit(STMMAC_DOWN, &priv->state))
6008 		return IRQ_HANDLED;
6009 
6010 	/* Check ASP error if it isn't delivered via an individual IRQ */
6011 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6012 		return IRQ_HANDLED;
6013 
6014 	/* To handle Common interrupts */
6015 	stmmac_common_interrupt(priv);
6016 
6017 	/* To handle DMA interrupts */
6018 	stmmac_dma_interrupt(priv);
6019 
6020 	return IRQ_HANDLED;
6021 }
6022 
6023 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6024 {
6025 	struct net_device *dev = (struct net_device *)dev_id;
6026 	struct stmmac_priv *priv = netdev_priv(dev);
6027 
6028 	/* Check if adapter is up */
6029 	if (test_bit(STMMAC_DOWN, &priv->state))
6030 		return IRQ_HANDLED;
6031 
6032 	/* To handle Common interrupts */
6033 	stmmac_common_interrupt(priv);
6034 
6035 	return IRQ_HANDLED;
6036 }
6037 
6038 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6039 {
6040 	struct net_device *dev = (struct net_device *)dev_id;
6041 	struct stmmac_priv *priv = netdev_priv(dev);
6042 
6043 	/* Check if adapter is up */
6044 	if (test_bit(STMMAC_DOWN, &priv->state))
6045 		return IRQ_HANDLED;
6046 
6047 	/* Check if a fatal error happened */
6048 	stmmac_safety_feat_interrupt(priv);
6049 
6050 	return IRQ_HANDLED;
6051 }
6052 
6053 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6054 {
6055 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6056 	struct stmmac_dma_conf *dma_conf;
6057 	int chan = tx_q->queue_index;
6058 	struct stmmac_priv *priv;
6059 	int status;
6060 
6061 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6062 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6063 
6064 	/* Check if adapter is up */
6065 	if (test_bit(STMMAC_DOWN, &priv->state))
6066 		return IRQ_HANDLED;
6067 
6068 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6069 
6070 	if (unlikely(status & tx_hard_error_bump_tc)) {
6071 		/* Try to bump up the dma threshold on this failure */
6072 		stmmac_bump_dma_threshold(priv, chan);
6073 	} else if (unlikely(status == tx_hard_error)) {
6074 		stmmac_tx_err(priv, chan);
6075 	}
6076 
6077 	return IRQ_HANDLED;
6078 }
6079 
6080 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6081 {
6082 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6083 	struct stmmac_dma_conf *dma_conf;
6084 	int chan = rx_q->queue_index;
6085 	struct stmmac_priv *priv;
6086 
6087 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6088 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6089 
6090 	/* Check if adapter is up */
6091 	if (test_bit(STMMAC_DOWN, &priv->state))
6092 		return IRQ_HANDLED;
6093 
6094 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6095 
6096 	return IRQ_HANDLED;
6097 }
6098 
6099 /**
6100  *  stmmac_ioctl - Entry point for the Ioctl
6101  *  @dev: Device pointer.
6102  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6103  *  a proprietary structure used to pass information to the driver.
6104  *  @cmd: IOCTL command
6105  *  Description:
6106  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6107  */
6108 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6109 {
6110 	struct stmmac_priv *priv = netdev_priv (dev);
6111 	int ret = -EOPNOTSUPP;
6112 
6113 	if (!netif_running(dev))
6114 		return -EINVAL;
6115 
6116 	switch (cmd) {
6117 	case SIOCGMIIPHY:
6118 	case SIOCGMIIREG:
6119 	case SIOCSMIIREG:
6120 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6121 		break;
6122 	case SIOCSHWTSTAMP:
6123 		ret = stmmac_hwtstamp_set(dev, rq);
6124 		break;
6125 	case SIOCGHWTSTAMP:
6126 		ret = stmmac_hwtstamp_get(dev, rq);
6127 		break;
6128 	default:
6129 		break;
6130 	}
6131 
6132 	return ret;
6133 }
6134 
6135 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6136 				    void *cb_priv)
6137 {
6138 	struct stmmac_priv *priv = cb_priv;
6139 	int ret = -EOPNOTSUPP;
6140 
6141 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6142 		return ret;
6143 
6144 	__stmmac_disable_all_queues(priv);
6145 
6146 	switch (type) {
6147 	case TC_SETUP_CLSU32:
6148 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6149 		break;
6150 	case TC_SETUP_CLSFLOWER:
6151 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6152 		break;
6153 	default:
6154 		break;
6155 	}
6156 
6157 	stmmac_enable_all_queues(priv);
6158 	return ret;
6159 }
6160 
6161 static LIST_HEAD(stmmac_block_cb_list);
6162 
6163 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6164 			   void *type_data)
6165 {
6166 	struct stmmac_priv *priv = netdev_priv(ndev);
6167 
6168 	switch (type) {
6169 	case TC_QUERY_CAPS:
6170 		return stmmac_tc_query_caps(priv, priv, type_data);
6171 	case TC_SETUP_QDISC_MQPRIO:
6172 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6173 	case TC_SETUP_BLOCK:
6174 		return flow_block_cb_setup_simple(type_data,
6175 						  &stmmac_block_cb_list,
6176 						  stmmac_setup_tc_block_cb,
6177 						  priv, priv, true);
6178 	case TC_SETUP_QDISC_CBS:
6179 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6180 	case TC_SETUP_QDISC_TAPRIO:
6181 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6182 	case TC_SETUP_QDISC_ETF:
6183 		return stmmac_tc_setup_etf(priv, priv, type_data);
6184 	default:
6185 		return -EOPNOTSUPP;
6186 	}
6187 }
6188 
6189 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6190 			       struct net_device *sb_dev)
6191 {
6192 	int gso = skb_shinfo(skb)->gso_type;
6193 
6194 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6195 		/*
6196 		 * There is no way to determine the number of TSO/USO
6197 		 * capable Queues. Let's use always the Queue 0
6198 		 * because if TSO/USO is supported then at least this
6199 		 * one will be capable.
6200 		 */
6201 		return 0;
6202 	}
6203 
6204 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6205 }
6206 
6207 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6208 {
6209 	struct stmmac_priv *priv = netdev_priv(ndev);
6210 	int ret = 0;
6211 
6212 	ret = pm_runtime_resume_and_get(priv->device);
6213 	if (ret < 0)
6214 		return ret;
6215 
6216 	ret = eth_mac_addr(ndev, addr);
6217 	if (ret)
6218 		goto set_mac_error;
6219 
6220 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6221 
6222 set_mac_error:
6223 	pm_runtime_put(priv->device);
6224 
6225 	return ret;
6226 }
6227 
6228 #ifdef CONFIG_DEBUG_FS
6229 static struct dentry *stmmac_fs_dir;
6230 
6231 static void sysfs_display_ring(void *head, int size, int extend_desc,
6232 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6233 {
6234 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6235 	struct dma_desc *p = (struct dma_desc *)head;
6236 	unsigned int desc_size;
6237 	dma_addr_t dma_addr;
6238 	int i;
6239 
6240 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6241 	for (i = 0; i < size; i++) {
6242 		dma_addr = dma_phy_addr + i * desc_size;
6243 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6244 				i, &dma_addr,
6245 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6246 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6247 		if (extend_desc)
6248 			p = &(++ep)->basic;
6249 		else
6250 			p++;
6251 	}
6252 }
6253 
6254 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6255 {
6256 	struct net_device *dev = seq->private;
6257 	struct stmmac_priv *priv = netdev_priv(dev);
6258 	u32 rx_count = priv->plat->rx_queues_to_use;
6259 	u32 tx_count = priv->plat->tx_queues_to_use;
6260 	u32 queue;
6261 
6262 	if ((dev->flags & IFF_UP) == 0)
6263 		return 0;
6264 
6265 	for (queue = 0; queue < rx_count; queue++) {
6266 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6267 
6268 		seq_printf(seq, "RX Queue %d:\n", queue);
6269 
6270 		if (priv->extend_desc) {
6271 			seq_printf(seq, "Extended descriptor ring:\n");
6272 			sysfs_display_ring((void *)rx_q->dma_erx,
6273 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6274 		} else {
6275 			seq_printf(seq, "Descriptor ring:\n");
6276 			sysfs_display_ring((void *)rx_q->dma_rx,
6277 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6278 		}
6279 	}
6280 
6281 	for (queue = 0; queue < tx_count; queue++) {
6282 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6283 
6284 		seq_printf(seq, "TX Queue %d:\n", queue);
6285 
6286 		if (priv->extend_desc) {
6287 			seq_printf(seq, "Extended descriptor ring:\n");
6288 			sysfs_display_ring((void *)tx_q->dma_etx,
6289 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6290 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6291 			seq_printf(seq, "Descriptor ring:\n");
6292 			sysfs_display_ring((void *)tx_q->dma_tx,
6293 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6294 		}
6295 	}
6296 
6297 	return 0;
6298 }
6299 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6300 
6301 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6302 {
6303 	static const char * const dwxgmac_timestamp_source[] = {
6304 		"None",
6305 		"Internal",
6306 		"External",
6307 		"Both",
6308 	};
6309 	static const char * const dwxgmac_safety_feature_desc[] = {
6310 		"No",
6311 		"All Safety Features with ECC and Parity",
6312 		"All Safety Features without ECC or Parity",
6313 		"All Safety Features with Parity Only",
6314 		"ECC Only",
6315 		"UNDEFINED",
6316 		"UNDEFINED",
6317 		"UNDEFINED",
6318 	};
6319 	struct net_device *dev = seq->private;
6320 	struct stmmac_priv *priv = netdev_priv(dev);
6321 
6322 	if (!priv->hw_cap_support) {
6323 		seq_printf(seq, "DMA HW features not supported\n");
6324 		return 0;
6325 	}
6326 
6327 	seq_printf(seq, "==============================\n");
6328 	seq_printf(seq, "\tDMA HW features\n");
6329 	seq_printf(seq, "==============================\n");
6330 
6331 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6332 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6333 	seq_printf(seq, "\t1000 Mbps: %s\n",
6334 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6335 	seq_printf(seq, "\tHalf duplex: %s\n",
6336 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6337 	if (priv->plat->has_xgmac) {
6338 		seq_printf(seq,
6339 			   "\tNumber of Additional MAC address registers: %d\n",
6340 			   priv->dma_cap.multi_addr);
6341 	} else {
6342 		seq_printf(seq, "\tHash Filter: %s\n",
6343 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6344 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6345 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6346 	}
6347 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6348 		   (priv->dma_cap.pcs) ? "Y" : "N");
6349 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6350 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6351 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6352 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6353 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6354 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6355 	seq_printf(seq, "\tRMON module: %s\n",
6356 		   (priv->dma_cap.rmon) ? "Y" : "N");
6357 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6358 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6359 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6360 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6361 	if (priv->plat->has_xgmac)
6362 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6363 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6364 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6365 		   (priv->dma_cap.eee) ? "Y" : "N");
6366 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6367 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6368 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6369 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6370 	    priv->plat->has_xgmac) {
6371 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6372 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6373 	} else {
6374 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6375 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6376 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6377 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6378 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6379 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6380 	}
6381 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6382 		   priv->dma_cap.number_rx_channel);
6383 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6384 		   priv->dma_cap.number_tx_channel);
6385 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6386 		   priv->dma_cap.number_rx_queues);
6387 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6388 		   priv->dma_cap.number_tx_queues);
6389 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6390 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6391 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6392 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6393 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6394 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6395 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6396 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6397 		   priv->dma_cap.pps_out_num);
6398 	seq_printf(seq, "\tSafety Features: %s\n",
6399 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6400 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6401 		   priv->dma_cap.frpsel ? "Y" : "N");
6402 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6403 		   priv->dma_cap.host_dma_width);
6404 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6405 		   priv->dma_cap.rssen ? "Y" : "N");
6406 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6407 		   priv->dma_cap.vlhash ? "Y" : "N");
6408 	seq_printf(seq, "\tSplit Header: %s\n",
6409 		   priv->dma_cap.sphen ? "Y" : "N");
6410 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6411 		   priv->dma_cap.vlins ? "Y" : "N");
6412 	seq_printf(seq, "\tDouble VLAN: %s\n",
6413 		   priv->dma_cap.dvlan ? "Y" : "N");
6414 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6415 		   priv->dma_cap.l3l4fnum);
6416 	seq_printf(seq, "\tARP Offloading: %s\n",
6417 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6418 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6419 		   priv->dma_cap.estsel ? "Y" : "N");
6420 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6421 		   priv->dma_cap.fpesel ? "Y" : "N");
6422 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6423 		   priv->dma_cap.tbssel ? "Y" : "N");
6424 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6425 		   priv->dma_cap.tbs_ch_num);
6426 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6427 		   priv->dma_cap.sgfsel ? "Y" : "N");
6428 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6429 		   BIT(priv->dma_cap.ttsfd) >> 1);
6430 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6431 		   priv->dma_cap.numtc);
6432 	seq_printf(seq, "\tDCB Feature: %s\n",
6433 		   priv->dma_cap.dcben ? "Y" : "N");
6434 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6435 		   priv->dma_cap.advthword ? "Y" : "N");
6436 	seq_printf(seq, "\tPTP Offload: %s\n",
6437 		   priv->dma_cap.ptoen ? "Y" : "N");
6438 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6439 		   priv->dma_cap.osten ? "Y" : "N");
6440 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6441 		   priv->dma_cap.pfcen ? "Y" : "N");
6442 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6443 		   BIT(priv->dma_cap.frpes) << 6);
6444 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6445 		   BIT(priv->dma_cap.frpbs) << 6);
6446 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6447 		   priv->dma_cap.frppipe_num);
6448 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6449 		   priv->dma_cap.nrvf_num ?
6450 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6451 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6452 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6453 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6454 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6455 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6456 		   priv->dma_cap.cbtisel ? "Y" : "N");
6457 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6458 		   priv->dma_cap.aux_snapshot_n);
6459 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6460 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6461 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6462 		   priv->dma_cap.edma ? "Y" : "N");
6463 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6464 		   priv->dma_cap.ediffc ? "Y" : "N");
6465 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6466 		   priv->dma_cap.vxn ? "Y" : "N");
6467 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6468 		   priv->dma_cap.dbgmem ? "Y" : "N");
6469 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6470 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6471 	return 0;
6472 }
6473 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6474 
6475 /* Use network device events to rename debugfs file entries.
6476  */
6477 static int stmmac_device_event(struct notifier_block *unused,
6478 			       unsigned long event, void *ptr)
6479 {
6480 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6481 	struct stmmac_priv *priv = netdev_priv(dev);
6482 
6483 	if (dev->netdev_ops != &stmmac_netdev_ops)
6484 		goto done;
6485 
6486 	switch (event) {
6487 	case NETDEV_CHANGENAME:
6488 		if (priv->dbgfs_dir)
6489 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6490 							 priv->dbgfs_dir,
6491 							 stmmac_fs_dir,
6492 							 dev->name);
6493 		break;
6494 	}
6495 done:
6496 	return NOTIFY_DONE;
6497 }
6498 
6499 static struct notifier_block stmmac_notifier = {
6500 	.notifier_call = stmmac_device_event,
6501 };
6502 
6503 static void stmmac_init_fs(struct net_device *dev)
6504 {
6505 	struct stmmac_priv *priv = netdev_priv(dev);
6506 
6507 	rtnl_lock();
6508 
6509 	/* Create per netdev entries */
6510 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6511 
6512 	/* Entry to report DMA RX/TX rings */
6513 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6514 			    &stmmac_rings_status_fops);
6515 
6516 	/* Entry to report the DMA HW features */
6517 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6518 			    &stmmac_dma_cap_fops);
6519 
6520 	rtnl_unlock();
6521 }
6522 
6523 static void stmmac_exit_fs(struct net_device *dev)
6524 {
6525 	struct stmmac_priv *priv = netdev_priv(dev);
6526 
6527 	debugfs_remove_recursive(priv->dbgfs_dir);
6528 }
6529 #endif /* CONFIG_DEBUG_FS */
6530 
6531 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6532 {
6533 	unsigned char *data = (unsigned char *)&vid_le;
6534 	unsigned char data_byte = 0;
6535 	u32 crc = ~0x0;
6536 	u32 temp = 0;
6537 	int i, bits;
6538 
6539 	bits = get_bitmask_order(VLAN_VID_MASK);
6540 	for (i = 0; i < bits; i++) {
6541 		if ((i % 8) == 0)
6542 			data_byte = data[i / 8];
6543 
6544 		temp = ((crc & 1) ^ data_byte) & 1;
6545 		crc >>= 1;
6546 		data_byte >>= 1;
6547 
6548 		if (temp)
6549 			crc ^= 0xedb88320;
6550 	}
6551 
6552 	return crc;
6553 }
6554 
6555 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6556 {
6557 	u32 crc, hash = 0;
6558 	u16 pmatch = 0;
6559 	int count = 0;
6560 	u16 vid = 0;
6561 
6562 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6563 		__le16 vid_le = cpu_to_le16(vid);
6564 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6565 		hash |= (1 << crc);
6566 		count++;
6567 	}
6568 
6569 	if (!priv->dma_cap.vlhash) {
6570 		if (count > 2) /* VID = 0 always passes filter */
6571 			return -EOPNOTSUPP;
6572 
6573 		pmatch = vid;
6574 		hash = 0;
6575 	}
6576 
6577 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6578 }
6579 
6580 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6581 {
6582 	struct stmmac_priv *priv = netdev_priv(ndev);
6583 	bool is_double = false;
6584 	int ret;
6585 
6586 	ret = pm_runtime_resume_and_get(priv->device);
6587 	if (ret < 0)
6588 		return ret;
6589 
6590 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6591 		is_double = true;
6592 
6593 	set_bit(vid, priv->active_vlans);
6594 	ret = stmmac_vlan_update(priv, is_double);
6595 	if (ret) {
6596 		clear_bit(vid, priv->active_vlans);
6597 		goto err_pm_put;
6598 	}
6599 
6600 	if (priv->hw->num_vlan) {
6601 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6602 		if (ret)
6603 			goto err_pm_put;
6604 	}
6605 err_pm_put:
6606 	pm_runtime_put(priv->device);
6607 
6608 	return ret;
6609 }
6610 
6611 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6612 {
6613 	struct stmmac_priv *priv = netdev_priv(ndev);
6614 	bool is_double = false;
6615 	int ret;
6616 
6617 	ret = pm_runtime_resume_and_get(priv->device);
6618 	if (ret < 0)
6619 		return ret;
6620 
6621 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6622 		is_double = true;
6623 
6624 	clear_bit(vid, priv->active_vlans);
6625 
6626 	if (priv->hw->num_vlan) {
6627 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6628 		if (ret)
6629 			goto del_vlan_error;
6630 	}
6631 
6632 	ret = stmmac_vlan_update(priv, is_double);
6633 
6634 del_vlan_error:
6635 	pm_runtime_put(priv->device);
6636 
6637 	return ret;
6638 }
6639 
6640 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6641 {
6642 	struct stmmac_priv *priv = netdev_priv(dev);
6643 
6644 	switch (bpf->command) {
6645 	case XDP_SETUP_PROG:
6646 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6647 	case XDP_SETUP_XSK_POOL:
6648 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6649 					     bpf->xsk.queue_id);
6650 	default:
6651 		return -EOPNOTSUPP;
6652 	}
6653 }
6654 
6655 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6656 			   struct xdp_frame **frames, u32 flags)
6657 {
6658 	struct stmmac_priv *priv = netdev_priv(dev);
6659 	int cpu = smp_processor_id();
6660 	struct netdev_queue *nq;
6661 	int i, nxmit = 0;
6662 	int queue;
6663 
6664 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6665 		return -ENETDOWN;
6666 
6667 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6668 		return -EINVAL;
6669 
6670 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6671 	nq = netdev_get_tx_queue(priv->dev, queue);
6672 
6673 	__netif_tx_lock(nq, cpu);
6674 	/* Avoids TX time-out as we are sharing with slow path */
6675 	txq_trans_cond_update(nq);
6676 
6677 	for (i = 0; i < num_frames; i++) {
6678 		int res;
6679 
6680 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6681 		if (res == STMMAC_XDP_CONSUMED)
6682 			break;
6683 
6684 		nxmit++;
6685 	}
6686 
6687 	if (flags & XDP_XMIT_FLUSH) {
6688 		stmmac_flush_tx_descriptors(priv, queue);
6689 		stmmac_tx_timer_arm(priv, queue);
6690 	}
6691 
6692 	__netif_tx_unlock(nq);
6693 
6694 	return nxmit;
6695 }
6696 
6697 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6698 {
6699 	struct stmmac_channel *ch = &priv->channel[queue];
6700 	unsigned long flags;
6701 
6702 	spin_lock_irqsave(&ch->lock, flags);
6703 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6704 	spin_unlock_irqrestore(&ch->lock, flags);
6705 
6706 	stmmac_stop_rx_dma(priv, queue);
6707 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6708 }
6709 
6710 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6711 {
6712 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6713 	struct stmmac_channel *ch = &priv->channel[queue];
6714 	unsigned long flags;
6715 	u32 buf_size;
6716 	int ret;
6717 
6718 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6719 	if (ret) {
6720 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6721 		return;
6722 	}
6723 
6724 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6725 	if (ret) {
6726 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6727 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6728 		return;
6729 	}
6730 
6731 	stmmac_reset_rx_queue(priv, queue);
6732 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6733 
6734 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6735 			    rx_q->dma_rx_phy, rx_q->queue_index);
6736 
6737 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6738 			     sizeof(struct dma_desc));
6739 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6740 			       rx_q->rx_tail_addr, rx_q->queue_index);
6741 
6742 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6743 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6744 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6745 				      buf_size,
6746 				      rx_q->queue_index);
6747 	} else {
6748 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6749 				      priv->dma_conf.dma_buf_sz,
6750 				      rx_q->queue_index);
6751 	}
6752 
6753 	stmmac_start_rx_dma(priv, queue);
6754 
6755 	spin_lock_irqsave(&ch->lock, flags);
6756 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6757 	spin_unlock_irqrestore(&ch->lock, flags);
6758 }
6759 
6760 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6761 {
6762 	struct stmmac_channel *ch = &priv->channel[queue];
6763 	unsigned long flags;
6764 
6765 	spin_lock_irqsave(&ch->lock, flags);
6766 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6767 	spin_unlock_irqrestore(&ch->lock, flags);
6768 
6769 	stmmac_stop_tx_dma(priv, queue);
6770 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6771 }
6772 
6773 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6774 {
6775 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6776 	struct stmmac_channel *ch = &priv->channel[queue];
6777 	unsigned long flags;
6778 	int ret;
6779 
6780 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6781 	if (ret) {
6782 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6783 		return;
6784 	}
6785 
6786 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6787 	if (ret) {
6788 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6789 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6790 		return;
6791 	}
6792 
6793 	stmmac_reset_tx_queue(priv, queue);
6794 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6795 
6796 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6797 			    tx_q->dma_tx_phy, tx_q->queue_index);
6798 
6799 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6800 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6801 
6802 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6803 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6804 			       tx_q->tx_tail_addr, tx_q->queue_index);
6805 
6806 	stmmac_start_tx_dma(priv, queue);
6807 
6808 	spin_lock_irqsave(&ch->lock, flags);
6809 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6810 	spin_unlock_irqrestore(&ch->lock, flags);
6811 }
6812 
6813 void stmmac_xdp_release(struct net_device *dev)
6814 {
6815 	struct stmmac_priv *priv = netdev_priv(dev);
6816 	u32 chan;
6817 
6818 	/* Ensure tx function is not running */
6819 	netif_tx_disable(dev);
6820 
6821 	/* Disable NAPI process */
6822 	stmmac_disable_all_queues(priv);
6823 
6824 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6825 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6826 
6827 	/* Free the IRQ lines */
6828 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6829 
6830 	/* Stop TX/RX DMA channels */
6831 	stmmac_stop_all_dma(priv);
6832 
6833 	/* Release and free the Rx/Tx resources */
6834 	free_dma_desc_resources(priv, &priv->dma_conf);
6835 
6836 	/* Disable the MAC Rx/Tx */
6837 	stmmac_mac_set(priv, priv->ioaddr, false);
6838 
6839 	/* set trans_start so we don't get spurious
6840 	 * watchdogs during reset
6841 	 */
6842 	netif_trans_update(dev);
6843 	netif_carrier_off(dev);
6844 }
6845 
6846 int stmmac_xdp_open(struct net_device *dev)
6847 {
6848 	struct stmmac_priv *priv = netdev_priv(dev);
6849 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6850 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6851 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6852 	struct stmmac_rx_queue *rx_q;
6853 	struct stmmac_tx_queue *tx_q;
6854 	u32 buf_size;
6855 	bool sph_en;
6856 	u32 chan;
6857 	int ret;
6858 
6859 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6860 	if (ret < 0) {
6861 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6862 			   __func__);
6863 		goto dma_desc_error;
6864 	}
6865 
6866 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6867 	if (ret < 0) {
6868 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6869 			   __func__);
6870 		goto init_error;
6871 	}
6872 
6873 	stmmac_reset_queues_param(priv);
6874 
6875 	/* DMA CSR Channel configuration */
6876 	for (chan = 0; chan < dma_csr_ch; chan++) {
6877 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6878 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6879 	}
6880 
6881 	/* Adjust Split header */
6882 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6883 
6884 	/* DMA RX Channel Configuration */
6885 	for (chan = 0; chan < rx_cnt; chan++) {
6886 		rx_q = &priv->dma_conf.rx_queue[chan];
6887 
6888 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6889 				    rx_q->dma_rx_phy, chan);
6890 
6891 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6892 				     (rx_q->buf_alloc_num *
6893 				      sizeof(struct dma_desc));
6894 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6895 				       rx_q->rx_tail_addr, chan);
6896 
6897 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6898 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6899 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6900 					      buf_size,
6901 					      rx_q->queue_index);
6902 		} else {
6903 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6904 					      priv->dma_conf.dma_buf_sz,
6905 					      rx_q->queue_index);
6906 		}
6907 
6908 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6909 	}
6910 
6911 	/* DMA TX Channel Configuration */
6912 	for (chan = 0; chan < tx_cnt; chan++) {
6913 		tx_q = &priv->dma_conf.tx_queue[chan];
6914 
6915 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6916 				    tx_q->dma_tx_phy, chan);
6917 
6918 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6919 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6920 				       tx_q->tx_tail_addr, chan);
6921 
6922 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6923 		tx_q->txtimer.function = stmmac_tx_timer;
6924 	}
6925 
6926 	/* Enable the MAC Rx/Tx */
6927 	stmmac_mac_set(priv, priv->ioaddr, true);
6928 
6929 	/* Start Rx & Tx DMA Channels */
6930 	stmmac_start_all_dma(priv);
6931 
6932 	ret = stmmac_request_irq(dev);
6933 	if (ret)
6934 		goto irq_error;
6935 
6936 	/* Enable NAPI process*/
6937 	stmmac_enable_all_queues(priv);
6938 	netif_carrier_on(dev);
6939 	netif_tx_start_all_queues(dev);
6940 	stmmac_enable_all_dma_irq(priv);
6941 
6942 	return 0;
6943 
6944 irq_error:
6945 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6946 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6947 
6948 	stmmac_hw_teardown(dev);
6949 init_error:
6950 	free_dma_desc_resources(priv, &priv->dma_conf);
6951 dma_desc_error:
6952 	return ret;
6953 }
6954 
6955 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6956 {
6957 	struct stmmac_priv *priv = netdev_priv(dev);
6958 	struct stmmac_rx_queue *rx_q;
6959 	struct stmmac_tx_queue *tx_q;
6960 	struct stmmac_channel *ch;
6961 
6962 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6963 	    !netif_carrier_ok(priv->dev))
6964 		return -ENETDOWN;
6965 
6966 	if (!stmmac_xdp_is_enabled(priv))
6967 		return -EINVAL;
6968 
6969 	if (queue >= priv->plat->rx_queues_to_use ||
6970 	    queue >= priv->plat->tx_queues_to_use)
6971 		return -EINVAL;
6972 
6973 	rx_q = &priv->dma_conf.rx_queue[queue];
6974 	tx_q = &priv->dma_conf.tx_queue[queue];
6975 	ch = &priv->channel[queue];
6976 
6977 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6978 		return -EINVAL;
6979 
6980 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6981 		/* EQoS does not have per-DMA channel SW interrupt,
6982 		 * so we schedule RX Napi straight-away.
6983 		 */
6984 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6985 			__napi_schedule(&ch->rxtx_napi);
6986 	}
6987 
6988 	return 0;
6989 }
6990 
6991 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6992 {
6993 	struct stmmac_priv *priv = netdev_priv(dev);
6994 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6995 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6996 	unsigned int start;
6997 	int q;
6998 
6999 	for (q = 0; q < tx_cnt; q++) {
7000 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7001 		u64 tx_packets;
7002 		u64 tx_bytes;
7003 
7004 		do {
7005 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7006 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7007 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7008 		do {
7009 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7010 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7011 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7012 
7013 		stats->tx_packets += tx_packets;
7014 		stats->tx_bytes += tx_bytes;
7015 	}
7016 
7017 	for (q = 0; q < rx_cnt; q++) {
7018 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7019 		u64 rx_packets;
7020 		u64 rx_bytes;
7021 
7022 		do {
7023 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7024 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7025 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7026 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7027 
7028 		stats->rx_packets += rx_packets;
7029 		stats->rx_bytes += rx_bytes;
7030 	}
7031 
7032 	stats->rx_dropped = priv->xstats.rx_dropped;
7033 	stats->rx_errors = priv->xstats.rx_errors;
7034 	stats->tx_dropped = priv->xstats.tx_dropped;
7035 	stats->tx_errors = priv->xstats.tx_errors;
7036 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7037 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7038 	stats->rx_length_errors = priv->xstats.rx_length;
7039 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7040 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7041 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7042 }
7043 
7044 static const struct net_device_ops stmmac_netdev_ops = {
7045 	.ndo_open = stmmac_open,
7046 	.ndo_start_xmit = stmmac_xmit,
7047 	.ndo_stop = stmmac_release,
7048 	.ndo_change_mtu = stmmac_change_mtu,
7049 	.ndo_fix_features = stmmac_fix_features,
7050 	.ndo_set_features = stmmac_set_features,
7051 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7052 	.ndo_tx_timeout = stmmac_tx_timeout,
7053 	.ndo_eth_ioctl = stmmac_ioctl,
7054 	.ndo_get_stats64 = stmmac_get_stats64,
7055 	.ndo_setup_tc = stmmac_setup_tc,
7056 	.ndo_select_queue = stmmac_select_queue,
7057 	.ndo_set_mac_address = stmmac_set_mac_address,
7058 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7059 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7060 	.ndo_bpf = stmmac_bpf,
7061 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7062 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7063 };
7064 
7065 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7066 {
7067 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7068 		return;
7069 	if (test_bit(STMMAC_DOWN, &priv->state))
7070 		return;
7071 
7072 	netdev_err(priv->dev, "Reset adapter.\n");
7073 
7074 	rtnl_lock();
7075 	netif_trans_update(priv->dev);
7076 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7077 		usleep_range(1000, 2000);
7078 
7079 	set_bit(STMMAC_DOWN, &priv->state);
7080 	dev_close(priv->dev);
7081 	dev_open(priv->dev, NULL);
7082 	clear_bit(STMMAC_DOWN, &priv->state);
7083 	clear_bit(STMMAC_RESETING, &priv->state);
7084 	rtnl_unlock();
7085 }
7086 
7087 static void stmmac_service_task(struct work_struct *work)
7088 {
7089 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7090 			service_task);
7091 
7092 	stmmac_reset_subtask(priv);
7093 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7094 }
7095 
7096 /**
7097  *  stmmac_hw_init - Init the MAC device
7098  *  @priv: driver private structure
7099  *  Description: this function is to configure the MAC device according to
7100  *  some platform parameters or the HW capability register. It prepares the
7101  *  driver to use either ring or chain modes and to setup either enhanced or
7102  *  normal descriptors.
7103  */
7104 static int stmmac_hw_init(struct stmmac_priv *priv)
7105 {
7106 	int ret;
7107 
7108 	/* dwmac-sun8i only work in chain mode */
7109 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7110 		chain_mode = 1;
7111 	priv->chain_mode = chain_mode;
7112 
7113 	/* Initialize HW Interface */
7114 	ret = stmmac_hwif_init(priv);
7115 	if (ret)
7116 		return ret;
7117 
7118 	/* Get the HW capability (new GMAC newer than 3.50a) */
7119 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7120 	if (priv->hw_cap_support) {
7121 		dev_info(priv->device, "DMA HW capability register supported\n");
7122 
7123 		/* We can override some gmac/dma configuration fields: e.g.
7124 		 * enh_desc, tx_coe (e.g. that are passed through the
7125 		 * platform) with the values from the HW capability
7126 		 * register (if supported).
7127 		 */
7128 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7129 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7130 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7131 		priv->hw->pmt = priv->plat->pmt;
7132 		if (priv->dma_cap.hash_tb_sz) {
7133 			priv->hw->multicast_filter_bins =
7134 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7135 			priv->hw->mcast_bits_log2 =
7136 					ilog2(priv->hw->multicast_filter_bins);
7137 		}
7138 
7139 		/* TXCOE doesn't work in thresh DMA mode */
7140 		if (priv->plat->force_thresh_dma_mode)
7141 			priv->plat->tx_coe = 0;
7142 		else
7143 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7144 
7145 		/* In case of GMAC4 rx_coe is from HW cap register. */
7146 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7147 
7148 		if (priv->dma_cap.rx_coe_type2)
7149 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7150 		else if (priv->dma_cap.rx_coe_type1)
7151 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7152 
7153 	} else {
7154 		dev_info(priv->device, "No HW DMA feature register supported\n");
7155 	}
7156 
7157 	if (priv->plat->rx_coe) {
7158 		priv->hw->rx_csum = priv->plat->rx_coe;
7159 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7160 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7161 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7162 	}
7163 	if (priv->plat->tx_coe)
7164 		dev_info(priv->device, "TX Checksum insertion supported\n");
7165 
7166 	if (priv->plat->pmt) {
7167 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7168 		device_set_wakeup_capable(priv->device, 1);
7169 	}
7170 
7171 	if (priv->dma_cap.tsoen)
7172 		dev_info(priv->device, "TSO supported\n");
7173 
7174 	priv->hw->vlan_fail_q_en =
7175 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7176 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7177 
7178 	/* Run HW quirks, if any */
7179 	if (priv->hwif_quirks) {
7180 		ret = priv->hwif_quirks(priv);
7181 		if (ret)
7182 			return ret;
7183 	}
7184 
7185 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7186 	 * In some case, for example on bugged HW this feature
7187 	 * has to be disable and this can be done by passing the
7188 	 * riwt_off field from the platform.
7189 	 */
7190 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7191 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7192 		priv->use_riwt = 1;
7193 		dev_info(priv->device,
7194 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7195 	}
7196 
7197 	return 0;
7198 }
7199 
7200 static void stmmac_napi_add(struct net_device *dev)
7201 {
7202 	struct stmmac_priv *priv = netdev_priv(dev);
7203 	u32 queue, maxq;
7204 
7205 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7206 
7207 	for (queue = 0; queue < maxq; queue++) {
7208 		struct stmmac_channel *ch = &priv->channel[queue];
7209 
7210 		ch->priv_data = priv;
7211 		ch->index = queue;
7212 		spin_lock_init(&ch->lock);
7213 
7214 		if (queue < priv->plat->rx_queues_to_use) {
7215 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7216 		}
7217 		if (queue < priv->plat->tx_queues_to_use) {
7218 			netif_napi_add_tx(dev, &ch->tx_napi,
7219 					  stmmac_napi_poll_tx);
7220 		}
7221 		if (queue < priv->plat->rx_queues_to_use &&
7222 		    queue < priv->plat->tx_queues_to_use) {
7223 			netif_napi_add(dev, &ch->rxtx_napi,
7224 				       stmmac_napi_poll_rxtx);
7225 		}
7226 	}
7227 }
7228 
7229 static void stmmac_napi_del(struct net_device *dev)
7230 {
7231 	struct stmmac_priv *priv = netdev_priv(dev);
7232 	u32 queue, maxq;
7233 
7234 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7235 
7236 	for (queue = 0; queue < maxq; queue++) {
7237 		struct stmmac_channel *ch = &priv->channel[queue];
7238 
7239 		if (queue < priv->plat->rx_queues_to_use)
7240 			netif_napi_del(&ch->rx_napi);
7241 		if (queue < priv->plat->tx_queues_to_use)
7242 			netif_napi_del(&ch->tx_napi);
7243 		if (queue < priv->plat->rx_queues_to_use &&
7244 		    queue < priv->plat->tx_queues_to_use) {
7245 			netif_napi_del(&ch->rxtx_napi);
7246 		}
7247 	}
7248 }
7249 
7250 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7251 {
7252 	struct stmmac_priv *priv = netdev_priv(dev);
7253 	int ret = 0, i;
7254 
7255 	if (netif_running(dev))
7256 		stmmac_release(dev);
7257 
7258 	stmmac_napi_del(dev);
7259 
7260 	priv->plat->rx_queues_to_use = rx_cnt;
7261 	priv->plat->tx_queues_to_use = tx_cnt;
7262 	if (!netif_is_rxfh_configured(dev))
7263 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7264 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7265 									rx_cnt);
7266 
7267 	stmmac_napi_add(dev);
7268 
7269 	if (netif_running(dev))
7270 		ret = stmmac_open(dev);
7271 
7272 	return ret;
7273 }
7274 
7275 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7276 {
7277 	struct stmmac_priv *priv = netdev_priv(dev);
7278 	int ret = 0;
7279 
7280 	if (netif_running(dev))
7281 		stmmac_release(dev);
7282 
7283 	priv->dma_conf.dma_rx_size = rx_size;
7284 	priv->dma_conf.dma_tx_size = tx_size;
7285 
7286 	if (netif_running(dev))
7287 		ret = stmmac_open(dev);
7288 
7289 	return ret;
7290 }
7291 
7292 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7293 {
7294 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7295 	struct dma_desc *desc_contains_ts = ctx->desc;
7296 	struct stmmac_priv *priv = ctx->priv;
7297 	struct dma_desc *ndesc = ctx->ndesc;
7298 	struct dma_desc *desc = ctx->desc;
7299 	u64 ns = 0;
7300 
7301 	if (!priv->hwts_rx_en)
7302 		return -ENODATA;
7303 
7304 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7305 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7306 		desc_contains_ts = ndesc;
7307 
7308 	/* Check if timestamp is available */
7309 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7310 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7311 		ns -= priv->plat->cdc_error_adj;
7312 		*timestamp = ns_to_ktime(ns);
7313 		return 0;
7314 	}
7315 
7316 	return -ENODATA;
7317 }
7318 
7319 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7320 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7321 };
7322 
7323 /**
7324  * stmmac_dvr_probe
7325  * @device: device pointer
7326  * @plat_dat: platform data pointer
7327  * @res: stmmac resource pointer
7328  * Description: this is the main probe function used to
7329  * call the alloc_etherdev, allocate the priv structure.
7330  * Return:
7331  * returns 0 on success, otherwise errno.
7332  */
7333 int stmmac_dvr_probe(struct device *device,
7334 		     struct plat_stmmacenet_data *plat_dat,
7335 		     struct stmmac_resources *res)
7336 {
7337 	struct net_device *ndev = NULL;
7338 	struct stmmac_priv *priv;
7339 	u32 rxq;
7340 	int i, ret = 0;
7341 
7342 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7343 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7344 	if (!ndev)
7345 		return -ENOMEM;
7346 
7347 	SET_NETDEV_DEV(ndev, device);
7348 
7349 	priv = netdev_priv(ndev);
7350 	priv->device = device;
7351 	priv->dev = ndev;
7352 
7353 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7354 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7355 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7356 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7357 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7358 	}
7359 
7360 	priv->xstats.pcpu_stats =
7361 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7362 	if (!priv->xstats.pcpu_stats)
7363 		return -ENOMEM;
7364 
7365 	stmmac_set_ethtool_ops(ndev);
7366 	priv->pause = pause;
7367 	priv->plat = plat_dat;
7368 	priv->ioaddr = res->addr;
7369 	priv->dev->base_addr = (unsigned long)res->addr;
7370 	priv->plat->dma_cfg->multi_msi_en =
7371 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7372 
7373 	priv->dev->irq = res->irq;
7374 	priv->wol_irq = res->wol_irq;
7375 	priv->lpi_irq = res->lpi_irq;
7376 	priv->sfty_irq = res->sfty_irq;
7377 	priv->sfty_ce_irq = res->sfty_ce_irq;
7378 	priv->sfty_ue_irq = res->sfty_ue_irq;
7379 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7380 		priv->rx_irq[i] = res->rx_irq[i];
7381 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7382 		priv->tx_irq[i] = res->tx_irq[i];
7383 
7384 	if (!is_zero_ether_addr(res->mac))
7385 		eth_hw_addr_set(priv->dev, res->mac);
7386 
7387 	dev_set_drvdata(device, priv->dev);
7388 
7389 	/* Verify driver arguments */
7390 	stmmac_verify_args();
7391 
7392 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7393 	if (!priv->af_xdp_zc_qps)
7394 		return -ENOMEM;
7395 
7396 	/* Allocate workqueue */
7397 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7398 	if (!priv->wq) {
7399 		dev_err(priv->device, "failed to create workqueue\n");
7400 		ret = -ENOMEM;
7401 		goto error_wq_init;
7402 	}
7403 
7404 	INIT_WORK(&priv->service_task, stmmac_service_task);
7405 
7406 	/* Override with kernel parameters if supplied XXX CRS XXX
7407 	 * this needs to have multiple instances
7408 	 */
7409 	if ((phyaddr >= 0) && (phyaddr <= 31))
7410 		priv->plat->phy_addr = phyaddr;
7411 
7412 	if (priv->plat->stmmac_rst) {
7413 		ret = reset_control_assert(priv->plat->stmmac_rst);
7414 		reset_control_deassert(priv->plat->stmmac_rst);
7415 		/* Some reset controllers have only reset callback instead of
7416 		 * assert + deassert callbacks pair.
7417 		 */
7418 		if (ret == -ENOTSUPP)
7419 			reset_control_reset(priv->plat->stmmac_rst);
7420 	}
7421 
7422 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7423 	if (ret == -ENOTSUPP)
7424 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7425 			ERR_PTR(ret));
7426 
7427 	/* Wait a bit for the reset to take effect */
7428 	udelay(10);
7429 
7430 	/* Init MAC and get the capabilities */
7431 	ret = stmmac_hw_init(priv);
7432 	if (ret)
7433 		goto error_hw_init;
7434 
7435 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7436 	 */
7437 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7438 		priv->plat->dma_cfg->dche = false;
7439 
7440 	stmmac_check_ether_addr(priv);
7441 
7442 	ndev->netdev_ops = &stmmac_netdev_ops;
7443 
7444 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7445 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7446 
7447 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7448 			    NETIF_F_RXCSUM;
7449 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7450 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7451 
7452 	ret = stmmac_tc_init(priv, priv);
7453 	if (!ret) {
7454 		ndev->hw_features |= NETIF_F_HW_TC;
7455 	}
7456 
7457 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7458 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7459 		if (priv->plat->has_gmac4)
7460 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7461 		priv->tso = true;
7462 		dev_info(priv->device, "TSO feature enabled\n");
7463 	}
7464 
7465 	if (priv->dma_cap.sphen &&
7466 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7467 		ndev->hw_features |= NETIF_F_GRO;
7468 		priv->sph_cap = true;
7469 		priv->sph = priv->sph_cap;
7470 		dev_info(priv->device, "SPH feature enabled\n");
7471 	}
7472 
7473 	/* Ideally our host DMA address width is the same as for the
7474 	 * device. However, it may differ and then we have to use our
7475 	 * host DMA width for allocation and the device DMA width for
7476 	 * register handling.
7477 	 */
7478 	if (priv->plat->host_dma_width)
7479 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7480 	else
7481 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7482 
7483 	if (priv->dma_cap.host_dma_width) {
7484 		ret = dma_set_mask_and_coherent(device,
7485 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7486 		if (!ret) {
7487 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7488 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7489 
7490 			/*
7491 			 * If more than 32 bits can be addressed, make sure to
7492 			 * enable enhanced addressing mode.
7493 			 */
7494 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7495 				priv->plat->dma_cfg->eame = true;
7496 		} else {
7497 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7498 			if (ret) {
7499 				dev_err(priv->device, "Failed to set DMA Mask\n");
7500 				goto error_hw_init;
7501 			}
7502 
7503 			priv->dma_cap.host_dma_width = 32;
7504 		}
7505 	}
7506 
7507 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7508 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7509 #ifdef STMMAC_VLAN_TAG_USED
7510 	/* Both mac100 and gmac support receive VLAN tag detection */
7511 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7512 	if (priv->plat->has_gmac4) {
7513 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7514 		priv->hw->hw_vlan_en = true;
7515 	}
7516 	if (priv->dma_cap.vlhash) {
7517 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7518 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7519 	}
7520 	if (priv->dma_cap.vlins) {
7521 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7522 		if (priv->dma_cap.dvlan)
7523 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7524 	}
7525 #endif
7526 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7527 
7528 	priv->xstats.threshold = tc;
7529 
7530 	/* Initialize RSS */
7531 	rxq = priv->plat->rx_queues_to_use;
7532 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7533 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7534 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7535 
7536 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7537 		ndev->features |= NETIF_F_RXHASH;
7538 
7539 	ndev->vlan_features |= ndev->features;
7540 
7541 	/* MTU range: 46 - hw-specific max */
7542 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7543 	if (priv->plat->has_xgmac)
7544 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7545 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7546 		ndev->max_mtu = JUMBO_LEN;
7547 	else
7548 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7549 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7550 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7551 	 */
7552 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7553 	    (priv->plat->maxmtu >= ndev->min_mtu))
7554 		ndev->max_mtu = priv->plat->maxmtu;
7555 	else if (priv->plat->maxmtu < ndev->min_mtu)
7556 		dev_warn(priv->device,
7557 			 "%s: warning: maxmtu having invalid value (%d)\n",
7558 			 __func__, priv->plat->maxmtu);
7559 
7560 	if (flow_ctrl)
7561 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7562 
7563 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7564 
7565 	/* Setup channels NAPI */
7566 	stmmac_napi_add(ndev);
7567 
7568 	mutex_init(&priv->lock);
7569 
7570 	stmmac_fpe_init(priv);
7571 
7572 	/* If a specific clk_csr value is passed from the platform
7573 	 * this means that the CSR Clock Range selection cannot be
7574 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7575 	 * set the MDC clock dynamically according to the csr actual
7576 	 * clock input.
7577 	 */
7578 	if (priv->plat->clk_csr >= 0)
7579 		priv->clk_csr = priv->plat->clk_csr;
7580 	else
7581 		stmmac_clk_csr_set(priv);
7582 
7583 	stmmac_check_pcs_mode(priv);
7584 
7585 	pm_runtime_get_noresume(device);
7586 	pm_runtime_set_active(device);
7587 	if (!pm_runtime_enabled(device))
7588 		pm_runtime_enable(device);
7589 
7590 	ret = stmmac_mdio_register(ndev);
7591 	if (ret < 0) {
7592 		dev_err_probe(priv->device, ret,
7593 			      "MDIO bus (id: %d) registration failed\n",
7594 			      priv->plat->bus_id);
7595 		goto error_mdio_register;
7596 	}
7597 
7598 	if (priv->plat->speed_mode_2500)
7599 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7600 
7601 	ret = stmmac_pcs_setup(ndev);
7602 	if (ret)
7603 		goto error_pcs_setup;
7604 
7605 	ret = stmmac_phy_setup(priv);
7606 	if (ret) {
7607 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7608 		goto error_phy_setup;
7609 	}
7610 
7611 	ret = register_netdev(ndev);
7612 	if (ret) {
7613 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7614 			__func__, ret);
7615 		goto error_netdev_register;
7616 	}
7617 
7618 #ifdef CONFIG_DEBUG_FS
7619 	stmmac_init_fs(ndev);
7620 #endif
7621 
7622 	if (priv->plat->dump_debug_regs)
7623 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7624 
7625 	/* Let pm_runtime_put() disable the clocks.
7626 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7627 	 */
7628 	pm_runtime_put(device);
7629 
7630 	return ret;
7631 
7632 error_netdev_register:
7633 	phylink_destroy(priv->phylink);
7634 error_phy_setup:
7635 	stmmac_pcs_clean(ndev);
7636 error_pcs_setup:
7637 	stmmac_mdio_unregister(ndev);
7638 error_mdio_register:
7639 	stmmac_napi_del(ndev);
7640 error_hw_init:
7641 	destroy_workqueue(priv->wq);
7642 error_wq_init:
7643 	bitmap_free(priv->af_xdp_zc_qps);
7644 
7645 	return ret;
7646 }
7647 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7648 
7649 /**
7650  * stmmac_dvr_remove
7651  * @dev: device pointer
7652  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7653  * changes the link status, releases the DMA descriptor rings.
7654  */
7655 void stmmac_dvr_remove(struct device *dev)
7656 {
7657 	struct net_device *ndev = dev_get_drvdata(dev);
7658 	struct stmmac_priv *priv = netdev_priv(ndev);
7659 
7660 	netdev_info(priv->dev, "%s: removing driver", __func__);
7661 
7662 	pm_runtime_get_sync(dev);
7663 
7664 	stmmac_stop_all_dma(priv);
7665 	stmmac_mac_set(priv, priv->ioaddr, false);
7666 	unregister_netdev(ndev);
7667 
7668 #ifdef CONFIG_DEBUG_FS
7669 	stmmac_exit_fs(ndev);
7670 #endif
7671 	phylink_destroy(priv->phylink);
7672 	if (priv->plat->stmmac_rst)
7673 		reset_control_assert(priv->plat->stmmac_rst);
7674 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7675 
7676 	stmmac_pcs_clean(ndev);
7677 	stmmac_mdio_unregister(ndev);
7678 
7679 	destroy_workqueue(priv->wq);
7680 	mutex_destroy(&priv->lock);
7681 	bitmap_free(priv->af_xdp_zc_qps);
7682 
7683 	pm_runtime_disable(dev);
7684 	pm_runtime_put_noidle(dev);
7685 }
7686 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7687 
7688 /**
7689  * stmmac_suspend - suspend callback
7690  * @dev: device pointer
7691  * Description: this is the function to suspend the device and it is called
7692  * by the platform driver to stop the network queue, release the resources,
7693  * program the PMT register (for WoL), clean and release driver resources.
7694  */
7695 int stmmac_suspend(struct device *dev)
7696 {
7697 	struct net_device *ndev = dev_get_drvdata(dev);
7698 	struct stmmac_priv *priv = netdev_priv(ndev);
7699 	u32 chan;
7700 
7701 	if (!ndev || !netif_running(ndev))
7702 		return 0;
7703 
7704 	mutex_lock(&priv->lock);
7705 
7706 	netif_device_detach(ndev);
7707 
7708 	stmmac_disable_all_queues(priv);
7709 
7710 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7711 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7712 
7713 	if (priv->eee_enabled) {
7714 		priv->tx_path_in_lpi_mode = false;
7715 		del_timer_sync(&priv->eee_ctrl_timer);
7716 	}
7717 
7718 	/* Stop TX/RX DMA */
7719 	stmmac_stop_all_dma(priv);
7720 
7721 	if (priv->plat->serdes_powerdown)
7722 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7723 
7724 	/* Enable Power down mode by programming the PMT regs */
7725 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7726 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7727 		priv->irq_wake = 1;
7728 	} else {
7729 		stmmac_mac_set(priv, priv->ioaddr, false);
7730 		pinctrl_pm_select_sleep_state(priv->device);
7731 	}
7732 
7733 	mutex_unlock(&priv->lock);
7734 
7735 	rtnl_lock();
7736 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7737 		phylink_suspend(priv->phylink, true);
7738 	} else {
7739 		if (device_may_wakeup(priv->device))
7740 			phylink_speed_down(priv->phylink, false);
7741 		phylink_suspend(priv->phylink, false);
7742 	}
7743 	rtnl_unlock();
7744 
7745 	if (stmmac_fpe_supported(priv))
7746 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7747 
7748 	priv->speed = SPEED_UNKNOWN;
7749 	return 0;
7750 }
7751 EXPORT_SYMBOL_GPL(stmmac_suspend);
7752 
7753 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7754 {
7755 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7756 
7757 	rx_q->cur_rx = 0;
7758 	rx_q->dirty_rx = 0;
7759 }
7760 
7761 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7762 {
7763 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7764 
7765 	tx_q->cur_tx = 0;
7766 	tx_q->dirty_tx = 0;
7767 	tx_q->mss = 0;
7768 
7769 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7770 }
7771 
7772 /**
7773  * stmmac_reset_queues_param - reset queue parameters
7774  * @priv: device pointer
7775  */
7776 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7777 {
7778 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7779 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7780 	u32 queue;
7781 
7782 	for (queue = 0; queue < rx_cnt; queue++)
7783 		stmmac_reset_rx_queue(priv, queue);
7784 
7785 	for (queue = 0; queue < tx_cnt; queue++)
7786 		stmmac_reset_tx_queue(priv, queue);
7787 }
7788 
7789 /**
7790  * stmmac_resume - resume callback
7791  * @dev: device pointer
7792  * Description: when resume this function is invoked to setup the DMA and CORE
7793  * in a usable state.
7794  */
7795 int stmmac_resume(struct device *dev)
7796 {
7797 	struct net_device *ndev = dev_get_drvdata(dev);
7798 	struct stmmac_priv *priv = netdev_priv(ndev);
7799 	int ret;
7800 
7801 	if (!netif_running(ndev))
7802 		return 0;
7803 
7804 	/* Power Down bit, into the PM register, is cleared
7805 	 * automatically as soon as a magic packet or a Wake-up frame
7806 	 * is received. Anyway, it's better to manually clear
7807 	 * this bit because it can generate problems while resuming
7808 	 * from another devices (e.g. serial console).
7809 	 */
7810 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7811 		mutex_lock(&priv->lock);
7812 		stmmac_pmt(priv, priv->hw, 0);
7813 		mutex_unlock(&priv->lock);
7814 		priv->irq_wake = 0;
7815 	} else {
7816 		pinctrl_pm_select_default_state(priv->device);
7817 		/* reset the phy so that it's ready */
7818 		if (priv->mii)
7819 			stmmac_mdio_reset(priv->mii);
7820 	}
7821 
7822 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7823 	    priv->plat->serdes_powerup) {
7824 		ret = priv->plat->serdes_powerup(ndev,
7825 						 priv->plat->bsp_priv);
7826 
7827 		if (ret < 0)
7828 			return ret;
7829 	}
7830 
7831 	rtnl_lock();
7832 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7833 		phylink_resume(priv->phylink);
7834 	} else {
7835 		phylink_resume(priv->phylink);
7836 		if (device_may_wakeup(priv->device))
7837 			phylink_speed_up(priv->phylink);
7838 	}
7839 	rtnl_unlock();
7840 
7841 	rtnl_lock();
7842 	mutex_lock(&priv->lock);
7843 
7844 	stmmac_reset_queues_param(priv);
7845 
7846 	stmmac_free_tx_skbufs(priv);
7847 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7848 
7849 	stmmac_hw_setup(ndev, false);
7850 	stmmac_init_coalesce(priv);
7851 	stmmac_set_rx_mode(ndev);
7852 
7853 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7854 
7855 	stmmac_enable_all_queues(priv);
7856 	stmmac_enable_all_dma_irq(priv);
7857 
7858 	mutex_unlock(&priv->lock);
7859 	rtnl_unlock();
7860 
7861 	netif_device_attach(ndev);
7862 
7863 	return 0;
7864 }
7865 EXPORT_SYMBOL_GPL(stmmac_resume);
7866 
7867 #ifndef MODULE
7868 static int __init stmmac_cmdline_opt(char *str)
7869 {
7870 	char *opt;
7871 
7872 	if (!str || !*str)
7873 		return 1;
7874 	while ((opt = strsep(&str, ",")) != NULL) {
7875 		if (!strncmp(opt, "debug:", 6)) {
7876 			if (kstrtoint(opt + 6, 0, &debug))
7877 				goto err;
7878 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7879 			if (kstrtoint(opt + 8, 0, &phyaddr))
7880 				goto err;
7881 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7882 			if (kstrtoint(opt + 7, 0, &buf_sz))
7883 				goto err;
7884 		} else if (!strncmp(opt, "tc:", 3)) {
7885 			if (kstrtoint(opt + 3, 0, &tc))
7886 				goto err;
7887 		} else if (!strncmp(opt, "watchdog:", 9)) {
7888 			if (kstrtoint(opt + 9, 0, &watchdog))
7889 				goto err;
7890 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7891 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7892 				goto err;
7893 		} else if (!strncmp(opt, "pause:", 6)) {
7894 			if (kstrtoint(opt + 6, 0, &pause))
7895 				goto err;
7896 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7897 			if (kstrtoint(opt + 10, 0, &eee_timer))
7898 				goto err;
7899 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7900 			if (kstrtoint(opt + 11, 0, &chain_mode))
7901 				goto err;
7902 		}
7903 	}
7904 	return 1;
7905 
7906 err:
7907 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7908 	return 1;
7909 }
7910 
7911 __setup("stmmaceth=", stmmac_cmdline_opt);
7912 #endif /* MODULE */
7913 
7914 static int __init stmmac_init(void)
7915 {
7916 #ifdef CONFIG_DEBUG_FS
7917 	/* Create debugfs main directory if it doesn't exist yet */
7918 	if (!stmmac_fs_dir)
7919 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7920 	register_netdevice_notifier(&stmmac_notifier);
7921 #endif
7922 
7923 	return 0;
7924 }
7925 
7926 static void __exit stmmac_exit(void)
7927 {
7928 #ifdef CONFIG_DEBUG_FS
7929 	unregister_netdevice_notifier(&stmmac_notifier);
7930 	debugfs_remove_recursive(stmmac_fs_dir);
7931 #endif
7932 }
7933 
7934 module_init(stmmac_init)
7935 module_exit(stmmac_exit)
7936 
7937 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7938 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7939 MODULE_LICENSE("GPL");
7940